
[net-next,2/5] net_sched: sch_fq: change fq_flow size/layout

Message ID 20200503025422.219257-3-edumazet@google.com
State New
Series net_sched: sch_fq: round of optimizations

Commit Message

Eric Dumazet May 3, 2020, 2:54 a.m. UTC
sizeof(struct fq_flow) is 112 bytes on 64bit arches.

This means that half of them span two cache lines, while the other
half span three cache lines.
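
The arithmetic can be checked with a standalone userspace sketch
(plain C, not kernel code; it assumes 64-byte cache lines and objects
packed back to back with no slab padding):

#include <stdio.h>

#define CACHE_LINE 64
#define OBJ_SIZE   112	/* pre-patch sizeof(struct fq_flow) on 64bit */

int main(void)
{
	for (unsigned int i = 0; i < 8; i++) {
		unsigned int start = i * OBJ_SIZE;
		unsigned int end = start + OBJ_SIZE - 1;
		unsigned int lines = end / CACHE_LINE - start / CACHE_LINE + 1;

		printf("object %u: bytes [%4u, %4u] -> %u cache lines\n",
		       i, start, end, lines);
	}
	return 0;
}

The output repeats the pattern 2, 3, 3, 2: half of the flows spill
into a third cache line.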

This patch adds cache line alignment, and makes sure that only
the first cache line is touched by fq_enqueue(), which is generally
more expensive than fq_dequeue().

Signed-off-by: Eric Dumazet <edumazet@google.com>
---
 net/sched/sch_fq.c | 9 +++++++--
 1 file changed, 7 insertions(+), 2 deletions(-)

Patch

diff --git a/net/sched/sch_fq.c b/net/sched/sch_fq.c
index 1649928fe2c1b7476050e5eee3c494c76d114c62..7a2b3195938ede3c14c37b90c9604185cfa3f651 100644
--- a/net/sched/sch_fq.c
+++ b/net/sched/sch_fq.c
@@ -66,6 +66,7 @@  static inline struct fq_skb_cb *fq_skb_cb(struct sk_buff *skb)
  * in linear list (head,tail), otherwise are placed in a rbtree (t_root).
  */
 struct fq_flow {
+/* First cache line : used in fq_gc(), fq_enqueue(), fq_dequeue() */
 	struct rb_root	t_root;
 	struct sk_buff	*head;		/* list of skbs for this flow : first skb */
 	union {
@@ -74,14 +75,18 @@  struct fq_flow {
 	};
 	struct rb_node	fq_node;	/* anchor in fq_root[] trees */
 	struct sock	*sk;
+	u32		socket_hash;	/* sk_hash */
 	int		qlen;		/* number of packets in flow queue */
+
+/* Second cache line, used in fq_dequeue() */
 	int		credit;
-	u32		socket_hash;	/* sk_hash */
+	/* 32bit hole on 64bit arches */
+
 	struct fq_flow *next;		/* next pointer in RR lists */
 
 	struct rb_node  rate_node;	/* anchor in q->delayed tree */
 	u64		time_next_packet;
-};
+} ____cacheline_aligned_in_smp;
 
 struct fq_flow_head {
 	struct fq_flow *first;
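
For reference, the new layout can be mocked up in userspace to check
where the cache line boundary falls. This is a sketch, not the kernel
struct itself: mock_rb_root/mock_rb_node copy the 8- and 24-byte sizes
of their kernel counterparts on 64bit, __attribute__((aligned(64)))
stands in for ____cacheline_aligned_in_smp, and the offsets assume a
typical LP64 ABI such as x86-64:

#include <assert.h>
#include <stddef.h>
#include <stdint.h>

struct mock_rb_root { void *rb_node; };				/* 8 bytes */
struct mock_rb_node { unsigned long pc; void *right, *left; };	/* 24 bytes */

struct mock_fq_flow {
	/* First cache line: t_root .. qlen fill bytes 0..63 exactly */
	struct mock_rb_root	t_root;
	void			*head;
	void			*tail;	/* a union with 'age' in the kernel */
	struct mock_rb_node	fq_node;
	void			*sk;
	uint32_t		socket_hash;
	int			qlen;
	/* Second cache line */
	int			credit;
	/* 32bit hole on 64bit arches */
	struct mock_fq_flow	*next;
	struct mock_rb_node	rate_node;
	uint64_t		time_next_packet;
} __attribute__((aligned(64)));

static_assert(offsetof(struct mock_fq_flow, credit) == 64,
	      "credit should open the second cache line");
static_assert(sizeof(struct mock_fq_flow) == 128,
	      "an aligned flow occupies exactly two cache lines");

int main(void) { return 0; }

In the kernel tree, pahole -C fq_flow on the built sch_fq.o can
confirm the same hole and boundaries for the real struct.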