author    Eric Dumazet <dada1@cosmosbay.com>    2009-03-20 04:33:32 -0400
committer David S. Miller <davem@davemloft.net> 2009-03-20 04:33:32 -0400
commit    5e140dfc1fe87eae27846f193086724806b33c7d
tree      a38240220f71ac128576cd8f432248442bac2deb
parent    408896d508794c98a2ac6b6e1dcd7a4888a4d32b
net: reorder struct Qdisc for better SMP performance
dev_queue_xmit() needs to dirty the fields "state", "q", "bstats" and "qstats".

On x86_64, these currently span three cache lines, causing more cache line
ping-pong than necessary and keeping the queue spinlock held longer. We can
reduce this to one cache line by grouping all read-mostly fields at the
beginning of the structure (or should I say, all highly modified fields at
the end :) ).

Before patch :

offsetof(struct Qdisc, state)=0x38
offsetof(struct Qdisc, q)=0x48
offsetof(struct Qdisc, bstats)=0x80
offsetof(struct Qdisc, qstats)=0x90
sizeof(struct Qdisc)=0xc8

After patch :

offsetof(struct Qdisc, state)=0x80
offsetof(struct Qdisc, q)=0x88
offsetof(struct Qdisc, bstats)=0xa0
offsetof(struct Qdisc, qstats)=0xac
sizeof(struct Qdisc)=0xc0

Signed-off-by: Eric Dumazet <dada1@cosmosbay.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
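The before/after numbers come straight from offsetof()/sizeof() on struct Qdisc. The same kind of layout check can be reproduced with a short userspace program; the sketch below is only an illustration, using hypothetical stand-in types (qdisc_like, basic_stats, queue_stats) rather than the real kernel definitions, and it shows how grouping the write-hot fields last keeps them contiguous at the tail of the structure.

/*
 * Minimal userspace sketch, not the kernel code: hypothetical stand-in
 * types are used instead of the real kernel definitions.  It only
 * demonstrates how offsets like the ones quoted above are obtained with
 * offsetof()/sizeof(), and that the write-hot fields placed at the end
 * of the struct end up in one contiguous block.
 */
#include <stdio.h>
#include <stddef.h>

/* stand-ins for gnet_stats_basic / gnet_stats_queue */
struct basic_stats { unsigned long long bytes; unsigned int packets; } __attribute__ ((packed));
struct queue_stats { unsigned int qlen, backlog, drops, requeues, overlimits; };

struct qdisc_like {
        /* read-mostly fields first */
        void                    *ops;
        void                    *stab;
        unsigned int            handle;
        unsigned int            parent;
        /* highly modified fields grouped at the end */
        unsigned long           state;
        void                    *q_head, *q_tail;  /* stand-in for sk_buff_head */
        struct basic_stats      bstats;
        struct queue_stats      qstats;
};

int main(void)
{
        printf("offsetof(state)  = %#zx\n", offsetof(struct qdisc_like, state));
        printf("offsetof(q_head) = %#zx\n", offsetof(struct qdisc_like, q_head));
        printf("offsetof(bstats) = %#zx\n", offsetof(struct qdisc_like, bstats));
        printf("offsetof(qstats) = %#zx\n", offsetof(struct qdisc_like, qstats));
        printf("sizeof           = %#zx\n", sizeof(struct qdisc_like));
        return 0;
}

In the patched kernel struct the quoted offsets show the same effect: state through the end of qstats occupy 0x80..0xc0, a single 64-byte span, whereas before the patch those four fields were spread from 0x38 to 0x90 across three cache lines.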
-rw-r--r--  include/linux/gen_stats.h |  2
-rw-r--r--  include/net/sch_generic.h | 21
2 files changed, 13 insertions(+), 10 deletions(-)
diff --git a/include/linux/gen_stats.h b/include/linux/gen_stats.h
index 13f4e74609ac..0ffa41df0ee8 100644
--- a/include/linux/gen_stats.h
+++ b/include/linux/gen_stats.h
@@ -22,7 +22,7 @@ struct gnet_stats_basic
 {
        __u64   bytes;
        __u32   packets;
-};
+} __attribute__ ((packed));
 
 /**
  * struct gnet_stats_rate_est - rate estimator
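The packed attribute is not cosmetic here: without it, the __u32 after the __u64 leaves gnet_stats_basic padded out to 16 bytes on x86_64, while packed it is 12 bytes, which matches the offsets quoted in the commit message (qstats at 0xac, 12 bytes after bstats at 0xa0). A quick stand-alone check, using local stand-in copies of the struct rather than the kernel header:

#include <stdio.h>

/* stand-in copies of gnet_stats_basic, with and without the attribute */
struct basic_padded { unsigned long long bytes; unsigned int packets; };
struct basic_packed { unsigned long long bytes; unsigned int packets; } __attribute__ ((packed));

int main(void)
{
        /* prints 16 and 12 on x86_64: packing drops the tail padding */
        printf("default: %zu bytes, packed: %zu bytes\n",
               sizeof(struct basic_padded), sizeof(struct basic_packed));
        return 0;
}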
diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h
index 3d78a4d22460..964ffa0d8815 100644
--- a/include/net/sch_generic.h
+++ b/include/net/sch_generic.h
@@ -49,18 +49,10 @@ struct Qdisc
 	int			padded;
 	struct Qdisc_ops	*ops;
 	struct qdisc_size_table	*stab;
+	struct list_head	list;
 	u32			handle;
 	u32			parent;
 	atomic_t		refcnt;
-	unsigned long		state;
-	struct sk_buff		*gso_skb;
-	struct sk_buff_head	q;
-	struct netdev_queue	*dev_queue;
-	struct Qdisc		*next_sched;
-	struct list_head	list;
-
-	struct gnet_stats_basic	bstats;
-	struct gnet_stats_queue	qstats;
 	struct gnet_stats_rate_est	rate_est;
 	int			(*reshape_fail)(struct sk_buff *skb,
 					struct Qdisc *q);
@@ -71,6 +63,17 @@ struct Qdisc
 	 * and it will live until better solution will be invented.
 	 */
 	struct Qdisc		*__parent;
+	struct netdev_queue	*dev_queue;
+	struct Qdisc		*next_sched;
+
+	struct sk_buff		*gso_skb;
+	/*
+	 * For performance sake on SMP, we put highly modified fields at the end
+	 */
+	unsigned long		state;
+	struct sk_buff_head	q;
+	struct gnet_stats_basic	bstats;
+	struct gnet_stats_queue	qstats;
 };
 
 struct Qdisc_class_ops