about summary refs log tree commit diff stats
path: root/include/net/pkt_sched.h
diff options
context:
space:
mode:
authorEric Dumazet <edumazet@google.com>2014-10-03 18:31:07 -0400
committerDavid S. Miller <davem@davemloft.net>2014-10-03 18:36:11 -0400
commit55a93b3ea780908b7d1b3a8cf1976223a9268d78 (patch)
tree2c33ad6aaf7a604185bff5659d64895f5cf9a348 /include/net/pkt_sched.h
parent6a05880a8b22c6ba2ffdabbceb4635d28abe9072 (diff)
qdisc: validate skb without holding lock
Validation of skb can be pretty expensive: GSO segmentation and/or checksum computations. We can do this without holding qdisc lock, so that other cpus can queue additional packets. Trick is that requeued packets were already validated, so we carry a boolean so that sch_direct_xmit() can validate a fresh skb list, or directly use an old one. Tested on 40Gb NIC (8 TX queues) and 200 concurrent flows, 48 threads host. Turning TSO on or off had no effect on throughput, only few more cpu cycles. Lock contention on qdisc lock disappeared. Same if disabling TX checksum offload. Signed-off-by: Eric Dumazet <edumazet@google.com> Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'include/net/pkt_sched.h')
-rw-r--r--  include/net/pkt_sched.h  |  2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/include/net/pkt_sched.h b/include/net/pkt_sched.h
index 8bbe626e9ece..e4b3c828c1c2 100644
--- a/include/net/pkt_sched.h
+++ b/include/net/pkt_sched.h
@@ -99,7 +99,7 @@ void qdisc_put_stab(struct qdisc_size_table *tab);
 void qdisc_warn_nonwc(const char *txt, struct Qdisc *qdisc);
 int sch_direct_xmit(struct sk_buff *skb, struct Qdisc *q,
 		    struct net_device *dev, struct netdev_queue *txq,
-		    spinlock_t *root_lock);
+		    spinlock_t *root_lock, bool validate);
 
 void __qdisc_run(struct Qdisc *q);
 