author    Eric Dumazet <edumazet@google.com>    2014-10-03 18:31:07 -0400
committer David S. Miller <davem@davemloft.net> 2014-10-03 18:36:11 -0400
commit    55a93b3ea780908b7d1b3a8cf1976223a9268d78 (patch)
tree      2c33ad6aaf7a604185bff5659d64895f5cf9a348 /net/core
parent    6a05880a8b22c6ba2ffdabbceb4635d28abe9072 (diff)
qdisc: validate skb without holding lock
Validation of an skb can be pretty expensive: GSO segmentation and/or checksum computations.

We can do this without holding the qdisc lock, so that other cpus can queue additional packets.

The trick is that requeued packets were already validated, so we carry a boolean letting sch_direct_xmit() either validate a fresh skb list or directly use an old one.

Tested on a 40Gb NIC (8 TX queues) with 200 concurrent flows on a 48-thread host. Turning TSO on or off had no effect on throughput, only a few more cpu cycles. Lock contention on the qdisc lock disappeared. Same when disabling TX checksum offload.

Signed-off-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
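The sch_direct_xmit() side of this change lives in net/sched/sch_generic.c and is therefore outside the net/core-limited diff shown below. As a rough sketch of how the new boolean is meant to be consumed (the extra argument and validate_xmit_skb_list() are confirmed by the hunks below; the surrounding details are an assumption, not the literal patch):

/* Sketch only -- assumed shape of the net/sched/sch_generic.c side.
 * Confirmed by this diff: the extra bool argument and the new
 * validate_xmit_skb_list() helper.  Everything else is illustrative.
 */
int sch_direct_xmit(struct sk_buff *skb, struct Qdisc *q,
		    struct net_device *dev, struct netdev_queue *txq,
		    spinlock_t *root_lock, bool validate)
{
	int ret = NETDEV_TX_BUSY;

	/* Drop the qdisc lock first: GSO segmentation and checksum
	 * computation can run while other cpus enqueue packets.
	 */
	spin_unlock(root_lock);

	/* Requeued packets were already validated (validate == false);
	 * only a fresh skb list is segmented/checksummed here.
	 */
	if (validate)
		skb = validate_xmit_skb_list(skb, dev);

	if (skb) {
		HARD_TX_LOCK(dev, txq, smp_processor_id());
		if (!netif_xmit_frozen_or_stopped(txq))
			skb = dev_hard_start_xmit(skb, dev, txq, &ret);
		HARD_TX_UNLOCK(dev, txq);
	}

	spin_lock(root_lock);

	/* Requeue-on-busy / return-value handling omitted in this sketch. */
	return ret;
}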
Diffstat (limited to 'net/core')
-rw-r--r--   net/core/dev.c   29
1 file changed, 26 insertions(+), 3 deletions(-)
diff --git a/net/core/dev.c b/net/core/dev.c
index e55c546717d4..1a90530f83ff 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -2655,7 +2655,7 @@ struct sk_buff *validate_xmit_vlan(struct sk_buff *skb, netdev_features_t features)
 	return skb;
 }
 
-struct sk_buff *validate_xmit_skb(struct sk_buff *skb, struct net_device *dev)
+static struct sk_buff *validate_xmit_skb(struct sk_buff *skb, struct net_device *dev)
 {
 	netdev_features_t features;
 
@@ -2720,6 +2720,30 @@ out_null:
 	return NULL;
 }
 
+struct sk_buff *validate_xmit_skb_list(struct sk_buff *skb, struct net_device *dev)
+{
+	struct sk_buff *next, *head = NULL, *tail;
+
+	while (skb) {
+		next = skb->next;
+		skb->next = NULL;
+		skb = validate_xmit_skb(skb, dev);
+		if (skb) {
+			struct sk_buff *end = skb;
+
+			while (end->next)
+				end = end->next;
+			if (!head)
+				head = skb;
+			else
+				tail->next = skb;
+			tail = end;
+		}
+		skb = next;
+	}
+	return head;
+}
+
 static void qdisc_pkt_len_init(struct sk_buff *skb)
 {
 	const struct skb_shared_info *shinfo = skb_shinfo(skb);
@@ -2786,8 +2810,7 @@ static inline int __dev_xmit_skb(struct sk_buff *skb, struct Qdisc *q,
 
 		qdisc_bstats_update(q, skb);
 
-		skb = validate_xmit_skb(skb, dev);
-		if (skb && sch_direct_xmit(skb, q, dev, txq, root_lock)) {
+		if (sch_direct_xmit(skb, q, dev, txq, root_lock, true)) {
 			if (unlikely(contended)) {
 				spin_unlock(&q->busylock);
 				contended = false;
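Note that __dev_xmit_skb() above can always pass true: it hands sch_direct_xmit() a fresh skb that has never been validated. The requeue path, where the boolean goes false, sits in net/sched/sch_generic.c and is not covered by this net/core-limited diffstat. A simplified, hypothetical sketch of that dequeue side (omitting the stopped-queue checks the real code has to perform):

/* Simplified sketch, not part of this diff: how the dequeue side can
 * tell sch_direct_xmit() whether validation is still needed.
 */
static struct sk_buff *dequeue_skb(struct Qdisc *q, bool *validate)
{
	struct sk_buff *skb = q->gso_skb;

	if (unlikely(skb)) {
		/* Taken back from the requeue area: this list already went
		 * through validate_xmit_skb_list() before the earlier,
		 * failed transmit attempt, so do not validate it again.
		 */
		q->gso_skb = NULL;
		q->q.qlen--;
		*validate = false;
	} else {
		/* Fresh packet from the qdisc: GSO/checksum validation is
		 * still pending and now happens outside the qdisc lock.
		 */
		skb = q->dequeue(q);
		*validate = true;
	}
	return skb;
}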