path: root/include/linux/netdevice.h
author    Eric Dumazet <edumazet@google.com>    2014-10-03 18:31:07 -0400
committer David S. Miller <davem@davemloft.net>    2014-10-03 18:36:11 -0400
commit    55a93b3ea780908b7d1b3a8cf1976223a9268d78 (patch)
tree      2c33ad6aaf7a604185bff5659d64895f5cf9a348 /include/linux/netdevice.h
parent    6a05880a8b22c6ba2ffdabbceb4635d28abe9072 (diff)
qdisc: validate skb without holding lock
Validation of skb can be pretty expensive: GSO segmentation and/or checksum
computations.

We can do this without holding qdisc lock, so that other cpus can queue
additional packets.

Trick is that requeued packets were already validated, so we carry a boolean
so that sch_direct_xmit() can validate a fresh skb list, or directly use an
old one.

Tested on 40Gb NIC (8 TX queues) and 200 concurrent flows, 48 threads host.

Turning TSO on or off had no effect on throughput, only few more cpu cycles.
Lock contention on qdisc lock disappeared.

Same if disabling TX checksum offload.

Signed-off-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
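The pattern the commit message describes can be illustrated with a minimal,
runnable user-space C analogue: the expensive per-packet work happens before
the queue lock is taken, and a flag lets already-validated (requeued) packets
skip it. All names below are hypothetical stand-ins; this is a sketch of the
locking pattern, not kernel code.

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-in for an skb. */
struct pkt {
	struct pkt *next;
	bool validated;	/* set once GSO/checksum-style work is done */
};

static pthread_mutex_t txq_lock = PTHREAD_MUTEX_INITIALIZER;

/* Expensive work (segmentation, checksums) modeled as a flag flip. */
static void validate_pkt(struct pkt *p)
{
	p->validated = true;
}

/*
 * Mirrors the shape described in the commit message: fresh packets are
 * validated *before* the lock is taken, so other threads can enqueue in
 * the meantime; requeued packets arrive with validated == true and go
 * straight to transmission.
 */
static void direct_xmit(struct pkt *p, bool validate)
{
	if (validate)
		validate_pkt(p);	/* no lock held here */

	pthread_mutex_lock(&txq_lock);
	/* ... hand the already-validated packet to the "driver" ... */
	pthread_mutex_unlock(&txq_lock);
}

int main(void)
{
	struct pkt fresh = { .next = NULL, .validated = false };
	struct pkt requeued = { .next = NULL, .validated = true };

	direct_xmit(&fresh, true);	/* fresh packet: validate first */
	direct_xmit(&requeued, false);	/* requeued: skip validation */
	printf("fresh validated: %d\n", fresh.validated);
	return 0;
}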
Diffstat (limited to 'include/linux/netdevice.h')
 include/linux/netdevice.h | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index 9b7fbacb6296..910fb17ad148 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -2821,7 +2821,7 @@ int dev_set_mac_address(struct net_device *, struct sockaddr *);
 int dev_change_carrier(struct net_device *, bool new_carrier);
 int dev_get_phys_port_id(struct net_device *dev,
 			 struct netdev_phys_port_id *ppid);
-struct sk_buff *validate_xmit_skb(struct sk_buff *skb, struct net_device *dev);
+struct sk_buff *validate_xmit_skb_list(struct sk_buff *skb, struct net_device *dev);
 struct sk_buff *dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
 				    struct netdev_queue *txq, int *ret);
 int __dev_forward_skb(struct net_device *dev, struct sk_buff *skb);
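For context, the commit message explains how the renamed
validate_xmit_skb_list() declaration above is consumed: sch_direct_xmit()
carries a validate boolean, validating a fresh skb list outside the qdisc
lock and reusing an already-validated requeued one. The fragment below is a
rough reconstruction of that call shape from the description; it is not part
of this header diff, and the exact kernel code may differ.

	/* Reconstruction, not from this diff: validation runs with no
	 * qdisc lock held, so other cpus can queue packets meanwhile.
	 * Requeued packets pass validate == false and skip the work.
	 */
	if (likely(validate))
		skb = validate_xmit_skb_list(skb, dev);

	if (skb) {
		HARD_TX_LOCK(dev, txq, smp_processor_id());
		if (!netif_xmit_frozen_or_stopped(txq))
			skb = dev_hard_start_xmit(skb, dev, txq, &ret);
		HARD_TX_UNLOCK(dev, txq);
	}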