path: root/include/linux/netdevice.h
author	David S. Miller <davem@davemloft.net>	2014-08-25 18:51:53 -0400
committer	David S. Miller <davem@davemloft.net>	2014-08-25 19:29:42 -0400
commit	0b725a2ca61bedc33a2a63d0451d528b268cf975 (patch)
tree	efe818013ee258eeff23f83ca0c8d01b5117a316 /include/linux/netdevice.h
parent	44a52ffd6402a19544fb9dee081730d36d413202 (diff)
net: Remove ndo_xmit_flush netdev operation, use signalling instead.
As reported by Jesper Dangaard Brouer, for high packet rates the overhead of having another indirect call in the TX path is non-trivial. There is the indirect call itself, and then there is all of the reloading of state to refetch the tail pointer value and then write the device register.

Move to a more passive scheme, which requires only very light modifications to the device drivers.

The signal is a new skb->xmit_more value: if it is non-zero, more SKBs are pending to be transmitted on the same queue as the current SKB, and the driver may therefore elide the tail pointer update.

Right now skb->xmit_more is always zero.

Signed-off-by: David S. Miller <davem@davemloft.net>
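For illustration only, a minimal driver-side sketch of the pattern described above, assuming the kernel-internal declarations from <linux/netdevice.h>; the driver name, ring structure, and doorbell helpers below are hypothetical and are not part of this patch:

/* Hypothetical driver: post the packet to the TX ring, but elide the
 * expensive doorbell/tail-pointer write while the stack signals that
 * more packets for this queue will follow.
 */
static netdev_tx_t my_drv_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct my_drv_ring *ring = my_drv_get_ring(dev, skb->queue_mapping);

	my_drv_post_descriptor(ring, skb);	/* queue the packet on the TX ring */

	/* Only touch the device register when no further packets are pending. */
	if (!skb->xmit_more)
		writel(ring->tail, ring->doorbell);

	return NETDEV_TX_OK;
}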
Diffstat (limited to 'include/linux/netdevice.h')
-rw-r--r--	include/linux/netdevice.h	25
1 file changed, 2 insertions(+), 23 deletions(-)
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index 220c50984688..039b23786c22 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -782,19 +782,6 @@ typedef u16 (*select_queue_fallback_t)(struct net_device *dev,
  *	(can also return NETDEV_TX_LOCKED iff NETIF_F_LLTX)
  *	Required can not be NULL.
  *
- * void (*ndo_xmit_flush)(struct net_device *dev, u16 queue);
- *	A driver implements this function when it wishes to support
- *	deferred TX queue flushing. The idea is that the expensive
- *	operation to trigger TX queue processing can be done after
- *	N calls to ndo_start_xmit rather than being done every single
- *	time. In this regime ndo_start_xmit will be called one or more
- *	times, and then a final ndo_xmit_flush call will be made to
- *	have the driver tell the device about the new pending TX queue
- *	entries. The kernel keeps track of which queues need flushing
- *	by monitoring skb->queue_mapping of the packets it submits to
- *	ndo_start_xmit. This is the queue value that will be passed
- *	to ndo_xmit_flush.
- *
  * u16 (*ndo_select_queue)(struct net_device *dev, struct sk_buff *skb,
  *                         void *accel_priv, select_queue_fallback_t fallback);
  *	Called to decide which queue to when device supports multiple
@@ -1018,7 +1005,6 @@ struct net_device_ops {
 	int			(*ndo_stop)(struct net_device *dev);
 	netdev_tx_t		(*ndo_start_xmit) (struct sk_buff *skb,
 						   struct net_device *dev);
-	void			(*ndo_xmit_flush)(struct net_device *dev, u16 queue);
 	u16			(*ndo_select_queue)(struct net_device *dev,
 						    struct sk_buff *skb,
 						    void *accel_priv,
@@ -3447,15 +3433,8 @@ int __init dev_proc_init(void);
 static inline netdev_tx_t __netdev_start_xmit(const struct net_device_ops *ops,
 					      struct sk_buff *skb, struct net_device *dev)
 {
-	netdev_tx_t ret;
-	u16 q;
-
-	q = skb->queue_mapping;
-	ret = ops->ndo_start_xmit(skb, dev);
-	if (dev_xmit_complete(ret) && ops->ndo_xmit_flush)
-		ops->ndo_xmit_flush(dev, q);
-
-	return ret;
+	skb->xmit_more = 0;
+	return ops->ndo_start_xmit(skb, dev);
 }
 
 static inline netdev_tx_t netdev_start_xmit(struct sk_buff *skb, struct net_device *dev)
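The helper now simply clears the flag before each transmit. As a hedged sketch of how a future batching caller could exploit the signal (this function is hypothetical and not part of this commit, which always leaves xmit_more at zero), it would set skb->xmit_more on every packet of a batch except the last before handing each to ndo_start_xmit:

/* Hypothetical batching caller (not in this patch): mark every SKB except
 * the last one so the driver may defer its tail-pointer update until the
 * final packet of the batch.
 */
static netdev_tx_t send_skb_batch(const struct net_device_ops *ops,
				  struct sk_buff **skbs, int n,
				  struct net_device *dev)
{
	netdev_tx_t ret = NETDEV_TX_OK;
	int i;

	for (i = 0; i < n; i++) {
		skbs[i]->xmit_more = (i != n - 1);
		ret = ops->ndo_start_xmit(skbs[i], dev);
		if (!dev_xmit_complete(ret))
			break;	/* driver could not take the packet */
	}
	return ret;
}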