author     David S. Miller <davem@davemloft.net>  2014-08-22 19:21:53 -0400
committer  David S. Miller <davem@davemloft.net>  2014-08-25 02:02:45 -0400
commit     4798248e4e023170e937a65a1d30fcc52496dd42 (patch)
tree       c043fec145a1c2035f4c9e4ff7b4fbedc07bb1d5 /include/linux/netdevice.h
parent     4c83acbc565d53296f1731034c5041a0fbabcaeb (diff)
net: Add ops->ndo_xmit_flush()
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'include/linux/netdevice.h')
-rw-r--r--  include/linux/netdevice.h  35
1 file changed, 35 insertions(+), 0 deletions(-)
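The hunks below document the new hook, add it to struct net_device_ops, and introduce the dispatch helpers. To make the intent concrete before the diff, here is a hedged driver-side sketch, not part of this commit: the foo_* names, ring layout, and doorbell register are hypothetical. The idea is that ndo_start_xmit only posts descriptors, while the expensive MMIO doorbell write is deferred to ndo_xmit_flush:

#include <linux/netdevice.h>
#include <linux/io.h>

/* Hypothetical driver state -- illustrative only, not from this commit. */
struct foo_ring {
	u32		tail;		/* next free descriptor index */
	void __iomem	*doorbell;	/* MMIO tail register of this queue */
};

struct foo_priv {
	struct foo_ring	tx_ring[8];	/* one ring per TX queue */
};

static netdev_tx_t foo_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct foo_priv *priv = netdev_priv(dev);
	struct foo_ring *ring = &priv->tx_ring[skb->queue_mapping];

	/* DMA-map the skb and fill a descriptor (elided), advance the
	 * software tail, but do not touch the doorbell yet.
	 */
	ring->tail++;
	return NETDEV_TX_OK;
}

static void foo_xmit_flush(struct net_device *dev, u16 queue)
{
	struct foo_priv *priv = netdev_priv(dev);
	struct foo_ring *ring = &priv->tx_ring[queue];

	/* One expensive MMIO write now covers every descriptor posted
	 * to this queue since the last flush.
	 */
	writel(ring->tail, ring->doorbell);
}

static const struct net_device_ops foo_netdev_ops = {
	.ndo_start_xmit	= foo_start_xmit,
	.ndo_xmit_flush	= foo_xmit_flush,
};

The payoff of such a split is amortization: one uncached MMIO write can cover several queued packets instead of being paid on every transmit.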
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index eb73444e1bd0..220c50984688 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -782,6 +782,19 @@ typedef u16 (*select_queue_fallback_t)(struct net_device *dev,
  *	(can also return NETDEV_TX_LOCKED iff NETIF_F_LLTX)
  *	Required can not be NULL.
  *
+ * void (*ndo_xmit_flush)(struct net_device *dev, u16 queue);
+ *	A driver implements this function when it wishes to support
+ *	deferred TX queue flushing. The idea is that the expensive
+ *	operation to trigger TX queue processing can be done after
+ *	N calls to ndo_start_xmit rather than being done every single
+ *	time. In this regime ndo_start_xmit will be called one or more
+ *	times, and then a final ndo_xmit_flush call will be made to
+ *	have the driver tell the device about the new pending TX queue
+ *	entries. The kernel keeps track of which queues need flushing
+ *	by monitoring skb->queue_mapping of the packets it submits to
+ *	ndo_start_xmit. This is the queue value that will be passed
+ *	to ndo_xmit_flush.
+ *
  * u16 (*ndo_select_queue)(struct net_device *dev, struct sk_buff *skb,
  *                         void *accel_priv, select_queue_fallback_t fallback);
  *	Called to decide which queue to when device supports multiple
@@ -1005,6 +1018,7 @@ struct net_device_ops {
 	int			(*ndo_stop)(struct net_device *dev);
 	netdev_tx_t		(*ndo_start_xmit) (struct sk_buff *skb,
 						   struct net_device *dev);
+	void			(*ndo_xmit_flush)(struct net_device *dev, u16 queue);
 	u16			(*ndo_select_queue)(struct net_device *dev,
 						    struct sk_buff *skb,
 						    void *accel_priv,
@@ -3430,6 +3444,27 @@ int __init dev_proc_init(void);
 #define dev_proc_init() 0
 #endif
 
+static inline netdev_tx_t __netdev_start_xmit(const struct net_device_ops *ops,
+					      struct sk_buff *skb, struct net_device *dev)
+{
+	netdev_tx_t ret;
+	u16 q;
+
+	q = skb->queue_mapping;
+	ret = ops->ndo_start_xmit(skb, dev);
+	if (dev_xmit_complete(ret) && ops->ndo_xmit_flush)
+		ops->ndo_xmit_flush(dev, q);
+
+	return ret;
+}
+
+static inline netdev_tx_t netdev_start_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+	const struct net_device_ops *ops = dev->netdev_ops;
+
+	return __netdev_start_xmit(ops, skb, dev);
+}
+
 int netdev_class_create_file_ns(struct class_attribute *class_attr,
 				const void *ns);
 void netdev_class_remove_file_ns(struct class_attribute *class_attr,
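Two details of the helpers above are worth noting. First, skb->queue_mapping is saved into q before ndo_start_xmit() runs: on a successful transmit the driver takes ownership of the skb and may free it, so it cannot be dereferenced afterwards. Second, as merged here __netdev_start_xmit() flushes after every successful transmit, so existing drivers behave unchanged; the "N calls then one flush" regime described in the documentation comment would let a future caller batch, roughly along these lines (a sketch under that assumption, not code from this commit):

/* Sketch of the deferred regime the ndo_xmit_flush comment describes;
 * xmit_batch_sketch() is a hypothetical helper, and all skbs in the
 * batch are assumed to map to the same TX queue.
 */
static netdev_tx_t xmit_batch_sketch(struct sk_buff_head *batch,
				     struct net_device *dev, u16 queue)
{
	const struct net_device_ops *ops = dev->netdev_ops;
	netdev_tx_t ret = NETDEV_TX_OK;
	struct sk_buff *skb;

	while ((skb = __skb_dequeue(batch)) != NULL) {
		ret = ops->ndo_start_xmit(skb, dev);
		if (!dev_xmit_complete(ret)) {
			/* Driver refused the skb; put it back so the
			 * caller can requeue (full handling elided).
			 */
			__skb_queue_head(batch, skb);
			break;
		}
	}
	/* A single flush covers every packet accepted above. */
	if (ops->ndo_xmit_flush)
		ops->ndo_xmit_flush(dev, queue);
	return ret;
}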