author	David S. Miller <davem@davemloft.net>	2014-08-22 20:24:49 -0400
committer	David S. Miller <davem@davemloft.net>	2014-08-25 02:02:45 -0400
commit	c1ebf46c1f72fe542853fc00f059a7d15259379d (patch)
tree	cb9e753c7234a1566c3f3742be3793c00cadbcc2
parent	4798248e4e023170e937a65a1d30fcc52496dd42 (diff)
igb: Support netdev_ops->ndo_xmit_flush()
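
Defer the TX tail-register doorbell from igb_tx_map() to the new
ndo_xmit_flush() hook, so the MMIO write (and the mmiowb() that pairs
with it) is issued once per transmit batch rather than on every
->ndo_start_xmit() call.

For context, the core invokes the hook roughly like this (a sketch of
the netdev_start_xmit() helper introduced by the parent commit
4798248e4e02; see include/linux/netdevice.h in that patch for the
authoritative version):

	static inline netdev_tx_t netdev_start_xmit(struct sk_buff *skb,
						    struct net_device *dev)
	{
		const struct net_device_ops *ops = dev->netdev_ops;
		netdev_tx_t rc;

		/* Queue the descriptors first ... */
		rc = ops->ndo_start_xmit(skb, dev);

		/* ... then ring the device doorbell once, if the driver
		 * implements the flush hook.
		 */
		if (rc == NETDEV_TX_OK && ops->ndo_xmit_flush)
			ops->ndo_xmit_flush(dev, skb->queue_mapping);

		return rc;
	}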
Signed-off-by: David S. Miller <davem@davemloft.net>
-rw-r--r--	drivers/net/ethernet/intel/igb/igb_main.c	35
1 file changed, 24 insertions(+), 11 deletions(-)
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
index cb14bbdfb056..b9c020a05fb8 100644
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -136,6 +136,7 @@ static void igb_update_phy_info(unsigned long);
 static void igb_watchdog(unsigned long);
 static void igb_watchdog_task(struct work_struct *);
 static netdev_tx_t igb_xmit_frame(struct sk_buff *skb, struct net_device *);
+static void igb_xmit_flush(struct net_device *netdev, u16 queue);
 static struct rtnl_link_stats64 *igb_get_stats64(struct net_device *dev,
 					  struct rtnl_link_stats64 *stats);
 static int igb_change_mtu(struct net_device *, int);
@@ -2075,6 +2076,7 @@ static const struct net_device_ops igb_netdev_ops = {
 	.ndo_open		= igb_open,
 	.ndo_stop		= igb_close,
 	.ndo_start_xmit		= igb_xmit_frame,
+	.ndo_xmit_flush		= igb_xmit_flush,
 	.ndo_get_stats64	= igb_get_stats64,
 	.ndo_set_rx_mode	= igb_set_rx_mode,
 	.ndo_set_mac_address	= igb_set_mac,
@@ -4915,13 +4917,6 @@ static void igb_tx_map(struct igb_ring *tx_ring,
 
 	tx_ring->next_to_use = i;
 
-	writel(i, tx_ring->tail);
-
-	/* we need this if more than one processor can write to our tail
-	 * at a time, it synchronizes IO on IA64/Altix systems
-	 */
-	mmiowb();
-
 	return;
 
 dma_error:
@@ -5057,17 +5052,20 @@ out_drop:
 	return NETDEV_TX_OK;
 }
 
-static inline struct igb_ring *igb_tx_queue_mapping(struct igb_adapter *adapter,
-						    struct sk_buff *skb)
+static struct igb_ring *__igb_tx_queue_mapping(struct igb_adapter *adapter, unsigned int r_idx)
 {
-	unsigned int r_idx = skb->queue_mapping;
-
 	if (r_idx >= adapter->num_tx_queues)
 		r_idx = r_idx % adapter->num_tx_queues;
 
 	return adapter->tx_ring[r_idx];
 }
 
+static inline struct igb_ring *igb_tx_queue_mapping(struct igb_adapter *adapter,
+						    struct sk_buff *skb)
+{
+	return __igb_tx_queue_mapping(adapter, skb->queue_mapping);
+}
+
 static netdev_tx_t igb_xmit_frame(struct sk_buff *skb,
 				  struct net_device *netdev)
 {
@@ -5096,6 +5094,21 @@ static netdev_tx_t igb_xmit_frame(struct sk_buff *skb,
 	return igb_xmit_frame_ring(skb, igb_tx_queue_mapping(adapter, skb));
 }
 
+static void igb_xmit_flush(struct net_device *netdev, u16 queue)
+{
+	struct igb_adapter *adapter = netdev_priv(netdev);
+	struct igb_ring *tx_ring;
+
+	tx_ring = __igb_tx_queue_mapping(adapter, queue);
+
+	writel(tx_ring->next_to_use, tx_ring->tail);
+
+	/* we need this if more than one processor can write to our tail
+	 * at a time, it synchronizes IO on IA64/Altix systems
+	 */
+	mmiowb();
+}
+
 /**
  *  igb_tx_timeout - Respond to a Tx Hang
  *  @netdev: network interface device structure