author	Rob Herring <rob.herring@calxeda.com>	2013-08-30 17:49:24 -0400
committer	David S. Miller <davem@davemloft.net>	2013-09-03 22:21:15 -0400
commit	cbe157b60c8e70d4b4dc937dfdd39525d8f47b46 (patch)
tree	8b60f94561cd4f481060bfc19b4e39a5e5a93bd1 /drivers/net/ethernet/calxeda
parent	ca32723afedd65d612029705446bf44bde5eab14 (diff)
net: calxedaxgmac: fix race with tx queue stop/wake
Since the xgmac transmit start and completion work locklessly, it is
possible for xgmac_xmit to stop the tx queue after xgmac_tx_complete has
run, resulting in the tx queue never being woken up. Fix this by ensuring
that ring buffer index updates are visible and by rechecking the ring
space after stopping the queue.

Also fix an off-by-one bug: the queue needs to be stopped when the ring
buffer space is equal to MAX_SKB_FRAGS, not only when it drops below it.
The implementation used here was copied from
drivers/net/ethernet/broadcom/tg3.c.

Signed-off-by: Rob Herring <rob.herring@calxeda.com>
Reviewed-by: Ben Hutchings <bhutchings@solarflare.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
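The pattern borrowed from tg3 is the standard lockless stop/wake handshake:
each side publishes its ring index, issues a full memory barrier, then
rechecks the other side's state, and the producer re-tests the ring space
after stopping the queue so a concurrent completion cannot leave it stopped
forever. A minimal userspace sketch of that handshake, using C11 atomics as
stand-ins for the kernel primitives (RING_SZ, MAX_FRAGS, and all names below
are hypothetical; the driver itself uses smp_mb(), netif_stop_queue(),
netif_wake_queue(), and CIRC_SPACE()):

	#include <stdatomic.h>
	#include <stdbool.h>

	#define RING_SZ   256	/* power of two, like DMA_TX_RING_SZ */
	#define MAX_FRAGS 17	/* stands in for MAX_SKB_FRAGS */

	static _Atomic unsigned int head, tail;	/* producer/consumer indexes */
	static atomic_bool stopped;		/* models the stopped tx queue */

	/* CIRC_SPACE(head, tail, size) equivalent for a power-of-two ring. */
	static unsigned int ring_space(void)
	{
		return (atomic_load(&tail) - atomic_load(&head) - 1) &
		       (RING_SZ - 1);
	}

	/* Producer, as in xgmac_xmit(): publish the new head, stop if a
	 * worst-case packet might not fit, then recheck after stopping. */
	static void produce_one(void)
	{
		atomic_fetch_add(&head, 1);	/* descriptor handed to hw */

		atomic_thread_fence(memory_order_seq_cst);	/* smp_mb() */
		if (ring_space() <= MAX_FRAGS) {
			atomic_store(&stopped, true);	/* netif_stop_queue() */
			/* A completion may have freed space between the
			 * check and the stop; recheck so the queue is not
			 * left stopped with no one to wake it. */
			atomic_thread_fence(memory_order_seq_cst); /* smp_mb() */
			if (ring_space() > MAX_FRAGS)
				atomic_store(&stopped, false);
		}
	}

	/* Consumer, as in xgmac_tx_complete(): retire a descriptor, then
	 * wake the queue only if it is stopped and space is available. */
	static void consume_one(void)
	{
		atomic_fetch_add(&tail, 1);	/* descriptor reclaimed */

		atomic_thread_fence(memory_order_seq_cst);	/* smp_mb() */
		if (atomic_load(&stopped) && ring_space() > MAX_FRAGS)
			atomic_store(&stopped, false);	/* netif_wake_queue() */
	}

	int main(void)
	{
		/* Single-threaded smoke test: fill until the queue stops,
		 * then drain and confirm the consumer wakes it again. */
		while (!atomic_load(&stopped))
			produce_one();
		while (atomic_load(&stopped))
			consume_one();
		return 0;
	}

The full barrier on each side is what closes the race: the producer's stop
is guaranteed to be visible to any completion that subsequently frees enough
space, and the completion's tail update is visible to the producer's recheck.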
Diffstat (limited to 'drivers/net/ethernet/calxeda')
-rw-r--r--	drivers/net/ethernet/calxeda/xgmac.c | 20
1 file changed, 15 insertions(+), 5 deletions(-)
diff --git a/drivers/net/ethernet/calxeda/xgmac.c b/drivers/net/ethernet/calxeda/xgmac.c
index f63085571241..5d0b61a5c72b 100644
--- a/drivers/net/ethernet/calxeda/xgmac.c
+++ b/drivers/net/ethernet/calxeda/xgmac.c
@@ -410,6 +410,9 @@ struct xgmac_priv {
 #define dma_ring_space(h, t, s)	CIRC_SPACE(h, t, s)
 #define dma_ring_cnt(h, t, s)	CIRC_CNT(h, t, s)
 
+#define tx_dma_ring_space(p) \
+	dma_ring_space((p)->tx_head, (p)->tx_tail, DMA_TX_RING_SZ)
+
 /* XGMAC Descriptor Access Helpers */
 static inline void desc_set_buf_len(struct xgmac_dma_desc *p, u32 buf_sz)
 {
@@ -886,8 +889,10 @@ static void xgmac_tx_complete(struct xgmac_priv *priv)
 		priv->tx_tail = dma_ring_incr(entry, DMA_TX_RING_SZ);
 	}
 
-	if (dma_ring_space(priv->tx_head, priv->tx_tail, DMA_TX_RING_SZ) >
-	    MAX_SKB_FRAGS)
+	/* Ensure tx_tail is visible to xgmac_xmit */
+	smp_mb();
+	if (unlikely(netif_queue_stopped(priv->dev) &&
+	    (tx_dma_ring_space(priv) > MAX_SKB_FRAGS)))
 		netif_wake_queue(priv->dev);
 }
 
@@ -1125,10 +1130,15 @@ static netdev_tx_t xgmac_xmit(struct sk_buff *skb, struct net_device *dev)
 
 	priv->tx_head = dma_ring_incr(entry, DMA_TX_RING_SZ);
 
-	if (dma_ring_space(priv->tx_head, priv->tx_tail, DMA_TX_RING_SZ) <
-	    MAX_SKB_FRAGS)
+	/* Ensure tx_head update is visible to tx completion */
+	smp_mb();
+	if (unlikely(tx_dma_ring_space(priv) <= MAX_SKB_FRAGS)) {
 		netif_stop_queue(dev);
-
+		/* Ensure netif_stop_queue is visible to tx completion */
+		smp_mb();
+		if (tx_dma_ring_space(priv) > MAX_SKB_FRAGS)
+			netif_start_queue(dev);
+	}
 	return NETDEV_TX_OK;
 }
 
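On the off-by-one itself: the old test stopped the queue only when fewer
than MAX_SKB_FRAGS descriptors remained, presumably too late because a
worst-case skb occupies one descriptor for its linear head plus one per
page fragment, i.e. MAX_SKB_FRAGS + 1 in total, so space exactly equal to
MAX_SKB_FRAGS is already insufficient. A back-of-the-envelope check of the
boundary (the value 17 for MAX_SKB_FRAGS is an assumed typical 4 KiB-page
configuration, not taken from this driver):

	#include <stdio.h>

	#define MAX_SKB_FRAGS 17	/* assumed typical value, 4 KiB pages */

	int main(void)
	{
		unsigned int worst_case = 1 + MAX_SKB_FRAGS; /* head + frags */
		unsigned int space;

		for (space = MAX_SKB_FRAGS - 1; space <= MAX_SKB_FRAGS + 1;
		     space++)
			printf("space=%2u old_stops=%d new_stops=%d fits=%d\n",
			       space,
			       space < MAX_SKB_FRAGS,  /* old: misses == case */
			       space <= MAX_SKB_FRAGS, /* new check */
			       space >= worst_case);
		return 0;
	}

At space == MAX_SKB_FRAGS the old check leaves the queue running even though
the next worst-case skb cannot fit, which is exactly the boundary the new
<= test closes.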