author     Rob Herring <rob.herring@calxeda.com>     2013-08-30 17:49:22 -0400
committer  David S. Miller <davem@davemloft.net>     2013-09-03 22:21:15 -0400
commit     1a1d4d2f30a3c7e91bb7876df95baae363be5434 (patch)
tree       cf305dbbc642e118d61fa13d2865ef5b3a847df7 /drivers/net/ethernet/calxeda/xgmac.c
parent     8746f671ef04114ab25f5a35ec6219efbdf3703e (diff)
net: calxedaxgmac: fix possible skb free before tx complete
The TX completion code may have freed an skb before the entire sg list was
transmitted. The DMA unmap calls for the fragments could also get skipped.
Now set the skb pointer on every entry in the ring, not just the head of the
sg list. We then use the FS (first segment) bit in the descriptors to
determine skb head vs. fragment.

This also fixes a similar bug in xgmac_free_tx_skbufs where an sg list that
wraps at the end of the ring buffer would not get fully unmapped during
clean-up.

Signed-off-by: Rob Herring <rob.herring@calxeda.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
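For readers skimming the diff below, the bookkeeping scheme the patch moves to
can be restated in one place. The following is an illustrative sketch, not
driver source: the helpers it calls (desc_get_owner, desc_get_tx_fs,
desc_get_tx_ls, desc_get_tx_status, dma_ring_incr) are the ones visible in the
diff, while tx_complete_sketch() and ring_dirty() are stand-in names invented
for this summary; the driver's actual loop condition is the dma_ring_cnt()
test shown in the diff.

/*
 * Sketch of the patched completion walk.  With priv->tx_skbuff[] populated
 * for every descriptor of an sg list, each ring entry is self-describing:
 * the FS bit picks the unmap flavor, and the skb is freed exactly once, at
 * the LS entry.  ring_dirty() stands in for the dma_ring_cnt() check.
 */
static void tx_complete_sketch(struct xgmac_priv *priv)
{
	while (ring_dirty(priv)) {
		unsigned int entry = priv->tx_tail;
		struct sk_buff *skb = priv->tx_skbuff[entry];
		struct xgmac_dma_desc *p = priv->dma_tx + entry;

		if (desc_get_owner(p))		/* still owned by the DMA engine */
			break;

		if (desc_get_tx_fs(p))		/* first segment: linear skb data */
			dma_unmap_single(priv->device, desc_get_buf_addr(p),
					 desc_get_buf_len(p), DMA_TO_DEVICE);
		else				/* fragment: page-based data */
			dma_unmap_page(priv->device, desc_get_buf_addr(p),
				       desc_get_buf_len(p), DMA_TO_DEVICE);

		if (desc_get_tx_ls(p)) {	/* last segment: whole sg list sent */
			desc_get_tx_status(priv, p);	/* record any tx error */
			dev_kfree_skb(skb);		/* free the skb once */
		}

		priv->tx_skbuff[entry] = NULL;
		priv->tx_tail = dma_ring_incr(entry, DMA_TX_RING_SZ);
	}
}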
Diffstat (limited to 'drivers/net/ethernet/calxeda/xgmac.c')
-rw-r--r--  drivers/net/ethernet/calxeda/xgmac.c  55
1 file changed, 24 insertions(+), 31 deletions(-)
diff --git a/drivers/net/ethernet/calxeda/xgmac.c b/drivers/net/ethernet/calxeda/xgmac.c
index df7e3e2579f1..64854ad84b55 100644
--- a/drivers/net/ethernet/calxeda/xgmac.c
+++ b/drivers/net/ethernet/calxeda/xgmac.c
@@ -470,6 +470,11 @@ static inline int desc_get_tx_ls(struct xgmac_dma_desc *p)
 	return le32_to_cpu(p->flags) & TXDESC_LAST_SEG;
 }
 
+static inline int desc_get_tx_fs(struct xgmac_dma_desc *p)
+{
+	return le32_to_cpu(p->flags) & TXDESC_FIRST_SEG;
+}
+
 static inline u32 desc_get_buf_addr(struct xgmac_dma_desc *p)
 {
 	return le32_to_cpu(p->buf1_addr);
@@ -796,7 +801,7 @@ static void xgmac_free_rx_skbufs(struct xgmac_priv *priv)
 
 static void xgmac_free_tx_skbufs(struct xgmac_priv *priv)
 {
-	int i, f;
+	int i;
 	struct xgmac_dma_desc *p;
 
 	if (!priv->tx_skbuff)
@@ -807,16 +812,15 @@ static void xgmac_free_tx_skbufs(struct xgmac_priv *priv)
 			continue;
 
 		p = priv->dma_tx + i;
-		dma_unmap_single(priv->device, desc_get_buf_addr(p),
-				 desc_get_buf_len(p), DMA_TO_DEVICE);
-
-		for (f = 0; f < skb_shinfo(priv->tx_skbuff[i])->nr_frags; f++) {
-			p = priv->dma_tx + i++;
+		if (desc_get_tx_fs(p))
+			dma_unmap_single(priv->device, desc_get_buf_addr(p),
+					 desc_get_buf_len(p), DMA_TO_DEVICE);
+		else
 			dma_unmap_page(priv->device, desc_get_buf_addr(p),
 				       desc_get_buf_len(p), DMA_TO_DEVICE);
-		}
 
-		dev_kfree_skb_any(priv->tx_skbuff[i]);
+		if (desc_get_tx_ls(p))
+			dev_kfree_skb_any(priv->tx_skbuff[i]);
 		priv->tx_skbuff[i] = NULL;
 	}
 }
@@ -853,8 +857,6 @@ static void xgmac_free_dma_desc_rings(struct xgmac_priv *priv)
  */
 static void xgmac_tx_complete(struct xgmac_priv *priv)
 {
-	int i;
-
 	while (dma_ring_cnt(priv->tx_head, priv->tx_tail, DMA_TX_RING_SZ)) {
 		unsigned int entry = priv->tx_tail;
 		struct sk_buff *skb = priv->tx_skbuff[entry];
@@ -864,33 +866,24 @@ static void xgmac_tx_complete(struct xgmac_priv *priv)
 		if (desc_get_owner(p))
 			break;
 
-		/* Verify tx error by looking at the last segment */
-		if (desc_get_tx_ls(p))
-			desc_get_tx_status(priv, p);
-
 		netdev_dbg(priv->dev, "tx ring: curr %d, dirty %d\n",
 			   priv->tx_head, priv->tx_tail);
 
-		dma_unmap_single(priv->device, desc_get_buf_addr(p),
-				 desc_get_buf_len(p), DMA_TO_DEVICE);
-
-		priv->tx_skbuff[entry] = NULL;
-		priv->tx_tail = dma_ring_incr(entry, DMA_TX_RING_SZ);
-
-		if (!skb) {
-			continue;
-		}
-
-		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
-			entry = priv->tx_tail = dma_ring_incr(priv->tx_tail,
-							      DMA_TX_RING_SZ);
-			p = priv->dma_tx + priv->tx_tail;
-
+		if (desc_get_tx_fs(p))
+			dma_unmap_single(priv->device, desc_get_buf_addr(p),
+					 desc_get_buf_len(p), DMA_TO_DEVICE);
+		else
 			dma_unmap_page(priv->device, desc_get_buf_addr(p),
 				       desc_get_buf_len(p), DMA_TO_DEVICE);
+
+		/* Check tx error on the last segment */
+		if (desc_get_tx_ls(p)) {
+			desc_get_tx_status(priv, p);
+			dev_kfree_skb(skb);
 		}
 
-		dev_kfree_skb(skb);
+		priv->tx_skbuff[entry] = NULL;
+		priv->tx_tail = dma_ring_incr(entry, DMA_TX_RING_SZ);
 	}
 
 	if (dma_ring_space(priv->tx_head, priv->tx_tail, DMA_TX_RING_SZ) >
@@ -1110,7 +1103,7 @@ static netdev_tx_t xgmac_xmit(struct sk_buff *skb, struct net_device *dev)
 
 		entry = dma_ring_incr(entry, DMA_TX_RING_SZ);
 		desc = priv->dma_tx + entry;
-		priv->tx_skbuff[entry] = NULL;
+		priv->tx_skbuff[entry] = skb;
 
 		desc_set_buf_addr_and_size(desc, paddr, len);
 		if (i < (nfrags - 1))
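
As a concrete illustration of the wrap-around case called out in the commit
message, the small standalone program below walks a 3-segment sg list across
the end of a hypothetical 4-entry ring. The ring size, the starting entry, and
the mask-based dma_ring_incr() definition are assumptions made for this demo,
not values taken from the driver.

#include <stdio.h>

#define DMA_TX_RING_SZ 4	/* hypothetical demo size; power of two */

/* Mask-based ring increment, a common definition for power-of-two rings. */
static unsigned int dma_ring_incr(unsigned int n, unsigned int size)
{
	return (n + 1) & (size - 1);
}

int main(void)
{
	unsigned int entry = 2;	/* FS descriptor of a 3-segment sg list */

	for (int seg = 0; seg < 3; seg++) {
		printf("segment %d -> ring entry %u\n", seg, entry);
		entry = dma_ring_incr(entry, DMA_TX_RING_SZ);	/* wraps 3 -> 0 */
	}
	/*
	 * Prints entries 2, 3, 0.  The old xgmac_free_tx_skbufs() advanced
	 * with a bare i++ instead, so a segment sitting past the wrap was
	 * computed at a non-wrapped index and never unmapped; the patched
	 * code handles each entry by its own FS/LS bits as it walks the ring.
	 */
	return 0;
}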