author	Russell King <rmk+kernel@arm.linux.org.uk>	2014-07-07 19:23:14 -0400
committer	David S. Miller <davem@davemloft.net>	2014-07-08 00:21:22 -0400
commit	730ee3602f300b5133717048859358a02251322f (patch)
tree	4b12c9f98f3f17a89be2f1467fb4a1200a3da90e /drivers/net/ethernet/freescale
parent	5d165c5543fbcbd26e443ee501063decb4ef73b4 (diff)
net: fec: make rx skb handling more robust
Allocate, and then map the receive skb before writing any data to the ring descriptor or storing the skb. When freeing the receive ring entries, unmap and free the skb, and then clear the stored skb pointer.

This means we have ring data and skb pointer in one of two states: either both fully setup, or nothing setup. This simplifies the cleanup, as we can use just the skb pointer to indicate whether the descriptor is setup, and thus avoids potentially calling dma_unmap_single() on a DMA error value.

Acked-by: Fugang Duan <B38611@freescale.com>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
Signed-off-by: David S. Miller <davem@davemloft.net>
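In condensed form, the ordering the patch establishes looks roughly like the sketch below (distilled from the diff that follows, not the literal resulting code; fep, bdp, rx_skbuff and the FEC_ENET_RX_FRSIZE constant are the driver's own names):

	/* alloc path: prepare skb and DMA mapping first, publish only on success */
	skb = netdev_alloc_skb(ndev, FEC_ENET_RX_FRSIZE);
	if (!skb)
		return -ENOMEM;
	addr = dma_map_single(&fep->pdev->dev, skb->data,
			      FEC_ENET_RX_FRSIZE, DMA_FROM_DEVICE);
	if (dma_mapping_error(&fep->pdev->dev, addr)) {
		dev_kfree_skb(skb);		/* nothing was published yet */
		return -ENOMEM;
	}
	fep->rx_skbuff[i] = skb;		/* descriptor and skb now both valid */
	bdp->cbd_bufaddr = addr;

	/* free path: the skb pointer alone says whether the entry is set up,
	 * so cbd_bufaddr is never a DMA error value when it is unmapped */
	skb = fep->rx_skbuff[i];
	fep->rx_skbuff[i] = NULL;
	if (skb) {
		dma_unmap_single(&fep->pdev->dev, bdp->cbd_bufaddr,
				 FEC_ENET_RX_FRSIZE, DMA_FROM_DEVICE);
		dev_kfree_skb(skb);
	}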
Diffstat (limited to 'drivers/net/ethernet/freescale')
-rw-r--r--	drivers/net/ethernet/freescale/fec_main.c	17
1 file changed, 11 insertions(+), 6 deletions(-)
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
index 309aa2ff8cc9..70853a59627a 100644
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -2066,12 +2066,12 @@ static void fec_enet_free_buffers(struct net_device *ndev)
 	bdp = fep->rx_bd_base;
 	for (i = 0; i < fep->rx_ring_size; i++) {
 		skb = fep->rx_skbuff[i];
-
-		if (bdp->cbd_bufaddr)
+		fep->rx_skbuff[i] = NULL;
+		if (skb) {
 			dma_unmap_single(&fep->pdev->dev, bdp->cbd_bufaddr,
 					 FEC_ENET_RX_FRSIZE, DMA_FROM_DEVICE);
-		if (skb)
 			dev_kfree_skb(skb);
+		}
 		bdp = fec_enet_get_nextdesc(bdp, fep);
 	}
 
@@ -2089,21 +2089,26 @@ static int fec_enet_alloc_buffers(struct net_device *ndev)
 
 	bdp = fep->rx_bd_base;
 	for (i = 0; i < fep->rx_ring_size; i++) {
+		dma_addr_t addr;
+
 		skb = netdev_alloc_skb(ndev, FEC_ENET_RX_FRSIZE);
 		if (!skb) {
 			fec_enet_free_buffers(ndev);
 			return -ENOMEM;
 		}
-		fep->rx_skbuff[i] = skb;
 
-		bdp->cbd_bufaddr = dma_map_single(&fep->pdev->dev, skb->data,
+		addr = dma_map_single(&fep->pdev->dev, skb->data,
 				FEC_ENET_RX_FRSIZE, DMA_FROM_DEVICE);
-		if (dma_mapping_error(&fep->pdev->dev, bdp->cbd_bufaddr)) {
+		if (dma_mapping_error(&fep->pdev->dev, addr)) {
+			dev_kfree_skb(skb);
 			fec_enet_free_buffers(ndev);
 			if (net_ratelimit())
 				netdev_err(ndev, "Rx DMA memory map failed\n");
 			return -ENOMEM;
 		}
+
+		fep->rx_skbuff[i] = skb;
+		bdp->cbd_bufaddr = addr;
 		bdp->cbd_sc = BD_ENET_RX_EMPTY;
 
 		if (fep->bufdesc_ex) {