about summary refs log tree commit diff stats
path: root/drivers/net/sundance.c
diff options
context:
space:
mode:
author Denis Kirjanov <dkirjanov@kernel.org> 2010-09-20 18:56:57 -0400
committer David S. Miller <davem@davemloft.net> 2010-09-21 21:04:46 -0400
commit d91dc27993a366565b3ed37fc62d35cf3e10ff5d (patch)
tree bc30bcd2ba1f2996ebda6c4273ad7086f8de9076 /drivers/net/sundance.c
parent 0c8a745f06f7f007ec492dc0606f5b1ea62f3da9 (diff)
sundance: Handle DMA mapping errors
Check for DMA mapping errors. Signed-off-by: Denis Kirjanov <dkirjanov@kernel.org> Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'drivers/net/sundance.c')
-rw-r--r-- drivers/net/sundance.c | 21
1 file changed, 21 insertions(+), 0 deletions(-)
diff --git a/drivers/net/sundance.c b/drivers/net/sundance.c
index 5b786ce2b5f1..0df8a2c23592 100644
--- a/drivers/net/sundance.c
+++ b/drivers/net/sundance.c
@@ -1018,6 +1018,12 @@ static void init_ring(struct net_device *dev)
1018 np->rx_ring[i].frag[0].addr = cpu_to_le32( 1018 np->rx_ring[i].frag[0].addr = cpu_to_le32(
1019 dma_map_single(&np->pci_dev->dev, skb->data, 1019 dma_map_single(&np->pci_dev->dev, skb->data,
1020 np->rx_buf_sz, DMA_FROM_DEVICE)); 1020 np->rx_buf_sz, DMA_FROM_DEVICE));
1021 if (dma_mapping_error(&np->pci_dev->dev,
1022 np->rx_ring[i].frag[0].addr)) {
1023 dev_kfree_skb(skb);
1024 np->rx_skbuff[i] = NULL;
1025 break;
1026 }
1021 np->rx_ring[i].frag[0].length = cpu_to_le32(np->rx_buf_sz | LastFrag); 1027 np->rx_ring[i].frag[0].length = cpu_to_le32(np->rx_buf_sz | LastFrag);
1022 } 1028 }
1023 np->dirty_rx = (unsigned int)(i - RX_RING_SIZE); 1029 np->dirty_rx = (unsigned int)(i - RX_RING_SIZE);
@@ -1070,6 +1076,9 @@ start_tx (struct sk_buff *skb, struct net_device *dev)
1070 txdesc->status = cpu_to_le32 ((entry << 2) | DisableAlign); 1076 txdesc->status = cpu_to_le32 ((entry << 2) | DisableAlign);
1071 txdesc->frag[0].addr = cpu_to_le32(dma_map_single(&np->pci_dev->dev, 1077 txdesc->frag[0].addr = cpu_to_le32(dma_map_single(&np->pci_dev->dev,
1072 skb->data, skb->len, DMA_TO_DEVICE)); 1078 skb->data, skb->len, DMA_TO_DEVICE));
1079 if (dma_mapping_error(&np->pci_dev->dev,
1080 txdesc->frag[0].addr))
1081 goto drop_frame;
1073 txdesc->frag[0].length = cpu_to_le32 (skb->len | LastFrag); 1082 txdesc->frag[0].length = cpu_to_le32 (skb->len | LastFrag);
1074 1083
1075 /* Increment cur_tx before tasklet_schedule() */ 1084 /* Increment cur_tx before tasklet_schedule() */
@@ -1091,6 +1100,12 @@ start_tx (struct sk_buff *skb, struct net_device *dev)
1091 dev->name, np->cur_tx, entry); 1100 dev->name, np->cur_tx, entry);
1092 } 1101 }
1093 return NETDEV_TX_OK; 1102 return NETDEV_TX_OK;
1103
1104drop_frame:
1105 dev_kfree_skb(skb);
1106 np->tx_skbuff[entry] = NULL;
1107 dev->stats.tx_dropped++;
1108 return NETDEV_TX_OK;
1094} 1109}
1095 1110
1096/* Reset hardware tx and free all of tx buffers */ 1111/* Reset hardware tx and free all of tx buffers */
@@ -1398,6 +1413,12 @@ static void refill_rx (struct net_device *dev)
1398 np->rx_ring[entry].frag[0].addr = cpu_to_le32( 1413 np->rx_ring[entry].frag[0].addr = cpu_to_le32(
1399 dma_map_single(&np->pci_dev->dev, skb->data, 1414 dma_map_single(&np->pci_dev->dev, skb->data,
1400 np->rx_buf_sz, DMA_FROM_DEVICE)); 1415 np->rx_buf_sz, DMA_FROM_DEVICE));
1416 if (dma_mapping_error(&np->pci_dev->dev,
1417 np->rx_ring[entry].frag[0].addr)) {
1418 dev_kfree_skb_irq(skb);
1419 np->rx_skbuff[entry] = NULL;
1420 break;
1421 }
1401 } 1422 }
1402 /* Perhaps we need not reset this field. */ 1423 /* Perhaps we need not reset this field. */
1403 np->rx_ring[entry].frag[0].length = 1424 np->rx_ring[entry].frag[0].length =