about summary refs log tree commit diff stats
diff options
context:
space:
mode:
authorAlexey Khoroshilov <khoroshilov@ispras.ru>2017-01-27 17:07:30 -0500
committerDavid S. Miller <davem@davemloft.net>2017-01-29 19:04:11 -0500
commitd1156b489fa734d1af763d6a07b1637c01bb0aed (patch)
tree5b9787eb26cd81f9ca16cf3d53f70cf8bf03c5db
parentcf626c3b252b2c9d131be0dd66096ec3bf729e54 (diff)
net: adaptec: starfire: add checks for dma mapping errors
init_ring(), refill_rx_ring() and start_tx() do not check whether DMA memory mapping succeeds. This patch adds the checks and the corresponding failure handling. Found by the Linux Driver Verification project (linuxtesting.org). Signed-off-by: Alexey Khoroshilov <khoroshilov@ispras.ru> Signed-off-by: David S. Miller <davem@davemloft.net>
-rw-r--r-- drivers/net/ethernet/adaptec/starfire.c | 45
1 file changed, 43 insertions(+), 2 deletions(-)
diff --git a/drivers/net/ethernet/adaptec/starfire.c b/drivers/net/ethernet/adaptec/starfire.c
index c12d2618eebf..3872ab96b80a 100644
--- a/drivers/net/ethernet/adaptec/starfire.c
+++ b/drivers/net/ethernet/adaptec/starfire.c
@@ -1152,6 +1152,12 @@ static void init_ring(struct net_device *dev)
1152 if (skb == NULL) 1152 if (skb == NULL)
1153 break; 1153 break;
1154 np->rx_info[i].mapping = pci_map_single(np->pci_dev, skb->data, np->rx_buf_sz, PCI_DMA_FROMDEVICE); 1154 np->rx_info[i].mapping = pci_map_single(np->pci_dev, skb->data, np->rx_buf_sz, PCI_DMA_FROMDEVICE);
1155 if (pci_dma_mapping_error(np->pci_dev,
1156 np->rx_info[i].mapping)) {
1157 dev_kfree_skb(skb);
1158 np->rx_info[i].skb = NULL;
1159 break;
1160 }
1155 /* Grrr, we cannot offset to correctly align the IP header. */ 1161 /* Grrr, we cannot offset to correctly align the IP header. */
1156 np->rx_ring[i].rxaddr = cpu_to_dma(np->rx_info[i].mapping | RxDescValid); 1162 np->rx_ring[i].rxaddr = cpu_to_dma(np->rx_info[i].mapping | RxDescValid);
1157 } 1163 }
@@ -1182,8 +1188,9 @@ static netdev_tx_t start_tx(struct sk_buff *skb, struct net_device *dev)
1182{ 1188{
1183 struct netdev_private *np = netdev_priv(dev); 1189 struct netdev_private *np = netdev_priv(dev);
1184 unsigned int entry; 1190 unsigned int entry;
1191 unsigned int prev_tx;
1185 u32 status; 1192 u32 status;
1186 int i; 1193 int i, j;
1187 1194
1188 /* 1195 /*
1189 * be cautious here, wrapping the queue has weird semantics 1196 * be cautious here, wrapping the queue has weird semantics
@@ -1201,6 +1208,7 @@ static netdev_tx_t start_tx(struct sk_buff *skb, struct net_device *dev)
1201 } 1208 }
1202#endif /* ZEROCOPY && HAS_BROKEN_FIRMWARE */ 1209#endif /* ZEROCOPY && HAS_BROKEN_FIRMWARE */
1203 1210
1211 prev_tx = np->cur_tx;
1204 entry = np->cur_tx % TX_RING_SIZE; 1212 entry = np->cur_tx % TX_RING_SIZE;
1205 for (i = 0; i < skb_num_frags(skb); i++) { 1213 for (i = 0; i < skb_num_frags(skb); i++) {
1206 int wrap_ring = 0; 1214 int wrap_ring = 0;
@@ -1234,6 +1242,11 @@ static netdev_tx_t start_tx(struct sk_buff *skb, struct net_device *dev)
1234 skb_frag_size(this_frag), 1242 skb_frag_size(this_frag),
1235 PCI_DMA_TODEVICE); 1243 PCI_DMA_TODEVICE);
1236 } 1244 }
1245 if (pci_dma_mapping_error(np->pci_dev,
1246 np->tx_info[entry].mapping)) {
1247 dev->stats.tx_dropped++;
1248 goto err_out;
1249 }
1237 1250
1238 np->tx_ring[entry].addr = cpu_to_dma(np->tx_info[entry].mapping); 1251 np->tx_ring[entry].addr = cpu_to_dma(np->tx_info[entry].mapping);
1239 np->tx_ring[entry].status = cpu_to_le32(status); 1252 np->tx_ring[entry].status = cpu_to_le32(status);
@@ -1268,8 +1281,30 @@ static netdev_tx_t start_tx(struct sk_buff *skb, struct net_device *dev)
1268 netif_stop_queue(dev); 1281 netif_stop_queue(dev);
1269 1282
1270 return NETDEV_TX_OK; 1283 return NETDEV_TX_OK;
1271}
1272 1284
1285err_out:
1286 entry = prev_tx % TX_RING_SIZE;
1287 np->tx_info[entry].skb = NULL;
1288 if (i > 0) {
1289 pci_unmap_single(np->pci_dev,
1290 np->tx_info[entry].mapping,
1291 skb_first_frag_len(skb),
1292 PCI_DMA_TODEVICE);
1293 np->tx_info[entry].mapping = 0;
1294 entry = (entry + np->tx_info[entry].used_slots) % TX_RING_SIZE;
1295 for (j = 1; j < i; j++) {
1296 pci_unmap_single(np->pci_dev,
1297 np->tx_info[entry].mapping,
1298 skb_frag_size(
1299 &skb_shinfo(skb)->frags[j-1]),
1300 PCI_DMA_TODEVICE);
1301 entry++;
1302 }
1303 }
1304 dev_kfree_skb_any(skb);
1305 np->cur_tx = prev_tx;
1306 return NETDEV_TX_OK;
1307}
1273 1308
1274/* The interrupt handler does all of the Rx thread work and cleans up 1309/* The interrupt handler does all of the Rx thread work and cleans up
1275 after the Tx thread. */ 1310 after the Tx thread. */
@@ -1569,6 +1604,12 @@ static void refill_rx_ring(struct net_device *dev)
1569 break; /* Better luck next round. */ 1604 break; /* Better luck next round. */
1570 np->rx_info[entry].mapping = 1605 np->rx_info[entry].mapping =
1571 pci_map_single(np->pci_dev, skb->data, np->rx_buf_sz, PCI_DMA_FROMDEVICE); 1606 pci_map_single(np->pci_dev, skb->data, np->rx_buf_sz, PCI_DMA_FROMDEVICE);
1607 if (pci_dma_mapping_error(np->pci_dev,
1608 np->rx_info[entry].mapping)) {
1609 dev_kfree_skb(skb);
1610 np->rx_info[entry].skb = NULL;
1611 break;
1612 }
1572 np->rx_ring[entry].rxaddr = 1613 np->rx_ring[entry].rxaddr =
1573 cpu_to_dma(np->rx_info[entry].mapping | RxDescValid); 1614 cpu_to_dma(np->rx_info[entry].mapping | RxDescValid);
1574 } 1615 }