author		Alexey Khoroshilov <khoroshilov@ispras.ru>	2015-11-27 17:29:30 -0500
committer	David S. Miller <davem@davemloft.net>		2015-12-01 15:19:16 -0500
commit		5738a09d58d5ad2871f1f9a42bf6a3aa9ece5b3c
tree		aa5c240ceb83e10634e33e6ca7e7dd62aff94115
parent		ee9159ddce14bc1dec9435ae4e3bd3153e783706
vmxnet3: fix checks for dma mapping errors
vmxnet3_drv does not check dma_addr with dma_mapping_error()
after mapping DMA memory. The patch adds the missing checks and
tries to handle the failures.
Found by Linux Driver Verification project (linuxtesting.org).
Signed-off-by: Alexey Khoroshilov <khoroshilov@ispras.ru>
Acked-by: Shrikrishna Khare <skhare@vmware.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
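
The pattern being applied is the standard one from the kernel DMA API: every
address returned by dma_map_single(), dma_map_page() or skb_frag_dma_map()
must be validated with dma_mapping_error() before use, since on failure the
returned handle is not guaranteed to be zero or any other directly testable
sentinel. A minimal sketch of the idiom (buf, len and the -EFAULT cleanup are
illustrative, not driver code):

	dma_addr_t addr;

	addr = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, addr)) {
		/* mapping failed: release the buffer and report the error */
		kfree(buf);
		return -EFAULT;
	}
	/* ... use addr, then dma_unmap_single(dev, addr, len, DMA_TO_DEVICE) ... */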
 drivers/net/vmxnet3/vmxnet3_drv.c | 71
 1 file changed, 60 insertions(+), 11 deletions(-)
diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c
index 899ea4288197..417903715437 100644
--- a/drivers/net/vmxnet3/vmxnet3_drv.c
+++ b/drivers/net/vmxnet3/vmxnet3_drv.c
@@ -587,6 +587,12 @@ vmxnet3_rq_alloc_rx_buf(struct vmxnet3_rx_queue *rq, u32 ring_idx,
 						&adapter->pdev->dev,
 						rbi->skb->data, rbi->len,
 						PCI_DMA_FROMDEVICE);
+				if (dma_mapping_error(&adapter->pdev->dev,
+						      rbi->dma_addr)) {
+					dev_kfree_skb_any(rbi->skb);
+					rq->stats.rx_buf_alloc_failure++;
+					break;
+				}
 			} else {
 				/* rx buffer skipped by the device */
 			}
@@ -605,13 +611,18 @@ vmxnet3_rq_alloc_rx_buf(struct vmxnet3_rx_queue *rq, u32 ring_idx,
 						&adapter->pdev->dev,
 						rbi->page, 0, PAGE_SIZE,
 						PCI_DMA_FROMDEVICE);
+				if (dma_mapping_error(&adapter->pdev->dev,
+						      rbi->dma_addr)) {
+					put_page(rbi->page);
+					rq->stats.rx_buf_alloc_failure++;
+					break;
+				}
 			} else {
 				/* rx buffers skipped by the device */
 			}
 			val = VMXNET3_RXD_BTYPE_BODY << VMXNET3_RXD_BTYPE_SHIFT;
 		}
 
-		BUG_ON(rbi->dma_addr == 0);
 		gd->rxd.addr = cpu_to_le64(rbi->dma_addr);
 		gd->dword[2] = cpu_to_le32((!ring->gen << VMXNET3_RXD_GEN_SHIFT)
 					   | val | rbi->len);
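
In the receive-buffer allocation path the handling is deliberately soft: on a
mapping failure the freshly allocated skb or page is released
(dev_kfree_skb_any() or put_page()), the queue's rx_buf_alloc_failure counter
is bumped, and the fill loop breaks out, leaving the ring only partially
replenished until a later refill pass. That is also what lets the patch drop
the old BUG_ON(rbi->dma_addr == 0): a failed mapping is now a handled
condition rather than an asserted-away invariant. Schematically (a sketch of
the loop shape only; alloc_and_map() is a hypothetical stand-in for the
allocation plus dma_map_single()/dma_map_page() calls in the hunks above):

	while (num_allocated < num_to_alloc) {
		/* allocate and map one replacement rx buffer */
		if (alloc_and_map(rq, rbi) < 0) {
			rq->stats.rx_buf_alloc_failure++;
			break;	/* fill what we can; retry on a later pass */
		}
		num_allocated++;
		/* ... write the descriptor and advance the ring ... */
	}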
@@ -655,7 +666,7 @@ vmxnet3_append_frag(struct sk_buff *skb, struct Vmxnet3_RxCompDesc *rcd,
 }
 
 
-static void
+static int
 vmxnet3_map_pkt(struct sk_buff *skb, struct vmxnet3_tx_ctx *ctx,
 		struct vmxnet3_tx_queue *tq, struct pci_dev *pdev,
 		struct vmxnet3_adapter *adapter)
@@ -715,6 +726,8 @@ vmxnet3_map_pkt(struct sk_buff *skb, struct vmxnet3_tx_ctx *ctx,
 		tbi->dma_addr = dma_map_single(&adapter->pdev->dev,
 				skb->data + buf_offset, buf_size,
 				PCI_DMA_TODEVICE);
+		if (dma_mapping_error(&adapter->pdev->dev, tbi->dma_addr))
+			return -EFAULT;
 
 		tbi->len = buf_size;
 
@@ -755,6 +768,8 @@ vmxnet3_map_pkt(struct sk_buff *skb, struct vmxnet3_tx_ctx *ctx,
 			tbi->dma_addr = skb_frag_dma_map(&adapter->pdev->dev, frag,
 							 buf_offset, buf_size,
 							 DMA_TO_DEVICE);
+			if (dma_mapping_error(&adapter->pdev->dev, tbi->dma_addr))
+				return -EFAULT;
 
 			tbi->len = buf_size;
 
@@ -782,6 +797,8 @@ vmxnet3_map_pkt(struct sk_buff *skb, struct vmxnet3_tx_ctx *ctx,
 	/* set the last buf_info for the pkt */
 	tbi->skb = skb;
 	tbi->sop_idx = ctx->sop_txd - tq->tx_ring.base;
+
+	return 0;
 }
 
 
@@ -1020,7 +1037,8 @@ vmxnet3_tq_xmit(struct sk_buff *skb, struct vmxnet3_tx_queue *tq,
 	}
 
 	/* fill tx descs related to addr & len */
-	vmxnet3_map_pkt(skb, &ctx, tq, adapter->pdev, adapter);
+	if (vmxnet3_map_pkt(skb, &ctx, tq, adapter->pdev, adapter))
+		goto unlock_drop_pkt;
 
 	/* setup the EOP desc */
 	ctx.eop_txd->dword[3] = cpu_to_le32(VMXNET3_TXD_CQ | VMXNET3_TXD_EOP);
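
With vmxnet3_map_pkt() now returning int, the transmit path can fail cleanly:
a mapping error surfaces as -EFAULT and vmxnet3_tq_xmit() jumps to its
existing unlock_drop_pkt label, so the packet is dropped instead of handing
the device an unusable DMA address. Note, as the hunks above show, that a
failure partway through vmxnet3_map_pkt() returns without unmapping the
segments already mapped for this packet; the patch only keeps the bad address
from reaching hardware. The drop path follows the usual ndo_start_xmit
convention (a paraphrase of the label's role, not a quote of the driver):

	if (vmxnet3_map_pkt(skb, &ctx, tq, adapter->pdev, adapter))
		goto unlock_drop_pkt;	/* mapping failed: drop the packet */
	/* ...
	 * unlock_drop_pkt: release the tx queue lock, count the drop,
	 * free the skb, and return NETDEV_TX_OK so the stack does not
	 * requeue the packet.
	 */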
@@ -1231,6 +1249,7 @@ vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq,
 		struct vmxnet3_rx_buf_info *rbi;
 		struct sk_buff *skb, *new_skb = NULL;
 		struct page *new_page = NULL;
+		dma_addr_t new_dma_addr;
 		int num_to_alloc;
 		struct Vmxnet3_RxDesc *rxd;
 		u32 idx, ring_idx;
@@ -1287,6 +1306,21 @@ vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq,
 				skip_page_frags = true;
 				goto rcd_done;
 			}
+			new_dma_addr = dma_map_single(&adapter->pdev->dev,
+						      new_skb->data, rbi->len,
+						      PCI_DMA_FROMDEVICE);
+			if (dma_mapping_error(&adapter->pdev->dev,
+					      new_dma_addr)) {
+				dev_kfree_skb(new_skb);
+				/* Skb allocation failed, do not handover this
+				 * skb to stack. Reuse it. Drop the existing pkt
+				 */
+				rq->stats.rx_buf_alloc_failure++;
+				ctx->skb = NULL;
+				rq->stats.drop_total++;
+				skip_page_frags = true;
+				goto rcd_done;
+			}
 
 			dma_unmap_single(&adapter->pdev->dev, rbi->dma_addr,
 					 rbi->len,
@@ -1303,9 +1337,7 @@ vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq,
 
 			/* Immediate refill */
 			rbi->skb = new_skb;
-			rbi->dma_addr = dma_map_single(&adapter->pdev->dev,
-						       rbi->skb->data, rbi->len,
-						       PCI_DMA_FROMDEVICE);
+			rbi->dma_addr = new_dma_addr;
 			rxd->addr = cpu_to_le64(rbi->dma_addr);
 			rxd->len = rbi->len;
 			if (adapter->version == 2 &&
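
The ordering in the receive-completion refill is the important detail: the
replacement buffer is mapped first, and only once that mapping is known good
is the old buffer unmapped and its skb handed to the stack. On failure the
old, still-mapped buffer stays in the ring (the received packet is dropped
and the descriptor reused), so the queue never loses a buffer. A condensed
sketch of the control flow (dev, reuse_old_buffer and the hand-off step are
illustrative names, not driver code):

	new_skb = ...;					/* 1. allocate replacement */
	new_dma_addr = dma_map_single(dev, new_skb->data,
				      rbi->len, DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, new_dma_addr)) {
		dev_kfree_skb(new_skb);			/* 2. failure: keep the old */
		goto reuse_old_buffer;			/*    mapping, drop this pkt */
	}
	dma_unmap_single(dev, rbi->dma_addr, rbi->len, DMA_FROM_DEVICE);
	/* 3. success: pass the old skb up the stack */
	rbi->skb = new_skb;				/* 4. immediate refill */
	rbi->dma_addr = new_dma_addr;

The page-based refill in the next two hunks applies the same ordering with
dma_map_page()/put_page().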
@@ -1348,6 +1380,19 @@ vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq,
 				skip_page_frags = true;
 				goto rcd_done;
 			}
+			new_dma_addr = dma_map_page(&adapter->pdev->dev
+						, rbi->page,
+						0, PAGE_SIZE,
+						PCI_DMA_FROMDEVICE);
+			if (dma_mapping_error(&adapter->pdev->dev,
+					      new_dma_addr)) {
+				put_page(new_page);
+				rq->stats.rx_buf_alloc_failure++;
+				dev_kfree_skb(ctx->skb);
+				ctx->skb = NULL;
+				skip_page_frags = true;
+				goto rcd_done;
+			}
 
 			dma_unmap_page(&adapter->pdev->dev,
 				       rbi->dma_addr, rbi->len,
@@ -1357,10 +1402,7 @@ vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq,
 
 			/* Immediate refill */
 			rbi->page = new_page;
-			rbi->dma_addr = dma_map_page(&adapter->pdev->dev
-						, rbi->page,
-						0, PAGE_SIZE,
-						PCI_DMA_FROMDEVICE);
+			rbi->dma_addr = new_dma_addr;
 			rxd->addr = cpu_to_le64(rbi->dma_addr);
 			rxd->len = rbi->len;
 		}
@@ -2167,7 +2209,8 @@ vmxnet3_set_mc(struct net_device *netdev)
 						PCI_DMA_TODEVICE);
 	}
 
-	if (new_table_pa) {
+	if (!dma_mapping_error(&adapter->pdev->dev,
+			       new_table_pa)) {
 		new_mode |= VMXNET3_RXM_MCAST;
 		rxConf->mfTablePA = cpu_to_le64(new_table_pa);
 	} else {
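
This hunk also fixes a subtler bug: the old code treated a zero return from
dma_map_single() as failure, but dma_addr_t has no reserved error value.
Zero can be a perfectly valid bus address on some platforms, and a real
failure need not map to zero, so dma_mapping_error() is the only portable
way to classify the handle:

	/* wrong: a valid dma_addr_t may be 0, and an error may not be */
	if (new_table_pa) { ... }

	/* right: let the DMA API classify its own return value */
	if (!dma_mapping_error(&adapter->pdev->dev, new_table_pa)) { ... }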
@@ -3075,6 +3118,11 @@ vmxnet3_probe_device(struct pci_dev *pdev,
 	adapter->adapter_pa = dma_map_single(&adapter->pdev->dev, adapter,
 					     sizeof(struct vmxnet3_adapter),
 					     PCI_DMA_TODEVICE);
+	if (dma_mapping_error(&adapter->pdev->dev, adapter->adapter_pa)) {
+		dev_err(&pdev->dev, "Failed to map dma\n");
+		err = -EFAULT;
+		goto err_dma_map;
+	}
 	adapter->shared = dma_alloc_coherent(
 				&adapter->pdev->dev,
 				sizeof(struct Vmxnet3_DriverShared),
@@ -3233,6 +3281,7 @@ err_alloc_queue_desc:
 err_alloc_shared:
 	dma_unmap_single(&adapter->pdev->dev, adapter->adapter_pa,
 			 sizeof(struct vmxnet3_adapter), PCI_DMA_TODEVICE);
+err_dma_map:
 	free_netdev(netdev);
 	return err;
 }
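
Finally, note where the new err_dma_map label lands in the probe function's
unwind ladder: below the dma_unmap_single() under err_alloc_shared. A probe
that fails at the mapping step therefore skips the unmap (nothing is mapped
yet) and goes straight to free_netdev(). This is the standard kernel
goto-ladder shape, sketched here with hypothetical step names:

	err = map_adapter();
	if (err)
		goto err_dma_map;	/* nothing mapped: skip the unmap */
	err = alloc_shared();
	if (err)
		goto err_alloc_shared;	/* undo the mapping before freeing */
	return 0;

err_alloc_shared:
	unmap_adapter();
err_dma_map:
	free_netdev(netdev);
	return err;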