author	Alexander Duyck <alexander.h.duyck@intel.com>	2009-12-02 11:47:57 -0500
committer	David S. Miller <davem@davemloft.net>	2009-12-02 22:57:13 -0500
commit	e95524a726904a1d2b91552f0577838f67d53c6c (patch)
tree	ae04c29ad8125fb43bbf34f30d9a90e120737233 /drivers/net
parent	a7d5ca40ff56e2cd4e30bbe91f2d0deab6bfc006 (diff)
bnx2: remove skb_dma_map/unmap calls from driver
skb_dma_map/unmap do not work correctly when a HW IOMMU is enabled, so it
has been recommended that network device drivers stop using these calls.
Convert bnx2 to use the PCI DMA API (pci_map_single()/pci_map_page())
directly and record each mapping in the sw_tx_bd ring entry so it can be
unmapped on completion or on error.
[ Fix bnx2_free_tx_skbs() ring indexing and use NETDEV_TX_OK return
code in bnx2_start_xmit() after cleaning up DMA mapping errors. -Mchan ]
Signed-off-by: Alexander Duyck <alexander.h.duyck@intel.com>
Signed-off-by: Michael Chan <mchan@broadcom.com>
Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
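
For reference, the core of the conversion (condensed from the bnx2_start_xmit()
hunks below) maps the linear part of the skb with the PCI DMA API, drops the
packet on a mapping failure, and records the DMA address so it can be unmapped
later; returning NETDEV_TX_OK tells the stack the skb was consumed, so the
dropped packet is not requeued:

	/* len == skb_headlen(skb) at this point in bnx2_start_xmit() */
	mapping = pci_map_single(bp->pdev, skb->data, len, PCI_DMA_TODEVICE);
	if (pci_dma_mapping_error(bp->pdev, mapping)) {
		dev_kfree_skb(skb);	/* cannot DMA this packet; drop it */
		return NETDEV_TX_OK;
	}
	tx_buf = &txr->tx_buf_ring[ring_prod];
	tx_buf->skb = skb;
	pci_unmap_addr_set(tx_buf, mapping, mapping);	/* saved for unmap */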
Diffstat (limited to 'drivers/net')
-rw-r--r--	drivers/net/bnx2.c	72
-rw-r--r--	drivers/net/bnx2.h	1
2 files changed, 61 insertions(+), 12 deletions(-)
diff --git a/drivers/net/bnx2.c b/drivers/net/bnx2.c
index 539d23b594ce..f47bf50602d9 100644
--- a/drivers/net/bnx2.c
+++ b/drivers/net/bnx2.c
@@ -2815,13 +2815,21 @@ bnx2_tx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
 			}
 		}
 
-		skb_dma_unmap(&bp->pdev->dev, skb, DMA_TO_DEVICE);
+		pci_unmap_single(bp->pdev, pci_unmap_addr(tx_buf, mapping),
+				 skb_headlen(skb), PCI_DMA_TODEVICE);
 
 		tx_buf->skb = NULL;
 		last = tx_buf->nr_frags;
 
 		for (i = 0; i < last; i++) {
 			sw_cons = NEXT_TX_BD(sw_cons);
+
+			pci_unmap_page(bp->pdev,
+				pci_unmap_addr(
+					&txr->tx_buf_ring[TX_RING_IDX(sw_cons)],
+					mapping),
+				skb_shinfo(skb)->frags[i].size,
+				PCI_DMA_TODEVICE);
 		}
 
 		sw_cons = NEXT_TX_BD(sw_cons);
@@ -5295,17 +5303,29 @@ bnx2_free_tx_skbs(struct bnx2 *bp)
 		for (j = 0; j < TX_DESC_CNT; ) {
 			struct sw_tx_bd *tx_buf = &txr->tx_buf_ring[j];
 			struct sk_buff *skb = tx_buf->skb;
+			int k, last;
 
 			if (skb == NULL) {
 				j++;
 				continue;
 			}
 
-			skb_dma_unmap(&bp->pdev->dev, skb, DMA_TO_DEVICE);
+			pci_unmap_single(bp->pdev,
+					 pci_unmap_addr(tx_buf, mapping),
+					 skb_headlen(skb),
+					 PCI_DMA_TODEVICE);
 
 			tx_buf->skb = NULL;
 
-			j += skb_shinfo(skb)->nr_frags + 1;
+			last = tx_buf->nr_frags;
+			j++;
+			for (k = 0; k < last; k++, j++) {
+				tx_buf = &txr->tx_buf_ring[TX_RING_IDX(j)];
+				pci_unmap_page(bp->pdev,
+					pci_unmap_addr(tx_buf, mapping),
+					skb_shinfo(skb)->frags[k].size,
+					PCI_DMA_TODEVICE);
+			}
 			dev_kfree_skb(skb);
 		}
 	}
@@ -5684,11 +5704,12 @@ bnx2_run_loopback(struct bnx2 *bp, int loopback_mode)
 	for (i = 14; i < pkt_size; i++)
 		packet[i] = (unsigned char) (i & 0xff);
 
-	if (skb_dma_map(&bp->pdev->dev, skb, DMA_TO_DEVICE)) {
+	map = pci_map_single(bp->pdev, skb->data, pkt_size,
+		PCI_DMA_TODEVICE);
+	if (pci_dma_mapping_error(bp->pdev, map)) {
 		dev_kfree_skb(skb);
 		return -EIO;
 	}
-	map = skb_shinfo(skb)->dma_head;
 
 	REG_WR(bp, BNX2_HC_COMMAND,
 	       bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
@@ -5723,7 +5744,7 @@ bnx2_run_loopback(struct bnx2 *bp, int loopback_mode)
 
 	udelay(5);
 
-	skb_dma_unmap(&bp->pdev->dev, skb, DMA_TO_DEVICE);
+	pci_unmap_single(bp->pdev, map, pkt_size, PCI_DMA_TODEVICE);
 	dev_kfree_skb(skb);
 
 	if (bnx2_get_hw_tx_cons(tx_napi) != txr->tx_prod)
@@ -6302,7 +6323,6 @@ bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	struct bnx2_napi *bnapi;
 	struct bnx2_tx_ring_info *txr;
 	struct netdev_queue *txq;
-	struct skb_shared_info *sp;
 
 	/* Determine which tx ring we will be placed on */
 	i = skb_get_queue_mapping(skb);
@@ -6367,16 +6387,15 @@ bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	} else
 		mss = 0;
 
-	if (skb_dma_map(&bp->pdev->dev, skb, DMA_TO_DEVICE)) {
+	mapping = pci_map_single(bp->pdev, skb->data, len, PCI_DMA_TODEVICE);
+	if (pci_dma_mapping_error(bp->pdev, mapping)) {
 		dev_kfree_skb(skb);
 		return NETDEV_TX_OK;
 	}
 
-	sp = skb_shinfo(skb);
-	mapping = sp->dma_head;
-
 	tx_buf = &txr->tx_buf_ring[ring_prod];
 	tx_buf->skb = skb;
+	pci_unmap_addr_set(tx_buf, mapping, mapping);
 
 	txbd = &txr->tx_desc_ring[ring_prod];
 
@@ -6397,7 +6416,12 @@ bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
 		txbd = &txr->tx_desc_ring[ring_prod];
 
 		len = frag->size;
-		mapping = sp->dma_maps[i];
+		mapping = pci_map_page(bp->pdev, frag->page, frag->page_offset,
+			len, PCI_DMA_TODEVICE);
+		if (pci_dma_mapping_error(bp->pdev, mapping))
+			goto dma_error;
+		pci_unmap_addr_set(&txr->tx_buf_ring[ring_prod], mapping,
+			mapping);
 
 		txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
 		txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
@@ -6424,6 +6448,30 @@ bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	}
 
 	return NETDEV_TX_OK;
+dma_error:
+	/* save value of frag that failed */
+	last_frag = i;
+
+	/* start back at beginning and unmap skb */
+	prod = txr->tx_prod;
+	ring_prod = TX_RING_IDX(prod);
+	tx_buf = &txr->tx_buf_ring[ring_prod];
+	tx_buf->skb = NULL;
+	pci_unmap_single(bp->pdev, pci_unmap_addr(tx_buf, mapping),
+			 skb_headlen(skb), PCI_DMA_TODEVICE);
+
+	/* unmap remaining mapped pages */
+	for (i = 0; i < last_frag; i++) {
+		prod = NEXT_TX_BD(prod);
+		ring_prod = TX_RING_IDX(prod);
+		tx_buf = &txr->tx_buf_ring[ring_prod];
+		pci_unmap_page(bp->pdev, pci_unmap_addr(tx_buf, mapping),
+			       skb_shinfo(skb)->frags[i].size,
+			       PCI_DMA_TODEVICE);
+	}
+
+	dev_kfree_skb(skb);
+	return NETDEV_TX_OK;
 }
 
 /* Called with rtnl_lock */
diff --git a/drivers/net/bnx2.h b/drivers/net/bnx2.h
index a4d83409f205..4908b9f74260 100644
--- a/drivers/net/bnx2.h
+++ b/drivers/net/bnx2.h
@@ -6559,6 +6559,7 @@ struct sw_pg {
 
 struct sw_tx_bd {
 	struct sk_buff *skb;
+	DECLARE_PCI_UNMAP_ADDR(mapping)
 	unsigned short is_gso;
 	unsigned short nr_frags;
 };
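
The DECLARE_PCI_UNMAP_ADDR(mapping) field added to struct sw_tx_bd above is what
lets the completion and error paths recover the DMA address later; a minimal
sketch of the pairing, using only calls that appear in this patch:

	/* transmit path: map and remember the bus address */
	mapping = pci_map_single(bp->pdev, skb->data, skb_headlen(skb),
				 PCI_DMA_TODEVICE);
	pci_unmap_addr_set(tx_buf, mapping, mapping);

	/* completion (or error unwind): read it back and unmap */
	pci_unmap_single(bp->pdev, pci_unmap_addr(tx_buf, mapping),
			 skb_headlen(skb), PCI_DMA_TODEVICE);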