author		Benjamin Li <benli@broadcom.com>	2008-10-09 15:26:41 -0400
committer	David S. Miller <davem@davemloft.net>	2008-10-09 15:26:41 -0400
commit		3d16af8665504c89f9ef3aae56f54fb93e48da61 (patch)
tree		465889b9ddae41530e7203d0eb97e7661fb2db93 /drivers/net
parent		a1efb4b686babf38e5e63add8b990f18e38becc4 (diff)
bnx2: Handle DMA mapping errors.
Before, the driver did not check the return codes of the pci_map_*
functions.  This is potentially dangerous if a mapping fails.
Now, we check all pci_map_* calls.  On the transmit side, we switch
to the new function skb_dma_map().  On the receive side, we add
pci_dma_mapping_error().
Signed-off-by: Benjamin Li <benli@broadcom.com>
Signed-off-by: Michael Chan <mchan@broadcom.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
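
For context: skb_dma_map(), new at the time of this patch, maps the skb's
linear data and every page fragment in one call, records the addresses in
skb_shinfo(skb)->dma_maps[] (index 0 for the head, i + 1 for fragment i),
and returns non-zero if any mapping fails; skb_dma_unmap() tears all of
them down again.  A minimal sketch of the transmit-path pattern the patch
adopts below — ring bookkeeping is elided, and xmit_map_sketch() is a
hypothetical name for illustration only:

static int xmit_map_sketch(struct bnx2 *bp, struct sk_buff *skb,
			   struct tx_bd *txbd)
{
	struct skb_shared_info *sp;
	dma_addr_t mapping;

	/* One call maps skb->data plus all fragments; on failure nothing
	 * stays mapped, so the packet can simply be dropped.
	 */
	if (skb_dma_map(&bp->pdev->dev, skb, DMA_TO_DEVICE)) {
		dev_kfree_skb(skb);
		return NETDEV_TX_OK;	/* consumed: dropped, not requeued */
	}

	sp = skb_shinfo(skb);
	mapping = sp->dma_maps[0];	/* head; fragment i is dma_maps[i + 1] */
	txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
	txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
	return NETDEV_TX_OK;
}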
Diffstat (limited to 'drivers/net')
-rw-r--r--	drivers/net/bnx2.c	114
-rw-r--r--	drivers/net/bnx2.h	8
2 files changed, 67 insertions, 55 deletions
diff --git a/drivers/net/bnx2.c b/drivers/net/bnx2.c
index f147204e1e47..a95ca4fa249f 100644
--- a/drivers/net/bnx2.c
+++ b/drivers/net/bnx2.c
@@ -2476,6 +2476,11 @@ bnx2_alloc_rx_page(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index)
 		return -ENOMEM;
 	mapping = pci_map_page(bp->pdev, page, 0, PAGE_SIZE,
			       PCI_DMA_FROMDEVICE);
+	if (pci_dma_mapping_error(bp->pdev, mapping)) {
+		__free_page(page);
+		return -EIO;
+	}
+
 	rx_pg->page = page;
 	pci_unmap_addr_set(rx_pg, mapping, mapping);
 	rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
@@ -2518,6 +2523,10 @@ bnx2_alloc_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index)
 
 	mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_use_size,
		PCI_DMA_FROMDEVICE);
+	if (pci_dma_mapping_error(bp->pdev, mapping)) {
+		dev_kfree_skb(skb);
+		return -EIO;
+	}
 
 	rx_buf->skb = skb;
 	pci_unmap_addr_set(rx_buf, mapping, mapping);
@@ -2592,7 +2601,7 @@ bnx2_tx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
 	sw_cons = txr->tx_cons;
 
 	while (sw_cons != hw_cons) {
-		struct sw_bd *tx_buf;
+		struct sw_tx_bd *tx_buf;
 		struct sk_buff *skb;
 		int i, last;
 
@@ -2617,21 +2626,13 @@ bnx2_tx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
			}
		}
 
-		pci_unmap_single(bp->pdev, pci_unmap_addr(tx_buf, mapping),
-			skb_headlen(skb), PCI_DMA_TODEVICE);
+		skb_dma_unmap(&bp->pdev->dev, skb, DMA_TO_DEVICE);
 
 		tx_buf->skb = NULL;
 		last = skb_shinfo(skb)->nr_frags;
 
 		for (i = 0; i < last; i++) {
 			sw_cons = NEXT_TX_BD(sw_cons);
-
-			pci_unmap_page(bp->pdev,
-				pci_unmap_addr(
-					&txr->tx_buf_ring[TX_RING_IDX(sw_cons)],
-					mapping),
-				skb_shinfo(skb)->frags[i].size,
-				PCI_DMA_TODEVICE);
 		}
 
 		sw_cons = NEXT_TX_BD(sw_cons);
@@ -2672,11 +2673,31 @@ bnx2_reuse_rx_skb_pages(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr,
 {
 	struct sw_pg *cons_rx_pg, *prod_rx_pg;
 	struct rx_bd *cons_bd, *prod_bd;
-	dma_addr_t mapping;
 	int i;
-	u16 hw_prod = rxr->rx_pg_prod, prod;
+	u16 hw_prod, prod;
 	u16 cons = rxr->rx_pg_cons;
 
+	cons_rx_pg = &rxr->rx_pg_ring[cons];
+
+	/* The caller was unable to allocate a new page to replace the
+	 * last one in the frags array, so we need to recycle that page
+	 * and then free the skb.
+	 */
+	if (skb) {
+		struct page *page;
+		struct skb_shared_info *shinfo;
+
+		shinfo = skb_shinfo(skb);
+		shinfo->nr_frags--;
+		page = shinfo->frags[shinfo->nr_frags].page;
+		shinfo->frags[shinfo->nr_frags].page = NULL;
+
+		cons_rx_pg->page = page;
+		dev_kfree_skb(skb);
+	}
+
+	hw_prod = rxr->rx_pg_prod;
+
 	for (i = 0; i < count; i++) {
 		prod = RX_PG_RING_IDX(hw_prod);
 
@@ -2685,20 +2706,6 @@ bnx2_reuse_rx_skb_pages(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr,
 		cons_bd = &rxr->rx_pg_desc_ring[RX_RING(cons)][RX_IDX(cons)];
 		prod_bd = &rxr->rx_pg_desc_ring[RX_RING(prod)][RX_IDX(prod)];
 
-		if (i == 0 && skb) {
-			struct page *page;
-			struct skb_shared_info *shinfo;
-
-			shinfo = skb_shinfo(skb);
-			shinfo->nr_frags--;
-			page = shinfo->frags[shinfo->nr_frags].page;
-			shinfo->frags[shinfo->nr_frags].page = NULL;
-			mapping = pci_map_page(bp->pdev, page, 0, PAGE_SIZE,
-				PCI_DMA_FROMDEVICE);
-			cons_rx_pg->page = page;
-			pci_unmap_addr_set(cons_rx_pg, mapping, mapping);
-			dev_kfree_skb(skb);
-		}
 		if (prod != cons) {
 			prod_rx_pg->page = cons_rx_pg->page;
 			cons_rx_pg->page = NULL;
@@ -2784,6 +2791,8 @@ bnx2_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, struct sk_buff *skb,
 		skb_put(skb, hdr_len);
 
 		for (i = 0; i < pages; i++) {
+			dma_addr_t mapping_old;
+
 			frag_len = min(frag_size, (unsigned int) PAGE_SIZE);
 			if (unlikely(frag_len <= 4)) {
 				unsigned int tail = 4 - frag_len;
@@ -2806,9 +2815,10 @@ bnx2_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, struct sk_buff *skb,
 			}
 			rx_pg = &rxr->rx_pg_ring[pg_cons];
 
-			pci_unmap_page(bp->pdev, pci_unmap_addr(rx_pg, mapping),
-				PAGE_SIZE, PCI_DMA_FROMDEVICE);
-
+			/* Don't unmap yet.  If we're unable to allocate a new
+			 * page, we need to recycle the page and the DMA addr.
+			 */
+			mapping_old = pci_unmap_addr(rx_pg, mapping);
 			if (i == pages - 1)
 				frag_len -= 4;
 
@@ -2825,6 +2835,9 @@ bnx2_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, struct sk_buff *skb,
 				return err;
 			}
 
+			pci_unmap_page(bp->pdev, mapping_old,
+				       PAGE_SIZE, PCI_DMA_FROMDEVICE);
+
 			frag_size -= frag_len;
 			skb->data_len += frag_len;
 			skb->truesize += frag_len;
@@ -4971,31 +4984,20 @@ bnx2_free_tx_skbs(struct bnx2 *bp)
 			continue;
 
 		for (j = 0; j < TX_DESC_CNT; ) {
-			struct sw_bd *tx_buf = &txr->tx_buf_ring[j];
+			struct sw_tx_bd *tx_buf = &txr->tx_buf_ring[j];
 			struct sk_buff *skb = tx_buf->skb;
-			int k, last;
 
 			if (skb == NULL) {
 				j++;
 				continue;
 			}
 
-			pci_unmap_single(bp->pdev,
-				pci_unmap_addr(tx_buf, mapping),
-				skb_headlen(skb), PCI_DMA_TODEVICE);
+			skb_dma_unmap(&bp->pdev->dev, skb, DMA_TO_DEVICE);
 
 			tx_buf->skb = NULL;
 
-			last = skb_shinfo(skb)->nr_frags;
-			for (k = 0; k < last; k++) {
-				tx_buf = &txr->tx_buf_ring[j + k + 1];
-				pci_unmap_page(bp->pdev,
-					pci_unmap_addr(tx_buf, mapping),
-					skb_shinfo(skb)->frags[j].size,
-					PCI_DMA_TODEVICE);
-			}
+			j += skb_shinfo(skb)->nr_frags + 1;
 			dev_kfree_skb(skb);
-			j += k + 1;
 		}
 	}
 }
@@ -5373,8 +5375,11 @@ bnx2_run_loopback(struct bnx2 *bp, int loopback_mode)
 	for (i = 14; i < pkt_size; i++)
 		packet[i] = (unsigned char) (i & 0xff);
 
-	map = pci_map_single(bp->pdev, skb->data, pkt_size,
-		PCI_DMA_TODEVICE);
+	if (skb_dma_map(&bp->pdev->dev, skb, DMA_TO_DEVICE)) {
+		dev_kfree_skb(skb);
+		return -EIO;
+	}
+	map = skb_shinfo(skb)->dma_maps[0];
 
 	REG_WR(bp, BNX2_HC_COMMAND,
	       bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
@@ -5409,7 +5414,7 @@ bnx2_run_loopback(struct bnx2 *bp, int loopback_mode)
 
 	udelay(5);
 
-	pci_unmap_single(bp->pdev, map, pkt_size, PCI_DMA_TODEVICE);
+	skb_dma_unmap(&bp->pdev->dev, skb, DMA_TO_DEVICE);
 	dev_kfree_skb(skb);
 
 	if (bnx2_get_hw_tx_cons(tx_napi) != txr->tx_prod)
@@ -5970,13 +5975,14 @@ bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	struct bnx2 *bp = netdev_priv(dev);
 	dma_addr_t mapping;
 	struct tx_bd *txbd;
-	struct sw_bd *tx_buf;
+	struct sw_tx_bd *tx_buf;
 	u32 len, vlan_tag_flags, last_frag, mss;
 	u16 prod, ring_prod;
 	int i;
 	struct bnx2_napi *bnapi;
 	struct bnx2_tx_ring_info *txr;
 	struct netdev_queue *txq;
+	struct skb_shared_info *sp;
 
 	/* Determine which tx ring we will be placed on */
 	i = skb_get_queue_mapping(skb);
@@ -6041,11 +6047,16 @@ bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	} else
 		mss = 0;
 
-	mapping = pci_map_single(bp->pdev, skb->data, len, PCI_DMA_TODEVICE);
+	if (skb_dma_map(&bp->pdev->dev, skb, DMA_TO_DEVICE)) {
+		dev_kfree_skb(skb);
+		return NETDEV_TX_OK;
+	}
+
+	sp = skb_shinfo(skb);
+	mapping = sp->dma_maps[0];
 
 	tx_buf = &txr->tx_buf_ring[ring_prod];
 	tx_buf->skb = skb;
-	pci_unmap_addr_set(tx_buf, mapping, mapping);
 
 	txbd = &txr->tx_desc_ring[ring_prod];
 
@@ -6064,10 +6075,7 @@ bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
 		txbd = &txr->tx_desc_ring[ring_prod];
 
 		len = frag->size;
-		mapping = pci_map_page(bp->pdev, frag->page, frag->page_offset,
-			len, PCI_DMA_TODEVICE);
-		pci_unmap_addr_set(&txr->tx_buf_ring[ring_prod],
-			mapping, mapping);
+		mapping = sp->dma_maps[i + 1];
 
 		txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
 		txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
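
The subtlest receive-side change above is the deferred unmap in
bnx2_rx_skb(): the old DMA address is captured before attempting to
allocate a replacement page, and pci_unmap_page() runs only after the
allocation succeeds, so on failure the still-mapped page can be recycled
intact.  A condensed sketch of that ordering — the recycle call and its
"pages - i" argument are assumed from surrounding driver code not shown
in the hunks above:

	/* Remember the old mapping, but do not unmap it yet. */
	mapping_old = pci_unmap_addr(rx_pg, mapping);

	err = bnx2_alloc_rx_page(bp, rxr, RX_PG_RING_IDX(pg_prod));
	if (unlikely(err)) {
		/* Refill failed: hand the old page and its live DMA
		 * mapping back to the ring, then drop the skb.
		 */
		bnx2_reuse_rx_skb_pages(bp, rxr, skb, pages - i);
		return err;
	}

	/* Refill succeeded; now the old mapping can be torn down. */
	pci_unmap_page(bp->pdev, mapping_old, PAGE_SIZE, PCI_DMA_FROMDEVICE);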
diff --git a/drivers/net/bnx2.h b/drivers/net/bnx2.h
index edc7774f2f21..617d95340160 100644
--- a/drivers/net/bnx2.h
+++ b/drivers/net/bnx2.h
@@ -6526,10 +6526,14 @@ struct sw_pg {
 	DECLARE_PCI_UNMAP_ADDR(mapping)
 };
 
+struct sw_tx_bd {
+	struct sk_buff		*skb;
+};
+
 #define SW_RXBD_RING_SIZE (sizeof(struct sw_bd) * RX_DESC_CNT)
 #define SW_RXPG_RING_SIZE (sizeof(struct sw_pg) * RX_DESC_CNT)
 #define RXBD_RING_SIZE (sizeof(struct rx_bd) * RX_DESC_CNT)
-#define SW_TXBD_RING_SIZE (sizeof(struct sw_bd) * TX_DESC_CNT)
+#define SW_TXBD_RING_SIZE (sizeof(struct sw_tx_bd) * TX_DESC_CNT)
 #define TXBD_RING_SIZE (sizeof(struct tx_bd) * TX_DESC_CNT)
 
 /* Buffered flash (Atmel: AT45DB011B) specific information */
@@ -6609,7 +6613,7 @@ struct bnx2_tx_ring_info {
 	u32		tx_bseq_addr;
 
 	struct tx_bd	*tx_desc_ring;
-	struct sw_bd	*tx_buf_ring;
+	struct sw_tx_bd	*tx_buf_ring;
 
 	u16		tx_cons;
 	u16		hw_tx_cons;
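
The bnx2.h change falls out of the TX conversion: with the per-fragment
DMA addresses carried in skb_shinfo(skb)->dma_maps[], the software TX
descriptor no longer needs its own unmap-address slot.  Side by side —
the old layout is inferred from the removed pci_unmap_addr(tx_buf,
mapping) calls and the existing struct sw_bd:

/* Before: TX reused the RX descriptor type, one unmap slot per entry. */
struct sw_bd {
	struct sk_buff		*skb;
	DECLARE_PCI_UNMAP_ADDR(mapping)
};

/* After: the mapping travels with the skb, so only the pointer remains. */
struct sw_tx_bd {
	struct sk_buff		*skb;
};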