Diffstat (limited to 'drivers')

 -rw-r--r--  drivers/net/bnx2.c | 111
 1 file changed, 58 insertions(+), 53 deletions(-)
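The patch below converts bnx2 from the legacy PCI DMA wrappers (pci_alloc_consistent(), pci_map_single(), and friends) to the generic DMA API: every call now takes &bp->pdev->dev instead of the struct pci_dev, and the coherent allocations pass an explicit GFP_KERNEL, where the old wrappers hard-coded GFP_ATOMIC. The PCI_DMA_* direction constants are kept; they are numerically identical to the corresponding DMA_* values, so behavior is unchanged. A minimal sketch of the correspondence, with illustrative names (pdev, sz, handle, buf, ptr are not from the patch):

    /* legacy PCI wrapper -> generic DMA API equivalent */
    buf = pci_alloc_consistent(pdev, sz, &handle);
    buf = dma_alloc_coherent(&pdev->dev, sz, &handle, GFP_KERNEL);

    pci_free_consistent(pdev, sz, buf, handle);
    dma_free_coherent(&pdev->dev, sz, buf, handle);

    addr = pci_map_single(pdev, ptr, sz, PCI_DMA_TODEVICE);
    addr = dma_map_single(&pdev->dev, ptr, sz, PCI_DMA_TODEVICE);

    if (pci_dma_mapping_error(pdev, addr)) { /* ... */ }
    if (dma_mapping_error(&pdev->dev, addr)) { /* ... */ }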
diff --git a/drivers/net/bnx2.c b/drivers/net/bnx2.c
index a7df539f29d3..ce3217b441a4 100644
--- a/drivers/net/bnx2.c
+++ b/drivers/net/bnx2.c
@@ -692,9 +692,9 @@ bnx2_free_tx_mem(struct bnx2 *bp)
 		struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
 
 		if (txr->tx_desc_ring) {
-			pci_free_consistent(bp->pdev, TXBD_RING_SIZE,
-					    txr->tx_desc_ring,
-					    txr->tx_desc_mapping);
+			dma_free_coherent(&bp->pdev->dev, TXBD_RING_SIZE,
+					  txr->tx_desc_ring,
+					  txr->tx_desc_mapping);
 			txr->tx_desc_ring = NULL;
 		}
 		kfree(txr->tx_buf_ring);
@@ -714,9 +714,9 @@ bnx2_free_rx_mem(struct bnx2 *bp)
 
 		for (j = 0; j < bp->rx_max_ring; j++) {
 			if (rxr->rx_desc_ring[j])
-				pci_free_consistent(bp->pdev, RXBD_RING_SIZE,
-						    rxr->rx_desc_ring[j],
-						    rxr->rx_desc_mapping[j]);
+				dma_free_coherent(&bp->pdev->dev, RXBD_RING_SIZE,
+						  rxr->rx_desc_ring[j],
+						  rxr->rx_desc_mapping[j]);
 			rxr->rx_desc_ring[j] = NULL;
 		}
 		vfree(rxr->rx_buf_ring);
@@ -724,9 +724,9 @@ bnx2_free_rx_mem(struct bnx2 *bp)
 
 		for (j = 0; j < bp->rx_max_pg_ring; j++) {
 			if (rxr->rx_pg_desc_ring[j])
-				pci_free_consistent(bp->pdev, RXBD_RING_SIZE,
-						    rxr->rx_pg_desc_ring[j],
-						    rxr->rx_pg_desc_mapping[j]);
+				dma_free_coherent(&bp->pdev->dev, RXBD_RING_SIZE,
+						  rxr->rx_pg_desc_ring[j],
+						  rxr->rx_pg_desc_mapping[j]);
 			rxr->rx_pg_desc_ring[j] = NULL;
 		}
 		vfree(rxr->rx_pg_ring);
@@ -748,8 +748,8 @@ bnx2_alloc_tx_mem(struct bnx2 *bp)
 			return -ENOMEM;
 
 		txr->tx_desc_ring =
-			pci_alloc_consistent(bp->pdev, TXBD_RING_SIZE,
-					     &txr->tx_desc_mapping);
+			dma_alloc_coherent(&bp->pdev->dev, TXBD_RING_SIZE,
+					   &txr->tx_desc_mapping, GFP_KERNEL);
 		if (txr->tx_desc_ring == NULL)
 			return -ENOMEM;
 	}
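All of the allocation hunks follow this shape. A short, hedged sketch of the new coherent-allocation pattern, assuming a helper name (alloc_desc_ring) that does not exist in the driver:

    #include <linux/pci.h>
    #include <linux/dma-mapping.h>

    /* Illustrative helper, not part of bnx2: allocate one DMA-coherent
     * descriptor ring. Unlike pci_alloc_consistent(), which hard-coded
     * GFP_ATOMIC, dma_alloc_coherent() lets callers in sleepable context
     * pass GFP_KERNEL explicitly. */
    static void *alloc_desc_ring(struct pci_dev *pdev, size_t size,
    			     dma_addr_t *mapping)
    {
    	return dma_alloc_coherent(&pdev->dev, size, mapping, GFP_KERNEL);
    }

The matching release is dma_free_coherent(&pdev->dev, size, ring, mapping), as in the free hunks above.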
@@ -776,8 +776,10 @@ bnx2_alloc_rx_mem(struct bnx2 *bp)
 
 		for (j = 0; j < bp->rx_max_ring; j++) {
 			rxr->rx_desc_ring[j] =
-				pci_alloc_consistent(bp->pdev, RXBD_RING_SIZE,
-						     &rxr->rx_desc_mapping[j]);
+				dma_alloc_coherent(&bp->pdev->dev,
+						   RXBD_RING_SIZE,
+						   &rxr->rx_desc_mapping[j],
+						   GFP_KERNEL);
 			if (rxr->rx_desc_ring[j] == NULL)
 				return -ENOMEM;
 
@@ -795,8 +797,10 @@ bnx2_alloc_rx_mem(struct bnx2 *bp)
 
 		for (j = 0; j < bp->rx_max_pg_ring; j++) {
 			rxr->rx_pg_desc_ring[j] =
-				pci_alloc_consistent(bp->pdev, RXBD_RING_SIZE,
-						     &rxr->rx_pg_desc_mapping[j]);
+				dma_alloc_coherent(&bp->pdev->dev,
+						   RXBD_RING_SIZE,
+						   &rxr->rx_pg_desc_mapping[j],
+						   GFP_KERNEL);
 			if (rxr->rx_pg_desc_ring[j] == NULL)
 				return -ENOMEM;
 
@@ -816,16 +820,16 @@ bnx2_free_mem(struct bnx2 *bp)
 
 	for (i = 0; i < bp->ctx_pages; i++) {
 		if (bp->ctx_blk[i]) {
-			pci_free_consistent(bp->pdev, BCM_PAGE_SIZE,
-					    bp->ctx_blk[i],
-					    bp->ctx_blk_mapping[i]);
+			dma_free_coherent(&bp->pdev->dev, BCM_PAGE_SIZE,
+					  bp->ctx_blk[i],
+					  bp->ctx_blk_mapping[i]);
 			bp->ctx_blk[i] = NULL;
 		}
 	}
 	if (bnapi->status_blk.msi) {
-		pci_free_consistent(bp->pdev, bp->status_stats_size,
-				    bnapi->status_blk.msi,
-				    bp->status_blk_mapping);
+		dma_free_coherent(&bp->pdev->dev, bp->status_stats_size,
+				  bnapi->status_blk.msi,
+				  bp->status_blk_mapping);
 		bnapi->status_blk.msi = NULL;
 		bp->stats_blk = NULL;
 	}
@@ -846,8 +850,8 @@ bnx2_alloc_mem(struct bnx2 *bp)
 	bp->status_stats_size = status_blk_size +
 				sizeof(struct statistics_block);
 
-	status_blk = pci_alloc_consistent(bp->pdev, bp->status_stats_size,
-					  &bp->status_blk_mapping);
+	status_blk = dma_alloc_coherent(&bp->pdev->dev, bp->status_stats_size,
+					&bp->status_blk_mapping, GFP_KERNEL);
 	if (status_blk == NULL)
 		goto alloc_mem_err;
 
@@ -885,9 +889,10 @@ bnx2_alloc_mem(struct bnx2 *bp)
 		if (bp->ctx_pages == 0)
 			bp->ctx_pages = 1;
 		for (i = 0; i < bp->ctx_pages; i++) {
-			bp->ctx_blk[i] = pci_alloc_consistent(bp->pdev,
-						BCM_PAGE_SIZE,
-						&bp->ctx_blk_mapping[i]);
+			bp->ctx_blk[i] = dma_alloc_coherent(&bp->pdev->dev,
+						BCM_PAGE_SIZE,
+						&bp->ctx_blk_mapping[i],
+						GFP_KERNEL);
 			if (bp->ctx_blk[i] == NULL)
 				goto alloc_mem_err;
 		}
@@ -2674,9 +2679,9 @@ bnx2_alloc_rx_page(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index, gf
 
 	if (!page)
 		return -ENOMEM;
-	mapping = pci_map_page(bp->pdev, page, 0, PAGE_SIZE,
+	mapping = dma_map_page(&bp->pdev->dev, page, 0, PAGE_SIZE,
 			       PCI_DMA_FROMDEVICE);
-	if (pci_dma_mapping_error(bp->pdev, mapping)) {
+	if (dma_mapping_error(&bp->pdev->dev, mapping)) {
 		__free_page(page);
 		return -EIO;
 	}
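The failure check moves from pci_dma_mapping_error(bp->pdev, ...) to dma_mapping_error(&bp->pdev->dev, ...); the returned dma_addr_t is opaque and must be tested this way, never compared against zero. A hedged sketch of the map-and-check lifecycle (the helper name map_rx_page is illustrative, not from the driver):

    #include <linux/pci.h>
    #include <linux/dma-mapping.h>

    /* Illustrative sketch, not from the patch: map one receive page and
     * validate the handle before handing it to the NIC. */
    static int map_rx_page(struct pci_dev *pdev, struct page *page,
    		       dma_addr_t *mapping)
    {
    	*mapping = dma_map_page(&pdev->dev, page, 0, PAGE_SIZE,
    				PCI_DMA_FROMDEVICE);
    	if (dma_mapping_error(&pdev->dev, *mapping))
    		return -EIO;	/* caller frees the page, as bnx2 does above */
    	return 0;
    }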
@@ -2697,8 +2702,8 @@ bnx2_free_rx_page(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index)
 	if (!page)
 		return;
 
-	pci_unmap_page(bp->pdev, dma_unmap_addr(rx_pg, mapping), PAGE_SIZE,
-		       PCI_DMA_FROMDEVICE);
+	dma_unmap_page(&bp->pdev->dev, dma_unmap_addr(rx_pg, mapping),
+		       PAGE_SIZE, PCI_DMA_FROMDEVICE);
 
 	__free_page(page);
 	rx_pg->page = NULL;
@@ -2721,9 +2726,9 @@ bnx2_alloc_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index, gfp
 	if (unlikely((align = (unsigned long) skb->data & (BNX2_RX_ALIGN - 1))))
 		skb_reserve(skb, BNX2_RX_ALIGN - align);
 
-	mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_use_size,
+	mapping = dma_map_single(&bp->pdev->dev, skb->data, bp->rx_buf_use_size,
 				 PCI_DMA_FROMDEVICE);
-	if (pci_dma_mapping_error(bp->pdev, mapping)) {
+	if (dma_mapping_error(&bp->pdev->dev, mapping)) {
 		dev_kfree_skb(skb);
 		return -EIO;
 	}
@@ -2829,7 +2834,7 @@ bnx2_tx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
 			}
 		}
 
-		pci_unmap_single(bp->pdev, dma_unmap_addr(tx_buf, mapping),
+		dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(tx_buf, mapping),
 				 skb_headlen(skb), PCI_DMA_TODEVICE);
 
 		tx_buf->skb = NULL;
@@ -2838,7 +2843,7 @@ bnx2_tx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
 		for (i = 0; i < last; i++) {
 			sw_cons = NEXT_TX_BD(sw_cons);
 
-			pci_unmap_page(bp->pdev,
+			dma_unmap_page(&bp->pdev->dev,
 				       dma_unmap_addr(
 					       &txr->tx_buf_ring[TX_RING_IDX(sw_cons)],
 					       mapping),
@@ -2945,7 +2950,7 @@ bnx2_reuse_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr,
 	cons_rx_buf = &rxr->rx_buf_ring[cons];
 	prod_rx_buf = &rxr->rx_buf_ring[prod];
 
-	pci_dma_sync_single_for_device(bp->pdev,
+	dma_sync_single_for_device(&bp->pdev->dev,
 		dma_unmap_addr(cons_rx_buf, mapping),
 		BNX2_RX_OFFSET + BNX2_RX_COPY_THRESH, PCI_DMA_FROMDEVICE);
 
@@ -2987,7 +2992,7 @@ bnx2_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, struct sk_buff *skb,
 	}
 
 	skb_reserve(skb, BNX2_RX_OFFSET);
-	pci_unmap_single(bp->pdev, dma_addr, bp->rx_buf_use_size,
+	dma_unmap_single(&bp->pdev->dev, dma_addr, bp->rx_buf_use_size,
 			 PCI_DMA_FROMDEVICE);
 
 	if (hdr_len == 0) {
@@ -3049,7 +3054,7 @@ bnx2_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, struct sk_buff *skb,
 				return err;
 			}
 
-			pci_unmap_page(bp->pdev, mapping_old,
+			dma_unmap_page(&bp->pdev->dev, mapping_old,
 				       PAGE_SIZE, PCI_DMA_FROMDEVICE);
 
 			frag_size -= frag_len;
@@ -3120,7 +3125,7 @@ bnx2_rx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
 
 		dma_addr = dma_unmap_addr(rx_buf, mapping);
 
-		pci_dma_sync_single_for_cpu(bp->pdev, dma_addr,
+		dma_sync_single_for_cpu(&bp->pdev->dev, dma_addr,
 			BNX2_RX_OFFSET + BNX2_RX_COPY_THRESH,
 			PCI_DMA_FROMDEVICE);
 
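These two sync calls work as a pair: dma_sync_single_for_cpu() gives the CPU ownership of a streaming buffer so the driver can inspect packet headers in place, and dma_sync_single_for_device() (used in bnx2_reuse_rx_skb() above) hands ownership back when the buffer is recycled to the ring. A minimal sketch of the pairing, with illustrative names (pdev, dma_addr, hdr_len are stand-ins):

    /* CPU takes ownership of the first hdr_len bytes... */
    dma_sync_single_for_cpu(&pdev->dev, dma_addr, hdr_len, PCI_DMA_FROMDEVICE);
    /* ...the driver may now safely read those bytes... */
    dma_sync_single_for_device(&pdev->dev, dma_addr, hdr_len,
    			   PCI_DMA_FROMDEVICE);
    /* ...the device owns the buffer again and may DMA into it. */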
@@ -5338,7 +5343,7 @@ bnx2_free_tx_skbs(struct bnx2 *bp)
 				continue;
 			}
 
-			pci_unmap_single(bp->pdev,
+			dma_unmap_single(&bp->pdev->dev,
 					 dma_unmap_addr(tx_buf, mapping),
 					 skb_headlen(skb),
 					 PCI_DMA_TODEVICE);
@@ -5349,7 +5354,7 @@ bnx2_free_tx_skbs(struct bnx2 *bp)
 			j++;
 			for (k = 0; k < last; k++, j++) {
 				tx_buf = &txr->tx_buf_ring[TX_RING_IDX(j)];
-				pci_unmap_page(bp->pdev,
+				dma_unmap_page(&bp->pdev->dev,
 					       dma_unmap_addr(tx_buf, mapping),
 					       skb_shinfo(skb)->frags[k].size,
 					       PCI_DMA_TODEVICE);
@@ -5379,7 +5384,7 @@ bnx2_free_rx_skbs(struct bnx2 *bp)
 			if (skb == NULL)
 				continue;
 
-			pci_unmap_single(bp->pdev,
+			dma_unmap_single(&bp->pdev->dev,
 					 dma_unmap_addr(rx_buf, mapping),
 					 bp->rx_buf_use_size,
 					 PCI_DMA_FROMDEVICE);
@@ -5732,9 +5737,9 @@ bnx2_run_loopback(struct bnx2 *bp, int loopback_mode)
 	for (i = 14; i < pkt_size; i++)
 		packet[i] = (unsigned char) (i & 0xff);
 
-	map = pci_map_single(bp->pdev, skb->data, pkt_size,
+	map = dma_map_single(&bp->pdev->dev, skb->data, pkt_size,
 			     PCI_DMA_TODEVICE);
-	if (pci_dma_mapping_error(bp->pdev, map)) {
+	if (dma_mapping_error(&bp->pdev->dev, map)) {
 		dev_kfree_skb(skb);
 		return -EIO;
 	}
@@ -5772,7 +5777,7 @@ bnx2_run_loopback(struct bnx2 *bp, int loopback_mode)
 
 	udelay(5);
 
-	pci_unmap_single(bp->pdev, map, pkt_size, PCI_DMA_TODEVICE);
+	dma_unmap_single(&bp->pdev->dev, map, pkt_size, PCI_DMA_TODEVICE);
 	dev_kfree_skb(skb);
 
 	if (bnx2_get_hw_tx_cons(tx_napi) != txr->tx_prod)
@@ -5789,7 +5794,7 @@ bnx2_run_loopback(struct bnx2 *bp, int loopback_mode)
 	rx_hdr = rx_buf->desc;
 	skb_reserve(rx_skb, BNX2_RX_OFFSET);
 
-	pci_dma_sync_single_for_cpu(bp->pdev,
+	dma_sync_single_for_cpu(&bp->pdev->dev,
 		dma_unmap_addr(rx_buf, mapping),
 		bp->rx_buf_size, PCI_DMA_FROMDEVICE);
 
@@ -6457,8 +6462,8 @@ bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	} else
 		mss = 0;
 
-	mapping = pci_map_single(bp->pdev, skb->data, len, PCI_DMA_TODEVICE);
-	if (pci_dma_mapping_error(bp->pdev, mapping)) {
+	mapping = dma_map_single(&bp->pdev->dev, skb->data, len, PCI_DMA_TODEVICE);
+	if (dma_mapping_error(&bp->pdev->dev, mapping)) {
 		dev_kfree_skb(skb);
 		return NETDEV_TX_OK;
 	}
@@ -6486,9 +6491,9 @@ bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
 		txbd = &txr->tx_desc_ring[ring_prod];
 
 		len = frag->size;
-		mapping = pci_map_page(bp->pdev, frag->page, frag->page_offset,
+		mapping = dma_map_page(&bp->pdev->dev, frag->page, frag->page_offset,
 				       len, PCI_DMA_TODEVICE);
-		if (pci_dma_mapping_error(bp->pdev, mapping))
+		if (dma_mapping_error(&bp->pdev->dev, mapping))
 			goto dma_error;
 		dma_unmap_addr_set(&txr->tx_buf_ring[ring_prod], mapping,
 				   mapping);
@@ -6527,7 +6532,7 @@ dma_error:
 	ring_prod = TX_RING_IDX(prod);
 	tx_buf = &txr->tx_buf_ring[ring_prod];
 	tx_buf->skb = NULL;
-	pci_unmap_single(bp->pdev, dma_unmap_addr(tx_buf, mapping),
+	dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(tx_buf, mapping),
 			 skb_headlen(skb), PCI_DMA_TODEVICE);
 
 	/* unmap remaining mapped pages */
@@ -6535,7 +6540,7 @@ dma_error:
 		prod = NEXT_TX_BD(prod);
 		ring_prod = TX_RING_IDX(prod);
 		tx_buf = &txr->tx_buf_ring[ring_prod];
-		pci_unmap_page(bp->pdev, dma_unmap_addr(tx_buf, mapping),
+		dma_unmap_page(&bp->pdev->dev, dma_unmap_addr(tx_buf, mapping),
 			       skb_shinfo(skb)->frags[i].size,
 			       PCI_DMA_TODEVICE);
 	}
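The dma_error path above unwinds a partially mapped packet: the head mapping is released with dma_unmap_single(), every fragment mapped before the failure with dma_unmap_page(), and the skb is dropped while the function still returns NETDEV_TX_OK. A condensed sketch of that unwind (frags_mapped, frag_mapping[] and head_mapping are illustrative stand-ins for the ring walk in the hunks above):

    /* Illustrative unwind, mirroring the dma_error path; not from the patch. */
    dma_unmap_single(&pdev->dev, head_mapping, skb_headlen(skb),
    		 PCI_DMA_TODEVICE);
    for (i = 0; i < frags_mapped; i++)	/* frags mapped before the failure */
    	dma_unmap_page(&pdev->dev, frag_mapping[i],
    		       skb_shinfo(skb)->frags[i].size, PCI_DMA_TODEVICE);
    dev_kfree_skb(skb);	/* drop the packet; TX path reports NETDEV_TX_OK */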