about summary refs log tree commit diff stats
path: root/drivers
diff options
context:
space:
mode:
authorFUJITA Tomonori <fujita.tomonori@lab.ntt.co.jp>2010-04-01 12:56:57 -0400
committerDavid S. Miller <davem@davemloft.net>2010-04-08 00:05:35 -0400
commit1a4ccc2d460f252853dfa2fb38b4ea881916713d (patch)
treea08c297a00cac40459a79ce23bab077bf48f5595 /drivers
parent5e01d2f91df62be4d6f282149bc2a8858992ceca (diff)
bnx2: use the dma state API instead of the pci equivalents
The DMA API is preferred.

Signed-off-by: FUJITA Tomonori <fujita.tomonori@lab.ntt.co.jp>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'drivers')
-rw-r--r--drivers/net/bnx2.c40
-rw-r--r--drivers/net/bnx2.h6
2 files changed, 23 insertions, 23 deletions
diff --git a/drivers/net/bnx2.c b/drivers/net/bnx2.c
index 802b538502eb..53326fed6c81 100644
--- a/drivers/net/bnx2.c
+++ b/drivers/net/bnx2.c
@@ -2670,7 +2670,7 @@ bnx2_alloc_rx_page(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index)
2670 } 2670 }
2671 2671
2672 rx_pg->page = page; 2672 rx_pg->page = page;
2673 pci_unmap_addr_set(rx_pg, mapping, mapping); 2673 dma_unmap_addr_set(rx_pg, mapping, mapping);
2674 rxbd->rx_bd_haddr_hi = (u64) mapping >> 32; 2674 rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
2675 rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff; 2675 rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;
2676 return 0; 2676 return 0;
@@ -2685,7 +2685,7 @@ bnx2_free_rx_page(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index)
2685 if (!page) 2685 if (!page)
2686 return; 2686 return;
2687 2687
2688 pci_unmap_page(bp->pdev, pci_unmap_addr(rx_pg, mapping), PAGE_SIZE, 2688 pci_unmap_page(bp->pdev, dma_unmap_addr(rx_pg, mapping), PAGE_SIZE,
2689 PCI_DMA_FROMDEVICE); 2689 PCI_DMA_FROMDEVICE);
2690 2690
2691 __free_page(page); 2691 __free_page(page);
@@ -2717,7 +2717,7 @@ bnx2_alloc_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index)
2717 } 2717 }
2718 2718
2719 rx_buf->skb = skb; 2719 rx_buf->skb = skb;
2720 pci_unmap_addr_set(rx_buf, mapping, mapping); 2720 dma_unmap_addr_set(rx_buf, mapping, mapping);
2721 2721
2722 rxbd->rx_bd_haddr_hi = (u64) mapping >> 32; 2722 rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
2723 rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff; 2723 rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;
@@ -2816,7 +2816,7 @@ bnx2_tx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
2816 } 2816 }
2817 } 2817 }
2818 2818
2819 pci_unmap_single(bp->pdev, pci_unmap_addr(tx_buf, mapping), 2819 pci_unmap_single(bp->pdev, dma_unmap_addr(tx_buf, mapping),
2820 skb_headlen(skb), PCI_DMA_TODEVICE); 2820 skb_headlen(skb), PCI_DMA_TODEVICE);
2821 2821
2822 tx_buf->skb = NULL; 2822 tx_buf->skb = NULL;
@@ -2826,7 +2826,7 @@ bnx2_tx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
2826 sw_cons = NEXT_TX_BD(sw_cons); 2826 sw_cons = NEXT_TX_BD(sw_cons);
2827 2827
2828 pci_unmap_page(bp->pdev, 2828 pci_unmap_page(bp->pdev,
2829 pci_unmap_addr( 2829 dma_unmap_addr(
2830 &txr->tx_buf_ring[TX_RING_IDX(sw_cons)], 2830 &txr->tx_buf_ring[TX_RING_IDX(sw_cons)],
2831 mapping), 2831 mapping),
2832 skb_shinfo(skb)->frags[i].size, 2832 skb_shinfo(skb)->frags[i].size,
@@ -2908,8 +2908,8 @@ bnx2_reuse_rx_skb_pages(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr,
2908 if (prod != cons) { 2908 if (prod != cons) {
2909 prod_rx_pg->page = cons_rx_pg->page; 2909 prod_rx_pg->page = cons_rx_pg->page;
2910 cons_rx_pg->page = NULL; 2910 cons_rx_pg->page = NULL;
2911 pci_unmap_addr_set(prod_rx_pg, mapping, 2911 dma_unmap_addr_set(prod_rx_pg, mapping,
2912 pci_unmap_addr(cons_rx_pg, mapping)); 2912 dma_unmap_addr(cons_rx_pg, mapping));
2913 2913
2914 prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi; 2914 prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi;
2915 prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo; 2915 prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;
@@ -2933,7 +2933,7 @@ bnx2_reuse_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr,
2933 prod_rx_buf = &rxr->rx_buf_ring[prod]; 2933 prod_rx_buf = &rxr->rx_buf_ring[prod];
2934 2934
2935 pci_dma_sync_single_for_device(bp->pdev, 2935 pci_dma_sync_single_for_device(bp->pdev,
2936 pci_unmap_addr(cons_rx_buf, mapping), 2936 dma_unmap_addr(cons_rx_buf, mapping),
2937 BNX2_RX_OFFSET + BNX2_RX_COPY_THRESH, PCI_DMA_FROMDEVICE); 2937 BNX2_RX_OFFSET + BNX2_RX_COPY_THRESH, PCI_DMA_FROMDEVICE);
2938 2938
2939 rxr->rx_prod_bseq += bp->rx_buf_use_size; 2939 rxr->rx_prod_bseq += bp->rx_buf_use_size;
@@ -2943,8 +2943,8 @@ bnx2_reuse_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr,
2943 if (cons == prod) 2943 if (cons == prod)
2944 return; 2944 return;
2945 2945
2946 pci_unmap_addr_set(prod_rx_buf, mapping, 2946 dma_unmap_addr_set(prod_rx_buf, mapping,
2947 pci_unmap_addr(cons_rx_buf, mapping)); 2947 dma_unmap_addr(cons_rx_buf, mapping));
2948 2948
2949 cons_bd = &rxr->rx_desc_ring[RX_RING(cons)][RX_IDX(cons)]; 2949 cons_bd = &rxr->rx_desc_ring[RX_RING(cons)][RX_IDX(cons)];
2950 prod_bd = &rxr->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)]; 2950 prod_bd = &rxr->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
@@ -3017,7 +3017,7 @@ bnx2_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, struct sk_buff *skb,
3017 /* Don't unmap yet. If we're unable to allocate a new 3017 /* Don't unmap yet. If we're unable to allocate a new
3018 * page, we need to recycle the page and the DMA addr. 3018 * page, we need to recycle the page and the DMA addr.
3019 */ 3019 */
3020 mapping_old = pci_unmap_addr(rx_pg, mapping); 3020 mapping_old = dma_unmap_addr(rx_pg, mapping);
3021 if (i == pages - 1) 3021 if (i == pages - 1)
3022 frag_len -= 4; 3022 frag_len -= 4;
3023 3023
@@ -3098,7 +3098,7 @@ bnx2_rx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
3098 3098
3099 rx_buf->skb = NULL; 3099 rx_buf->skb = NULL;
3100 3100
3101 dma_addr = pci_unmap_addr(rx_buf, mapping); 3101 dma_addr = dma_unmap_addr(rx_buf, mapping);
3102 3102
3103 pci_dma_sync_single_for_cpu(bp->pdev, dma_addr, 3103 pci_dma_sync_single_for_cpu(bp->pdev, dma_addr,
3104 BNX2_RX_OFFSET + BNX2_RX_COPY_THRESH, 3104 BNX2_RX_OFFSET + BNX2_RX_COPY_THRESH,
@@ -5311,7 +5311,7 @@ bnx2_free_tx_skbs(struct bnx2 *bp)
5311 } 5311 }
5312 5312
5313 pci_unmap_single(bp->pdev, 5313 pci_unmap_single(bp->pdev,
5314 pci_unmap_addr(tx_buf, mapping), 5314 dma_unmap_addr(tx_buf, mapping),
5315 skb_headlen(skb), 5315 skb_headlen(skb),
5316 PCI_DMA_TODEVICE); 5316 PCI_DMA_TODEVICE);
5317 5317
@@ -5322,7 +5322,7 @@ bnx2_free_tx_skbs(struct bnx2 *bp)
5322 for (k = 0; k < last; k++, j++) { 5322 for (k = 0; k < last; k++, j++) {
5323 tx_buf = &txr->tx_buf_ring[TX_RING_IDX(j)]; 5323 tx_buf = &txr->tx_buf_ring[TX_RING_IDX(j)];
5324 pci_unmap_page(bp->pdev, 5324 pci_unmap_page(bp->pdev,
5325 pci_unmap_addr(tx_buf, mapping), 5325 dma_unmap_addr(tx_buf, mapping),
5326 skb_shinfo(skb)->frags[k].size, 5326 skb_shinfo(skb)->frags[k].size,
5327 PCI_DMA_TODEVICE); 5327 PCI_DMA_TODEVICE);
5328 } 5328 }
@@ -5352,7 +5352,7 @@ bnx2_free_rx_skbs(struct bnx2 *bp)
5352 continue; 5352 continue;
5353 5353
5354 pci_unmap_single(bp->pdev, 5354 pci_unmap_single(bp->pdev,
5355 pci_unmap_addr(rx_buf, mapping), 5355 dma_unmap_addr(rx_buf, mapping),
5356 bp->rx_buf_use_size, 5356 bp->rx_buf_use_size,
5357 PCI_DMA_FROMDEVICE); 5357 PCI_DMA_FROMDEVICE);
5358 5358
@@ -5762,7 +5762,7 @@ bnx2_run_loopback(struct bnx2 *bp, int loopback_mode)
5762 skb_reserve(rx_skb, BNX2_RX_OFFSET); 5762 skb_reserve(rx_skb, BNX2_RX_OFFSET);
5763 5763
5764 pci_dma_sync_single_for_cpu(bp->pdev, 5764 pci_dma_sync_single_for_cpu(bp->pdev,
5765 pci_unmap_addr(rx_buf, mapping), 5765 dma_unmap_addr(rx_buf, mapping),
5766 bp->rx_buf_size, PCI_DMA_FROMDEVICE); 5766 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
5767 5767
5768 if (rx_hdr->l2_fhdr_status & 5768 if (rx_hdr->l2_fhdr_status &
@@ -6422,7 +6422,7 @@ bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
6422 6422
6423 tx_buf = &txr->tx_buf_ring[ring_prod]; 6423 tx_buf = &txr->tx_buf_ring[ring_prod];
6424 tx_buf->skb = skb; 6424 tx_buf->skb = skb;
6425 pci_unmap_addr_set(tx_buf, mapping, mapping); 6425 dma_unmap_addr_set(tx_buf, mapping, mapping);
6426 6426
6427 txbd = &txr->tx_desc_ring[ring_prod]; 6427 txbd = &txr->tx_desc_ring[ring_prod];
6428 6428
@@ -6447,7 +6447,7 @@ bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
6447 len, PCI_DMA_TODEVICE); 6447 len, PCI_DMA_TODEVICE);
6448 if (pci_dma_mapping_error(bp->pdev, mapping)) 6448 if (pci_dma_mapping_error(bp->pdev, mapping))
6449 goto dma_error; 6449 goto dma_error;
6450 pci_unmap_addr_set(&txr->tx_buf_ring[ring_prod], mapping, 6450 dma_unmap_addr_set(&txr->tx_buf_ring[ring_prod], mapping,
6451 mapping); 6451 mapping);
6452 6452
6453 txbd->tx_bd_haddr_hi = (u64) mapping >> 32; 6453 txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
@@ -6484,7 +6484,7 @@ dma_error:
6484 ring_prod = TX_RING_IDX(prod); 6484 ring_prod = TX_RING_IDX(prod);
6485 tx_buf = &txr->tx_buf_ring[ring_prod]; 6485 tx_buf = &txr->tx_buf_ring[ring_prod];
6486 tx_buf->skb = NULL; 6486 tx_buf->skb = NULL;
6487 pci_unmap_single(bp->pdev, pci_unmap_addr(tx_buf, mapping), 6487 pci_unmap_single(bp->pdev, dma_unmap_addr(tx_buf, mapping),
6488 skb_headlen(skb), PCI_DMA_TODEVICE); 6488 skb_headlen(skb), PCI_DMA_TODEVICE);
6489 6489
6490 /* unmap remaining mapped pages */ 6490 /* unmap remaining mapped pages */
@@ -6492,7 +6492,7 @@ dma_error:
6492 prod = NEXT_TX_BD(prod); 6492 prod = NEXT_TX_BD(prod);
6493 ring_prod = TX_RING_IDX(prod); 6493 ring_prod = TX_RING_IDX(prod);
6494 tx_buf = &txr->tx_buf_ring[ring_prod]; 6494 tx_buf = &txr->tx_buf_ring[ring_prod];
6495 pci_unmap_page(bp->pdev, pci_unmap_addr(tx_buf, mapping), 6495 pci_unmap_page(bp->pdev, dma_unmap_addr(tx_buf, mapping),
6496 skb_shinfo(skb)->frags[i].size, 6496 skb_shinfo(skb)->frags[i].size,
6497 PCI_DMA_TODEVICE); 6497 PCI_DMA_TODEVICE);
6498 } 6498 }
diff --git a/drivers/net/bnx2.h b/drivers/net/bnx2.h
index cd4b0e4637ab..ab34a5d86f86 100644
--- a/drivers/net/bnx2.h
+++ b/drivers/net/bnx2.h
@@ -6551,17 +6551,17 @@ struct l2_fhdr {
6551 6551
6552struct sw_bd { 6552struct sw_bd {
6553 struct sk_buff *skb; 6553 struct sk_buff *skb;
6554 DECLARE_PCI_UNMAP_ADDR(mapping) 6554 DEFINE_DMA_UNMAP_ADDR(mapping);
6555}; 6555};
6556 6556
6557struct sw_pg { 6557struct sw_pg {
6558 struct page *page; 6558 struct page *page;
6559 DECLARE_PCI_UNMAP_ADDR(mapping) 6559 DEFINE_DMA_UNMAP_ADDR(mapping);
6560}; 6560};
6561 6561
6562struct sw_tx_bd { 6562struct sw_tx_bd {
6563 struct sk_buff *skb; 6563 struct sk_buff *skb;
6564 DECLARE_PCI_UNMAP_ADDR(mapping) 6564 DEFINE_DMA_UNMAP_ADDR(mapping);
6565 unsigned short is_gso; 6565 unsigned short is_gso;
6566 unsigned short nr_frags; 6566 unsigned short nr_frags;
6567}; 6567};