author		FUJITA Tomonori <fujita.tomonori@lab.ntt.co.jp>	2010-04-12 10:32:09 -0400
committer	David S. Miller <davem@davemloft.net>	2010-04-13 05:54:16 -0400
commit		4e5e4f0d65975ce092202cce48b42571bf84591e (patch)
tree		2546d327dd63fa837a458fbe0601ebade386d265 /drivers
parent		8595805aafc8b077e01804c9a3668e9aa3510e89 (diff)
tg3: use the DMA state API instead of the pci equivalents

This replaces the PCI DMA state API (include/linux/pci-dma.h) with the DMA
equivalents, since the PCI DMA state API will be obsolete. No functional
change.

For further information about the background:
http://marc.info/?l=linux-netdev&m=127037540020276&w=2

Signed-off-by: FUJITA Tomonori <fujita.tomonori@lab.ntt.co.jp>
Cc: Matt Carlson <mcarlson@broadcom.com>
Cc: Michael Chan <mchan@broadcom.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
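[Editorial note, not part of the patch: the conversion is mechanical — each helper
from the PCI DMA state API has a direct counterpart in the generic DMA API, while
the pci_unmap_single()/pci_unmap_page() calls themselves are left alone. A minimal
sketch of the pattern, using a made-up ring_info_example struct and example_unmap()
helper that merely mirror the driver's struct ring_info usage:

#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/skbuff.h>

/* Sketch only: mirrors the macro substitution applied throughout this patch. */
struct ring_info_example {
	struct sk_buff *skb;
	DEFINE_DMA_UNMAP_ADDR(mapping);	/* was: DECLARE_PCI_UNMAP_ADDR(mapping) */
};

static void example_unmap(struct pci_dev *pdev, struct ring_info_example *ri,
			  size_t len)
{
	/* dma_unmap_addr() replaces pci_unmap_addr(); the stored DMA address
	 * and the unmap call are unchanged.
	 */
	pci_unmap_single(pdev, dma_unmap_addr(ri, mapping), len,
			 PCI_DMA_TODEVICE);
}
]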
Diffstat (limited to 'drivers')
-rw-r--r--	drivers/net/tg3.c	| 42
-rw-r--r--	drivers/net/tg3.h	|  2
2 files changed, 22 insertions(+), 22 deletions(-)
diff --git a/drivers/net/tg3.c b/drivers/net/tg3.c
index 61089fd90907..0fea6854c4aa 100644
--- a/drivers/net/tg3.c
+++ b/drivers/net/tg3.c
@@ -4400,7 +4400,7 @@ static void tg3_tx(struct tg3_napi *tnapi)
 		}
 
 		pci_unmap_single(tp->pdev,
-				 pci_unmap_addr(ri, mapping),
+				 dma_unmap_addr(ri, mapping),
 				 skb_headlen(skb),
 				 PCI_DMA_TODEVICE);
 
@@ -4414,7 +4414,7 @@ static void tg3_tx(struct tg3_napi *tnapi)
 				tx_bug = 1;
 
 			pci_unmap_page(tp->pdev,
-				       pci_unmap_addr(ri, mapping),
+				       dma_unmap_addr(ri, mapping),
 				       skb_shinfo(skb)->frags[i].size,
 				       PCI_DMA_TODEVICE);
 			sw_idx = NEXT_TX(sw_idx);
@@ -4452,7 +4452,7 @@ static void tg3_rx_skb_free(struct tg3 *tp, struct ring_info *ri, u32 map_sz)
 	if (!ri->skb)
 		return;
 
-	pci_unmap_single(tp->pdev, pci_unmap_addr(ri, mapping),
+	pci_unmap_single(tp->pdev, dma_unmap_addr(ri, mapping),
 			 map_sz, PCI_DMA_FROMDEVICE);
 	dev_kfree_skb_any(ri->skb);
 	ri->skb = NULL;
@@ -4518,7 +4518,7 @@ static int tg3_alloc_rx_skb(struct tg3 *tp, struct tg3_rx_prodring_set *tpr,
 	}
 
 	map->skb = skb;
-	pci_unmap_addr_set(map, mapping, mapping);
+	dma_unmap_addr_set(map, mapping, mapping);
 
 	desc->addr_hi = ((u64)mapping >> 32);
 	desc->addr_lo = ((u64)mapping & 0xffffffff);
@@ -4563,8 +4563,8 @@ static void tg3_recycle_rx(struct tg3_napi *tnapi,
 	}
 
 	dest_map->skb = src_map->skb;
-	pci_unmap_addr_set(dest_map, mapping,
-			   pci_unmap_addr(src_map, mapping));
+	dma_unmap_addr_set(dest_map, mapping,
+			   dma_unmap_addr(src_map, mapping));
 	dest_desc->addr_hi = src_desc->addr_hi;
 	dest_desc->addr_lo = src_desc->addr_lo;
 
@@ -4634,13 +4634,13 @@ static int tg3_rx(struct tg3_napi *tnapi, int budget)
 		opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
 		if (opaque_key == RXD_OPAQUE_RING_STD) {
 			ri = &tp->prodring[0].rx_std_buffers[desc_idx];
-			dma_addr = pci_unmap_addr(ri, mapping);
+			dma_addr = dma_unmap_addr(ri, mapping);
 			skb = ri->skb;
 			post_ptr = &std_prod_idx;
 			rx_std_posted++;
 		} else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
 			ri = &tp->prodring[0].rx_jmb_buffers[desc_idx];
-			dma_addr = pci_unmap_addr(ri, mapping);
+			dma_addr = dma_unmap_addr(ri, mapping);
 			skb = ri->skb;
 			post_ptr = &jmb_prod_idx;
 		} else
@@ -5474,12 +5474,12 @@ static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi,
 				len = skb_shinfo(skb)->frags[i-1].size;
 
 			pci_unmap_single(tp->pdev,
-					 pci_unmap_addr(&tnapi->tx_buffers[entry],
+					 dma_unmap_addr(&tnapi->tx_buffers[entry],
 							mapping),
 					 len, PCI_DMA_TODEVICE);
 			if (i == 0) {
 				tnapi->tx_buffers[entry].skb = new_skb;
-				pci_unmap_addr_set(&tnapi->tx_buffers[entry], mapping,
+				dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping,
 						   new_addr);
 			} else {
 				tnapi->tx_buffers[entry].skb = NULL;
@@ -5609,7 +5609,7 @@ static netdev_tx_t tg3_start_xmit(struct sk_buff *skb,
 	}
 
 	tnapi->tx_buffers[entry].skb = skb;
-	pci_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, mapping);
+	dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, mapping);
 
 	if ((tp->tg3_flags3 & TG3_FLG3_USE_JUMBO_BDFLAG) &&
 	    !mss && skb->len > ETH_DATA_LEN)
@@ -5635,7 +5635,7 @@ static netdev_tx_t tg3_start_xmit(struct sk_buff *skb,
 				goto dma_error;
 
 			tnapi->tx_buffers[entry].skb = NULL;
-			pci_unmap_addr_set(&tnapi->tx_buffers[entry], mapping,
+			dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping,
 					   mapping);
 
 			tg3_set_txd(tnapi, entry, mapping, len,
@@ -5665,7 +5665,7 @@ dma_error:
 	entry = tnapi->tx_prod;
 	tnapi->tx_buffers[entry].skb = NULL;
 	pci_unmap_single(tp->pdev,
-			 pci_unmap_addr(&tnapi->tx_buffers[entry], mapping),
+			 dma_unmap_addr(&tnapi->tx_buffers[entry], mapping),
 			 skb_headlen(skb),
 			 PCI_DMA_TODEVICE);
 	for (i = 0; i <= last; i++) {
@@ -5673,7 +5673,7 @@ dma_error:
 		entry = NEXT_TX(entry);
 
 		pci_unmap_page(tp->pdev,
-			       pci_unmap_addr(&tnapi->tx_buffers[entry],
+			       dma_unmap_addr(&tnapi->tx_buffers[entry],
 					      mapping),
 			       frag->size, PCI_DMA_TODEVICE);
 	}
@@ -5835,7 +5835,7 @@ static netdev_tx_t tg3_start_xmit_dma_bug(struct sk_buff *skb,
 	}
 
 	tnapi->tx_buffers[entry].skb = skb;
-	pci_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, mapping);
+	dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, mapping);
 
 	would_hit_hwbug = 0;
 
@@ -5871,7 +5871,7 @@ static netdev_tx_t tg3_start_xmit_dma_bug(struct sk_buff *skb,
 					       len, PCI_DMA_TODEVICE);
 
 			tnapi->tx_buffers[entry].skb = NULL;
-			pci_unmap_addr_set(&tnapi->tx_buffers[entry], mapping,
+			dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping,
 					   mapping);
 			if (pci_dma_mapping_error(tp->pdev, mapping))
 				goto dma_error;
@@ -5936,7 +5936,7 @@ dma_error:
 	entry = tnapi->tx_prod;
 	tnapi->tx_buffers[entry].skb = NULL;
 	pci_unmap_single(tp->pdev,
-			 pci_unmap_addr(&tnapi->tx_buffers[entry], mapping),
+			 dma_unmap_addr(&tnapi->tx_buffers[entry], mapping),
 			 skb_headlen(skb),
 			 PCI_DMA_TODEVICE);
 	for (i = 0; i <= last; i++) {
@@ -5944,7 +5944,7 @@ dma_error:
 		entry = NEXT_TX(entry);
 
 		pci_unmap_page(tp->pdev,
-			       pci_unmap_addr(&tnapi->tx_buffers[entry],
+			       dma_unmap_addr(&tnapi->tx_buffers[entry],
 					      mapping),
 			       frag->size, PCI_DMA_TODEVICE);
 	}
@@ -6229,7 +6229,7 @@ static void tg3_free_rings(struct tg3 *tp)
 			}
 
 			pci_unmap_single(tp->pdev,
-					 pci_unmap_addr(txp, mapping),
+					 dma_unmap_addr(txp, mapping),
 					 skb_headlen(skb),
 					 PCI_DMA_TODEVICE);
 			txp->skb = NULL;
@@ -6239,7 +6239,7 @@ static void tg3_free_rings(struct tg3 *tp)
 			for (k = 0; k < skb_shinfo(skb)->nr_frags; k++) {
 				txp = &tnapi->tx_buffers[i & (TG3_TX_RING_SIZE - 1)];
 				pci_unmap_page(tp->pdev,
-					       pci_unmap_addr(txp, mapping),
+					       dma_unmap_addr(txp, mapping),
 					       skb_shinfo(skb)->frags[k].size,
 					       PCI_DMA_TODEVICE);
 				i++;
@@ -10742,7 +10742,7 @@ static int tg3_run_loopback(struct tg3 *tp, int loopback_mode)
 
 	rx_skb = tpr->rx_std_buffers[desc_idx].skb;
 
-	map = pci_unmap_addr(&tpr->rx_std_buffers[desc_idx], mapping);
+	map = dma_unmap_addr(&tpr->rx_std_buffers[desc_idx], mapping);
 	pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len, PCI_DMA_FROMDEVICE);
 
 	for (i = 14; i < tx_len; i++) {
diff --git a/drivers/net/tg3.h b/drivers/net/tg3.h
index b71083d9a118..ce9c4918c318 100644
--- a/drivers/net/tg3.h
+++ b/drivers/net/tg3.h
@@ -2512,7 +2512,7 @@ struct tg3_hw_stats {
  */
 struct ring_info {
 	struct sk_buff			*skb;
-	DECLARE_PCI_UNMAP_ADDR(mapping)
+	DEFINE_DMA_UNMAP_ADDR(mapping);
 };
 
 struct tg3_config_info {