path: root/drivers/net/ixgbevf
author    Nick Nunley <nicholasx.d.nunley@intel.com>  2010-04-27 09:10:50 -0400
committer David S. Miller <davem@davemloft.net>       2010-04-27 19:29:52 -0400
commit    2a1f8794161d9d5d46881160279df62767197526 (patch)
tree      0e863ce339722ee11a214a48f76c31286ce642d7 /drivers/net/ixgbevf
parent    1b507730b7a9dfc00142283d5f4fc24e6553f3f4 (diff)
ixgbevf: use DMA API instead of PCI DMA functions
Signed-off-by: Nicholas Nunley <nicholasx.d.nunley@intel.com>
Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'drivers/net/ixgbevf')
-rw-r--r--  drivers/net/ixgbevf/ixgbevf_main.c  68
1 file changed, 35 insertions(+), 33 deletions(-)
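
The change is the standard pci_* to dma_* substitution: the legacy helpers take a struct pci_dev * and a PCI_DMA_* direction, while the generic DMA API takes the underlying struct device * (&pdev->dev) and a DMA_* direction, and dma_alloc_coherent() additionally requires an explicit GFP flag. A minimal sketch of the pattern follows; the function and variable names are hypothetical and are not taken from the driver, they only illustrate the before/after calls seen in the hunks below.

```c
/*
 * Illustrative sketch only -- hypothetical helpers, not ixgbevf code.
 * Shows legacy PCI DMA calls next to the generic DMA API replacements
 * applied throughout this patch.
 */
#include <linux/pci.h>
#include <linux/dma-mapping.h>

static int example_map_buffer(struct pci_dev *pdev, void *buf, size_t len,
			      dma_addr_t *dma)
{
	/* Old: *dma = pci_map_single(pdev, buf, len, PCI_DMA_TODEVICE); */
	*dma = dma_map_single(&pdev->dev, buf, len, DMA_TO_DEVICE);

	/* Old: if (pci_dma_mapping_error(pdev, *dma)) */
	if (dma_mapping_error(&pdev->dev, *dma))
		return -ENOMEM;
	return 0;
}

static void *example_alloc_ring(struct pci_dev *pdev, size_t size,
				dma_addr_t *dma)
{
	/* Old: return pci_alloc_consistent(pdev, size, dma);
	 * The generic API makes the allocation flag explicit. */
	return dma_alloc_coherent(&pdev->dev, size, dma, GFP_KERNEL);
}
```
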
diff --git a/drivers/net/ixgbevf/ixgbevf_main.c b/drivers/net/ixgbevf/ixgbevf_main.c
index f484161418b6..08707402eb18 100644
--- a/drivers/net/ixgbevf/ixgbevf_main.c
+++ b/drivers/net/ixgbevf/ixgbevf_main.c
@@ -139,15 +139,15 @@ static void ixgbevf_unmap_and_free_tx_resource(struct ixgbevf_adapter *adapter,
 {
 	if (tx_buffer_info->dma) {
 		if (tx_buffer_info->mapped_as_page)
-			pci_unmap_page(adapter->pdev,
+			dma_unmap_page(&adapter->pdev->dev,
 				       tx_buffer_info->dma,
 				       tx_buffer_info->length,
-				       PCI_DMA_TODEVICE);
+				       DMA_TO_DEVICE);
 		else
-			pci_unmap_single(adapter->pdev,
+			dma_unmap_single(&adapter->pdev->dev,
 					 tx_buffer_info->dma,
 					 tx_buffer_info->length,
-					 PCI_DMA_TODEVICE);
+					 DMA_TO_DEVICE);
 		tx_buffer_info->dma = 0;
 	}
 	if (tx_buffer_info->skb) {
@@ -416,10 +416,10 @@ static void ixgbevf_alloc_rx_buffers(struct ixgbevf_adapter *adapter,
 			bi->page_offset ^= (PAGE_SIZE / 2);
 		}
 
-		bi->page_dma = pci_map_page(pdev, bi->page,
+		bi->page_dma = dma_map_page(&pdev->dev, bi->page,
 					    bi->page_offset,
 					    (PAGE_SIZE / 2),
-					    PCI_DMA_FROMDEVICE);
+					    DMA_FROM_DEVICE);
 	}
 
 	skb = bi->skb;
@@ -442,9 +442,9 @@ static void ixgbevf_alloc_rx_buffers(struct ixgbevf_adapter *adapter,
 			bi->skb = skb;
 		}
 		if (!bi->dma) {
-			bi->dma = pci_map_single(pdev, skb->data,
+			bi->dma = dma_map_single(&pdev->dev, skb->data,
 						 rx_ring->rx_buf_len,
-						 PCI_DMA_FROMDEVICE);
+						 DMA_FROM_DEVICE);
 		}
 		/* Refresh the desc even if buffer_addrs didn't change because
 		 * each write-back erases this info. */
@@ -536,16 +536,16 @@ static bool ixgbevf_clean_rx_irq(struct ixgbevf_q_vector *q_vector,
 		rx_buffer_info->skb = NULL;
 
 		if (rx_buffer_info->dma) {
-			pci_unmap_single(pdev, rx_buffer_info->dma,
+			dma_unmap_single(&pdev->dev, rx_buffer_info->dma,
 					 rx_ring->rx_buf_len,
-					 PCI_DMA_FROMDEVICE);
+					 DMA_FROM_DEVICE);
 			rx_buffer_info->dma = 0;
 			skb_put(skb, len);
 		}
 
 		if (upper_len) {
-			pci_unmap_page(pdev, rx_buffer_info->page_dma,
-				       PAGE_SIZE / 2, PCI_DMA_FROMDEVICE);
+			dma_unmap_page(&pdev->dev, rx_buffer_info->page_dma,
+				       PAGE_SIZE / 2, DMA_FROM_DEVICE);
 			rx_buffer_info->page_dma = 0;
 			skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
 					   rx_buffer_info->page,
@@ -1721,9 +1721,9 @@ static void ixgbevf_clean_rx_ring(struct ixgbevf_adapter *adapter,
 
 		rx_buffer_info = &rx_ring->rx_buffer_info[i];
 		if (rx_buffer_info->dma) {
-			pci_unmap_single(pdev, rx_buffer_info->dma,
+			dma_unmap_single(&pdev->dev, rx_buffer_info->dma,
 					 rx_ring->rx_buf_len,
-					 PCI_DMA_FROMDEVICE);
+					 DMA_FROM_DEVICE);
 			rx_buffer_info->dma = 0;
 		}
 		if (rx_buffer_info->skb) {
@@ -1737,8 +1737,8 @@ static void ixgbevf_clean_rx_ring(struct ixgbevf_adapter *adapter,
 		}
 		if (!rx_buffer_info->page)
 			continue;
-		pci_unmap_page(pdev, rx_buffer_info->page_dma, PAGE_SIZE / 2,
-			       PCI_DMA_FROMDEVICE);
+		dma_unmap_page(&pdev->dev, rx_buffer_info->page_dma,
+			       PAGE_SIZE / 2, DMA_FROM_DEVICE);
 		rx_buffer_info->page_dma = 0;
 		put_page(rx_buffer_info->page);
 		rx_buffer_info->page = NULL;
@@ -2445,7 +2445,8 @@ void ixgbevf_free_tx_resources(struct ixgbevf_adapter *adapter,
 	vfree(tx_ring->tx_buffer_info);
 	tx_ring->tx_buffer_info = NULL;
 
-	pci_free_consistent(pdev, tx_ring->size, tx_ring->desc, tx_ring->dma);
+	dma_free_coherent(&pdev->dev, tx_ring->size, tx_ring->desc,
+			  tx_ring->dma);
 
 	tx_ring->desc = NULL;
 }
@@ -2490,8 +2491,8 @@ int ixgbevf_setup_tx_resources(struct ixgbevf_adapter *adapter,
 	tx_ring->size = tx_ring->count * sizeof(union ixgbe_adv_tx_desc);
 	tx_ring->size = ALIGN(tx_ring->size, 4096);
 
-	tx_ring->desc = pci_alloc_consistent(pdev, tx_ring->size,
-					     &tx_ring->dma);
+	tx_ring->desc = dma_alloc_coherent(&pdev->dev, tx_ring->size,
+					   &tx_ring->dma, GFP_KERNEL);
 	if (!tx_ring->desc)
 		goto err;
 
@@ -2561,8 +2562,8 @@ int ixgbevf_setup_rx_resources(struct ixgbevf_adapter *adapter,
 	rx_ring->size = rx_ring->count * sizeof(union ixgbe_adv_rx_desc);
 	rx_ring->size = ALIGN(rx_ring->size, 4096);
 
-	rx_ring->desc = pci_alloc_consistent(pdev, rx_ring->size,
-					     &rx_ring->dma);
+	rx_ring->desc = dma_alloc_coherent(&pdev->dev, rx_ring->size,
+					   &rx_ring->dma, GFP_KERNEL);
 
 	if (!rx_ring->desc) {
 		hw_dbg(&adapter->hw,
@@ -2623,7 +2624,8 @@ void ixgbevf_free_rx_resources(struct ixgbevf_adapter *adapter,
 	vfree(rx_ring->rx_buffer_info);
 	rx_ring->rx_buffer_info = NULL;
 
-	pci_free_consistent(pdev, rx_ring->size, rx_ring->desc, rx_ring->dma);
+	dma_free_coherent(&pdev->dev, rx_ring->size, rx_ring->desc,
+			  rx_ring->dma);
 
 	rx_ring->desc = NULL;
 }
@@ -2935,10 +2937,10 @@ static int ixgbevf_tx_map(struct ixgbevf_adapter *adapter,
 
 		tx_buffer_info->length = size;
 		tx_buffer_info->mapped_as_page = false;
-		tx_buffer_info->dma = pci_map_single(adapter->pdev,
+		tx_buffer_info->dma = dma_map_single(&adapter->pdev->dev,
 						     skb->data + offset,
-						     size, PCI_DMA_TODEVICE);
-		if (pci_dma_mapping_error(pdev, tx_buffer_info->dma))
+						     size, DMA_TO_DEVICE);
+		if (dma_mapping_error(&pdev->dev, tx_buffer_info->dma))
 			goto dma_error;
 		tx_buffer_info->time_stamp = jiffies;
 		tx_buffer_info->next_to_watch = i;
@@ -2964,13 +2966,13 @@ static int ixgbevf_tx_map(struct ixgbevf_adapter *adapter,
 			size = min(len, (unsigned int)IXGBE_MAX_DATA_PER_TXD);
 
 			tx_buffer_info->length = size;
-			tx_buffer_info->dma = pci_map_page(adapter->pdev,
+			tx_buffer_info->dma = dma_map_page(&adapter->pdev->dev,
 							   frag->page,
 							   offset,
 							   size,
-							   PCI_DMA_TODEVICE);
+							   DMA_TO_DEVICE);
 			tx_buffer_info->mapped_as_page = true;
-			if (pci_dma_mapping_error(pdev, tx_buffer_info->dma))
+			if (dma_mapping_error(&pdev->dev, tx_buffer_info->dma))
 				goto dma_error;
 			tx_buffer_info->time_stamp = jiffies;
 			tx_buffer_info->next_to_watch = i;
@@ -3311,14 +3313,14 @@ static int __devinit ixgbevf_probe(struct pci_dev *pdev,
 	if (err)
 		return err;
 
-	if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) &&
-	    !pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64))) {
+	if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) &&
+	    !dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64))) {
 		pci_using_dac = 1;
 	} else {
-		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+		err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
 		if (err) {
-			err = pci_set_consistent_dma_mask(pdev,
-							  DMA_BIT_MASK(32));
+			err = dma_set_coherent_mask(&pdev->dev,
+						    DMA_BIT_MASK(32));
 			if (err) {
 				dev_err(&pdev->dev, "No usable DMA "
 					"configuration, aborting\n");