| field | value |
|---|---|
| author | Nick Nunley <nicholasx.d.nunley@intel.com>, 2010-04-27 09:10:03 -0400 |
| committer | David S. Miller <davem@davemloft.net>, 2010-04-27 19:29:51 -0400 |
| commit | 47631f854ff1938770f185afde4857018827eba3 |
| tree | 5dc80c34441e10752de68770d4adb228cbe5ae06 /drivers/net/ixgb |
| parent | 123e9f1afe7b86f7c719d1289434c5c040758334 |
ixgb: use DMA API instead of PCI DMA functions
Signed-off-by: Nicholas Nunley <nicholasx.d.nunley@intel.com>
Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'drivers/net/ixgb')
-rw-r--r-- | drivers/net/ixgb/ixgb_main.c | 70 |
1 file changed, 39 insertions(+), 31 deletions(-)
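The change is mechanical: each deprecated PCI DMA wrapper is replaced by its generic DMA API counterpart, which operates on `&pdev->dev` rather than on the `struct pci_dev` itself. As a rough guide to the correspondence used throughout the patch, here is a minimal sketch; the mapping comments reflect the standard API equivalences, and `example_map_tx()` is a hypothetical helper for illustration only, not code from the driver:

```c
#include <linux/dma-mapping.h>
#include <linux/pci.h>

/* Old PCI wrappers                  ->  generic DMA API equivalents         */
/* pci_set_dma_mask(pdev, m)             dma_set_mask(&pdev->dev, m)         */
/* pci_set_consistent_dma_mask(...)      dma_set_coherent_mask(...)          */
/* pci_alloc_consistent(...)             dma_alloc_coherent(..., gfp)        */
/* pci_free_consistent(...)              dma_free_coherent(...)              */
/* pci_map_single + PCI_DMA_TODEVICE     dma_map_single + DMA_TO_DEVICE      */
/* pci_unmap_single + PCI_DMA_FROMDEVICE dma_unmap_single + DMA_FROM_DEVICE  */
/* pci_dma_mapping_error(pdev, a)        dma_mapping_error(&pdev->dev, a)    */

/* Hypothetical helper: map a buffer for transmit with the generic API. */
static dma_addr_t example_map_tx(struct pci_dev *pdev, void *buf, size_t len)
{
        dma_addr_t addr = dma_map_single(&pdev->dev, buf, len, DMA_TO_DEVICE);

        if (dma_mapping_error(&pdev->dev, addr))
                return 0;       /* caller treats 0 as "not mapped" */
        return addr;
}
```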
```diff
diff --git a/drivers/net/ixgb/ixgb_main.c b/drivers/net/ixgb/ixgb_main.c
index 3cf7951ac152..d58ca6b578cc 100644
--- a/drivers/net/ixgb/ixgb_main.c
+++ b/drivers/net/ixgb/ixgb_main.c
@@ -368,16 +368,22 @@ ixgb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 	if (err)
 		return err;
 
-	if (!(err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) &&
-	    !(err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)))) {
-		pci_using_dac = 1;
+	pci_using_dac = 0;
+	err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
+	if (!err) {
+		err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
+		if (!err)
+			pci_using_dac = 1;
 	} else {
-		if ((err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) ||
-		    (err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)))) {
-			pr_err("No usable DMA configuration, aborting\n");
-			goto err_dma_mask;
+		err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
+		if (err) {
+			err = dma_set_coherent_mask(&pdev->dev,
+						    DMA_BIT_MASK(32));
+			if (err) {
+				pr_err("No usable DMA configuration, aborting\n");
+				goto err_dma_mask;
+			}
 		}
-		pci_using_dac = 0;
 	}
 
 	err = pci_request_regions(pdev, ixgb_driver_name);
```
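The probe hunk keeps the existing ixgb policy: prefer a 64-bit streaming and coherent mask (so the adapter can use DAC addressing), and fall back to 32 bits only if that fails. For context, later kernels add `dma_set_mask_and_coherent()`, which sets both masks in one call; a hypothetical condensed form of the same fallback might look like the sketch below. This is an illustration of the intent only, not what this patch does, and `example_set_dma_masks()` is an invented name:

```c
#include <linux/dma-mapping.h>
#include <linux/pci.h>
#include <linux/types.h>

/* Hypothetical: 64-bit DMA if possible, otherwise 32-bit; non-zero return
 * means no usable DMA configuration. */
static int example_set_dma_masks(struct pci_dev *pdev, bool *using_dac)
{
        int err;

        *using_dac = true;
        err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
        if (err) {
                *using_dac = false;
                err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
        }
        return err;
}
```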
```diff
@@ -673,7 +679,8 @@ ixgb_setup_tx_resources(struct ixgb_adapter *adapter)
 	txdr->size = txdr->count * sizeof(struct ixgb_tx_desc);
 	txdr->size = ALIGN(txdr->size, 4096);
 
-	txdr->desc = pci_alloc_consistent(pdev, txdr->size, &txdr->dma);
+	txdr->desc = dma_alloc_coherent(&pdev->dev, txdr->size, &txdr->dma,
+					GFP_KERNEL);
 	if (!txdr->desc) {
 		vfree(txdr->buffer_info);
 		netif_err(adapter, probe, adapter->netdev,
```
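One practical difference in the ring-allocation hunks: the old `pci_alloc_consistent()` compat wrapper historically allocated with `GFP_ATOMIC`, while `dma_alloc_coherent()` takes an explicit `gfp_t`, and since ring setup runs in process context the patch passes `GFP_KERNEL`. A minimal sketch of the allocate/free pairing the TX and RX ring code relies on; the helper names here are hypothetical, not from the driver:

```c
#include <linux/dma-mapping.h>

/* Hypothetical: allocate a descriptor ring in coherent DMA memory. */
static void *example_alloc_ring(struct device *dev, size_t size,
				dma_addr_t *dma)
{
        /* Process context, so a sleeping allocation is fine. */
        return dma_alloc_coherent(dev, size, dma, GFP_KERNEL);
}

/* Hypothetical: release it again with the same size and DMA handle. */
static void example_free_ring(struct device *dev, size_t size,
			      void *desc, dma_addr_t dma)
{
        dma_free_coherent(dev, size, desc, dma);
}
```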
```diff
@@ -762,7 +769,8 @@ ixgb_setup_rx_resources(struct ixgb_adapter *adapter)
 	rxdr->size = rxdr->count * sizeof(struct ixgb_rx_desc);
 	rxdr->size = ALIGN(rxdr->size, 4096);
 
-	rxdr->desc = pci_alloc_consistent(pdev, rxdr->size, &rxdr->dma);
+	rxdr->desc = dma_alloc_coherent(&pdev->dev, rxdr->size, &rxdr->dma,
+					GFP_KERNEL);
 
 	if (!rxdr->desc) {
 		vfree(rxdr->buffer_info);
@@ -883,8 +891,8 @@ ixgb_free_tx_resources(struct ixgb_adapter *adapter)
 	vfree(adapter->tx_ring.buffer_info);
 	adapter->tx_ring.buffer_info = NULL;
 
-	pci_free_consistent(pdev, adapter->tx_ring.size,
-			    adapter->tx_ring.desc, adapter->tx_ring.dma);
+	dma_free_coherent(&pdev->dev, adapter->tx_ring.size,
+			  adapter->tx_ring.desc, adapter->tx_ring.dma);
 
 	adapter->tx_ring.desc = NULL;
 }
@@ -895,12 +903,11 @@ ixgb_unmap_and_free_tx_resource(struct ixgb_adapter *adapter,
 {
 	if (buffer_info->dma) {
 		if (buffer_info->mapped_as_page)
-			pci_unmap_page(adapter->pdev, buffer_info->dma,
-				       buffer_info->length, PCI_DMA_TODEVICE);
+			dma_unmap_page(&adapter->pdev->dev, buffer_info->dma,
+				       buffer_info->length, DMA_TO_DEVICE);
 		else
-			pci_unmap_single(adapter->pdev, buffer_info->dma,
-					 buffer_info->length,
-					 PCI_DMA_TODEVICE);
+			dma_unmap_single(&adapter->pdev->dev, buffer_info->dma,
+					 buffer_info->length, DMA_TO_DEVICE);
 		buffer_info->dma = 0;
 	}
 
@@ -966,7 +973,8 @@ ixgb_free_rx_resources(struct ixgb_adapter *adapter)
 	vfree(rx_ring->buffer_info);
 	rx_ring->buffer_info = NULL;
 
-	pci_free_consistent(pdev, rx_ring->size, rx_ring->desc, rx_ring->dma);
+	dma_free_coherent(&pdev->dev, rx_ring->size, rx_ring->desc,
+			  rx_ring->dma);
 
 	rx_ring->desc = NULL;
 }
@@ -990,10 +998,10 @@ ixgb_clean_rx_ring(struct ixgb_adapter *adapter)
 	for (i = 0; i < rx_ring->count; i++) {
 		buffer_info = &rx_ring->buffer_info[i];
 		if (buffer_info->dma) {
-			pci_unmap_single(pdev,
+			dma_unmap_single(&pdev->dev,
 					 buffer_info->dma,
 					 buffer_info->length,
-					 PCI_DMA_FROMDEVICE);
+					 DMA_FROM_DEVICE);
 			buffer_info->dma = 0;
 			buffer_info->length = 0;
 		}
@@ -1300,9 +1308,10 @@ ixgb_tx_map(struct ixgb_adapter *adapter, struct sk_buff *skb,
 		WARN_ON(buffer_info->dma != 0);
 		buffer_info->time_stamp = jiffies;
 		buffer_info->mapped_as_page = false;
-		buffer_info->dma = pci_map_single(pdev, skb->data + offset,
-						  size, PCI_DMA_TODEVICE);
-		if (pci_dma_mapping_error(pdev, buffer_info->dma))
+		buffer_info->dma = dma_map_single(&pdev->dev,
+						  skb->data + offset,
+						  size, DMA_TO_DEVICE);
+		if (dma_mapping_error(&pdev->dev, buffer_info->dma))
 			goto dma_error;
 		buffer_info->next_to_watch = 0;
 
@@ -1341,10 +1350,9 @@ ixgb_tx_map(struct ixgb_adapter *adapter, struct sk_buff *skb,
 		buffer_info->time_stamp = jiffies;
 		buffer_info->mapped_as_page = true;
 		buffer_info->dma =
-			pci_map_page(pdev, frag->page,
-				     offset, size,
-				     PCI_DMA_TODEVICE);
-		if (pci_dma_mapping_error(pdev, buffer_info->dma))
+			dma_map_page(&pdev->dev, frag->page,
+				     offset, size, DMA_TO_DEVICE);
+		if (dma_mapping_error(&pdev->dev, buffer_info->dma))
 			goto dma_error;
 		buffer_info->next_to_watch = 0;
 
```
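Both `ixgb_tx_map()` hunks follow the standard streaming-DMA pattern: map each piece of the skb, check the returned handle with `dma_mapping_error()` before handing it to hardware, and unwind already-mapped pieces on failure (the driver's `goto dma_error` path). A minimal, self-contained sketch of that pattern; the `example_frag` type and `example_map_frags()` helper are invented for illustration and are not driver code:

```c
#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/types.h>

/* Hypothetical fragment descriptor, stand-in for the driver's buffer_info. */
struct example_frag {
        void *data;
        size_t len;
        dma_addr_t dma;
};

/* Map every fragment for transmit; on any mapping failure, unmap what was
 * already mapped and report an error. */
static int example_map_frags(struct device *dev, struct example_frag *frags,
			     int count)
{
        int i;

        for (i = 0; i < count; i++) {
                frags[i].dma = dma_map_single(dev, frags[i].data,
                                              frags[i].len, DMA_TO_DEVICE);
                if (dma_mapping_error(dev, frags[i].dma))
                        goto unwind;
        }
        return 0;

unwind:
        /* Unmap with the same length and direction used when mapping. */
        while (--i >= 0)
                dma_unmap_single(dev, frags[i].dma, frags[i].len,
                                 DMA_TO_DEVICE);
        return -ENOMEM;
}
```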
```diff
@@ -1962,10 +1970,10 @@ ixgb_clean_rx_irq(struct ixgb_adapter *adapter, int *work_done, int work_to_do)
 		cleaned = true;
 		cleaned_count++;
 
-		pci_unmap_single(pdev,
+		dma_unmap_single(&pdev->dev,
 				 buffer_info->dma,
 				 buffer_info->length,
-				 PCI_DMA_FROMDEVICE);
+				 DMA_FROM_DEVICE);
 		buffer_info->dma = 0;
 
 		length = le16_to_cpu(rx_desc->length);
@@ -2088,10 +2096,10 @@ ixgb_alloc_rx_buffers(struct ixgb_adapter *adapter, int cleaned_count)
 		buffer_info->skb = skb;
 		buffer_info->length = adapter->rx_buffer_len;
 map_skb:
-		buffer_info->dma = pci_map_single(pdev,
+		buffer_info->dma = dma_map_single(&pdev->dev,
 						  skb->data,
 						  adapter->rx_buffer_len,
-						  PCI_DMA_FROMDEVICE);
+						  DMA_FROM_DEVICE);
 
 		rx_desc = IXGB_RX_DESC(*rx_ring, i);
 		rx_desc->buff_addr = cpu_to_le64(buffer_info->dma);
```