author     Nick Nunley <nicholasx.d.nunley@intel.com>    2010-04-27 09:09:05 -0400
committer  David S. Miller <davem@davemloft.net>          2010-04-27 19:29:50 -0400
commit     0be3f55f8aa5f9d1882255128bd79d4885b0cbe4
tree       a8e5239ba790ec5f2b1b42d638edd20ba4acbd5e  /drivers
parent     b16f53bef9be0a756a0672e27d0a526686040e02
e1000e: use DMA API instead of PCI DMA functions
Signed-off-by: Nicholas Nunley <nicholasx.d.nunley@intel.com>
Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
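The conversion is mechanical: each legacy PCI DMA wrapper is replaced by its generic DMA API counterpart, which takes the underlying struct device (&pdev->dev) instead of the struct pci_dev, and the PCI_DMA_* direction flags become the corresponding DMA_* values (pci_map_single -> dma_map_single, pci_unmap_page -> dma_unmap_page, pci_dma_sync_single_for_cpu -> dma_sync_single_for_cpu, pci_dma_mapping_error -> dma_mapping_error, and so on). As a rough illustration of the pattern (a hypothetical helper, not code from this patch):

#include <linux/pci.h>
#include <linux/dma-mapping.h>

/* Hypothetical helper, for illustration only: map one TX buffer the way the
 * patched driver does, using the DMA API on &pdev->dev rather than the old
 * pci_map_single()/PCI_DMA_TODEVICE wrappers.
 */
static dma_addr_t example_map_tx_buf(struct pci_dev *pdev, void *buf, size_t len)
{
        /* was: pci_map_single(pdev, buf, len, PCI_DMA_TODEVICE) */
        dma_addr_t dma = dma_map_single(&pdev->dev, buf, len, DMA_TO_DEVICE);

        /* was: pci_dma_mapping_error(pdev, dma) */
        if (dma_mapping_error(&pdev->dev, dma))
                return 0;       /* caller treats 0 as "mapping failed" */

        return dma;
}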
Diffstat (limited to 'drivers')
-rw-r--r--  drivers/net/e1000e/ethtool.c |  30
-rw-r--r--  drivers/net/e1000e/netdev.c  |  99
2 files changed, 66 insertions(+), 63 deletions(-)
diff --git a/drivers/net/e1000e/ethtool.c b/drivers/net/e1000e/ethtool.c
index 983493f2330c..7f9915cf2a59 100644
--- a/drivers/net/e1000e/ethtool.c
+++ b/drivers/net/e1000e/ethtool.c
@@ -1069,10 +1069,10 @@ static void e1000_free_desc_rings(struct e1000_adapter *adapter)
     if (tx_ring->desc && tx_ring->buffer_info) {
         for (i = 0; i < tx_ring->count; i++) {
             if (tx_ring->buffer_info[i].dma)
-                pci_unmap_single(pdev,
+                dma_unmap_single(&pdev->dev,
                      tx_ring->buffer_info[i].dma,
                      tx_ring->buffer_info[i].length,
-                     PCI_DMA_TODEVICE);
+                     DMA_TO_DEVICE);
             if (tx_ring->buffer_info[i].skb)
                 dev_kfree_skb(tx_ring->buffer_info[i].skb);
         }
@@ -1081,9 +1081,9 @@ static void e1000_free_desc_rings(struct e1000_adapter *adapter)
     if (rx_ring->desc && rx_ring->buffer_info) {
         for (i = 0; i < rx_ring->count; i++) {
             if (rx_ring->buffer_info[i].dma)
-                pci_unmap_single(pdev,
+                dma_unmap_single(&pdev->dev,
                      rx_ring->buffer_info[i].dma,
-                     2048, PCI_DMA_FROMDEVICE);
+                     2048, DMA_FROM_DEVICE);
             if (rx_ring->buffer_info[i].skb)
                 dev_kfree_skb(rx_ring->buffer_info[i].skb);
         }
@@ -1163,9 +1163,10 @@ static int e1000_setup_desc_rings(struct e1000_adapter *adapter)
         tx_ring->buffer_info[i].skb = skb;
         tx_ring->buffer_info[i].length = skb->len;
         tx_ring->buffer_info[i].dma =
-            pci_map_single(pdev, skb->data, skb->len,
-                PCI_DMA_TODEVICE);
-        if (pci_dma_mapping_error(pdev, tx_ring->buffer_info[i].dma)) {
+            dma_map_single(&pdev->dev, skb->data, skb->len,
+                DMA_TO_DEVICE);
+        if (dma_mapping_error(&pdev->dev,
+                tx_ring->buffer_info[i].dma)) {
             ret_val = 4;
             goto err_nomem;
         }
@@ -1226,9 +1227,10 @@ static int e1000_setup_desc_rings(struct e1000_adapter *adapter)
         skb_reserve(skb, NET_IP_ALIGN);
         rx_ring->buffer_info[i].skb = skb;
         rx_ring->buffer_info[i].dma =
-            pci_map_single(pdev, skb->data, 2048,
-                PCI_DMA_FROMDEVICE);
-        if (pci_dma_mapping_error(pdev, rx_ring->buffer_info[i].dma)) {
+            dma_map_single(&pdev->dev, skb->data, 2048,
+                DMA_FROM_DEVICE);
+        if (dma_mapping_error(&pdev->dev,
+                rx_ring->buffer_info[i].dma)) {
             ret_val = 8;
             goto err_nomem;
         }
@@ -1556,10 +1558,10 @@ static int e1000_run_loopback_test(struct e1000_adapter *adapter)
         for (i = 0; i < 64; i++) { /* send the packets */
             e1000_create_lbtest_frame(tx_ring->buffer_info[k].skb,
                 1024);
-            pci_dma_sync_single_for_device(pdev,
+            dma_sync_single_for_device(&pdev->dev,
                 tx_ring->buffer_info[k].dma,
                 tx_ring->buffer_info[k].length,
-                PCI_DMA_TODEVICE);
+                DMA_TO_DEVICE);
             k++;
             if (k == tx_ring->count)
                 k = 0;
@@ -1569,9 +1571,9 @@ static int e1000_run_loopback_test(struct e1000_adapter *adapter)
         time = jiffies; /* set the start time for the receive */
         good_cnt = 0;
         do { /* receive the sent packets */
-            pci_dma_sync_single_for_cpu(pdev,
+            dma_sync_single_for_cpu(&pdev->dev,
                 rx_ring->buffer_info[l].dma, 2048,
-                PCI_DMA_FROMDEVICE);
+                DMA_FROM_DEVICE);

             ret_val = e1000_check_lbtest_frame(
                 rx_ring->buffer_info[l].skb, 1024);
diff --git a/drivers/net/e1000e/netdev.c b/drivers/net/e1000e/netdev.c
index 2476f8c24c54..3a712157b6a1 100644
--- a/drivers/net/e1000e/netdev.c
+++ b/drivers/net/e1000e/netdev.c
@@ -181,10 +181,10 @@ static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter,

         buffer_info->skb = skb;
 map_skb:
-        buffer_info->dma = pci_map_single(pdev, skb->data,
+        buffer_info->dma = dma_map_single(&pdev->dev, skb->data,
             adapter->rx_buffer_len,
-            PCI_DMA_FROMDEVICE);
-        if (pci_dma_mapping_error(pdev, buffer_info->dma)) {
+            DMA_FROM_DEVICE);
+        if (dma_mapping_error(&pdev->dev, buffer_info->dma)) {
             dev_err(&pdev->dev, "RX DMA map failed\n");
             adapter->rx_dma_failed++;
             break;
@@ -250,11 +250,12 @@ static void e1000_alloc_rx_buffers_ps(struct e1000_adapter *adapter,
                     adapter->alloc_rx_buff_failed++;
                     goto no_buffers;
                 }
-                ps_page->dma = pci_map_page(pdev,
+                ps_page->dma = dma_map_page(&pdev->dev,
                     ps_page->page,
                     0, PAGE_SIZE,
-                    PCI_DMA_FROMDEVICE);
-                if (pci_dma_mapping_error(pdev, ps_page->dma)) {
+                    DMA_FROM_DEVICE);
+                if (dma_mapping_error(&pdev->dev,
+                        ps_page->dma)) {
                     dev_err(&adapter->pdev->dev,
                         "RX DMA page map failed\n");
                     adapter->rx_dma_failed++;
@@ -279,10 +280,10 @@ static void e1000_alloc_rx_buffers_ps(struct e1000_adapter *adapter,
         }

         buffer_info->skb = skb;
-        buffer_info->dma = pci_map_single(pdev, skb->data,
+        buffer_info->dma = dma_map_single(&pdev->dev, skb->data,
             adapter->rx_ps_bsize0,
-            PCI_DMA_FROMDEVICE);
-        if (pci_dma_mapping_error(pdev, buffer_info->dma)) {
+            DMA_FROM_DEVICE);
+        if (dma_mapping_error(&pdev->dev, buffer_info->dma)) {
             dev_err(&pdev->dev, "RX DMA map failed\n");
             adapter->rx_dma_failed++;
             /* cleanup skb */
@@ -369,10 +370,10 @@ check_page:
         }

         if (!buffer_info->dma)
-            buffer_info->dma = pci_map_page(pdev,
+            buffer_info->dma = dma_map_page(&pdev->dev,
                 buffer_info->page, 0,
                 PAGE_SIZE,
-                PCI_DMA_FROMDEVICE);
+                DMA_FROM_DEVICE);

         rx_desc = E1000_RX_DESC(*rx_ring, i);
         rx_desc->buffer_addr = cpu_to_le64(buffer_info->dma);
@@ -446,10 +447,10 @@ static bool e1000_clean_rx_irq(struct e1000_adapter *adapter,

         cleaned = 1;
         cleaned_count++;
-        pci_unmap_single(pdev,
+        dma_unmap_single(&pdev->dev,
             buffer_info->dma,
             adapter->rx_buffer_len,
-            PCI_DMA_FROMDEVICE);
+            DMA_FROM_DEVICE);
         buffer_info->dma = 0;

         length = le16_to_cpu(rx_desc->length);
@@ -550,12 +551,11 @@ static void e1000_put_txbuf(struct e1000_adapter *adapter,
 {
     if (buffer_info->dma) {
         if (buffer_info->mapped_as_page)
-            pci_unmap_page(adapter->pdev, buffer_info->dma,
-                buffer_info->length, PCI_DMA_TODEVICE);
+            dma_unmap_page(&adapter->pdev->dev, buffer_info->dma,
+                buffer_info->length, DMA_TO_DEVICE);
         else
-            pci_unmap_single(adapter->pdev, buffer_info->dma,
-                buffer_info->length,
-                PCI_DMA_TODEVICE);
+            dma_unmap_single(&adapter->pdev->dev, buffer_info->dma,
+                buffer_info->length, DMA_TO_DEVICE);
         buffer_info->dma = 0;
     }
     if (buffer_info->skb) {
@@ -756,9 +756,9 @@ static bool e1000_clean_rx_irq_ps(struct e1000_adapter *adapter,

         cleaned = 1;
         cleaned_count++;
-        pci_unmap_single(pdev, buffer_info->dma,
+        dma_unmap_single(&pdev->dev, buffer_info->dma,
             adapter->rx_ps_bsize0,
-            PCI_DMA_FROMDEVICE);
+            DMA_FROM_DEVICE);
         buffer_info->dma = 0;

         /* see !EOP comment in other rx routine */
@@ -814,13 +814,13 @@ static bool e1000_clean_rx_irq_ps(struct e1000_adapter *adapter,
              * kmap_atomic, so we can't hold the mapping
              * very long
              */
-            pci_dma_sync_single_for_cpu(pdev, ps_page->dma,
-                PAGE_SIZE, PCI_DMA_FROMDEVICE);
+            dma_sync_single_for_cpu(&pdev->dev, ps_page->dma,
+                PAGE_SIZE, DMA_FROM_DEVICE);
             vaddr = kmap_atomic(ps_page->page, KM_SKB_DATA_SOFTIRQ);
             memcpy(skb_tail_pointer(skb), vaddr, l1);
             kunmap_atomic(vaddr, KM_SKB_DATA_SOFTIRQ);
-            pci_dma_sync_single_for_device(pdev, ps_page->dma,
-                PAGE_SIZE, PCI_DMA_FROMDEVICE);
+            dma_sync_single_for_device(&pdev->dev, ps_page->dma,
+                PAGE_SIZE, DMA_FROM_DEVICE);

             /* remove the CRC */
             if (!(adapter->flags2 & FLAG2_CRC_STRIPPING))
@@ -837,8 +837,8 @@ static bool e1000_clean_rx_irq_ps(struct e1000_adapter *adapter,
                 break;

             ps_page = &buffer_info->ps_pages[j];
-            pci_unmap_page(pdev, ps_page->dma, PAGE_SIZE,
-                PCI_DMA_FROMDEVICE);
+            dma_unmap_page(&pdev->dev, ps_page->dma, PAGE_SIZE,
+                DMA_FROM_DEVICE);
             ps_page->dma = 0;
             skb_fill_page_desc(skb, j, ps_page->page, 0, length);
             ps_page->page = NULL;
@@ -956,8 +956,8 @@ static bool e1000_clean_jumbo_rx_irq(struct e1000_adapter *adapter,

         cleaned = true;
         cleaned_count++;
-        pci_unmap_page(pdev, buffer_info->dma, PAGE_SIZE,
-            PCI_DMA_FROMDEVICE);
+        dma_unmap_page(&pdev->dev, buffer_info->dma, PAGE_SIZE,
+            DMA_FROM_DEVICE);
         buffer_info->dma = 0;

         length = le16_to_cpu(rx_desc->length);
@@ -1093,17 +1093,17 @@ static void e1000_clean_rx_ring(struct e1000_adapter *adapter)
         buffer_info = &rx_ring->buffer_info[i];
         if (buffer_info->dma) {
             if (adapter->clean_rx == e1000_clean_rx_irq)
-                pci_unmap_single(pdev, buffer_info->dma,
+                dma_unmap_single(&pdev->dev, buffer_info->dma,
                     adapter->rx_buffer_len,
-                    PCI_DMA_FROMDEVICE);
+                    DMA_FROM_DEVICE);
             else if (adapter->clean_rx == e1000_clean_jumbo_rx_irq)
-                pci_unmap_page(pdev, buffer_info->dma,
+                dma_unmap_page(&pdev->dev, buffer_info->dma,
                     PAGE_SIZE,
-                    PCI_DMA_FROMDEVICE);
+                    DMA_FROM_DEVICE);
             else if (adapter->clean_rx == e1000_clean_rx_irq_ps)
-                pci_unmap_single(pdev, buffer_info->dma,
+                dma_unmap_single(&pdev->dev, buffer_info->dma,
                     adapter->rx_ps_bsize0,
-                    PCI_DMA_FROMDEVICE);
+                    DMA_FROM_DEVICE);
             buffer_info->dma = 0;
         }

@@ -1121,8 +1121,8 @@ static void e1000_clean_rx_ring(struct e1000_adapter *adapter)
             ps_page = &buffer_info->ps_pages[j];
             if (!ps_page->page)
                 break;
-            pci_unmap_page(pdev, ps_page->dma, PAGE_SIZE,
-                PCI_DMA_FROMDEVICE);
+            dma_unmap_page(&pdev->dev, ps_page->dma, PAGE_SIZE,
+                DMA_FROM_DEVICE);
             ps_page->dma = 0;
             put_page(ps_page->page);
             ps_page->page = NULL;
@@ -3917,10 +3917,11 @@ static int e1000_tx_map(struct e1000_adapter *adapter,
         buffer_info->length = size;
         buffer_info->time_stamp = jiffies;
         buffer_info->next_to_watch = i;
-        buffer_info->dma = pci_map_single(pdev, skb->data + offset,
-            size, PCI_DMA_TODEVICE);
+        buffer_info->dma = dma_map_single(&pdev->dev,
+            skb->data + offset,
+            size, DMA_TO_DEVICE);
         buffer_info->mapped_as_page = false;
-        if (pci_dma_mapping_error(pdev, buffer_info->dma))
+        if (dma_mapping_error(&pdev->dev, buffer_info->dma))
             goto dma_error;

         len -= size;
@@ -3952,11 +3953,11 @@ static int e1000_tx_map(struct e1000_adapter *adapter,
             buffer_info->length = size;
             buffer_info->time_stamp = jiffies;
             buffer_info->next_to_watch = i;
-            buffer_info->dma = pci_map_page(pdev, frag->page,
+            buffer_info->dma = dma_map_page(&pdev->dev, frag->page,
                 offset, size,
-                PCI_DMA_TODEVICE);
+                DMA_TO_DEVICE);
             buffer_info->mapped_as_page = true;
-            if (pci_dma_mapping_error(pdev, buffer_info->dma))
+            if (dma_mapping_error(&pdev->dev, buffer_info->dma))
                 goto dma_error;

             len -= size;
@@ -5050,16 +5051,16 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
         return err;

     pci_using_dac = 0;
-    err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
+    err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
     if (!err) {
-        err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
+        err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
         if (!err)
             pci_using_dac = 1;
     } else {
-        err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+        err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
         if (err) {
-            err = pci_set_consistent_dma_mask(pdev,
+            err = dma_set_coherent_mask(&pdev->dev,
                 DMA_BIT_MASK(32));
             if (err) {
                 dev_err(&pdev->dev, "No usable DMA "
                     "configuration, aborting\n");