author		FUJITA Tomonori <fujita.tomonori@lab.ntt.co.jp>	2008-07-25 22:44:49 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2008-07-26 15:00:03 -0400
commit		8d8bb39b9eba32dd70e87fd5ad5c5dd4ba118e06 (patch)
tree		64090a84f4c4466f9f30ff46c993e0cede379052
parent		c485b465a031b6f9b9a51300e0ee1f86efc6db87 (diff)
dma-mapping: add the device argument to dma_mapping_error()
Add per-device dma_mapping_ops support for CONFIG_X86_64, as the POWER
architecture already does.
This enables us to cleanly fix the Calgary IOMMU issue in which some devices
are not behind the IOMMU (http://lkml.org/lkml/2008/5/8/423).
I think that per-device dma_mapping_ops support would also be helpful for
the KVM people to support PCI passthrough, but Andi thinks that it makes PCI
passthrough more difficult to support (see the above thread). So I CC'ed
this to the KVM camp. Comments are appreciated.
A pointer to dma_mapping_ops is added to struct dev_archdata. If the
pointer is non-NULL, the DMA operations in asm/dma-mapping.h use it. If it's
NULL, the system-wide dma_ops pointer is used as before.
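A minimal sketch of that fallback, in roughly the shape the x86 header takes
with this series (illustrative, not the verbatim final code):

static inline struct dma_mapping_ops *get_dma_ops(struct device *dev)
{
#ifdef CONFIG_X86_32
	return dma_ops;
#else
	/* Fall back to the system-wide ops when the device has none. */
	if (unlikely(!dev) || !dev->archdata.dma_ops)
		return dma_ops;
	return dev->archdata.dma_ops;
#endif
}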
If it's useful for the KVM people, I plan to implement a mechanism to
register a hook that is called when a new PCI (or DMA-capable) device is
created (it works with hot plugging). It enables IOMMUs to set up an
appropriate dma_mapping_ops per device, as sketched below.
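For illustration only, such a hook could ride on the existing bus notifier
API; my_iommu_owns() and my_iommu_dma_ops below are hypothetical
placeholders, not part of this patch:

static int iommu_add_device(struct notifier_block *nb,
			    unsigned long action, void *data)
{
	struct device *dev = data;

	/* Hypothetical check: does this IOMMU translate for the device? */
	if (action == BUS_NOTIFY_ADD_DEVICE && my_iommu_owns(dev))
		dev->archdata.dma_ops = &my_iommu_dma_ops;
	return NOTIFY_DONE;
}

static struct notifier_block iommu_device_nb = {
	.notifier_call = iommu_add_device,
};

/* from the IOMMU init code: */
bus_register_notifier(&pci_bus_type, &iommu_device_nb);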
The major obstacle is that dma_mapping_error doesn't take a pointer to the
device, unlike the other DMA operations, so x86 can't have per-device
dma_mapping_ops. Note that all the POWER IOMMUs use the same
dma_mapping_error function, so this is not a problem for POWER, but x86
IOMMUs use different dma_mapping_error functions.
The first patch adds the device argument to dma_mapping_error. The patch
is trivial but large, since it touches lots of drivers and dma-mapping.h in
all the architectures.
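The per-driver conversion is mechanical; a typical call site changes like
this (buf, len and the error path here are illustrative):

	addr = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
-	if (dma_mapping_error(addr))
+	if (dma_mapping_error(dev, addr))
		return -ENOMEM;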
This patch:
dma_mapping_error() doesn't take a pointer to the device, unlike the other
DMA operations, so we can't have dma_mapping_ops per device. Note that POWER
already has dma_mapping_ops per device, but all the POWER IOMMUs use the
same dma_mapping_error function. x86 IOMMUs use different dma_mapping_error
functions, so they need the device argument.
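With the device argument available, the x86 helper can dispatch through the
per-device ops; a minimal sketch, assuming the get_dma_ops() fallback shown
above:

static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	struct dma_mapping_ops *ops = get_dma_ops(dev);

	/* An IOMMU may define its own notion of a failed mapping... */
	if (ops->mapping_error)
		return ops->mapping_error(dev, dma_addr);

	/* ...otherwise compare against the global sentinel. */
	return (dma_addr == bad_dma_address);
}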
[akpm@linux-foundation.org: fix sge]
[akpm@linux-foundation.org: fix svc_rdma]
[akpm@linux-foundation.org: build fix]
[akpm@linux-foundation.org: fix bnx2x]
[akpm@linux-foundation.org: fix s2io]
[akpm@linux-foundation.org: fix pasemi_mac]
[akpm@linux-foundation.org: fix sdhci]
[akpm@linux-foundation.org: build fix]
[akpm@linux-foundation.org: fix sparc]
[akpm@linux-foundation.org: fix ibmvscsi]
Signed-off-by: FUJITA Tomonori <fujita.tomonori@lab.ntt.co.jp>
Cc: Muli Ben-Yehuda <muli@il.ibm.com>
Cc: Andi Kleen <andi@firstfloor.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Ingo Molnar <mingo@elte.hu>
Cc: Avi Kivity <avi@qumranet.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
76 files changed, 256 insertions(+), 210 deletions(-)
diff --git a/Documentation/DMA-API.txt b/Documentation/DMA-API.txt
index 80d150458c80..d8b63d164e41 100644
--- a/Documentation/DMA-API.txt
+++ b/Documentation/DMA-API.txt
@@ -298,10 +298,10 @@ recommended that you never use these unless you really know what the
 cache width is.
 
 int
-dma_mapping_error(dma_addr_t dma_addr)
+dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
 
 int
-pci_dma_mapping_error(dma_addr_t dma_addr)
+pci_dma_mapping_error(struct pci_dev *hwdev, dma_addr_t dma_addr)
 
 In some circumstances dma_map_single and dma_map_page will fail to create
 a mapping. A driver can check for these errors by testing the returned
diff --git a/arch/arm/common/dmabounce.c b/arch/arm/common/dmabounce.c
index dd2947342604..69130f365904 100644
--- a/arch/arm/common/dmabounce.c
+++ b/arch/arm/common/dmabounce.c
@@ -280,7 +280,7 @@ unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
 	/*
 	 * Trying to unmap an invalid mapping
 	 */
-	if (dma_mapping_error(dma_addr)) {
+	if (dma_mapping_error(dev, dma_addr)) {
 		dev_err(dev, "Trying to unmap invalid mapping\n");
 		return;
 	}
diff --git a/arch/ia64/hp/common/hwsw_iommu.c b/arch/ia64/hp/common/hwsw_iommu.c
index 1c44ec2a1d58..88b6e6f3fd88 100644
--- a/arch/ia64/hp/common/hwsw_iommu.c
+++ b/arch/ia64/hp/common/hwsw_iommu.c
@@ -186,9 +186,10 @@ hwsw_dma_supported (struct device *dev, u64 mask)
 }
 
 int
-hwsw_dma_mapping_error (dma_addr_t dma_addr)
+hwsw_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
 {
-	return hwiommu_dma_mapping_error (dma_addr) || swiotlb_dma_mapping_error(dma_addr);
+	return hwiommu_dma_mapping_error(dev, dma_addr) ||
+	       swiotlb_dma_mapping_error(dev, dma_addr);
 }
 
 EXPORT_SYMBOL(hwsw_dma_mapping_error);
diff --git a/arch/ia64/hp/common/sba_iommu.c b/arch/ia64/hp/common/sba_iommu.c
index 34421aed1e2a..4956be40d7b5 100644
--- a/arch/ia64/hp/common/sba_iommu.c
+++ b/arch/ia64/hp/common/sba_iommu.c
@@ -2147,7 +2147,7 @@ sba_dma_supported (struct device *dev, u64 mask)
 }
 
 int
-sba_dma_mapping_error (dma_addr_t dma_addr)
+sba_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
 {
 	return 0;
 }
diff --git a/arch/ia64/sn/pci/pci_dma.c b/arch/ia64/sn/pci/pci_dma.c
index 52175af299a0..53ebb6484495 100644
--- a/arch/ia64/sn/pci/pci_dma.c
+++ b/arch/ia64/sn/pci/pci_dma.c
@@ -350,7 +350,7 @@ void sn_dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
 }
 EXPORT_SYMBOL(sn_dma_sync_sg_for_device);
 
-int sn_dma_mapping_error(dma_addr_t dma_addr)
+int sn_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
 {
 	return 0;
 }
diff --git a/arch/mips/mm/dma-default.c b/arch/mips/mm/dma-default.c
index ae39dd88b9aa..891312f8e5a6 100644
--- a/arch/mips/mm/dma-default.c
+++ b/arch/mips/mm/dma-default.c
@@ -348,7 +348,7 @@ void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nele
 
 EXPORT_SYMBOL(dma_sync_sg_for_device);
 
-int dma_mapping_error(dma_addr_t dma_addr)
+int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
 {
 	return 0;
 }
diff --git a/arch/powerpc/platforms/cell/celleb_scc_pciex.c b/arch/powerpc/platforms/cell/celleb_scc_pciex.c
index 0e04f8fb152a..3e7e0f1568ef 100644
--- a/arch/powerpc/platforms/cell/celleb_scc_pciex.c
+++ b/arch/powerpc/platforms/cell/celleb_scc_pciex.c
@@ -281,7 +281,7 @@ static int __init scc_pciex_iowa_init(struct iowa_bus *bus, void *data)
 
 	dummy_page_da = dma_map_single(bus->phb->parent, dummy_page_va,
 				       PAGE_SIZE, DMA_FROM_DEVICE);
-	if (dma_mapping_error(dummy_page_da)) {
+	if (dma_mapping_error(bus->phb->parent, dummy_page_da)) {
 		pr_err("PCIEX:Map dummy page failed.\n");
 		kfree(dummy_page_va);
 		return -1;
diff --git a/arch/powerpc/platforms/cell/spider-pci.c b/arch/powerpc/platforms/cell/spider-pci.c
index 418b605ac35a..5122ec145271 100644
--- a/arch/powerpc/platforms/cell/spider-pci.c
+++ b/arch/powerpc/platforms/cell/spider-pci.c
@@ -111,7 +111,7 @@ static int __init spiderpci_pci_setup_chip(struct pci_controller *phb,
 
 	dummy_page_da = dma_map_single(phb->parent, dummy_page_va,
 				       PAGE_SIZE, DMA_FROM_DEVICE);
-	if (dma_mapping_error(dummy_page_da)) {
+	if (dma_mapping_error(phb->parent, dummy_page_da)) {
 		pr_err("SPIDER-IOWA:Map dummy page filed.\n");
 		kfree(dummy_page_va);
 		return -1;
diff --git a/arch/powerpc/platforms/iseries/mf.c b/arch/powerpc/platforms/iseries/mf.c
index 1dc7295746da..731d7b157749 100644
--- a/arch/powerpc/platforms/iseries/mf.c
+++ b/arch/powerpc/platforms/iseries/mf.c
@@ -871,7 +871,7 @@ static int proc_mf_dump_cmdline(char *page, char **start, off_t off,
 		count = 256 - off;
 
 	dma_addr = iseries_hv_map(page, off + count, DMA_FROM_DEVICE);
-	if (dma_mapping_error(dma_addr))
+	if (dma_mapping_error(NULL, dma_addr))
 		return -ENOMEM;
 	memset(page, 0, off + count);
 	memset(&vsp_cmd, 0, sizeof(vsp_cmd));
diff --git a/arch/x86/kernel/pci-calgary_64.c b/arch/x86/kernel/pci-calgary_64.c
index 19e7fc7c2c4f..1eb86be93d7a 100644
--- a/arch/x86/kernel/pci-calgary_64.c
+++ b/arch/x86/kernel/pci-calgary_64.c
@@ -544,7 +544,7 @@ error:
 	return ret;
 }
 
-static const struct dma_mapping_ops calgary_dma_ops = {
+static struct dma_mapping_ops calgary_dma_ops = {
 	.alloc_coherent = calgary_alloc_coherent,
 	.map_single = calgary_map_single,
 	.unmap_single = calgary_unmap_single,
diff --git a/arch/x86/kernel/pci-dma.c b/arch/x86/kernel/pci-dma.c
index cbecb05551bb..37544123896d 100644
--- a/arch/x86/kernel/pci-dma.c
+++ b/arch/x86/kernel/pci-dma.c
@@ -11,7 +11,7 @@
 
 static int forbid_dac __read_mostly;
 
-const struct dma_mapping_ops *dma_ops;
+struct dma_mapping_ops *dma_ops;
 EXPORT_SYMBOL(dma_ops);
 
 static int iommu_sac_force __read_mostly;
@@ -312,6 +312,8 @@ static int dma_release_coherent(struct device *dev, int order, void *vaddr)
 
 int dma_supported(struct device *dev, u64 mask)
 {
+	struct dma_mapping_ops *ops = get_dma_ops(dev);
+
 #ifdef CONFIG_PCI
 	if (mask > 0xffffffff && forbid_dac > 0) {
 		dev_info(dev, "PCI: Disallowing DAC for device\n");
@@ -319,8 +321,8 @@ int dma_supported(struct device *dev, u64 mask)
 	}
 #endif
 
-	if (dma_ops->dma_supported)
-		return dma_ops->dma_supported(dev, mask);
+	if (ops->dma_supported)
+		return ops->dma_supported(dev, mask);
 
 	/* Copied from i386. Doesn't make much sense, because it will
 	   only work for pci_alloc_coherent.
@@ -367,6 +369,7 @@ void *
 dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
 		   gfp_t gfp)
 {
+	struct dma_mapping_ops *ops = get_dma_ops(dev);
 	void *memory = NULL;
 	struct page *page;
 	unsigned long dma_mask = 0;
@@ -435,8 +438,8 @@ dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
 			/* Let low level make its own zone decisions */
 			gfp &= ~(GFP_DMA32|GFP_DMA);
 
-			if (dma_ops->alloc_coherent)
-				return dma_ops->alloc_coherent(dev, size,
+			if (ops->alloc_coherent)
+				return ops->alloc_coherent(dev, size,
 					dma_handle, gfp);
 			return NULL;
 		}
@@ -448,14 +451,14 @@ dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
 		}
 	}
 
-	if (dma_ops->alloc_coherent) {
+	if (ops->alloc_coherent) {
 		free_pages((unsigned long)memory, get_order(size));
 		gfp &= ~(GFP_DMA|GFP_DMA32);
-		return dma_ops->alloc_coherent(dev, size, dma_handle, gfp);
+		return ops->alloc_coherent(dev, size, dma_handle, gfp);
 	}
 
-	if (dma_ops->map_simple) {
-		*dma_handle = dma_ops->map_simple(dev, virt_to_phys(memory),
+	if (ops->map_simple) {
+		*dma_handle = ops->map_simple(dev, virt_to_phys(memory),
 					      size,
 					      PCI_DMA_BIDIRECTIONAL);
 		if (*dma_handle != bad_dma_address)
@@ -477,12 +480,14 @@ EXPORT_SYMBOL(dma_alloc_coherent);
 void dma_free_coherent(struct device *dev, size_t size,
 			 void *vaddr, dma_addr_t bus)
 {
+	struct dma_mapping_ops *ops = get_dma_ops(dev);
+
 	int order = get_order(size);
 	WARN_ON(irqs_disabled());	/* for portability */
 	if (dma_release_coherent(dev, order, vaddr))
 		return;
-	if (dma_ops->unmap_single)
-		dma_ops->unmap_single(dev, bus, size, 0);
+	if (ops->unmap_single)
+		ops->unmap_single(dev, bus, size, 0);
 	free_pages((unsigned long)vaddr, order);
 }
 EXPORT_SYMBOL(dma_free_coherent);
diff --git a/arch/x86/kernel/pci-gart_64.c b/arch/x86/kernel/pci-gart_64.c
index df5f142657d2..744126e64950 100644
--- a/arch/x86/kernel/pci-gart_64.c
+++ b/arch/x86/kernel/pci-gart_64.c
@@ -692,8 +692,7 @@ static __init int init_k8_gatt(struct agp_kern_info *info)
 
 extern int agp_amd64_init(void);
 
-static const struct dma_mapping_ops gart_dma_ops = {
-	.mapping_error = NULL,
+static struct dma_mapping_ops gart_dma_ops = {
 	.map_single = gart_map_single,
 	.map_simple = gart_map_simple,
 	.unmap_single = gart_unmap_single,
diff --git a/arch/x86/kernel/pci-nommu.c b/arch/x86/kernel/pci-nommu.c
index 792b9179eff3..3f91f71cdc3e 100644
--- a/arch/x86/kernel/pci-nommu.c
+++ b/arch/x86/kernel/pci-nommu.c
@@ -72,21 +72,9 @@ static int nommu_map_sg(struct device *hwdev, struct scatterlist *sg,
 	return nents;
 }
 
-/* Make sure we keep the same behaviour */
-static int nommu_mapping_error(dma_addr_t dma_addr)
-{
-#ifdef CONFIG_X86_32
-	return 0;
-#else
-	return (dma_addr == bad_dma_address);
-#endif
-}
-
-
-const struct dma_mapping_ops nommu_dma_ops = {
+struct dma_mapping_ops nommu_dma_ops = {
 	.map_single = nommu_map_single,
 	.map_sg = nommu_map_sg,
-	.mapping_error = nommu_mapping_error,
 	.is_phys = 1,
 };
 
diff --git a/arch/x86/kernel/pci-swiotlb_64.c b/arch/x86/kernel/pci-swiotlb_64.c
index 20df839b9c20..c4ce0332759e 100644
--- a/arch/x86/kernel/pci-swiotlb_64.c
+++ b/arch/x86/kernel/pci-swiotlb_64.c
@@ -18,7 +18,7 @@ swiotlb_map_single_phys(struct device *hwdev, phys_addr_t paddr, size_t size,
 	return swiotlb_map_single(hwdev, phys_to_virt(paddr), size, direction);
 }
 
-const struct dma_mapping_ops swiotlb_dma_ops = {
+struct dma_mapping_ops swiotlb_dma_ops = {
 	.mapping_error = swiotlb_dma_mapping_error,
 	.alloc_coherent = swiotlb_alloc_coherent,
 	.free_coherent = swiotlb_free_coherent,
diff --git a/drivers/firewire/fw-iso.c b/drivers/firewire/fw-iso.c
index bcbe794a3ea5..e14c03dc0065 100644
--- a/drivers/firewire/fw-iso.c
+++ b/drivers/firewire/fw-iso.c
@@ -50,7 +50,7 @@ fw_iso_buffer_init(struct fw_iso_buffer *buffer, struct fw_card *card,
 
 		address = dma_map_page(card->device, buffer->pages[i],
 				       0, PAGE_SIZE, direction);
-		if (dma_mapping_error(address)) {
+		if (dma_mapping_error(card->device, address)) {
 			__free_page(buffer->pages[i]);
 			goto out_pages;
 		}
diff --git a/drivers/firewire/fw-ohci.c b/drivers/firewire/fw-ohci.c
index 333b12544dd1..566672e0bcff 100644
--- a/drivers/firewire/fw-ohci.c
+++ b/drivers/firewire/fw-ohci.c
@@ -953,7 +953,7 @@ at_context_queue_packet(struct context *ctx, struct fw_packet *packet)
 		payload_bus =
 			dma_map_single(ohci->card.device, packet->payload,
 				       packet->payload_length, DMA_TO_DEVICE);
-		if (dma_mapping_error(payload_bus)) {
+		if (dma_mapping_error(ohci->card.device, payload_bus)) {
 			packet->ack = RCODE_SEND_ERROR;
 			return -1;
 		}
diff --git a/drivers/firewire/fw-sbp2.c b/drivers/firewire/fw-sbp2.c
index 53fc5a641e6d..aaff50ebba1d 100644
--- a/drivers/firewire/fw-sbp2.c
+++ b/drivers/firewire/fw-sbp2.c
@@ -543,7 +543,7 @@ sbp2_send_management_orb(struct sbp2_logical_unit *lu, int node_id,
 	orb->response_bus =
 		dma_map_single(device->card->device, &orb->response,
 			       sizeof(orb->response), DMA_FROM_DEVICE);
-	if (dma_mapping_error(orb->response_bus))
+	if (dma_mapping_error(device->card->device, orb->response_bus))
 		goto fail_mapping_response;
 
 	orb->request.response.high = 0;
@@ -577,7 +577,7 @@ sbp2_send_management_orb(struct sbp2_logical_unit *lu, int node_id,
 	orb->base.request_bus =
 		dma_map_single(device->card->device, &orb->request,
 			       sizeof(orb->request), DMA_TO_DEVICE);
-	if (dma_mapping_error(orb->base.request_bus))
+	if (dma_mapping_error(device->card->device, orb->base.request_bus))
 		goto fail_mapping_request;
 
 	sbp2_send_orb(&orb->base, lu, node_id, generation,
@@ -1424,7 +1424,7 @@ sbp2_map_scatterlist(struct sbp2_command_orb *orb, struct fw_device *device,
 	orb->page_table_bus =
 		dma_map_single(device->card->device, orb->page_table,
 			       sizeof(orb->page_table), DMA_TO_DEVICE);
-	if (dma_mapping_error(orb->page_table_bus))
+	if (dma_mapping_error(device->card->device, orb->page_table_bus))
 		goto fail_page_table;
 
 	/*
@@ -1509,7 +1509,7 @@ static int sbp2_scsi_queuecommand(struct scsi_cmnd *cmd, scsi_done_fn_t done)
 	orb->base.request_bus =
 		dma_map_single(device->card->device, &orb->request,
 			       sizeof(orb->request), DMA_TO_DEVICE);
-	if (dma_mapping_error(orb->base.request_bus))
+	if (dma_mapping_error(device->card->device, orb->base.request_bus))
 		goto out;
 
 	sbp2_send_orb(&orb->base, lu, lu->tgt->node_id, lu->generation,
diff --git a/drivers/infiniband/hw/ipath/ipath_sdma.c b/drivers/infiniband/hw/ipath/ipath_sdma.c
index eaba03273e4f..284c9bca517e 100644
--- a/drivers/infiniband/hw/ipath/ipath_sdma.c
+++ b/drivers/infiniband/hw/ipath/ipath_sdma.c
@@ -698,7 +698,7 @@ retry:
 
 	addr = dma_map_single(&dd->pcidev->dev, tx->txreq.map_addr,
 			      tx->map_len, DMA_TO_DEVICE);
-	if (dma_mapping_error(addr)) {
+	if (dma_mapping_error(&dd->pcidev->dev, addr)) {
 		ret = -EIO;
 		goto unlock;
 	}
diff --git a/drivers/infiniband/hw/ipath/ipath_user_sdma.c b/drivers/infiniband/hw/ipath/ipath_user_sdma.c
index 86e016916cd1..82d9a0b5ca2f 100644
--- a/drivers/infiniband/hw/ipath/ipath_user_sdma.c
+++ b/drivers/infiniband/hw/ipath/ipath_user_sdma.c
@@ -206,7 +206,7 @@ static int ipath_user_sdma_coalesce(const struct ipath_devdata *dd,
 
 	dma_addr = dma_map_page(&dd->pcidev->dev, page, 0, len,
 				DMA_TO_DEVICE);
-	if (dma_mapping_error(dma_addr)) {
+	if (dma_mapping_error(&dd->pcidev->dev, dma_addr)) {
 		ret = -ENOMEM;
 		goto free_unmap;
 	}
@@ -301,7 +301,7 @@ static int ipath_user_sdma_pin_pages(const struct ipath_devdata *dd,
 					     pages[j], 0, flen, DMA_TO_DEVICE);
 		unsigned long fofs = addr & ~PAGE_MASK;
 
-		if (dma_mapping_error(dma_addr)) {
+		if (dma_mapping_error(&dd->pcidev->dev, dma_addr)) {
 			ret = -ENOMEM;
 			goto done;
 		}
@@ -508,7 +508,7 @@ static int ipath_user_sdma_queue_pkts(const struct ipath_devdata *dd,
 		if (page) {
 			dma_addr = dma_map_page(&dd->pcidev->dev,
 						page, 0, len, DMA_TO_DEVICE);
-			if (dma_mapping_error(dma_addr)) {
+			if (dma_mapping_error(&dd->pcidev->dev, dma_addr)) {
 				ret = -ENOMEM;
 				goto free_pbc;
 			}
diff --git a/drivers/infiniband/hw/mthca/mthca_eq.c b/drivers/infiniband/hw/mthca/mthca_eq.c
index 4e36aa7cb3d2..cc6858f0b65b 100644
--- a/drivers/infiniband/hw/mthca/mthca_eq.c
+++ b/drivers/infiniband/hw/mthca/mthca_eq.c
@@ -780,7 +780,7 @@ int mthca_map_eq_icm(struct mthca_dev *dev, u64 icm_virt)
 		return -ENOMEM;
 	dev->eq_table.icm_dma = pci_map_page(dev->pdev, dev->eq_table.icm_page, 0,
 					     PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
-	if (pci_dma_mapping_error(dev->eq_table.icm_dma)) {
+	if (pci_dma_mapping_error(dev->pdev, dev->eq_table.icm_dma)) {
 		__free_page(dev->eq_table.icm_page);
 		return -ENOMEM;
 	}
diff --git a/drivers/media/dvb/pluto2/pluto2.c b/drivers/media/dvb/pluto2/pluto2.c
index 1360403b88b6..a9653c63f4db 100644
--- a/drivers/media/dvb/pluto2/pluto2.c
+++ b/drivers/media/dvb/pluto2/pluto2.c
@@ -242,7 +242,7 @@ static int __devinit pluto_dma_map(struct pluto *pluto)
 	pluto->dma_addr = pci_map_single(pluto->pdev, pluto->dma_buf,
 			TS_DMA_BYTES, PCI_DMA_FROMDEVICE);
 
-	return pci_dma_mapping_error(pluto->dma_addr);
+	return pci_dma_mapping_error(pluto->pdev, pluto->dma_addr);
 }
 
 static void pluto_dma_unmap(struct pluto *pluto)
diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
index c3a5db72ddd7..5f95e10229b5 100644
--- a/drivers/mmc/host/sdhci.c
+++ b/drivers/mmc/host/sdhci.c
@@ -337,7 +337,7 @@ static int sdhci_adma_table_pre(struct sdhci_host *host,
 
 	host->align_addr = dma_map_single(mmc_dev(host->mmc),
 		host->align_buffer, 128 * 4, direction);
-	if (dma_mapping_error(host->align_addr))
+	if (dma_mapping_error(mmc_dev(host->mmc), host->align_addr))
 		goto fail;
 	BUG_ON(host->align_addr & 0x3);
 
@@ -439,7 +439,7 @@ static int sdhci_adma_table_pre(struct sdhci_host *host,
 
 	host->adma_addr = dma_map_single(mmc_dev(host->mmc),
 		host->adma_desc, (128 * 2 + 1) * 4, DMA_TO_DEVICE);
-	if (dma_mapping_error(host->align_addr))
+	if (dma_mapping_error(mmc_dev(host->mmc), host->align_addr))
 		goto unmap_entries;
 	BUG_ON(host->adma_addr & 0x3);
 
diff --git a/drivers/net/arm/ep93xx_eth.c b/drivers/net/arm/ep93xx_eth.c
index 7a14980f3472..18d3eeb7eab2 100644
--- a/drivers/net/arm/ep93xx_eth.c
+++ b/drivers/net/arm/ep93xx_eth.c
@@ -482,7 +482,7 @@ static int ep93xx_alloc_buffers(struct ep93xx_priv *ep)
 			goto err;
 
 		d = dma_map_single(NULL, page, PAGE_SIZE, DMA_FROM_DEVICE);
-		if (dma_mapping_error(d)) {
+		if (dma_mapping_error(NULL, d)) {
 			free_page((unsigned long)page);
 			goto err;
 		}
@@ -505,7 +505,7 @@ static int ep93xx_alloc_buffers(struct ep93xx_priv *ep)
 			goto err;
 
 		d = dma_map_single(NULL, page, PAGE_SIZE, DMA_TO_DEVICE);
-		if (dma_mapping_error(d)) {
+		if (dma_mapping_error(NULL, d)) {
 			free_page((unsigned long)page);
 			goto err;
 		}
diff --git a/drivers/net/bnx2x_main.c b/drivers/net/bnx2x_main.c
index 0263bef9cc6d..c7cc760a1777 100644
--- a/drivers/net/bnx2x_main.c
+++ b/drivers/net/bnx2x_main.c
@@ -1020,7 +1020,7 @@ static inline int bnx2x_alloc_rx_sge(struct bnx2x *bp,
 
 	mapping = pci_map_page(bp->pdev, page, 0, BCM_PAGE_SIZE*PAGES_PER_SGE,
 			       PCI_DMA_FROMDEVICE);
-	if (unlikely(dma_mapping_error(mapping))) {
+	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
 		__free_pages(page, PAGES_PER_SGE_SHIFT);
 		return -ENOMEM;
 	}
@@ -1048,7 +1048,7 @@ static inline int bnx2x_alloc_rx_skb(struct bnx2x *bp,
 
 	mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_use_size,
 				 PCI_DMA_FROMDEVICE);
-	if (unlikely(dma_mapping_error(mapping))) {
+	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
 		dev_kfree_skb(skb);
 		return -ENOMEM;
 	}
diff --git a/drivers/net/cxgb3/sge.c b/drivers/net/cxgb3/sge.c
index a96331c875e6..1b0861d73ab7 100644
--- a/drivers/net/cxgb3/sge.c
+++ b/drivers/net/cxgb3/sge.c
@@ -386,7 +386,7 @@ static inline int add_one_rx_buf(void *va, unsigned int len,
 	dma_addr_t mapping;
 
 	mapping = pci_map_single(pdev, va, len, PCI_DMA_FROMDEVICE);
-	if (unlikely(pci_dma_mapping_error(mapping)))
+	if (unlikely(pci_dma_mapping_error(pdev, mapping)))
 		return -ENOMEM;
 
 	pci_unmap_addr_set(sd, dma_addr, mapping);
diff --git a/drivers/net/e100.c b/drivers/net/e100.c
index 1037b1332312..19d32a227be1 100644
--- a/drivers/net/e100.c
+++ b/drivers/net/e100.c
@@ -1790,7 +1790,7 @@ static int e100_rx_alloc_skb(struct nic *nic, struct rx *rx)
 	rx->dma_addr = pci_map_single(nic->pdev, rx->skb->data,
 		RFD_BUF_LEN, PCI_DMA_BIDIRECTIONAL);
 
-	if (pci_dma_mapping_error(rx->dma_addr)) {
+	if (pci_dma_mapping_error(nic->pdev, rx->dma_addr)) {
 		dev_kfree_skb_any(rx->skb);
 		rx->skb = NULL;
 		rx->dma_addr = 0;
diff --git a/drivers/net/e1000e/ethtool.c b/drivers/net/e1000e/ethtool.c
index a14561f40db0..9350564065e7 100644
--- a/drivers/net/e1000e/ethtool.c
+++ b/drivers/net/e1000e/ethtool.c
@@ -1090,7 +1090,7 @@ static int e1000_setup_desc_rings(struct e1000_adapter *adapter)
 		tx_ring->buffer_info[i].dma =
 			pci_map_single(pdev, skb->data, skb->len,
 				       PCI_DMA_TODEVICE);
-		if (pci_dma_mapping_error(tx_ring->buffer_info[i].dma)) {
+		if (pci_dma_mapping_error(pdev, tx_ring->buffer_info[i].dma)) {
 			ret_val = 4;
 			goto err_nomem;
 		}
@@ -1153,7 +1153,7 @@ static int e1000_setup_desc_rings(struct e1000_adapter *adapter)
 		rx_ring->buffer_info[i].dma =
 			pci_map_single(pdev, skb->data, 2048,
 				       PCI_DMA_FROMDEVICE);
-		if (pci_dma_mapping_error(rx_ring->buffer_info[i].dma)) {
+		if (pci_dma_mapping_error(pdev, rx_ring->buffer_info[i].dma)) {
 			ret_val = 8;
 			goto err_nomem;
 		}
diff --git a/drivers/net/e1000e/netdev.c b/drivers/net/e1000e/netdev.c
index 9c0f56b3c518..d13677899767 100644
--- a/drivers/net/e1000e/netdev.c
+++ b/drivers/net/e1000e/netdev.c
@@ -195,7 +195,7 @@ map_skb:
 		buffer_info->dma = pci_map_single(pdev, skb->data,
 						  adapter->rx_buffer_len,
 						  PCI_DMA_FROMDEVICE);
-		if (pci_dma_mapping_error(buffer_info->dma)) {
+		if (pci_dma_mapping_error(pdev, buffer_info->dma)) {
 			dev_err(&pdev->dev, "RX DMA map failed\n");
 			adapter->rx_dma_failed++;
 			break;
@@ -265,7 +265,7 @@ static void e1000_alloc_rx_buffers_ps(struct e1000_adapter *adapter,
 						   ps_page->page,
 						   0, PAGE_SIZE,
 						   PCI_DMA_FROMDEVICE);
-			if (pci_dma_mapping_error(ps_page->dma)) {
+			if (pci_dma_mapping_error(pdev, ps_page->dma)) {
 				dev_err(&adapter->pdev->dev,
 					"RX DMA page map failed\n");
 				adapter->rx_dma_failed++;
@@ -300,7 +300,7 @@ static void e1000_alloc_rx_buffers_ps(struct e1000_adapter *adapter,
 		buffer_info->dma = pci_map_single(pdev, skb->data,
 						  adapter->rx_ps_bsize0,
 						  PCI_DMA_FROMDEVICE);
-		if (pci_dma_mapping_error(buffer_info->dma)) {
+		if (pci_dma_mapping_error(pdev, buffer_info->dma)) {
 			dev_err(&pdev->dev, "RX DMA map failed\n");
 			adapter->rx_dma_failed++;
 			/* cleanup skb */
@@ -3344,7 +3344,7 @@ static int e1000_tx_map(struct e1000_adapter *adapter,
 						  skb->data + offset,
 						  size,
 						  PCI_DMA_TODEVICE);
-		if (pci_dma_mapping_error(buffer_info->dma)) {
+		if (pci_dma_mapping_error(adapter->pdev, buffer_info->dma)) {
 			dev_err(&adapter->pdev->dev, "TX DMA map failed\n");
 			adapter->tx_dma_failed++;
 			return -1;
@@ -3382,7 +3382,8 @@ static int e1000_tx_map(struct e1000_adapter *adapter,
 							offset,
 							size,
 							PCI_DMA_TODEVICE);
-			if (pci_dma_mapping_error(buffer_info->dma)) {
+			if (pci_dma_mapping_error(adapter->pdev,
+						  buffer_info->dma)) {
 				dev_err(&adapter->pdev->dev,
 					"TX DMA page map failed\n");
 				adapter->tx_dma_failed++;
diff --git a/drivers/net/ibmveth.c b/drivers/net/ibmveth.c
index e5a6e2e84540..91ec9fdc7184 100644
--- a/drivers/net/ibmveth.c
+++ b/drivers/net/ibmveth.c
@@ -260,7 +260,7 @@ static void ibmveth_replenish_buffer_pool(struct ibmveth_adapter *adapter, struc
 		dma_addr = dma_map_single(&adapter->vdev->dev, skb->data,
 				pool->buff_size, DMA_FROM_DEVICE);
 
-		if (dma_mapping_error(dma_addr))
+		if (dma_mapping_error(&adapter->vdev->dev, dma_addr))
 			goto failure;
 
 		pool->free_map[free_index] = IBM_VETH_INVALID_MAP;
@@ -294,7 +294,7 @@ failure:
 		pool->consumer_index = pool->size - 1;
 	else
 		pool->consumer_index--;
-	if (!dma_mapping_error(dma_addr))
+	if (!dma_mapping_error(&adapter->vdev->dev, dma_addr))
 		dma_unmap_single(&adapter->vdev->dev,
 		                 pool->dma_addr[index], pool->buff_size,
 		                 DMA_FROM_DEVICE);
@@ -448,11 +448,11 @@ static void ibmveth_rxq_harvest_buffer(struct ibmveth_adapter *adapter)
 static void ibmveth_cleanup(struct ibmveth_adapter *adapter)
 {
 	int i;
+	struct device *dev = &adapter->vdev->dev;
 
 	if(adapter->buffer_list_addr != NULL) {
-		if(!dma_mapping_error(adapter->buffer_list_dma)) {
-			dma_unmap_single(&adapter->vdev->dev,
-					adapter->buffer_list_dma, 4096,
+		if (!dma_mapping_error(dev, adapter->buffer_list_dma)) {
+			dma_unmap_single(dev, adapter->buffer_list_dma, 4096,
 					DMA_BIDIRECTIONAL);
 			adapter->buffer_list_dma = DMA_ERROR_CODE;
 		}
@@ -461,9 +461,8 @@ static void ibmveth_cleanup(struct ibmveth_adapter *adapter)
 	}
 
 	if(adapter->filter_list_addr != NULL) {
-		if(!dma_mapping_error(adapter->filter_list_dma)) {
-			dma_unmap_single(&adapter->vdev->dev,
-					adapter->filter_list_dma, 4096,
+		if (!dma_mapping_error(dev, adapter->filter_list_dma)) {
+			dma_unmap_single(dev, adapter->filter_list_dma, 4096,
 					DMA_BIDIRECTIONAL);
 			adapter->filter_list_dma = DMA_ERROR_CODE;
 		}
@@ -472,8 +471,8 @@ static void ibmveth_cleanup(struct ibmveth_adapter *adapter)
 	}
 
 	if(adapter->rx_queue.queue_addr != NULL) {
-		if(!dma_mapping_error(adapter->rx_queue.queue_dma)) {
-			dma_unmap_single(&adapter->vdev->dev,
+		if (!dma_mapping_error(dev, adapter->rx_queue.queue_dma)) {
+			dma_unmap_single(dev,
 					adapter->rx_queue.queue_dma,
 					adapter->rx_queue.queue_len,
 					DMA_BIDIRECTIONAL);
@@ -535,6 +534,7 @@ static int ibmveth_open(struct net_device *netdev)
 	int rc;
 	union ibmveth_buf_desc rxq_desc;
 	int i;
+	struct device *dev;
 
 	ibmveth_debug_printk("open starting\n");
 
@@ -563,17 +563,19 @@ static int ibmveth_open(struct net_device *netdev)
 		return -ENOMEM;
 	}
 
-	adapter->buffer_list_dma = dma_map_single(&adapter->vdev->dev,
+	dev = &adapter->vdev->dev;
+
+	adapter->buffer_list_dma = dma_map_single(dev,
 			adapter->buffer_list_addr, 4096, DMA_BIDIRECTIONAL);
-	adapter->filter_list_dma = dma_map_single(&adapter->vdev->dev,
+	adapter->filter_list_dma = dma_map_single(dev,
 			adapter->filter_list_addr, 4096, DMA_BIDIRECTIONAL);
-	adapter->rx_queue.queue_dma = dma_map_single(&adapter->vdev->dev,
+	adapter->rx_queue.queue_dma = dma_map_single(dev,
 			adapter->rx_queue.queue_addr,
 			adapter->rx_queue.queue_len, DMA_BIDIRECTIONAL);
 
-	if((dma_mapping_error(adapter->buffer_list_dma) ) ||
-	   (dma_mapping_error(adapter->filter_list_dma)) ||
-	   (dma_mapping_error(adapter->rx_queue.queue_dma))) {
+	if ((dma_mapping_error(dev, adapter->buffer_list_dma)) ||
+	    (dma_mapping_error(dev, adapter->filter_list_dma)) ||
+	    (dma_mapping_error(dev, adapter->rx_queue.queue_dma))) {
 		ibmveth_error_printk("unable to map filter or buffer list pages\n");
 		ibmveth_cleanup(adapter);
 		napi_disable(&adapter->napi);
@@ -645,7 +647,7 @@ static int ibmveth_open(struct net_device *netdev)
 	adapter->bounce_buffer_dma =
 	    dma_map_single(&adapter->vdev->dev, adapter->bounce_buffer,
 			   netdev->mtu + IBMVETH_BUFF_OH, DMA_BIDIRECTIONAL);
-	if (dma_mapping_error(adapter->bounce_buffer_dma)) {
+	if (dma_mapping_error(dev, adapter->bounce_buffer_dma)) {
 		ibmveth_error_printk("unable to map bounce buffer\n");
 		ibmveth_cleanup(adapter);
 		napi_disable(&adapter->napi);
@@ -922,7 +924,7 @@ static int ibmveth_start_xmit(struct sk_buff *skb, struct net_device *netdev)
 		buf[1] = 0;
 	}
 
-	if (dma_mapping_error(data_dma_addr)) {
+	if (dma_mapping_error(&adapter->vdev->dev, data_dma_addr)) {
 		if (!firmware_has_feature(FW_FEATURE_CMO))
 			ibmveth_error_printk("tx: unable to map xmit buffer\n");
 		skb_copy_from_linear_data(skb, adapter->bounce_buffer,
diff --git a/drivers/net/iseries_veth.c b/drivers/net/iseries_veth.c
index b8d0639c1cdf..c46864d626b2 100644
--- a/drivers/net/iseries_veth.c
+++ b/drivers/net/iseries_veth.c
@@ -1128,7 +1128,7 @@ static int veth_transmit_to_one(struct sk_buff *skb, HvLpIndex rlp,
 	msg->data.addr[0] = dma_map_single(port->dev, skb->data,
 					   skb->len, DMA_TO_DEVICE);
 
-	if (dma_mapping_error(msg->data.addr[0]))
+	if (dma_mapping_error(port->dev, msg->data.addr[0]))
 		goto recycle_and_drop;
 
 	msg->dev = port->dev;
@@ -1226,7 +1226,7 @@ static void veth_recycle_msg(struct veth_lpar_connection *cnx,
 		dma_address = msg->data.addr[0];
 		dma_length = msg->data.len[0];
 
-		if (!dma_mapping_error(dma_address))
+		if (!dma_mapping_error(msg->dev, dma_address))
 			dma_unmap_single(msg->dev, dma_address, dma_length,
 					DMA_TO_DEVICE);
 
diff --git a/drivers/net/mlx4/eq.c b/drivers/net/mlx4/eq.c
index ea3a09aaa844..7df928d3a3d8 100644
--- a/drivers/net/mlx4/eq.c
+++ b/drivers/net/mlx4/eq.c
@@ -526,7 +526,7 @@ int mlx4_map_eq_icm(struct mlx4_dev *dev, u64 icm_virt)
 		return -ENOMEM;
 	priv->eq_table.icm_dma = pci_map_page(dev->pdev, priv->eq_table.icm_page, 0,
 					      PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
-	if (pci_dma_mapping_error(priv->eq_table.icm_dma)) {
+	if (pci_dma_mapping_error(dev->pdev, priv->eq_table.icm_dma)) {
 		__free_page(priv->eq_table.icm_page);
 		return -ENOMEM;
 	}
diff --git a/drivers/net/pasemi_mac.c b/drivers/net/pasemi_mac.c
index 993d87c9296f..edc0fd588985 100644
--- a/drivers/net/pasemi_mac.c
+++ b/drivers/net/pasemi_mac.c
@@ -650,7 +650,7 @@ static void pasemi_mac_replenish_rx_ring(const struct net_device *dev,
 				 mac->bufsz - LOCAL_SKB_ALIGN,
 				 PCI_DMA_FROMDEVICE);
 
-		if (unlikely(dma_mapping_error(dma))) {
+		if (unlikely(pci_dma_mapping_error(mac->dma_pdev, dma))) {
 			dev_kfree_skb_irq(info->skb);
 			break;
 		}
@@ -1519,7 +1519,7 @@ static int pasemi_mac_start_tx(struct sk_buff *skb, struct net_device *dev)
 	map[0] = pci_map_single(mac->dma_pdev, skb->data, skb_headlen(skb),
 				PCI_DMA_TODEVICE);
 	map_size[0] = skb_headlen(skb);
-	if (dma_mapping_error(map[0]))
+	if (pci_dma_mapping_error(mac->dma_pdev, map[0]))
 		goto out_err_nolock;
 
 	for (i = 0; i < nfrags; i++) {
@@ -1529,7 +1529,7 @@ static int pasemi_mac_start_tx(struct sk_buff *skb, struct net_device *dev)
 					frag->page_offset, frag->size,
 					PCI_DMA_TODEVICE);
 		map_size[i+1] = frag->size;
-		if (dma_mapping_error(map[i+1])) {
+		if (pci_dma_mapping_error(mac->dma_pdev, map[i+1])) {
 			nfrags = i;
 			goto out_err_nolock;
 		}
diff --git a/drivers/net/qla3xxx.c b/drivers/net/qla3xxx.c
index e7d48a352beb..e82b37bbd6c3 100644
--- a/drivers/net/qla3xxx.c
+++ b/drivers/net/qla3xxx.c
@@ -328,7 +328,7 @@ static void ql_release_to_lrg_buf_free_list(struct ql3_adapter *qdev,
 						     qdev->lrg_buffer_len -
 						     QL_HEADER_SPACE,
 						     PCI_DMA_FROMDEVICE);
-			err = pci_dma_mapping_error(map);
+			err = pci_dma_mapping_error(qdev->pdev, map);
 			if(err) {
 				printk(KERN_ERR "%s: PCI mapping failed with error: %d\n",
 				       qdev->ndev->name, err);
@@ -1919,7 +1919,7 @@ static int ql_populate_free_queue(struct ql3_adapter *qdev)
 						     QL_HEADER_SPACE,
 						     PCI_DMA_FROMDEVICE);
 
-				err = pci_dma_mapping_error(map);
+				err = pci_dma_mapping_error(qdev->pdev, map);
 				if(err) {
 					printk(KERN_ERR "%s: PCI mapping failed with error: %d\n",
 					       qdev->ndev->name, err);
@@ -2454,7 +2454,7 @@ static int ql_send_map(struct ql3_adapter *qdev,
 	 */
 	map = pci_map_single(qdev->pdev, skb->data, len, PCI_DMA_TODEVICE);
 
-	err = pci_dma_mapping_error(map);
+	err = pci_dma_mapping_error(qdev->pdev, map);
 	if(err) {
 		printk(KERN_ERR "%s: PCI mapping failed with error: %d\n",
 		       qdev->ndev->name, err);
@@ -2487,7 +2487,7 @@ static int ql_send_map(struct ql3_adapter *qdev,
 					     sizeof(struct oal),
 					     PCI_DMA_TODEVICE);
 
-		err = pci_dma_mapping_error(map);
+		err = pci_dma_mapping_error(qdev->pdev, map);
 		if(err) {
 
 			printk(KERN_ERR "%s: PCI mapping outbound address list with error: %d\n",
@@ -2514,7 +2514,7 @@ static int ql_send_map(struct ql3_adapter *qdev,
 				     frag->page_offset, frag->size,
 				     PCI_DMA_TODEVICE);
 
-		err = pci_dma_mapping_error(map);
+		err = pci_dma_mapping_error(qdev->pdev, map);
 		if(err) {
 			printk(KERN_ERR "%s: PCI mapping frags failed with error: %d\n",
 			       qdev->ndev->name, err);
@@ -2916,7 +2916,7 @@ static int ql_alloc_large_buffers(struct ql3_adapter *qdev)
 						     QL_HEADER_SPACE,
 						     PCI_DMA_FROMDEVICE);
 
-			err = pci_dma_mapping_error(map);
+			err = pci_dma_mapping_error(qdev->pdev, map);
 			if(err) {
 				printk(KERN_ERR "%s: PCI mapping failed with error: %d\n",
 				       qdev->ndev->name, err);
diff --git a/drivers/net/s2io.c b/drivers/net/s2io.c
index 9dae40ccf048..86d77d05190a 100644
--- a/drivers/net/s2io.c
+++ b/drivers/net/s2io.c
@@ -2512,8 +2512,8 @@ static void stop_nic(struct s2io_nic *nic)
  *  Return Value:
  *  SUCCESS on success or an appropriate -ve value on failure.
  */
-
-static int fill_rx_buffers(struct ring_info *ring, int from_card_up)
+static int fill_rx_buffers(struct s2io_nic *nic, struct ring_info *ring,
+			   int from_card_up)
 {
 	struct sk_buff *skb;
 	struct RxD_t *rxdp;
@@ -2602,7 +2602,8 @@ static int fill_rx_buffers(struct ring_info *ring, int from_card_up)
 			rxdp1->Buffer0_ptr = pci_map_single
 			    (ring->pdev, skb->data, size - NET_IP_ALIGN,
 				PCI_DMA_FROMDEVICE);
-			if(pci_dma_mapping_error(rxdp1->Buffer0_ptr))
+			if (pci_dma_mapping_error(nic->pdev,
+						  rxdp1->Buffer0_ptr))
 				goto pci_map_failed;
 
 			rxdp->Control_2 =
@@ -2636,7 +2637,8 @@ static int fill_rx_buffers(struct ring_info *ring, int from_card_up)
 				rxdp3->Buffer0_ptr =
 				   pci_map_single(ring->pdev, ba->ba_0,
 					BUF0_LEN, PCI_DMA_FROMDEVICE);
-				if (pci_dma_mapping_error(rxdp3->Buffer0_ptr))
+				if (pci_dma_mapping_error(nic->pdev,
+							  rxdp3->Buffer0_ptr))
 					goto pci_map_failed;
 			} else
 				pci_dma_sync_single_for_device(ring->pdev,
@@ -2655,7 +2657,8 @@ static int fill_rx_buffers(struct ring_info *ring, int from_card_up)
 				(ring->pdev, skb->data, ring->mtu + 4,
 					PCI_DMA_FROMDEVICE);
 
-			if (pci_dma_mapping_error(rxdp3->Buffer2_ptr))
+			if (pci_dma_mapping_error(nic->pdev,
+						  rxdp3->Buffer2_ptr))
 				goto pci_map_failed;
 
 			if (from_card_up) {
@@ -2664,8 +2667,8 @@ static int fill_rx_buffers(struct ring_info *ring, int from_card_up)
 					ba->ba_1, BUF1_LEN,
 					PCI_DMA_FROMDEVICE);
 
-				if (pci_dma_mapping_error
-					(rxdp3->Buffer1_ptr)) {
+				if (pci_dma_mapping_error(nic->pdev,
+							  rxdp3->Buffer1_ptr)) {
 					pci_unmap_single
 						(ring->pdev,
 						(dma_addr_t)(unsigned long)
@@ -2806,9 +2809,9 @@ static void free_rx_buffers(struct s2io_nic *sp)
 	}
 }
 
-static int s2io_chk_rx_buffers(struct ring_info *ring)
+static int s2io_chk_rx_buffers(struct s2io_nic *nic, struct ring_info *ring)
 {
-	if (fill_rx_buffers(ring, 0) == -ENOMEM) {
+	if (fill_rx_buffers(nic, ring, 0) == -ENOMEM) {
 		DBG_PRINT(INFO_DBG, "%s:Out of memory", ring->dev->name);
 		DBG_PRINT(INFO_DBG, " in Rx Intr!!\n");
 	}
@@ -2848,7 +2851,7 @@ static int s2io_poll_msix(struct napi_struct *napi, int budget)
 		return 0;
 
 	pkts_processed = rx_intr_handler(ring, budget);
-	s2io_chk_rx_buffers(ring);
+	s2io_chk_rx_buffers(nic, ring);
 
 	if (pkts_processed < budget_org) {
 		netif_rx_complete(dev, napi);
@@ -2882,7 +2885,7 @@ static int s2io_poll_inta(struct napi_struct *napi, int budget)
 	for (i = 0; i < config->rx_ring_num; i++) {
 		ring = &mac_control->rings[i];
 		ring_pkts_processed = rx_intr_handler(ring, budget);
-		s2io_chk_rx_buffers(ring);
+		s2io_chk_rx_buffers(nic, ring);
 		pkts_processed += ring_pkts_processed;
 		budget -= ring_pkts_processed;
 		if (budget <= 0)
@@ -2939,7 +2942,8 @@ static void s2io_netpoll(struct net_device *dev)
 		rx_intr_handler(&mac_control->rings[i], 0);
 
 	for (i = 0; i < config->rx_ring_num; i++) {
-		if (fill_rx_buffers(&mac_control->rings[i], 0) == -ENOMEM) {
+		if (fill_rx_buffers(nic, &mac_control->rings[i], 0) ==
+		    -ENOMEM) {
 			DBG_PRINT(INFO_DBG, "%s:Out of memory", dev->name);
 			DBG_PRINT(INFO_DBG, " in Rx Netpoll!!\n");
 			break;
@@ -4235,14 +4239,14 @@ static int s2io_xmit(struct sk_buff *skb, struct net_device *dev)
 		txdp->Buffer_Pointer = pci_map_single(sp->pdev,
 					fifo->ufo_in_band_v,
 					sizeof(u64), PCI_DMA_TODEVICE);
-		if (pci_dma_mapping_error(txdp->Buffer_Pointer))
+		if (pci_dma_mapping_error(sp->pdev, txdp->Buffer_Pointer))
 			goto pci_map_failed;
 		txdp++;
 	}
 
 	txdp->Buffer_Pointer = pci_map_single
 	    (sp->pdev, skb->data, frg_len, PCI_DMA_TODEVICE);
-	if (pci_dma_mapping_error(txdp->Buffer_Pointer))
+	if (pci_dma_mapping_error(sp->pdev, txdp->Buffer_Pointer))
 		goto pci_map_failed;
 
 	txdp->Host_Control = (unsigned long) skb;
@@ -4345,7 +4349,7 @@ static irqreturn_t s2io_msix_ring_handle(int irq, void *dev_id)
 		netif_rx_schedule(dev, &ring->napi);
 	} else {
 		rx_intr_handler(ring, 0);
-		s2io_chk_rx_buffers(ring);
+		s2io_chk_rx_buffers(sp, ring);
 	}
 
 	return IRQ_HANDLED;
@@ -4826,7 +4830,7 @@ static irqreturn_t s2io_isr(int irq, void *dev_id)
 		 */
 		if (!config->napi) {
 			for (i = 0; i < config->rx_ring_num; i++)
-				s2io_chk_rx_buffers(&mac_control->rings[i]);
+				s2io_chk_rx_buffers(sp, &mac_control->rings[i]);
 		}
 		writeq(sp->general_int_mask, &bar0->general_int_mask);
 		readl(&bar0->general_int_status);
@@ -6859,7 +6863,7 @@ static int set_rxd_buffer_pointer(struct s2io_nic *sp, struct RxD_t *rxdp,
 				pci_map_single( sp->pdev, (*skb)->data,
 					size - NET_IP_ALIGN,
 					PCI_DMA_FROMDEVICE);
-			if (pci_dma_mapping_error(rxdp1->Buffer0_ptr))
+			if (pci_dma_mapping_error(sp->pdev, rxdp1->Buffer0_ptr))
 				goto memalloc_failed;
 			rxdp->Host_Control = (unsigned long) (*skb);
 		}
@@ -6886,12 +6890,13 @@ static int set_rxd_buffer_pointer(struct s2io_nic *sp, struct RxD_t *rxdp,
 				pci_map_single(sp->pdev, (*skb)->data,
 					dev->mtu + 4,
 					PCI_DMA_FROMDEVICE);
-			if (pci_dma_mapping_error(rxdp3->Buffer2_ptr))
+			if (pci_dma_mapping_error(sp->pdev, rxdp3->Buffer2_ptr))
 				goto memalloc_failed;
 			rxdp3->Buffer0_ptr = *temp0 =
 				pci_map_single( sp->pdev, ba->ba_0, BUF0_LEN,
 					PCI_DMA_FROMDEVICE);
6894 | if (pci_dma_mapping_error(rxdp3->Buffer0_ptr)) { | 6898 | if (pci_dma_mapping_error(sp->pdev, |
6899 | rxdp3->Buffer0_ptr)) { | ||
6895 | pci_unmap_single (sp->pdev, | 6900 | pci_unmap_single (sp->pdev, |
6896 | (dma_addr_t)rxdp3->Buffer2_ptr, | 6901 | (dma_addr_t)rxdp3->Buffer2_ptr, |
6897 | dev->mtu + 4, PCI_DMA_FROMDEVICE); | 6902 | dev->mtu + 4, PCI_DMA_FROMDEVICE); |
@@ -6903,7 +6908,8 @@ static int set_rxd_buffer_pointer(struct s2io_nic *sp, struct RxD_t *rxdp, | |||
6903 | rxdp3->Buffer1_ptr = *temp1 = | 6908 | rxdp3->Buffer1_ptr = *temp1 = |
6904 | pci_map_single(sp->pdev, ba->ba_1, BUF1_LEN, | 6909 | pci_map_single(sp->pdev, ba->ba_1, BUF1_LEN, |
6905 | PCI_DMA_FROMDEVICE); | 6910 | PCI_DMA_FROMDEVICE); |
6906 | if (pci_dma_mapping_error(rxdp3->Buffer1_ptr)) { | 6911 | if (pci_dma_mapping_error(sp->pdev, |
6912 | rxdp3->Buffer1_ptr)) { | ||
6907 | pci_unmap_single (sp->pdev, | 6913 | pci_unmap_single (sp->pdev, |
6908 | (dma_addr_t)rxdp3->Buffer0_ptr, | 6914 | (dma_addr_t)rxdp3->Buffer0_ptr, |
6909 | BUF0_LEN, PCI_DMA_FROMDEVICE); | 6915 | BUF0_LEN, PCI_DMA_FROMDEVICE); |
@@ -7187,7 +7193,7 @@ static int s2io_card_up(struct s2io_nic * sp) | |||
7187 | 7193 | ||
7188 | for (i = 0; i < config->rx_ring_num; i++) { | 7194 | for (i = 0; i < config->rx_ring_num; i++) { |
7189 | mac_control->rings[i].mtu = dev->mtu; | 7195 | mac_control->rings[i].mtu = dev->mtu; |
7190 | ret = fill_rx_buffers(&mac_control->rings[i], 1); | 7196 | ret = fill_rx_buffers(sp, &mac_control->rings[i], 1); |
7191 | if (ret) { | 7197 | if (ret) { |
7192 | DBG_PRINT(ERR_DBG, "%s: Out of memory in Open\n", | 7198 | DBG_PRINT(ERR_DBG, "%s: Out of memory in Open\n", |
7193 | dev->name); | 7199 | dev->name); |
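The s2io hunks are almost entirely plumbing: fill_rx_buffers() and s2io_chk_rx_buffers() grow a struct s2io_nic * parameter solely so the refill paths can hand nic->pdev to pci_dma_mapping_error(). A minimal sketch of the resulting idiom (the helper name is hypothetical, condensed from the hunks above):

    /* Hypothetical condensation of the s2io refill path after this patch;
     * s2io_map_rx_skb() is not a real driver function. */
    static int s2io_map_rx_skb(struct s2io_nic *nic, struct RxD3 *rxdp3,
                               struct sk_buff *skb, int mtu)
    {
            rxdp3->Buffer2_ptr = pci_map_single(nic->pdev, skb->data,
                                                mtu + 4, PCI_DMA_FROMDEVICE);
            /* nic->pdev lets a per-device dma_mapping_ops define its own
             * error cookie instead of a single global test. */
            if (pci_dma_mapping_error(nic->pdev, rxdp3->Buffer2_ptr))
                    return -ENOMEM;
            return 0;
    }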
diff --git a/drivers/net/sfc/rx.c b/drivers/net/sfc/rx.c index 601b001437c0..0d27dd39bc09 100644 --- a/drivers/net/sfc/rx.c +++ b/drivers/net/sfc/rx.c | |||
@@ -233,7 +233,7 @@ static inline int efx_init_rx_buffer_skb(struct efx_rx_queue *rx_queue, | |||
233 | rx_buf->data, rx_buf->len, | 233 | rx_buf->data, rx_buf->len, |
234 | PCI_DMA_FROMDEVICE); | 234 | PCI_DMA_FROMDEVICE); |
235 | 235 | ||
236 | if (unlikely(pci_dma_mapping_error(rx_buf->dma_addr))) { | 236 | if (unlikely(pci_dma_mapping_error(efx->pci_dev, rx_buf->dma_addr))) { |
237 | dev_kfree_skb_any(rx_buf->skb); | 237 | dev_kfree_skb_any(rx_buf->skb); |
238 | rx_buf->skb = NULL; | 238 | rx_buf->skb = NULL; |
239 | return -EIO; | 239 | return -EIO; |
@@ -275,7 +275,7 @@ static inline int efx_init_rx_buffer_page(struct efx_rx_queue *rx_queue, | |||
275 | 0, efx_rx_buf_size(efx), | 275 | 0, efx_rx_buf_size(efx), |
276 | PCI_DMA_FROMDEVICE); | 276 | PCI_DMA_FROMDEVICE); |
277 | 277 | ||
278 | if (unlikely(pci_dma_mapping_error(dma_addr))) { | 278 | if (unlikely(pci_dma_mapping_error(efx->pci_dev, dma_addr))) { |
279 | __free_pages(rx_buf->page, efx->rx_buffer_order); | 279 | __free_pages(rx_buf->page, efx->rx_buffer_order); |
280 | rx_buf->page = NULL; | 280 | rx_buf->page = NULL; |
281 | return -EIO; | 281 | return -EIO; |
diff --git a/drivers/net/sfc/tx.c b/drivers/net/sfc/tx.c index 5cdd082ab8f6..5e8374ab28ee 100644 --- a/drivers/net/sfc/tx.c +++ b/drivers/net/sfc/tx.c | |||
@@ -172,7 +172,7 @@ static inline int efx_enqueue_skb(struct efx_tx_queue *tx_queue, | |||
172 | 172 | ||
173 | /* Process all fragments */ | 173 | /* Process all fragments */ |
174 | while (1) { | 174 | while (1) { |
175 | if (unlikely(pci_dma_mapping_error(dma_addr))) | 175 | if (unlikely(pci_dma_mapping_error(pci_dev, dma_addr))) |
176 | goto pci_err; | 176 | goto pci_err; |
177 | 177 | ||
178 | /* Store fields for marking in the per-fragment final | 178 | /* Store fields for marking in the per-fragment final |
@@ -661,7 +661,8 @@ efx_tsoh_heap_alloc(struct efx_tx_queue *tx_queue, size_t header_len) | |||
661 | tsoh->dma_addr = pci_map_single(tx_queue->efx->pci_dev, | 661 | tsoh->dma_addr = pci_map_single(tx_queue->efx->pci_dev, |
662 | TSOH_BUFFER(tsoh), header_len, | 662 | TSOH_BUFFER(tsoh), header_len, |
663 | PCI_DMA_TODEVICE); | 663 | PCI_DMA_TODEVICE); |
664 | if (unlikely(pci_dma_mapping_error(tsoh->dma_addr))) { | 664 | if (unlikely(pci_dma_mapping_error(tx_queue->efx->pci_dev, |
665 | tsoh->dma_addr))) { | ||
665 | kfree(tsoh); | 666 | kfree(tsoh); |
666 | return NULL; | 667 | return NULL; |
667 | } | 668 | } |
@@ -863,7 +864,7 @@ static inline int tso_get_fragment(struct tso_state *st, struct efx_nic *efx, | |||
863 | 864 | ||
864 | st->ifc.unmap_addr = pci_map_page(efx->pci_dev, page, page_off, | 865 | st->ifc.unmap_addr = pci_map_page(efx->pci_dev, page, page_off, |
865 | len, PCI_DMA_TODEVICE); | 866 | len, PCI_DMA_TODEVICE); |
866 | if (likely(!pci_dma_mapping_error(st->ifc.unmap_addr))) { | 867 | if (likely(!pci_dma_mapping_error(efx->pci_dev, st->ifc.unmap_addr))) { |
867 | st->ifc.unmap_len = len; | 868 | st->ifc.unmap_len = len; |
868 | st->ifc.len = len; | 869 | st->ifc.len = len; |
869 | st->ifc.dma_addr = st->ifc.unmap_addr; | 870 | st->ifc.dma_addr = st->ifc.unmap_addr; |
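sfc already checks every mapping, so the conversion is mechanical; note that the unlikely() annotation survives, keeping the error branch off the hot path even though the test now goes through a device pointer. The post-patch idiom, with efx and rx_buf as in the rx.c hunk above:

    rx_buf->dma_addr = pci_map_single(efx->pci_dev, rx_buf->data,
                                      rx_buf->len, PCI_DMA_FROMDEVICE);
    if (unlikely(pci_dma_mapping_error(efx->pci_dev, rx_buf->dma_addr))) {
            dev_kfree_skb_any(rx_buf->skb);
            rx_buf->skb = NULL;
            return -EIO;            /* drop the buffer; caller will refill */
    }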
diff --git a/drivers/net/spider_net.c b/drivers/net/spider_net.c index 00aa0b108cb9..b6435d0d71f9 100644 --- a/drivers/net/spider_net.c +++ b/drivers/net/spider_net.c | |||
@@ -452,7 +452,7 @@ spider_net_prepare_rx_descr(struct spider_net_card *card, | |||
452 | /* iommu-map the skb */ | 452 | /* iommu-map the skb */ |
453 | buf = pci_map_single(card->pdev, descr->skb->data, | 453 | buf = pci_map_single(card->pdev, descr->skb->data, |
454 | SPIDER_NET_MAX_FRAME, PCI_DMA_FROMDEVICE); | 454 | SPIDER_NET_MAX_FRAME, PCI_DMA_FROMDEVICE); |
455 | if (pci_dma_mapping_error(buf)) { | 455 | if (pci_dma_mapping_error(card->pdev, buf)) { |
456 | dev_kfree_skb_any(descr->skb); | 456 | dev_kfree_skb_any(descr->skb); |
457 | descr->skb = NULL; | 457 | descr->skb = NULL; |
458 | if (netif_msg_rx_err(card) && net_ratelimit()) | 458 | if (netif_msg_rx_err(card) && net_ratelimit()) |
@@ -691,7 +691,7 @@ spider_net_prepare_tx_descr(struct spider_net_card *card, | |||
691 | unsigned long flags; | 691 | unsigned long flags; |
692 | 692 | ||
693 | buf = pci_map_single(card->pdev, skb->data, skb->len, PCI_DMA_TODEVICE); | 693 | buf = pci_map_single(card->pdev, skb->data, skb->len, PCI_DMA_TODEVICE); |
694 | if (pci_dma_mapping_error(buf)) { | 694 | if (pci_dma_mapping_error(card->pdev, buf)) { |
695 | if (netif_msg_tx_err(card) && net_ratelimit()) | 695 | if (netif_msg_tx_err(card) && net_ratelimit()) |
696 | dev_err(&card->netdev->dev, "could not iommu-map packet (%p, %i). " | 696 | dev_err(&card->netdev->dev, "could not iommu-map packet (%p, %i). " |
697 | "Dropping packet\n", skb->data, skb->len); | 697 | "Dropping packet\n", skb->data, skb->len); |
diff --git a/drivers/net/tc35815.c b/drivers/net/tc35815.c index a645e5028c14..8487ace9d2e3 100644 --- a/drivers/net/tc35815.c +++ b/drivers/net/tc35815.c | |||
@@ -506,7 +506,7 @@ static void *alloc_rxbuf_page(struct pci_dev *hwdev, dma_addr_t *dma_handle) | |||
506 | return NULL; | 506 | return NULL; |
507 | *dma_handle = pci_map_single(hwdev, buf, PAGE_SIZE, | 507 | *dma_handle = pci_map_single(hwdev, buf, PAGE_SIZE, |
508 | PCI_DMA_FROMDEVICE); | 508 | PCI_DMA_FROMDEVICE); |
509 | if (pci_dma_mapping_error(*dma_handle)) { | 509 | if (pci_dma_mapping_error(hwdev, *dma_handle)) { |
510 | free_page((unsigned long)buf); | 510 | free_page((unsigned long)buf); |
511 | return NULL; | 511 | return NULL; |
512 | } | 512 | } |
@@ -536,7 +536,7 @@ static struct sk_buff *alloc_rxbuf_skb(struct net_device *dev, | |||
536 | return NULL; | 536 | return NULL; |
537 | *dma_handle = pci_map_single(hwdev, skb->data, RX_BUF_SIZE, | 537 | *dma_handle = pci_map_single(hwdev, skb->data, RX_BUF_SIZE, |
538 | PCI_DMA_FROMDEVICE); | 538 | PCI_DMA_FROMDEVICE); |
539 | if (pci_dma_mapping_error(*dma_handle)) { | 539 | if (pci_dma_mapping_error(hwdev, *dma_handle)) { |
540 | dev_kfree_skb_any(skb); | 540 | dev_kfree_skb_any(skb); |
541 | return NULL; | 541 | return NULL; |
542 | } | 542 | } |
diff --git a/drivers/net/wireless/ath5k/base.c b/drivers/net/wireless/ath5k/base.c index 217d506527a9..d9769c527346 100644 --- a/drivers/net/wireless/ath5k/base.c +++ b/drivers/net/wireless/ath5k/base.c | |||
@@ -1166,7 +1166,7 @@ ath5k_rxbuf_setup(struct ath5k_softc *sc, struct ath5k_buf *bf) | |||
1166 | bf->skb = skb; | 1166 | bf->skb = skb; |
1167 | bf->skbaddr = pci_map_single(sc->pdev, | 1167 | bf->skbaddr = pci_map_single(sc->pdev, |
1168 | skb->data, sc->rxbufsize, PCI_DMA_FROMDEVICE); | 1168 | skb->data, sc->rxbufsize, PCI_DMA_FROMDEVICE); |
1169 | if (unlikely(pci_dma_mapping_error(bf->skbaddr))) { | 1169 | if (unlikely(pci_dma_mapping_error(sc->pdev, bf->skbaddr))) { |
1170 | ATH5K_ERR(sc, "%s: DMA mapping failed\n", __func__); | 1170 | ATH5K_ERR(sc, "%s: DMA mapping failed\n", __func__); |
1171 | dev_kfree_skb(skb); | 1171 | dev_kfree_skb(skb); |
1172 | bf->skb = NULL; | 1172 | bf->skb = NULL; |
@@ -1918,7 +1918,7 @@ ath5k_beacon_setup(struct ath5k_softc *sc, struct ath5k_buf *bf) | |||
1918 | ATH5K_DBG(sc, ATH5K_DEBUG_BEACON, "skb %p [data %p len %u] " | 1918 | ATH5K_DBG(sc, ATH5K_DEBUG_BEACON, "skb %p [data %p len %u] " |
1919 | "skbaddr %llx\n", skb, skb->data, skb->len, | 1919 | "skbaddr %llx\n", skb, skb->data, skb->len, |
1920 | (unsigned long long)bf->skbaddr); | 1920 | (unsigned long long)bf->skbaddr); |
1921 | if (pci_dma_mapping_error(bf->skbaddr)) { | 1921 | if (pci_dma_mapping_error(sc->pdev, bf->skbaddr)) { |
1922 | ATH5K_ERR(sc, "beacon DMA mapping failed\n"); | 1922 | ATH5K_ERR(sc, "beacon DMA mapping failed\n"); |
1923 | return -EIO; | 1923 | return -EIO; |
1924 | } | 1924 | } |
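spider_net, tc35815 and ath5k need only the one-line conversion, since the right device handle is already in scope at every mapping site. The shared shape is an allocate-map-check helper that releases the buffer when the IOMMU refuses the mapping; a sketch of that shape (not any one driver's code):

    /* Generic form of alloc_rxbuf_skb() and friends; a sketch only. */
    static struct sk_buff *map_rx_skb(struct pci_dev *pdev, unsigned int len,
                                      dma_addr_t *handle)
    {
            struct sk_buff *skb = dev_alloc_skb(len);

            if (!skb)
                    return NULL;
            *handle = pci_map_single(pdev, skb->data, len, PCI_DMA_FROMDEVICE);
            if (pci_dma_mapping_error(pdev, *handle)) {
                    dev_kfree_skb_any(skb);
                    return NULL;    /* caller sees one failure mode */
            }
            return skb;
    }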
diff --git a/drivers/scsi/ibmvscsi/ibmvfc.c b/drivers/scsi/ibmvscsi/ibmvfc.c index c4a7c06793c5..61f8fdea2d96 100644 --- a/drivers/scsi/ibmvscsi/ibmvfc.c +++ b/drivers/scsi/ibmvscsi/ibmvfc.c | |||
@@ -3525,7 +3525,7 @@ static int ibmvfc_init_crq(struct ibmvfc_host *vhost) | |||
3525 | crq->msg_token = dma_map_single(dev, crq->msgs, | 3525 | crq->msg_token = dma_map_single(dev, crq->msgs, |
3526 | PAGE_SIZE, DMA_BIDIRECTIONAL); | 3526 | PAGE_SIZE, DMA_BIDIRECTIONAL); |
3527 | 3527 | ||
3528 | if (dma_mapping_error(crq->msg_token)) | 3528 | if (dma_mapping_error(dev, crq->msg_token)) |
3529 | goto map_failed; | 3529 | goto map_failed; |
3530 | 3530 | ||
3531 | retrc = rc = plpar_hcall_norets(H_REG_CRQ, vdev->unit_address, | 3531 | retrc = rc = plpar_hcall_norets(H_REG_CRQ, vdev->unit_address, |
@@ -3618,7 +3618,7 @@ static int ibmvfc_alloc_mem(struct ibmvfc_host *vhost) | |||
3618 | async_q->size * sizeof(*async_q->msgs), | 3618 | async_q->size * sizeof(*async_q->msgs), |
3619 | DMA_BIDIRECTIONAL); | 3619 | DMA_BIDIRECTIONAL); |
3620 | 3620 | ||
3621 | if (dma_mapping_error(async_q->msg_token)) { | 3621 | if (dma_mapping_error(dev, async_q->msg_token)) { |
3622 | dev_err(dev, "Failed to map async queue\n"); | 3622 | dev_err(dev, "Failed to map async queue\n"); |
3623 | goto free_async_crq; | 3623 | goto free_async_crq; |
3624 | } | 3624 | } |
diff --git a/drivers/scsi/ibmvscsi/ibmvscsi.c b/drivers/scsi/ibmvscsi/ibmvscsi.c index 20000ec79b04..6b24b9cdb04c 100644 --- a/drivers/scsi/ibmvscsi/ibmvscsi.c +++ b/drivers/scsi/ibmvscsi/ibmvscsi.c | |||
@@ -859,7 +859,7 @@ static void send_mad_adapter_info(struct ibmvscsi_host_data *hostdata) | |||
859 | sizeof(hostdata->madapter_info), | 859 | sizeof(hostdata->madapter_info), |
860 | DMA_BIDIRECTIONAL); | 860 | DMA_BIDIRECTIONAL); |
861 | 861 | ||
862 | if (dma_mapping_error(req->buffer)) { | 862 | if (dma_mapping_error(hostdata->dev, req->buffer)) { |
863 | if (!firmware_has_feature(FW_FEATURE_CMO)) | 863 | if (!firmware_has_feature(FW_FEATURE_CMO)) |
864 | dev_err(hostdata->dev, | 864 | dev_err(hostdata->dev, |
865 | "Unable to map request_buffer for " | 865 | "Unable to map request_buffer for " |
@@ -1407,7 +1407,7 @@ static int ibmvscsi_do_host_config(struct ibmvscsi_host_data *hostdata, | |||
1407 | length, | 1407 | length, |
1408 | DMA_BIDIRECTIONAL); | 1408 | DMA_BIDIRECTIONAL); |
1409 | 1409 | ||
1410 | if (dma_mapping_error(host_config->buffer)) { | 1410 | if (dma_mapping_error(hostdata->dev, host_config->buffer)) { |
1411 | if (!firmware_has_feature(FW_FEATURE_CMO)) | 1411 | if (!firmware_has_feature(FW_FEATURE_CMO)) |
1412 | dev_err(hostdata->dev, | 1412 | dev_err(hostdata->dev, |
1413 | "dma_mapping error getting host config\n"); | 1413 | "dma_mapping error getting host config\n"); |
diff --git a/drivers/scsi/ibmvscsi/ibmvstgt.c b/drivers/scsi/ibmvscsi/ibmvstgt.c index 3b9514c8f1f1..2e13ec00172a 100644 --- a/drivers/scsi/ibmvscsi/ibmvstgt.c +++ b/drivers/scsi/ibmvscsi/ibmvstgt.c | |||
@@ -564,7 +564,7 @@ static int crq_queue_create(struct crq_queue *queue, struct srp_target *target) | |||
564 | queue->size * sizeof(*queue->msgs), | 564 | queue->size * sizeof(*queue->msgs), |
565 | DMA_BIDIRECTIONAL); | 565 | DMA_BIDIRECTIONAL); |
566 | 566 | ||
567 | if (dma_mapping_error(queue->msg_token)) | 567 | if (dma_mapping_error(target->dev, queue->msg_token)) |
568 | goto map_failed; | 568 | goto map_failed; |
569 | 569 | ||
570 | err = h_reg_crq(vport->dma_dev->unit_address, queue->msg_token, | 570 | err = h_reg_crq(vport->dma_dev->unit_address, queue->msg_token, |
diff --git a/drivers/scsi/ibmvscsi/rpa_vscsi.c b/drivers/scsi/ibmvscsi/rpa_vscsi.c index 182146100dc1..462a8574dad9 100644 --- a/drivers/scsi/ibmvscsi/rpa_vscsi.c +++ b/drivers/scsi/ibmvscsi/rpa_vscsi.c | |||
@@ -253,7 +253,7 @@ static int rpavscsi_init_crq_queue(struct crq_queue *queue, | |||
253 | queue->size * sizeof(*queue->msgs), | 253 | queue->size * sizeof(*queue->msgs), |
254 | DMA_BIDIRECTIONAL); | 254 | DMA_BIDIRECTIONAL); |
255 | 255 | ||
256 | if (dma_mapping_error(queue->msg_token)) | 256 | if (dma_mapping_error(hostdata->dev, queue->msg_token)) |
257 | goto map_failed; | 257 | goto map_failed; |
258 | 258 | ||
259 | gather_partition_info(); | 259 | gather_partition_info(); |
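The four ibmvscsi-family files use the generic API and already hold the right struct device (dev, hostdata->dev, target->dev), so each call site just passes it through. The shared CRQ registration pattern, sketched from the hunks above:

    /* CRQ setup pattern shared by ibmvfc/ibmvscsi/ibmvstgt/rpa_vscsi;
     * queue layout follows the hunks above. */
    queue->msg_token = dma_map_single(dev, queue->msgs,
                                      queue->size * sizeof(*queue->msgs),
                                      DMA_BIDIRECTIONAL);
    if (dma_mapping_error(dev, queue->msg_token))
            goto map_failed;        /* unwind allocations and bail out */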
diff --git a/drivers/spi/atmel_spi.c b/drivers/spi/atmel_spi.c index e81d59d78910..0c7165660853 100644 --- a/drivers/spi/atmel_spi.c +++ b/drivers/spi/atmel_spi.c | |||
@@ -313,14 +313,14 @@ atmel_spi_dma_map_xfer(struct atmel_spi *as, struct spi_transfer *xfer) | |||
313 | xfer->tx_dma = dma_map_single(dev, | 313 | xfer->tx_dma = dma_map_single(dev, |
314 | (void *) xfer->tx_buf, xfer->len, | 314 | (void *) xfer->tx_buf, xfer->len, |
315 | DMA_TO_DEVICE); | 315 | DMA_TO_DEVICE); |
316 | if (dma_mapping_error(xfer->tx_dma)) | 316 | if (dma_mapping_error(dev, xfer->tx_dma)) |
317 | return -ENOMEM; | 317 | return -ENOMEM; |
318 | } | 318 | } |
319 | if (xfer->rx_buf) { | 319 | if (xfer->rx_buf) { |
320 | xfer->rx_dma = dma_map_single(dev, | 320 | xfer->rx_dma = dma_map_single(dev, |
321 | xfer->rx_buf, xfer->len, | 321 | xfer->rx_buf, xfer->len, |
322 | DMA_FROM_DEVICE); | 322 | DMA_FROM_DEVICE); |
323 | if (dma_mapping_error(xfer->rx_dma)) { | 323 | if (dma_mapping_error(dev, xfer->rx_dma)) { |
324 | if (xfer->tx_buf) | 324 | if (xfer->tx_buf) |
325 | dma_unmap_single(dev, | 325 | dma_unmap_single(dev, |
326 | xfer->tx_dma, xfer->len, | 326 | xfer->tx_dma, xfer->len, |
diff --git a/drivers/spi/au1550_spi.c b/drivers/spi/au1550_spi.c index 9149689c79d9..87b73e0169c5 100644 --- a/drivers/spi/au1550_spi.c +++ b/drivers/spi/au1550_spi.c | |||
@@ -334,7 +334,7 @@ static int au1550_spi_dma_rxtmp_alloc(struct au1550_spi *hw, unsigned size) | |||
334 | hw->dma_rx_tmpbuf_size = size; | 334 | hw->dma_rx_tmpbuf_size = size; |
335 | hw->dma_rx_tmpbuf_addr = dma_map_single(hw->dev, hw->dma_rx_tmpbuf, | 335 | hw->dma_rx_tmpbuf_addr = dma_map_single(hw->dev, hw->dma_rx_tmpbuf, |
336 | size, DMA_FROM_DEVICE); | 336 | size, DMA_FROM_DEVICE); |
337 | if (dma_mapping_error(hw->dma_rx_tmpbuf_addr)) { | 337 | if (dma_mapping_error(hw->dev, hw->dma_rx_tmpbuf_addr)) { |
338 | kfree(hw->dma_rx_tmpbuf); | 338 | kfree(hw->dma_rx_tmpbuf); |
339 | hw->dma_rx_tmpbuf = 0; | 339 | hw->dma_rx_tmpbuf = 0; |
340 | hw->dma_rx_tmpbuf_size = 0; | 340 | hw->dma_rx_tmpbuf_size = 0; |
@@ -378,7 +378,7 @@ static int au1550_spi_dma_txrxb(struct spi_device *spi, struct spi_transfer *t) | |||
378 | dma_rx_addr = dma_map_single(hw->dev, | 378 | dma_rx_addr = dma_map_single(hw->dev, |
379 | (void *)t->rx_buf, | 379 | (void *)t->rx_buf, |
380 | t->len, DMA_FROM_DEVICE); | 380 | t->len, DMA_FROM_DEVICE); |
381 | if (dma_mapping_error(dma_rx_addr)) | 381 | if (dma_mapping_error(hw->dev, dma_rx_addr)) |
382 | dev_err(hw->dev, "rx dma map error\n"); | 382 | dev_err(hw->dev, "rx dma map error\n"); |
383 | } | 383 | } |
384 | } else { | 384 | } else { |
@@ -401,7 +401,7 @@ static int au1550_spi_dma_txrxb(struct spi_device *spi, struct spi_transfer *t) | |||
401 | dma_tx_addr = dma_map_single(hw->dev, | 401 | dma_tx_addr = dma_map_single(hw->dev, |
402 | (void *)t->tx_buf, | 402 | (void *)t->tx_buf, |
403 | t->len, DMA_TO_DEVICE); | 403 | t->len, DMA_TO_DEVICE); |
404 | if (dma_mapping_error(dma_tx_addr)) | 404 | if (dma_mapping_error(hw->dev, dma_tx_addr)) |
405 | dev_err(hw->dev, "tx dma map error\n"); | 405 | dev_err(hw->dev, "tx dma map error\n"); |
406 | } | 406 | } |
407 | } else { | 407 | } else { |
diff --git a/drivers/spi/omap2_mcspi.c b/drivers/spi/omap2_mcspi.c index b1cc148036c1..f6f987bb71ca 100644 --- a/drivers/spi/omap2_mcspi.c +++ b/drivers/spi/omap2_mcspi.c | |||
@@ -836,7 +836,7 @@ static int omap2_mcspi_transfer(struct spi_device *spi, struct spi_message *m) | |||
836 | if (tx_buf != NULL) { | 836 | if (tx_buf != NULL) { |
837 | t->tx_dma = dma_map_single(&spi->dev, (void *) tx_buf, | 837 | t->tx_dma = dma_map_single(&spi->dev, (void *) tx_buf, |
838 | len, DMA_TO_DEVICE); | 838 | len, DMA_TO_DEVICE); |
839 | if (dma_mapping_error(t->tx_dma)) { | 839 | if (dma_mapping_error(&spi->dev, t->tx_dma)) { |
840 | dev_dbg(&spi->dev, "dma %cX %d bytes error\n", | 840 | dev_dbg(&spi->dev, "dma %cX %d bytes error\n", |
841 | 'T', len); | 841 | 'T', len); |
842 | return -EINVAL; | 842 | return -EINVAL; |
@@ -845,7 +845,7 @@ static int omap2_mcspi_transfer(struct spi_device *spi, struct spi_message *m) | |||
845 | if (rx_buf != NULL) { | 845 | if (rx_buf != NULL) { |
846 | t->rx_dma = dma_map_single(&spi->dev, rx_buf, t->len, | 846 | t->rx_dma = dma_map_single(&spi->dev, rx_buf, t->len, |
847 | DMA_FROM_DEVICE); | 847 | DMA_FROM_DEVICE); |
848 | if (dma_mapping_error(t->rx_dma)) { | 848 | if (dma_mapping_error(&spi->dev, t->rx_dma)) { |
849 | dev_dbg(&spi->dev, "dma %cX %d bytes error\n", | 849 | dev_dbg(&spi->dev, "dma %cX %d bytes error\n", |
850 | 'R', len); | 850 | 'R', len); |
851 | if (tx_buf != NULL) | 851 | if (tx_buf != NULL) |
diff --git a/drivers/spi/pxa2xx_spi.c b/drivers/spi/pxa2xx_spi.c index 0c452c46ab07..067299d6d192 100644 --- a/drivers/spi/pxa2xx_spi.c +++ b/drivers/spi/pxa2xx_spi.c | |||
@@ -353,7 +353,7 @@ static int map_dma_buffers(struct driver_data *drv_data) | |||
353 | drv_data->rx_dma = dma_map_single(dev, drv_data->rx, | 353 | drv_data->rx_dma = dma_map_single(dev, drv_data->rx, |
354 | drv_data->rx_map_len, | 354 | drv_data->rx_map_len, |
355 | DMA_FROM_DEVICE); | 355 | DMA_FROM_DEVICE); |
356 | if (dma_mapping_error(drv_data->rx_dma)) | 356 | if (dma_mapping_error(dev, drv_data->rx_dma)) |
357 | return 0; | 357 | return 0; |
358 | 358 | ||
359 | /* Stream map the tx buffer */ | 359 | /* Stream map the tx buffer */ |
@@ -361,7 +361,7 @@ static int map_dma_buffers(struct driver_data *drv_data) | |||
361 | drv_data->tx_map_len, | 361 | drv_data->tx_map_len, |
362 | DMA_TO_DEVICE); | 362 | DMA_TO_DEVICE); |
363 | 363 | ||
364 | if (dma_mapping_error(drv_data->tx_dma)) { | 364 | if (dma_mapping_error(dev, drv_data->tx_dma)) { |
365 | dma_unmap_single(dev, drv_data->rx_dma, | 365 | dma_unmap_single(dev, drv_data->rx_dma, |
366 | drv_data->rx_map_len, DMA_FROM_DEVICE); | 366 | drv_data->rx_map_len, DMA_FROM_DEVICE); |
367 | return 0; | 367 | return 0; |
diff --git a/drivers/spi/spi_imx.c b/drivers/spi/spi_imx.c index 54ac7bea5f8c..6fb77fcc4971 100644 --- a/drivers/spi/spi_imx.c +++ b/drivers/spi/spi_imx.c | |||
@@ -491,7 +491,7 @@ static int map_dma_buffers(struct driver_data *drv_data) | |||
491 | buf, | 491 | buf, |
492 | drv_data->tx_map_len, | 492 | drv_data->tx_map_len, |
493 | DMA_TO_DEVICE); | 493 | DMA_TO_DEVICE); |
494 | if (dma_mapping_error(drv_data->tx_dma)) | 494 | if (dma_mapping_error(dev, drv_data->tx_dma)) |
495 | return -1; | 495 | return -1; |
496 | 496 | ||
497 | drv_data->tx_dma_needs_unmap = 1; | 497 | drv_data->tx_dma_needs_unmap = 1; |
@@ -516,7 +516,7 @@ static int map_dma_buffers(struct driver_data *drv_data) | |||
516 | buf, | 516 | buf, |
517 | drv_data->len, | 517 | drv_data->len, |
518 | DMA_FROM_DEVICE); | 518 | DMA_FROM_DEVICE); |
519 | if (dma_mapping_error(drv_data->rx_dma)) | 519 | if (dma_mapping_error(dev, drv_data->rx_dma)) |
520 | return -1; | 520 | return -1; |
521 | drv_data->rx_dma_needs_unmap = 1; | 521 | drv_data->rx_dma_needs_unmap = 1; |
522 | } | 522 | } |
@@ -534,7 +534,7 @@ static int map_dma_buffers(struct driver_data *drv_data) | |||
534 | buf, | 534 | buf, |
535 | drv_data->tx_map_len, | 535 | drv_data->tx_map_len, |
536 | DMA_TO_DEVICE); | 536 | DMA_TO_DEVICE); |
537 | if (dma_mapping_error(drv_data->tx_dma)) { | 537 | if (dma_mapping_error(dev, drv_data->tx_dma)) { |
538 | if (drv_data->rx_dma) { | 538 | if (drv_data->rx_dma) { |
539 | dma_unmap_single(dev, | 539 | dma_unmap_single(dev, |
540 | drv_data->rx_dma, | 540 | drv_data->rx_dma, |
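The SPI controller drivers show the two-buffer case: if the tx map succeeds but the rx map fails (or the reverse), the first mapping has to be unwound against the same device before the error is reported, as atmel_spi and pxa2xx_spi do. A sketch following those hunks, with dev the controller's struct device:

    xfer->tx_dma = dma_map_single(dev, (void *)xfer->tx_buf, xfer->len,
                                  DMA_TO_DEVICE);
    if (dma_mapping_error(dev, xfer->tx_dma))
            return -ENOMEM;
    xfer->rx_dma = dma_map_single(dev, xfer->rx_buf, xfer->len,
                                  DMA_FROM_DEVICE);
    if (dma_mapping_error(dev, xfer->rx_dma)) {
            /* undo the tx mapping so nothing leaks on partial failure */
            dma_unmap_single(dev, xfer->tx_dma, xfer->len, DMA_TO_DEVICE);
            return -ENOMEM;
    }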
diff --git a/include/asm-alpha/dma-mapping.h b/include/asm-alpha/dma-mapping.h index db351d1296f4..a5801ae02e4b 100644 --- a/include/asm-alpha/dma-mapping.h +++ b/include/asm-alpha/dma-mapping.h | |||
@@ -24,8 +24,8 @@ | |||
24 | pci_unmap_sg(alpha_gendev_to_pci(dev), sg, nents, dir) | 24 | pci_unmap_sg(alpha_gendev_to_pci(dev), sg, nents, dir) |
25 | #define dma_supported(dev, mask) \ | 25 | #define dma_supported(dev, mask) \ |
26 | pci_dma_supported(alpha_gendev_to_pci(dev), mask) | 26 | pci_dma_supported(alpha_gendev_to_pci(dev), mask) |
27 | #define dma_mapping_error(addr) \ | 27 | #define dma_mapping_error(dev, addr) \ |
28 | pci_dma_mapping_error(addr) | 28 | pci_dma_mapping_error(alpha_gendev_to_pci(dev), addr) |
29 | 29 | ||
30 | #else /* no PCI - no IOMMU. */ | 30 | #else /* no PCI - no IOMMU. */ |
31 | 31 | ||
@@ -45,7 +45,7 @@ int dma_map_sg(struct device *dev, struct scatterlist *sg, int nents, | |||
45 | #define dma_unmap_page(dev, addr, size, dir) ((void)0) | 45 | #define dma_unmap_page(dev, addr, size, dir) ((void)0) |
46 | #define dma_unmap_sg(dev, sg, nents, dir) ((void)0) | 46 | #define dma_unmap_sg(dev, sg, nents, dir) ((void)0) |
47 | 47 | ||
48 | #define dma_mapping_error(addr) (0) | 48 | #define dma_mapping_error(dev, addr) (0) |
49 | 49 | ||
50 | #endif /* !CONFIG_PCI */ | 50 | #endif /* !CONFIG_PCI */ |
51 | 51 | ||
diff --git a/include/asm-alpha/pci.h b/include/asm-alpha/pci.h index d31fd49ff79a..2a14302c17a3 100644 --- a/include/asm-alpha/pci.h +++ b/include/asm-alpha/pci.h | |||
@@ -106,7 +106,7 @@ extern dma_addr_t pci_map_page(struct pci_dev *, struct page *, | |||
106 | /* Test for pci_map_single or pci_map_page having generated an error. */ | 106 | /* Test for pci_map_single or pci_map_page having generated an error. */ |
107 | 107 | ||
108 | static inline int | 108 | static inline int |
109 | pci_dma_mapping_error(dma_addr_t dma_addr) | 109 | pci_dma_mapping_error(struct pci_dev *pdev, dma_addr_t dma_addr) |
110 | { | 110 | { |
111 | return dma_addr == 0; | 111 | return dma_addr == 0; |
112 | } | 112 | } |
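The architecture headers fall into two camps. Architectures whose mappings cannot fail (avr32, cris, frv, m68k, mn10300, xtensa below) accept the new parameter and keep returning 0; alpha instead routes the generic call through its PCI layer, converting the struct device with alpha_gendev_to_pci(), and collapses to a constant when PCI is compiled out. A representative stub for the first camp, matching the hunks that follow:

    /* The no-IOMMU camp: the device argument is accepted and ignored. */
    static inline int dma_mapping_error(struct device *dev, dma_addr_t addr)
    {
            return 0;       /* mappings on these platforms cannot fail */
    }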
diff --git a/include/asm-arm/dma-mapping.h b/include/asm-arm/dma-mapping.h index e99406a7bece..f41335ba6337 100644 --- a/include/asm-arm/dma-mapping.h +++ b/include/asm-arm/dma-mapping.h | |||
@@ -56,7 +56,7 @@ static inline int dma_is_consistent(struct device *dev, dma_addr_t handle) | |||
56 | /* | 56 | /* |
57 | * DMA errors are defined by all-bits-set in the DMA address. | 57 | * DMA errors are defined by all-bits-set in the DMA address. |
58 | */ | 58 | */ |
59 | static inline int dma_mapping_error(dma_addr_t dma_addr) | 59 | static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr) |
60 | { | 60 | { |
61 | return dma_addr == ~0; | 61 | return dma_addr == ~0; |
62 | } | 62 | } |
diff --git a/include/asm-avr32/dma-mapping.h b/include/asm-avr32/dma-mapping.h index 57dc672bab8e..0399359ab5d8 100644 --- a/include/asm-avr32/dma-mapping.h +++ b/include/asm-avr32/dma-mapping.h | |||
@@ -35,7 +35,7 @@ static inline int dma_set_mask(struct device *dev, u64 dma_mask) | |||
35 | /* | 35 | /* |
36 | * dma_map_single can't fail as it is implemented now. | 36 | * dma_map_single can't fail as it is implemented now. |
37 | */ | 37 | */ |
38 | static inline int dma_mapping_error(dma_addr_t addr) | 38 | static inline int dma_mapping_error(struct device *dev, dma_addr_t addr) |
39 | { | 39 | { |
40 | return 0; | 40 | return 0; |
41 | } | 41 | } |
diff --git a/include/asm-cris/dma-mapping.h b/include/asm-cris/dma-mapping.h index edc8d1bfaae2..cb2fb25ff8d9 100644 --- a/include/asm-cris/dma-mapping.h +++ b/include/asm-cris/dma-mapping.h | |||
@@ -120,7 +120,7 @@ dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nelems, | |||
120 | } | 120 | } |
121 | 121 | ||
122 | static inline int | 122 | static inline int |
123 | dma_mapping_error(dma_addr_t dma_addr) | 123 | dma_mapping_error(struct device *dev, dma_addr_t dma_addr) |
124 | { | 124 | { |
125 | return 0; | 125 | return 0; |
126 | } | 126 | } |
diff --git a/include/asm-frv/dma-mapping.h b/include/asm-frv/dma-mapping.h index 2e8966ca030d..b2898877c07b 100644 --- a/include/asm-frv/dma-mapping.h +++ b/include/asm-frv/dma-mapping.h | |||
@@ -126,7 +126,7 @@ void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nele | |||
126 | } | 126 | } |
127 | 127 | ||
128 | static inline | 128 | static inline |
129 | int dma_mapping_error(dma_addr_t dma_addr) | 129 | int dma_mapping_error(struct device *dev, dma_addr_t dma_addr) |
130 | { | 130 | { |
131 | return 0; | 131 | return 0; |
132 | } | 132 | } |
diff --git a/include/asm-generic/dma-mapping-broken.h b/include/asm-generic/dma-mapping-broken.h index e2468f894d2a..82cd0cb1c3fe 100644 --- a/include/asm-generic/dma-mapping-broken.h +++ b/include/asm-generic/dma-mapping-broken.h | |||
@@ -61,7 +61,7 @@ dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nelems, | |||
61 | #define dma_sync_sg_for_device dma_sync_sg_for_cpu | 61 | #define dma_sync_sg_for_device dma_sync_sg_for_cpu |
62 | 62 | ||
63 | extern int | 63 | extern int |
64 | dma_mapping_error(dma_addr_t dma_addr); | 64 | dma_mapping_error(struct device *dev, dma_addr_t dma_addr); |
65 | 65 | ||
66 | extern int | 66 | extern int |
67 | dma_supported(struct device *dev, u64 mask); | 67 | dma_supported(struct device *dev, u64 mask); |
diff --git a/include/asm-generic/dma-mapping.h b/include/asm-generic/dma-mapping.h index 783ab9944d70..189486c3f92e 100644 --- a/include/asm-generic/dma-mapping.h +++ b/include/asm-generic/dma-mapping.h | |||
@@ -144,9 +144,9 @@ dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nelems, | |||
144 | } | 144 | } |
145 | 145 | ||
146 | static inline int | 146 | static inline int |
147 | dma_mapping_error(dma_addr_t dma_addr) | 147 | dma_mapping_error(struct device *dev, dma_addr_t dma_addr) |
148 | { | 148 | { |
149 | return pci_dma_mapping_error(dma_addr); | 149 | return pci_dma_mapping_error(to_pci_dev(dev), dma_addr); |
150 | } | 150 | } |
151 | 151 | ||
152 | 152 | ||
diff --git a/include/asm-generic/pci-dma-compat.h b/include/asm-generic/pci-dma-compat.h index 25c10e96b2b7..37b3706226e7 100644 --- a/include/asm-generic/pci-dma-compat.h +++ b/include/asm-generic/pci-dma-compat.h | |||
@@ -99,9 +99,9 @@ pci_dma_sync_sg_for_device(struct pci_dev *hwdev, struct scatterlist *sg, | |||
99 | } | 99 | } |
100 | 100 | ||
101 | static inline int | 101 | static inline int |
102 | pci_dma_mapping_error(dma_addr_t dma_addr) | 102 | pci_dma_mapping_error(struct pci_dev *pdev, dma_addr_t dma_addr) |
103 | { | 103 | { |
104 | return dma_mapping_error(dma_addr); | 104 | return dma_mapping_error(&pdev->dev, dma_addr); |
105 | } | 105 | } |
106 | 106 | ||
107 | #endif | 107 | #endif |
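asm-generic/pci-dma-compat.h is what keeps the two APIs in lockstep: the PCI wrapper now peels &pdev->dev out of the struct pci_dev and defers to the generic test, so on compat-layer architectures the two calls are interchangeable. Sketched, with pdev any bound struct pci_dev:

    /* Equivalent by construction of the compat wrapper: */
    int e1 = pci_dma_mapping_error(pdev, addr);
    int e2 = dma_mapping_error(&pdev->dev, addr);   /* e1 == e2 */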
diff --git a/include/asm-ia64/machvec.h b/include/asm-ia64/machvec.h index 0721a5e8271e..a6d50c77b6bf 100644 --- a/include/asm-ia64/machvec.h +++ b/include/asm-ia64/machvec.h | |||
@@ -54,7 +54,7 @@ typedef void ia64_mv_dma_sync_single_for_cpu (struct device *, dma_addr_t, size_ | |||
54 | typedef void ia64_mv_dma_sync_sg_for_cpu (struct device *, struct scatterlist *, int, int); | 54 | typedef void ia64_mv_dma_sync_sg_for_cpu (struct device *, struct scatterlist *, int, int); |
55 | typedef void ia64_mv_dma_sync_single_for_device (struct device *, dma_addr_t, size_t, int); | 55 | typedef void ia64_mv_dma_sync_single_for_device (struct device *, dma_addr_t, size_t, int); |
56 | typedef void ia64_mv_dma_sync_sg_for_device (struct device *, struct scatterlist *, int, int); | 56 | typedef void ia64_mv_dma_sync_sg_for_device (struct device *, struct scatterlist *, int, int); |
57 | typedef int ia64_mv_dma_mapping_error (dma_addr_t dma_addr); | 57 | typedef int ia64_mv_dma_mapping_error(struct device *, dma_addr_t dma_addr); |
58 | typedef int ia64_mv_dma_supported (struct device *, u64); | 58 | typedef int ia64_mv_dma_supported (struct device *, u64); |
59 | 59 | ||
60 | typedef dma_addr_t ia64_mv_dma_map_single_attrs (struct device *, void *, size_t, int, struct dma_attrs *); | 60 | typedef dma_addr_t ia64_mv_dma_map_single_attrs (struct device *, void *, size_t, int, struct dma_attrs *); |
diff --git a/include/asm-m68k/dma-mapping.h b/include/asm-m68k/dma-mapping.h index a26cdeb46a57..91f7944333d4 100644 --- a/include/asm-m68k/dma-mapping.h +++ b/include/asm-m68k/dma-mapping.h | |||
@@ -84,7 +84,7 @@ static inline void dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *s | |||
84 | { | 84 | { |
85 | } | 85 | } |
86 | 86 | ||
87 | static inline int dma_mapping_error(dma_addr_t handle) | 87 | static inline int dma_mapping_error(struct device *dev, dma_addr_t handle) |
88 | { | 88 | { |
89 | return 0; | 89 | return 0; |
90 | } | 90 | } |
diff --git a/include/asm-mips/dma-mapping.h b/include/asm-mips/dma-mapping.h index 230b3f1b69b1..c64afb40cd06 100644 --- a/include/asm-mips/dma-mapping.h +++ b/include/asm-mips/dma-mapping.h | |||
@@ -42,7 +42,7 @@ extern void dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, | |||
42 | int nelems, enum dma_data_direction direction); | 42 | int nelems, enum dma_data_direction direction); |
43 | extern void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, | 43 | extern void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, |
44 | int nelems, enum dma_data_direction direction); | 44 | int nelems, enum dma_data_direction direction); |
45 | extern int dma_mapping_error(dma_addr_t dma_addr); | 45 | extern int dma_mapping_error(struct device *dev, dma_addr_t dma_addr); |
46 | extern int dma_supported(struct device *dev, u64 mask); | 46 | extern int dma_supported(struct device *dev, u64 mask); |
47 | 47 | ||
48 | static inline int | 48 | static inline int |
diff --git a/include/asm-mn10300/dma-mapping.h b/include/asm-mn10300/dma-mapping.h index 7c882fca9ec8..ccae8f6c6326 100644 --- a/include/asm-mn10300/dma-mapping.h +++ b/include/asm-mn10300/dma-mapping.h | |||
@@ -182,7 +182,7 @@ void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, | |||
182 | } | 182 | } |
183 | 183 | ||
184 | static inline | 184 | static inline |
185 | int dma_mapping_error(dma_addr_t dma_addr) | 185 | int dma_mapping_error(struct device *dev, dma_addr_t dma_addr) |
186 | { | 186 | { |
187 | return 0; | 187 | return 0; |
188 | } | 188 | } |
diff --git a/include/asm-parisc/dma-mapping.h b/include/asm-parisc/dma-mapping.h index c6c0e9ff6bde..53af696f23d2 100644 --- a/include/asm-parisc/dma-mapping.h +++ b/include/asm-parisc/dma-mapping.h | |||
@@ -248,6 +248,6 @@ void * sba_get_iommu(struct parisc_device *dev); | |||
248 | #endif | 248 | #endif |
249 | 249 | ||
250 | /* At the moment, we panic on error for IOMMU resource exhaustion */ | 250 | /* At the moment, we panic on error for IOMMU resource exhaustion */ |
251 | #define dma_mapping_error(x) 0 | 251 | #define dma_mapping_error(dev, x) 0 |
252 | 252 | ||
253 | #endif | 253 | #endif |
diff --git a/include/asm-powerpc/dma-mapping.h b/include/asm-powerpc/dma-mapping.h index 74c549780987..c7ca45f97dd2 100644 --- a/include/asm-powerpc/dma-mapping.h +++ b/include/asm-powerpc/dma-mapping.h | |||
@@ -415,7 +415,7 @@ static inline void dma_sync_sg_for_device(struct device *dev, | |||
415 | __dma_sync_page(sg_page(sg), sg->offset, sg->length, direction); | 415 | __dma_sync_page(sg_page(sg), sg->offset, sg->length, direction); |
416 | } | 416 | } |
417 | 417 | ||
418 | static inline int dma_mapping_error(dma_addr_t dma_addr) | 418 | static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr) |
419 | { | 419 | { |
420 | #ifdef CONFIG_PPC64 | 420 | #ifdef CONFIG_PPC64 |
421 | return (dma_addr == DMA_ERROR_CODE); | 421 | return (dma_addr == DMA_ERROR_CODE); |
diff --git a/include/asm-sh/dma-mapping.h b/include/asm-sh/dma-mapping.h index 22cc419389fe..6c0b8a2de143 100644 --- a/include/asm-sh/dma-mapping.h +++ b/include/asm-sh/dma-mapping.h | |||
@@ -171,7 +171,7 @@ static inline int dma_get_cache_alignment(void) | |||
171 | return L1_CACHE_BYTES; | 171 | return L1_CACHE_BYTES; |
172 | } | 172 | } |
173 | 173 | ||
174 | static inline int dma_mapping_error(dma_addr_t dma_addr) | 174 | static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr) |
175 | { | 175 | { |
176 | return dma_addr == 0; | 176 | return dma_addr == 0; |
177 | } | 177 | } |
diff --git a/include/asm-sparc/dma-mapping_64.h b/include/asm-sparc/dma-mapping_64.h index 38cbec76a33f..bfa64f9702d5 100644 --- a/include/asm-sparc/dma-mapping_64.h +++ b/include/asm-sparc/dma-mapping_64.h | |||
@@ -135,7 +135,7 @@ static inline void dma_sync_sg_for_device(struct device *dev, | |||
135 | /* No flushing needed to sync cpu writes to the device. */ | 135 | /* No flushing needed to sync cpu writes to the device. */ |
136 | } | 136 | } |
137 | 137 | ||
138 | static inline int dma_mapping_error(dma_addr_t dma_addr) | 138 | static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr) |
139 | { | 139 | { |
140 | return (dma_addr == DMA_ERROR_CODE); | 140 | return (dma_addr == DMA_ERROR_CODE); |
141 | } | 141 | } |
diff --git a/include/asm-sparc/pci_32.h b/include/asm-sparc/pci_32.h index b93b6c79e08f..0ee949d220c0 100644 --- a/include/asm-sparc/pci_32.h +++ b/include/asm-sparc/pci_32.h | |||
@@ -154,7 +154,8 @@ static inline void pci_dma_burst_advice(struct pci_dev *pdev, | |||
154 | 154 | ||
155 | #define PCI_DMA_ERROR_CODE (~(dma_addr_t)0x0) | 155 | #define PCI_DMA_ERROR_CODE (~(dma_addr_t)0x0) |
156 | 156 | ||
157 | static inline int pci_dma_mapping_error(dma_addr_t dma_addr) | 157 | static inline int pci_dma_mapping_error(struct pci_dev *pdev, |
158 | dma_addr_t dma_addr) | ||
158 | { | 159 | { |
159 | return (dma_addr == PCI_DMA_ERROR_CODE); | 160 | return (dma_addr == PCI_DMA_ERROR_CODE); |
160 | } | 161 | } |
diff --git a/include/asm-sparc/pci_64.h b/include/asm-sparc/pci_64.h index f59f2571295b..4f79a54948f6 100644 --- a/include/asm-sparc/pci_64.h +++ b/include/asm-sparc/pci_64.h | |||
@@ -140,9 +140,10 @@ extern int pci_dma_supported(struct pci_dev *hwdev, u64 mask); | |||
140 | #define PCI64_REQUIRED_MASK (~(dma64_addr_t)0) | 140 | #define PCI64_REQUIRED_MASK (~(dma64_addr_t)0) |
141 | #define PCI64_ADDR_BASE 0xfffc000000000000UL | 141 | #define PCI64_ADDR_BASE 0xfffc000000000000UL |
142 | 142 | ||
143 | static inline int pci_dma_mapping_error(dma_addr_t dma_addr) | 143 | static inline int pci_dma_mapping_error(struct pci_dev *pdev, |
144 | dma_addr_t dma_addr) | ||
144 | { | 145 | { |
145 | return dma_mapping_error(dma_addr); | 146 | return dma_mapping_error(&pdev->dev, dma_addr); |
146 | } | 147 | } |
147 | 148 | ||
148 | #ifdef CONFIG_PCI | 149 | #ifdef CONFIG_PCI |
diff --git a/include/asm-x86/device.h b/include/asm-x86/device.h index 87a715367a1b..3c034f48fdb0 100644 --- a/include/asm-x86/device.h +++ b/include/asm-x86/device.h | |||
@@ -5,6 +5,9 @@ struct dev_archdata { | |||
5 | #ifdef CONFIG_ACPI | 5 | #ifdef CONFIG_ACPI |
6 | void *acpi_handle; | 6 | void *acpi_handle; |
7 | #endif | 7 | #endif |
8 | #ifdef CONFIG_X86_64 | ||
9 | struct dma_mapping_ops *dma_ops; | ||
10 | #endif | ||
8 | #ifdef CONFIG_DMAR | 11 | #ifdef CONFIG_DMAR |
9 | void *iommu; /* hook for IOMMU specific extension */ | 12 | void *iommu; /* hook for IOMMU specific extension */ |
10 | #endif | 13 | #endif |
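This archdata field is the hook the whole series is after: on x86-64 a device can now carry its own dma_mapping_ops, so an IOMMU such as Calgary can claim only the devices actually behind it. A hypothetical sketch of a per-device attach (no such helper is added by this patch; calgary_dma_ops and the predicate are assumed):

    /* Hypothetical: install per-device ops via the new archdata field. */
    static void iommu_attach_device(struct pci_dev *pdev)
    {
            if (device_is_behind_iommu(pdev))       /* assumed predicate */
                    pdev->dev.archdata.dma_ops = &calgary_dma_ops;
            /* else leave NULL: the device falls back to the global dma_ops */
    }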
diff --git a/include/asm-x86/dma-mapping.h b/include/asm-x86/dma-mapping.h index c2ddd3d1b883..0eaa9bf6011f 100644 --- a/include/asm-x86/dma-mapping.h +++ b/include/asm-x86/dma-mapping.h | |||
@@ -17,7 +17,8 @@ extern int panic_on_overflow; | |||
17 | extern int force_iommu; | 17 | extern int force_iommu; |
18 | 18 | ||
19 | struct dma_mapping_ops { | 19 | struct dma_mapping_ops { |
20 | int (*mapping_error)(dma_addr_t dma_addr); | 20 | int (*mapping_error)(struct device *dev, |
21 | dma_addr_t dma_addr); | ||
21 | void* (*alloc_coherent)(struct device *dev, size_t size, | 22 | void* (*alloc_coherent)(struct device *dev, size_t size, |
22 | dma_addr_t *dma_handle, gfp_t gfp); | 23 | dma_addr_t *dma_handle, gfp_t gfp); |
23 | void (*free_coherent)(struct device *dev, size_t size, | 24 | void (*free_coherent)(struct device *dev, size_t size, |
@@ -56,14 +57,32 @@ struct dma_mapping_ops { | |||
56 | int is_phys; | 57 | int is_phys; |
57 | }; | 58 | }; |
58 | 59 | ||
59 | extern const struct dma_mapping_ops *dma_ops; | 60 | extern struct dma_mapping_ops *dma_ops; |
60 | 61 | ||
61 | static inline int dma_mapping_error(dma_addr_t dma_addr) | 62 | static inline struct dma_mapping_ops *get_dma_ops(struct device *dev) |
62 | { | 63 | { |
63 | if (dma_ops->mapping_error) | 64 | #ifdef CONFIG_X86_32 |
64 | return dma_ops->mapping_error(dma_addr); | 65 | return dma_ops; |
66 | #else | ||
67 | if (unlikely(!dev) || !dev->archdata.dma_ops) | ||
68 | return dma_ops; | ||
69 | else | ||
70 | return dev->archdata.dma_ops; | ||
71 | #endif | ||
72 | } | ||
73 | |||
74 | /* Make sure we keep the same behaviour */ | ||
75 | static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr) | ||
76 | { | ||
77 | #ifdef CONFIG_X86_32 | ||
78 | return 0; | ||
79 | #else | ||
80 | struct dma_mapping_ops *ops = get_dma_ops(dev); | ||
81 | if (ops->mapping_error) | ||
82 | return ops->mapping_error(dev, dma_addr); | ||
65 | 83 | ||
66 | return (dma_addr == bad_dma_address); | 84 | return (dma_addr == bad_dma_address); |
85 | #endif | ||
67 | } | 86 | } |
68 | 87 | ||
69 | #define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f) | 88 | #define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f) |
@@ -83,44 +102,53 @@ static inline dma_addr_t | |||
83 | dma_map_single(struct device *hwdev, void *ptr, size_t size, | 102 | dma_map_single(struct device *hwdev, void *ptr, size_t size, |
84 | int direction) | 103 | int direction) |
85 | { | 104 | { |
105 | struct dma_mapping_ops *ops = get_dma_ops(hwdev); | ||
106 | |||
86 | BUG_ON(!valid_dma_direction(direction)); | 107 | BUG_ON(!valid_dma_direction(direction)); |
87 | return dma_ops->map_single(hwdev, virt_to_phys(ptr), size, direction); | 108 | return ops->map_single(hwdev, virt_to_phys(ptr), size, direction); |
88 | } | 109 | } |
89 | 110 | ||
90 | static inline void | 111 | static inline void |
91 | dma_unmap_single(struct device *dev, dma_addr_t addr, size_t size, | 112 | dma_unmap_single(struct device *dev, dma_addr_t addr, size_t size, |
92 | int direction) | 113 | int direction) |
93 | { | 114 | { |
115 | struct dma_mapping_ops *ops = get_dma_ops(dev); | ||
116 | |||
94 | BUG_ON(!valid_dma_direction(direction)); | 117 | BUG_ON(!valid_dma_direction(direction)); |
95 | if (dma_ops->unmap_single) | 118 | if (ops->unmap_single) |
96 | dma_ops->unmap_single(dev, addr, size, direction); | 119 | ops->unmap_single(dev, addr, size, direction); |
97 | } | 120 | } |
98 | 121 | ||
99 | static inline int | 122 | static inline int |
100 | dma_map_sg(struct device *hwdev, struct scatterlist *sg, | 123 | dma_map_sg(struct device *hwdev, struct scatterlist *sg, |
101 | int nents, int direction) | 124 | int nents, int direction) |
102 | { | 125 | { |
126 | struct dma_mapping_ops *ops = get_dma_ops(hwdev); | ||
127 | |||
103 | BUG_ON(!valid_dma_direction(direction)); | 128 | BUG_ON(!valid_dma_direction(direction)); |
104 | return dma_ops->map_sg(hwdev, sg, nents, direction); | 129 | return ops->map_sg(hwdev, sg, nents, direction); |
105 | } | 130 | } |
106 | 131 | ||
107 | static inline void | 132 | static inline void |
108 | dma_unmap_sg(struct device *hwdev, struct scatterlist *sg, int nents, | 133 | dma_unmap_sg(struct device *hwdev, struct scatterlist *sg, int nents, |
109 | int direction) | 134 | int direction) |
110 | { | 135 | { |
136 | struct dma_mapping_ops *ops = get_dma_ops(hwdev); | ||
137 | |||
111 | BUG_ON(!valid_dma_direction(direction)); | 138 | BUG_ON(!valid_dma_direction(direction)); |
112 | if (dma_ops->unmap_sg) | 139 | if (ops->unmap_sg) |
113 | dma_ops->unmap_sg(hwdev, sg, nents, direction); | 140 | ops->unmap_sg(hwdev, sg, nents, direction); |
114 | } | 141 | } |
115 | 142 | ||
116 | static inline void | 143 | static inline void |
117 | dma_sync_single_for_cpu(struct device *hwdev, dma_addr_t dma_handle, | 144 | dma_sync_single_for_cpu(struct device *hwdev, dma_addr_t dma_handle, |
118 | size_t size, int direction) | 145 | size_t size, int direction) |
119 | { | 146 | { |
147 | struct dma_mapping_ops *ops = get_dma_ops(hwdev); | ||
148 | |||
120 | BUG_ON(!valid_dma_direction(direction)); | 149 | BUG_ON(!valid_dma_direction(direction)); |
121 | if (dma_ops->sync_single_for_cpu) | 150 | if (ops->sync_single_for_cpu) |
122 | dma_ops->sync_single_for_cpu(hwdev, dma_handle, size, | 151 | ops->sync_single_for_cpu(hwdev, dma_handle, size, direction); |
123 | direction); | ||
124 | flush_write_buffers(); | 152 | flush_write_buffers(); |
125 | } | 153 | } |
126 | 154 | ||
@@ -128,10 +156,11 @@ static inline void | |||
128 | dma_sync_single_for_device(struct device *hwdev, dma_addr_t dma_handle, | 156 | dma_sync_single_for_device(struct device *hwdev, dma_addr_t dma_handle, |
129 | size_t size, int direction) | 157 | size_t size, int direction) |
130 | { | 158 | { |
159 | struct dma_mapping_ops *ops = get_dma_ops(hwdev); | ||
160 | |||
131 | BUG_ON(!valid_dma_direction(direction)); | 161 | BUG_ON(!valid_dma_direction(direction)); |
132 | if (dma_ops->sync_single_for_device) | 162 | if (ops->sync_single_for_device) |
133 | dma_ops->sync_single_for_device(hwdev, dma_handle, size, | 163 | ops->sync_single_for_device(hwdev, dma_handle, size, direction); |
134 | direction); | ||
135 | flush_write_buffers(); | 164 | flush_write_buffers(); |
136 | } | 165 | } |
137 | 166 | ||
@@ -139,11 +168,12 @@ static inline void | |||
139 | dma_sync_single_range_for_cpu(struct device *hwdev, dma_addr_t dma_handle, | 168 | dma_sync_single_range_for_cpu(struct device *hwdev, dma_addr_t dma_handle, |
140 | unsigned long offset, size_t size, int direction) | 169 | unsigned long offset, size_t size, int direction) |
141 | { | 170 | { |
142 | BUG_ON(!valid_dma_direction(direction)); | 171 | struct dma_mapping_ops *ops = get_dma_ops(hwdev); |
143 | if (dma_ops->sync_single_range_for_cpu) | ||
144 | dma_ops->sync_single_range_for_cpu(hwdev, dma_handle, offset, | ||
145 | size, direction); | ||
146 | 172 | ||
173 | BUG_ON(!valid_dma_direction(direction)); | ||
174 | if (ops->sync_single_range_for_cpu) | ||
175 | ops->sync_single_range_for_cpu(hwdev, dma_handle, offset, | ||
176 | size, direction); | ||
147 | flush_write_buffers(); | 177 | flush_write_buffers(); |
148 | } | 178 | } |
149 | 179 | ||
@@ -152,11 +182,12 @@ dma_sync_single_range_for_device(struct device *hwdev, dma_addr_t dma_handle, | |||
152 | unsigned long offset, size_t size, | 182 | unsigned long offset, size_t size, |
153 | int direction) | 183 | int direction) |
154 | { | 184 | { |
155 | BUG_ON(!valid_dma_direction(direction)); | 185 | struct dma_mapping_ops *ops = get_dma_ops(hwdev); |
156 | if (dma_ops->sync_single_range_for_device) | ||
157 | dma_ops->sync_single_range_for_device(hwdev, dma_handle, | ||
158 | offset, size, direction); | ||
159 | 186 | ||
187 | BUG_ON(!valid_dma_direction(direction)); | ||
188 | if (ops->sync_single_range_for_device) | ||
189 | ops->sync_single_range_for_device(hwdev, dma_handle, | ||
190 | offset, size, direction); | ||
160 | flush_write_buffers(); | 191 | flush_write_buffers(); |
161 | } | 192 | } |
162 | 193 | ||
@@ -164,9 +195,11 @@ static inline void | |||
164 | dma_sync_sg_for_cpu(struct device *hwdev, struct scatterlist *sg, | 195 | dma_sync_sg_for_cpu(struct device *hwdev, struct scatterlist *sg, |
165 | int nelems, int direction) | 196 | int nelems, int direction) |
166 | { | 197 | { |
198 | struct dma_mapping_ops *ops = get_dma_ops(hwdev); | ||
199 | |||
167 | BUG_ON(!valid_dma_direction(direction)); | 200 | BUG_ON(!valid_dma_direction(direction)); |
168 | if (dma_ops->sync_sg_for_cpu) | 201 | if (ops->sync_sg_for_cpu) |
169 | dma_ops->sync_sg_for_cpu(hwdev, sg, nelems, direction); | 202 | ops->sync_sg_for_cpu(hwdev, sg, nelems, direction); |
170 | flush_write_buffers(); | 203 | flush_write_buffers(); |
171 | } | 204 | } |
172 | 205 | ||
@@ -174,9 +207,11 @@ static inline void | |||
174 | dma_sync_sg_for_device(struct device *hwdev, struct scatterlist *sg, | 207 | dma_sync_sg_for_device(struct device *hwdev, struct scatterlist *sg, |
175 | int nelems, int direction) | 208 | int nelems, int direction) |
176 | { | 209 | { |
210 | struct dma_mapping_ops *ops = get_dma_ops(hwdev); | ||
211 | |||
177 | BUG_ON(!valid_dma_direction(direction)); | 212 | BUG_ON(!valid_dma_direction(direction)); |
178 | if (dma_ops->sync_sg_for_device) | 213 | if (ops->sync_sg_for_device) |
179 | dma_ops->sync_sg_for_device(hwdev, sg, nelems, direction); | 214 | ops->sync_sg_for_device(hwdev, sg, nelems, direction); |
180 | 215 | ||
181 | flush_write_buffers(); | 216 | flush_write_buffers(); |
182 | } | 217 | } |
@@ -185,9 +220,11 @@ static inline dma_addr_t dma_map_page(struct device *dev, struct page *page, | |||
185 | size_t offset, size_t size, | 220 | size_t offset, size_t size, |
186 | int direction) | 221 | int direction) |
187 | { | 222 | { |
223 | struct dma_mapping_ops *ops = get_dma_ops(dev); | ||
224 | |||
188 | BUG_ON(!valid_dma_direction(direction)); | 225 | BUG_ON(!valid_dma_direction(direction)); |
189 | return dma_ops->map_single(dev, page_to_phys(page)+offset, | 226 | return ops->map_single(dev, page_to_phys(page) + offset, |
190 | size, direction); | 227 | size, direction); |
191 | } | 228 | } |
192 | 229 | ||
193 | static inline void dma_unmap_page(struct device *dev, dma_addr_t addr, | 230 | static inline void dma_unmap_page(struct device *dev, dma_addr_t addr, |
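get_dma_ops() fixes the resolution order: a non-NULL dev->archdata.dma_ops wins, while a NULL device (legacy callers) or an unset field falls back to the system-wide dma_ops, and every inline wrapper is converted to fetch its ops once. Since the mapping_error op now receives the device, an implementation can test against whatever cookie it reserves; a sketch with an assumed error constant:

    /* Sketch of a mapping_error op under the new signature;
     * EXAMPLE_BAD_DMA stands in for the IOMMU's reserved cookie. */
    static int example_mapping_error(struct device *dev, dma_addr_t dma_addr)
    {
            return dma_addr == EXAMPLE_BAD_DMA;
    }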
diff --git a/include/asm-x86/swiotlb.h b/include/asm-x86/swiotlb.h index c706a7442633..2730b351afcf 100644 --- a/include/asm-x86/swiotlb.h +++ b/include/asm-x86/swiotlb.h | |||
@@ -35,7 +35,7 @@ extern int swiotlb_map_sg(struct device *hwdev, struct scatterlist *sg, | |||
35 | int nents, int direction); | 35 | int nents, int direction); |
36 | extern void swiotlb_unmap_sg(struct device *hwdev, struct scatterlist *sg, | 36 | extern void swiotlb_unmap_sg(struct device *hwdev, struct scatterlist *sg, |
37 | int nents, int direction); | 37 | int nents, int direction); |
38 | extern int swiotlb_dma_mapping_error(dma_addr_t dma_addr); | 38 | extern int swiotlb_dma_mapping_error(struct device *hwdev, dma_addr_t dma_addr); |
39 | extern void swiotlb_free_coherent(struct device *hwdev, size_t size, | 39 | extern void swiotlb_free_coherent(struct device *hwdev, size_t size, |
40 | void *vaddr, dma_addr_t dma_handle); | 40 | void *vaddr, dma_addr_t dma_handle); |
41 | extern int swiotlb_dma_supported(struct device *hwdev, u64 mask); | 41 | extern int swiotlb_dma_supported(struct device *hwdev, u64 mask); |
diff --git a/include/asm-xtensa/dma-mapping.h b/include/asm-xtensa/dma-mapping.h index 3c7d537dd15d..51882ae3db4d 100644 --- a/include/asm-xtensa/dma-mapping.h +++ b/include/asm-xtensa/dma-mapping.h | |||
@@ -139,7 +139,7 @@ dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nelems, | |||
139 | consistent_sync(sg_virt(sg), sg->length, dir); | 139 | consistent_sync(sg_virt(sg), sg->length, dir); |
140 | } | 140 | } |
141 | static inline int | 141 | static inline int |
142 | dma_mapping_error(dma_addr_t dma_addr) | 142 | dma_mapping_error(struct device *dev, dma_addr_t dma_addr) |
143 | { | 143 | { |
144 | return 0; | 144 | return 0; |
145 | } | 145 | } |
diff --git a/include/linux/i2o.h b/include/linux/i2o.h index 7d51cbca49ab..75ae6d8aba4f 100644 --- a/include/linux/i2o.h +++ b/include/linux/i2o.h | |||
@@ -758,7 +758,7 @@ static inline dma_addr_t i2o_dma_map_single(struct i2o_controller *c, void *ptr, | |||
758 | } | 758 | } |
759 | 759 | ||
760 | dma_addr = dma_map_single(&c->pdev->dev, ptr, size, direction); | 760 | dma_addr = dma_map_single(&c->pdev->dev, ptr, size, direction); |
761 | if (!dma_mapping_error(dma_addr)) { | 761 | if (!dma_mapping_error(&c->pdev->dev, dma_addr)) { |
762 | #ifdef CONFIG_I2O_EXT_ADAPTEC_DMA64 | 762 | #ifdef CONFIG_I2O_EXT_ADAPTEC_DMA64 |
763 | if ((sizeof(dma_addr_t) > 4) && c->pae_support) { | 763 | if ((sizeof(dma_addr_t) > 4) && c->pae_support) { |
764 | *mptr++ = cpu_to_le32(0x7C020002); | 764 | *mptr++ = cpu_to_le32(0x7C020002); |
diff --git a/include/linux/ssb/ssb.h b/include/linux/ssb/ssb.h index 4bf8cade9dbc..e530026eedf7 100644 --- a/include/linux/ssb/ssb.h +++ b/include/linux/ssb/ssb.h | |||
@@ -427,9 +427,9 @@ static inline int ssb_dma_mapping_error(struct ssb_device *dev, dma_addr_t addr) | |||
427 | { | 427 | { |
428 | switch (dev->bus->bustype) { | 428 | switch (dev->bus->bustype) { |
429 | case SSB_BUSTYPE_PCI: | 429 | case SSB_BUSTYPE_PCI: |
430 | return pci_dma_mapping_error(addr); | 430 | return pci_dma_mapping_error(dev->bus->host_pci, addr); |
431 | case SSB_BUSTYPE_SSB: | 431 | case SSB_BUSTYPE_SSB: |
432 | return dma_mapping_error(addr); | 432 | return dma_mapping_error(dev->dev, addr); |
433 | default: | 433 | default: |
434 | __ssb_dma_not_implemented(dev); | 434 | __ssb_dma_not_implemented(dev); |
435 | } | 435 | } |
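ssb shows why the device argument matters to bus abstractions: the same wrapper must reach either a PCI device or a plain struct device depending on bustype, which a test taking only the address could never do. Driver-side usage, assuming ssb_dma_map_single() with its usual signature from this era's ssb.h:

    dma_addr_t addr = ssb_dma_map_single(dev, buf, len, DMA_TO_DEVICE);
    if (ssb_dma_mapping_error(dev, addr))
            goto drop;      /* dev is the struct ssb_device in use */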
diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h index 90b529f7a154..936e333e7ce5 100644 --- a/include/rdma/ib_verbs.h +++ b/include/rdma/ib_verbs.h | |||
@@ -1590,7 +1590,7 @@ static inline int ib_dma_mapping_error(struct ib_device *dev, u64 dma_addr) | |||
1590 | { | 1590 | { |
1591 | if (dev->dma_ops) | 1591 | if (dev->dma_ops) |
1592 | return dev->dma_ops->mapping_error(dev, dma_addr); | 1592 | return dev->dma_ops->mapping_error(dev, dma_addr); |
1593 | return dma_mapping_error(dma_addr); | 1593 | return dma_mapping_error(dev->dma_device, dma_addr); |
1594 | } | 1594 | } |
1595 | 1595 | ||
1596 | /** | 1596 | /** |
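The verbs layer keeps its own override: a provider that supplies dev->dma_ops answers the question itself, and everything else falls through to the generic helper with the dma_device that performed the mapping. A typical caller:

    u64 addr = ib_dma_map_single(ibdev, buf, size, DMA_TO_DEVICE);
    if (ib_dma_mapping_error(ibdev, addr))
            return -ENOMEM; /* ibdev: the struct ib_device used to map */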
diff --git a/lib/swiotlb.c b/lib/swiotlb.c index d568894df8cc..977edbdbc1de 100644 --- a/lib/swiotlb.c +++ b/lib/swiotlb.c | |||
@@ -492,7 +492,7 @@ swiotlb_alloc_coherent(struct device *hwdev, size_t size, | |||
492 | */ | 492 | */ |
493 | dma_addr_t handle; | 493 | dma_addr_t handle; |
494 | handle = swiotlb_map_single(NULL, NULL, size, DMA_FROM_DEVICE); | 494 | handle = swiotlb_map_single(NULL, NULL, size, DMA_FROM_DEVICE); |
495 | if (swiotlb_dma_mapping_error(handle)) | 495 | if (swiotlb_dma_mapping_error(hwdev, handle)) |
496 | return NULL; | 496 | return NULL; |
497 | 497 | ||
498 | ret = bus_to_virt(handle); | 498 | ret = bus_to_virt(handle); |
@@ -824,7 +824,7 @@ swiotlb_sync_sg_for_device(struct device *hwdev, struct scatterlist *sg, | |||
824 | } | 824 | } |
825 | 825 | ||
826 | int | 826 | int |
827 | swiotlb_dma_mapping_error(dma_addr_t dma_addr) | 827 | swiotlb_dma_mapping_error(struct device *hwdev, dma_addr_t dma_addr) |
828 | { | 828 | { |
829 | return (dma_addr == virt_to_bus(io_tlb_overflow_buffer)); | 829 | return (dma_addr == virt_to_bus(io_tlb_overflow_buffer)); |
830 | } | 830 | } |
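swiotlb signals a failed mapping by returning the bus address of its overflow buffer; the new hwdev parameter is unused in the test itself but keeps the function pluggable as a mapping_error op. Caller sketch matching the alloc_coherent hunk above:

    handle = swiotlb_map_single(NULL, NULL, size, DMA_FROM_DEVICE);
    if (swiotlb_dma_mapping_error(hwdev, handle))
            return NULL;    /* overflow buffer hit: no usable mapping */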
diff --git a/net/sunrpc/xprtrdma/svc_rdma_sendto.c b/net/sunrpc/xprtrdma/svc_rdma_sendto.c index a19b22b452a3..84d328329d98 100644 --- a/net/sunrpc/xprtrdma/svc_rdma_sendto.c +++ b/net/sunrpc/xprtrdma/svc_rdma_sendto.c | |||
@@ -169,7 +169,8 @@ static int send_write(struct svcxprt_rdma *xprt, struct svc_rqst *rqstp, | |||
169 | (void *) | 169 | (void *) |
170 | vec->sge[xdr_sge_no].iov_base + sge_off, | 170 | vec->sge[xdr_sge_no].iov_base + sge_off, |
171 | sge_bytes, DMA_TO_DEVICE); | 171 | sge_bytes, DMA_TO_DEVICE); |
172 | if (dma_mapping_error(sge[sge_no].addr)) | 172 | if (dma_mapping_error(xprt->sc_cm_id->device->dma_device, |
173 | sge[sge_no].addr)) | ||
173 | goto err; | 174 | goto err; |
174 | sge_off = 0; | 175 | sge_off = 0; |
175 | sge_no++; | 176 | sge_no++; |
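The sunrpc RDMA transport is the longest reach for the new argument: the DMA-capable device hangs off the connection's ib_device, so the check dereferences xprt->sc_cm_id->device->dma_device. Resolved in isolation, with names from the hunk above:

    struct device *dmadev = xprt->sc_cm_id->device->dma_device;

    if (dma_mapping_error(dmadev, sge[sge_no].addr))
            goto err;       /* stop building the WR, unmap what we have */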