Diffstat (limited to 'arch')
-rw-r--r-- | arch/arm/common/dmabounce.c                    |  2
-rw-r--r-- | arch/ia64/hp/common/hwsw_iommu.c               |  5
-rw-r--r-- | arch/ia64/hp/common/sba_iommu.c                |  2
-rw-r--r-- | arch/ia64/sn/pci/pci_dma.c                     |  2
-rw-r--r-- | arch/mips/mm/dma-default.c                     |  2
-rw-r--r-- | arch/powerpc/platforms/cell/celleb_scc_pciex.c |  2
-rw-r--r-- | arch/powerpc/platforms/cell/spider-pci.c       |  2
-rw-r--r-- | arch/powerpc/platforms/iseries/mf.c            |  2
-rw-r--r-- | arch/x86/kernel/pci-calgary_64.c               |  2
-rw-r--r-- | arch/x86/kernel/pci-dma.c                      | 27
-rw-r--r-- | arch/x86/kernel/pci-gart_64.c                  |  3
-rw-r--r-- | arch/x86/kernel/pci-nommu.c                    | 14
-rw-r--r-- | arch/x86/kernel/pci-swiotlb_64.c               |  2
13 files changed, 30 insertions, 37 deletions
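The diff below makes two related interface changes: dma_mapping_error() gains a struct device * argument so the check can be dispatched through that device's DMA operations, and the x86 dma_mapping_ops tables lose their const qualifier so they can be referenced through a per-device pointer. As a hedged illustration of the caller-side change (not taken from this commit; the function and variable names are placeholders):

/* Illustrative sketch only, not part of this patch. */
#include <linux/dma-mapping.h>

static int example_map_buffer(struct device *dev, void *buf, size_t len,
			      dma_addr_t *handle)
{
	*handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);

	/* Before this patch the check was dma_mapping_error(*handle);
	 * afterwards the device is passed along, so the result can be
	 * validated by that device's own dma_mapping_ops. */
	if (dma_mapping_error(dev, *handle))
		return -ENOMEM;
	return 0;
}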
diff --git a/arch/arm/common/dmabounce.c b/arch/arm/common/dmabounce.c
index dd2947342604..69130f365904 100644
--- a/arch/arm/common/dmabounce.c
+++ b/arch/arm/common/dmabounce.c
@@ -280,7 +280,7 @@ unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
 	/*
 	 * Trying to unmap an invalid mapping
 	 */
-	if (dma_mapping_error(dma_addr)) {
+	if (dma_mapping_error(dev, dma_addr)) {
 		dev_err(dev, "Trying to unmap invalid mapping\n");
 		return;
 	}
diff --git a/arch/ia64/hp/common/hwsw_iommu.c b/arch/ia64/hp/common/hwsw_iommu.c
index 1c44ec2a1d58..88b6e6f3fd88 100644
--- a/arch/ia64/hp/common/hwsw_iommu.c
+++ b/arch/ia64/hp/common/hwsw_iommu.c
@@ -186,9 +186,10 @@ hwsw_dma_supported (struct device *dev, u64 mask)
 }
 
 int
-hwsw_dma_mapping_error (dma_addr_t dma_addr)
+hwsw_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
 {
-	return hwiommu_dma_mapping_error (dma_addr) || swiotlb_dma_mapping_error(dma_addr);
+	return hwiommu_dma_mapping_error(dev, dma_addr) ||
+		swiotlb_dma_mapping_error(dev, dma_addr);
 }
 
 EXPORT_SYMBOL(hwsw_dma_mapping_error);
diff --git a/arch/ia64/hp/common/sba_iommu.c b/arch/ia64/hp/common/sba_iommu.c
index 34421aed1e2a..4956be40d7b5 100644
--- a/arch/ia64/hp/common/sba_iommu.c
+++ b/arch/ia64/hp/common/sba_iommu.c
@@ -2147,7 +2147,7 @@ sba_dma_supported (struct device *dev, u64 mask)
 }
 
 int
-sba_dma_mapping_error (dma_addr_t dma_addr)
+sba_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
 {
 	return 0;
 }
diff --git a/arch/ia64/sn/pci/pci_dma.c b/arch/ia64/sn/pci/pci_dma.c
index 52175af299a0..53ebb6484495 100644
--- a/arch/ia64/sn/pci/pci_dma.c
+++ b/arch/ia64/sn/pci/pci_dma.c
@@ -350,7 +350,7 @@ void sn_dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
 }
 EXPORT_SYMBOL(sn_dma_sync_sg_for_device);
 
-int sn_dma_mapping_error(dma_addr_t dma_addr)
+int sn_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
 {
 	return 0;
 }
diff --git a/arch/mips/mm/dma-default.c b/arch/mips/mm/dma-default.c
index ae39dd88b9aa..891312f8e5a6 100644
--- a/arch/mips/mm/dma-default.c
+++ b/arch/mips/mm/dma-default.c
@@ -348,7 +348,7 @@ void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nele
 
 EXPORT_SYMBOL(dma_sync_sg_for_device);
 
-int dma_mapping_error(dma_addr_t dma_addr)
+int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
 {
 	return 0;
 }
diff --git a/arch/powerpc/platforms/cell/celleb_scc_pciex.c b/arch/powerpc/platforms/cell/celleb_scc_pciex.c
index 0e04f8fb152a..3e7e0f1568ef 100644
--- a/arch/powerpc/platforms/cell/celleb_scc_pciex.c
+++ b/arch/powerpc/platforms/cell/celleb_scc_pciex.c
@@ -281,7 +281,7 @@ static int __init scc_pciex_iowa_init(struct iowa_bus *bus, void *data)
 
 	dummy_page_da = dma_map_single(bus->phb->parent, dummy_page_va,
				       PAGE_SIZE, DMA_FROM_DEVICE);
-	if (dma_mapping_error(dummy_page_da)) {
+	if (dma_mapping_error(bus->phb->parent, dummy_page_da)) {
 		pr_err("PCIEX:Map dummy page failed.\n");
 		kfree(dummy_page_va);
 		return -1;
diff --git a/arch/powerpc/platforms/cell/spider-pci.c b/arch/powerpc/platforms/cell/spider-pci.c
index 418b605ac35a..5122ec145271 100644
--- a/arch/powerpc/platforms/cell/spider-pci.c
+++ b/arch/powerpc/platforms/cell/spider-pci.c
@@ -111,7 +111,7 @@ static int __init spiderpci_pci_setup_chip(struct pci_controller *phb,
 
 	dummy_page_da = dma_map_single(phb->parent, dummy_page_va,
				       PAGE_SIZE, DMA_FROM_DEVICE);
-	if (dma_mapping_error(dummy_page_da)) {
+	if (dma_mapping_error(phb->parent, dummy_page_da)) {
 		pr_err("SPIDER-IOWA:Map dummy page filed.\n");
 		kfree(dummy_page_va);
 		return -1;
diff --git a/arch/powerpc/platforms/iseries/mf.c b/arch/powerpc/platforms/iseries/mf.c
index 1dc7295746da..731d7b157749 100644
--- a/arch/powerpc/platforms/iseries/mf.c
+++ b/arch/powerpc/platforms/iseries/mf.c
@@ -871,7 +871,7 @@ static int proc_mf_dump_cmdline(char *page, char **start, off_t off,
 	count = 256 - off;
 
 	dma_addr = iseries_hv_map(page, off + count, DMA_FROM_DEVICE);
-	if (dma_mapping_error(dma_addr))
+	if (dma_mapping_error(NULL, dma_addr))
 		return -ENOMEM;
 	memset(page, 0, off + count);
 	memset(&vsp_cmd, 0, sizeof(vsp_cmd));
diff --git a/arch/x86/kernel/pci-calgary_64.c b/arch/x86/kernel/pci-calgary_64.c
index 19e7fc7c2c4f..1eb86be93d7a 100644
--- a/arch/x86/kernel/pci-calgary_64.c
+++ b/arch/x86/kernel/pci-calgary_64.c
@@ -544,7 +544,7 @@ error:
 	return ret;
 }
 
-static const struct dma_mapping_ops calgary_dma_ops = {
+static struct dma_mapping_ops calgary_dma_ops = {
 	.alloc_coherent = calgary_alloc_coherent,
 	.map_single = calgary_map_single,
 	.unmap_single = calgary_unmap_single,
diff --git a/arch/x86/kernel/pci-dma.c b/arch/x86/kernel/pci-dma.c
index cbecb05551bb..37544123896d 100644
--- a/arch/x86/kernel/pci-dma.c
+++ b/arch/x86/kernel/pci-dma.c
@@ -11,7 +11,7 @@
 
 static int forbid_dac __read_mostly;
 
-const struct dma_mapping_ops *dma_ops;
+struct dma_mapping_ops *dma_ops;
 EXPORT_SYMBOL(dma_ops);
 
 static int iommu_sac_force __read_mostly;
@@ -312,6 +312,8 @@ static int dma_release_coherent(struct device *dev, int order, void *vaddr)
 
 int dma_supported(struct device *dev, u64 mask)
 {
+	struct dma_mapping_ops *ops = get_dma_ops(dev);
+
 #ifdef CONFIG_PCI
 	if (mask > 0xffffffff && forbid_dac > 0) {
 		dev_info(dev, "PCI: Disallowing DAC for device\n");
@@ -319,8 +321,8 @@ int dma_supported(struct device *dev, u64 mask)
 	}
 #endif
 
-	if (dma_ops->dma_supported)
-		return dma_ops->dma_supported(dev, mask);
+	if (ops->dma_supported)
+		return ops->dma_supported(dev, mask);
 
 	/* Copied from i386. Doesn't make much sense, because it will
 	   only work for pci_alloc_coherent.
@@ -367,6 +369,7 @@ void *
 dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
		   gfp_t gfp)
 {
+	struct dma_mapping_ops *ops = get_dma_ops(dev);
 	void *memory = NULL;
 	struct page *page;
 	unsigned long dma_mask = 0;
@@ -435,8 +438,8 @@ dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
 			/* Let low level make its own zone decisions */
 			gfp &= ~(GFP_DMA32|GFP_DMA);
 
-			if (dma_ops->alloc_coherent)
-				return dma_ops->alloc_coherent(dev, size,
+			if (ops->alloc_coherent)
+				return ops->alloc_coherent(dev, size,
						       dma_handle, gfp);
 			return NULL;
 		}
@@ -448,14 +451,14 @@ dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
 		}
 	}
 
-	if (dma_ops->alloc_coherent) {
+	if (ops->alloc_coherent) {
 		free_pages((unsigned long)memory, get_order(size));
 		gfp &= ~(GFP_DMA|GFP_DMA32);
-		return dma_ops->alloc_coherent(dev, size, dma_handle, gfp);
+		return ops->alloc_coherent(dev, size, dma_handle, gfp);
 	}
 
-	if (dma_ops->map_simple) {
-		*dma_handle = dma_ops->map_simple(dev, virt_to_phys(memory),
+	if (ops->map_simple) {
+		*dma_handle = ops->map_simple(dev, virt_to_phys(memory),
					      size,
					      PCI_DMA_BIDIRECTIONAL);
 		if (*dma_handle != bad_dma_address)
@@ -477,12 +480,14 @@ EXPORT_SYMBOL(dma_alloc_coherent);
 void dma_free_coherent(struct device *dev, size_t size,
			 void *vaddr, dma_addr_t bus)
 {
+	struct dma_mapping_ops *ops = get_dma_ops(dev);
+
 	int order = get_order(size);
 	WARN_ON(irqs_disabled());	/* for portability */
 	if (dma_release_coherent(dev, order, vaddr))
 		return;
-	if (dma_ops->unmap_single)
-		dma_ops->unmap_single(dev, bus, size, 0);
+	if (ops->unmap_single)
+		ops->unmap_single(dev, bus, size, 0);
 	free_pages((unsigned long)vaddr, order);
 }
 EXPORT_SYMBOL(dma_free_coherent);
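The pci-dma.c hunks above replace direct uses of the global dma_ops table with a local ops pointer obtained from get_dma_ops(dev). That helper is not part of this diff; it lives in the x86 dma-mapping header. A rough, hedged sketch of the shape it takes at this point in the series, with the per-device case noted only as a comment because it is not shown here:

/* Conceptual sketch only; the real inline is in asm/dma-mapping.h and
 * may differ in detail. */
extern struct dma_mapping_ops *dma_ops;	/* global table, see the hunk at line 14 */

static inline struct dma_mapping_ops *get_dma_ops(struct device *dev)
{
	/* A per-device ops table, if one were installed for 'dev', would be
	 * preferred here; absent that, fall back to the global dma_ops. */
	return dma_ops;
}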
diff --git a/arch/x86/kernel/pci-gart_64.c b/arch/x86/kernel/pci-gart_64.c
index df5f142657d2..744126e64950 100644
--- a/arch/x86/kernel/pci-gart_64.c
+++ b/arch/x86/kernel/pci-gart_64.c
@@ -692,8 +692,7 @@ static __init int init_k8_gatt(struct agp_kern_info *info)
 
 extern int agp_amd64_init(void);
 
-static const struct dma_mapping_ops gart_dma_ops = {
-	.mapping_error = NULL,
+static struct dma_mapping_ops gart_dma_ops = {
 	.map_single = gart_map_single,
 	.map_simple = gart_map_simple,
 	.unmap_single = gart_unmap_single,
diff --git a/arch/x86/kernel/pci-nommu.c b/arch/x86/kernel/pci-nommu.c
index 792b9179eff3..3f91f71cdc3e 100644
--- a/arch/x86/kernel/pci-nommu.c
+++ b/arch/x86/kernel/pci-nommu.c
@@ -72,21 +72,9 @@ static int nommu_map_sg(struct device *hwdev, struct scatterlist *sg,
 	return nents;
 }
 
-/* Make sure we keep the same behaviour */
-static int nommu_mapping_error(dma_addr_t dma_addr)
-{
-#ifdef CONFIG_X86_32
-	return 0;
-#else
-	return (dma_addr == bad_dma_address);
-#endif
-}
-
-
-const struct dma_mapping_ops nommu_dma_ops = {
+struct dma_mapping_ops nommu_dma_ops = {
 	.map_single = nommu_map_single,
 	.map_sg = nommu_map_sg,
-	.mapping_error = nommu_mapping_error,
 	.is_phys = 1,
 };
 
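With a device pointer available at every dma_mapping_error() call site, the open-coded nommu_mapping_error() removed above becomes redundant: the generic check can fall back to comparing against bad_dma_address whenever an ops table provides no .mapping_error hook. A hedged sketch of that fallback, illustrative rather than quoted from the headers:

/* Illustrative only; the real inline lives in the x86 dma-mapping header
 * and may be structured differently. */
static inline int example_dma_mapping_error(struct device *dev,
					    dma_addr_t dma_addr)
{
	struct dma_mapping_ops *ops = get_dma_ops(dev);

	/* Prefer the per-ops check (e.g. swiotlb's below); otherwise treat
	 * bad_dma_address as the failure sentinel, matching the behaviour
	 * the deleted nommu helper preserved on 64-bit. */
	if (ops->mapping_error)
		return ops->mapping_error(dev, dma_addr);

	return (dma_addr == bad_dma_address);
}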
diff --git a/arch/x86/kernel/pci-swiotlb_64.c b/arch/x86/kernel/pci-swiotlb_64.c
index 20df839b9c20..c4ce0332759e 100644
--- a/arch/x86/kernel/pci-swiotlb_64.c
+++ b/arch/x86/kernel/pci-swiotlb_64.c
@@ -18,7 +18,7 @@ swiotlb_map_single_phys(struct device *hwdev, phys_addr_t paddr, size_t size,
 	return swiotlb_map_single(hwdev, phys_to_virt(paddr), size, direction);
 }
 
-const struct dma_mapping_ops swiotlb_dma_ops = {
+struct dma_mapping_ops swiotlb_dma_ops = {
 	.mapping_error = swiotlb_dma_mapping_error,
 	.alloc_coherent = swiotlb_alloc_coherent,
 	.free_coherent = swiotlb_free_coherent,