author     FUJITA Tomonori <fujita.tomonori@lab.ntt.co.jp>    2008-07-25 22:44:49 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>     2008-07-26 15:00:03 -0400
commit     8d8bb39b9eba32dd70e87fd5ad5c5dd4ba118e06 (patch)
tree       64090a84f4c4466f9f30ff46c993e0cede379052 /arch/x86
parent     c485b465a031b6f9b9a51300e0ee1f86efc6db87 (diff)
dma-mapping: add the device argument to dma_mapping_error()
Add per-device dma_mapping_ops support for CONFIG_X86_64, as the POWER architecture does. This enables us to cleanly fix the Calgary IOMMU issue that some devices are not behind the IOMMU (http://lkml.org/lkml/2008/5/8/423).

I think that per-device dma_mapping_ops support would also be helpful for KVM people to support PCI passthrough, but Andi thinks that this makes it difficult to support PCI passthrough (see the above thread). So I CC'ed this to the KVM camp. Comments are appreciated.

A pointer to dma_mapping_ops is added to struct dev_archdata. If the pointer is non-NULL, the DMA operations in asm/dma-mapping.h use it. If it's NULL, the system-wide dma_ops pointer is used as before.

If it's useful for the KVM people, I plan to implement a mechanism to register a hook called when a new PCI (or DMA-capable) device is created (it works with hot plugging). It enables IOMMUs to set up an appropriate dma_mapping_ops per device.

The major obstacle is that dma_mapping_error doesn't take a pointer to the device, unlike the other DMA operations. So x86 can't have dma_mapping_ops per device. Note that all the POWER IOMMUs use the same dma_mapping_error function, so this is not a problem for POWER, but x86 IOMMUs use different dma_mapping_error functions.

The first patch adds the device argument to dma_mapping_error. The patch is trivial but large since it touches lots of drivers and dma-mapping.h in all the architectures.

This patch:

dma_mapping_error() doesn't take a pointer to the device, unlike the other DMA operations, so we can't have dma_mapping_ops per device. Note that POWER already has dma_mapping_ops per device, but all the POWER IOMMUs use the same dma_mapping_error function; x86 IOMMUs use different ones and therefore need the device argument.

[akpm@linux-foundation.org: fix sge]
[akpm@linux-foundation.org: fix svc_rdma]
[akpm@linux-foundation.org: build fix]
[akpm@linux-foundation.org: fix bnx2x]
[akpm@linux-foundation.org: fix s2io]
[akpm@linux-foundation.org: fix pasemi_mac]
[akpm@linux-foundation.org: fix sdhci]
[akpm@linux-foundation.org: build fix]
[akpm@linux-foundation.org: fix sparc]
[akpm@linux-foundation.org: fix ibmvscsi]
Signed-off-by: FUJITA Tomonori <fujita.tomonori@lab.ntt.co.jp>
Cc: Muli Ben-Yehuda <muli@il.ibm.com>
Cc: Andi Kleen <andi@firstfloor.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Ingo Molnar <mingo@elte.hu>
Cc: Avi Kivity <avi@qumranet.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
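The mechanism described above is easy to see in isolation: each device carries an optional dma_mapping_ops pointer in its archdata, get_dma_ops() falls back to the system-wide dma_ops when that pointer is NULL, and dma_mapping_error() can only dispatch per device once it receives the device pointer. Below is a minimal, self-contained user-space sketch of that dispatch pattern; the simplified types, the toy_ops IOMMU, and its poison address are illustrative stand-ins, not the actual asm-x86/dma-mapping.h code.

    /*
     * Minimal user-space sketch of the per-device dma_mapping_ops dispatch
     * this patch enables.  All types and values are simplified stand-ins.
     */
    #include <stdio.h>
    #include <stddef.h>

    typedef unsigned long dma_addr_t;
    #define bad_dma_address ((dma_addr_t)0)    /* stand-in for the kernel's bad_dma_address */

    struct device;

    struct dma_mapping_ops {
            /* now takes the device, so each IOMMU can use its own check */
            int (*mapping_error)(struct device *dev, dma_addr_t dma_addr);
    };

    struct dev_archdata {
            struct dma_mapping_ops *dma_ops;   /* per-device ops, may be NULL */
    };

    struct device {
            struct dev_archdata archdata;
    };

    /* system-wide fallback, as before the patch */
    static struct dma_mapping_ops *dma_ops;

    static struct dma_mapping_ops *get_dma_ops(struct device *dev)
    {
            /* prefer the per-device ops; fall back to the global pointer */
            if (dev && dev->archdata.dma_ops)
                    return dev->archdata.dma_ops;
            return dma_ops;
    }

    static int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
    {
            struct dma_mapping_ops *ops = get_dma_ops(dev);

            if (ops && ops->mapping_error)
                    return ops->mapping_error(dev, dma_addr);
            return dma_addr == bad_dma_address;
    }

    /* hypothetical IOMMU that flags a private poison address instead */
    static int toy_mapping_error(struct device *dev, dma_addr_t dma_addr)
    {
            (void)dev;
            return dma_addr == (dma_addr_t)~0UL;
    }

    static struct dma_mapping_ops toy_ops = { .mapping_error = toy_mapping_error };

    int main(void)
    {
            struct device plain        = { .archdata = { .dma_ops = NULL } };
            struct device behind_iommu = { .archdata = { .dma_ops = &toy_ops } };

            printf("%d %d\n",
                   dma_mapping_error(&plain, bad_dma_address),          /* generic check */
                   dma_mapping_error(&behind_iommu, (dma_addr_t)~0UL)); /* per-device hook */
            return 0;
    }

Compiled with any C compiler this prints "1 1": both the generic check against bad_dma_address and the per-device hook report a failed mapping, which is exactly the split that requires the device argument.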
Diffstat (limited to 'arch/x86')
-rw-r--r--  arch/x86/kernel/pci-calgary_64.c  |  2
-rw-r--r--  arch/x86/kernel/pci-dma.c         | 27
-rw-r--r--  arch/x86/kernel/pci-gart_64.c     |  3
-rw-r--r--  arch/x86/kernel/pci-nommu.c       | 14
-rw-r--r--  arch/x86/kernel/pci-swiotlb_64.c  |  2
5 files changed, 20 insertions(+), 28 deletions(-)
diff --git a/arch/x86/kernel/pci-calgary_64.c b/arch/x86/kernel/pci-calgary_64.c
index 19e7fc7c2c4f..1eb86be93d7a 100644
--- a/arch/x86/kernel/pci-calgary_64.c
+++ b/arch/x86/kernel/pci-calgary_64.c
@@ -544,7 +544,7 @@ error:
 	return ret;
 }
 
-static const struct dma_mapping_ops calgary_dma_ops = {
+static struct dma_mapping_ops calgary_dma_ops = {
 	.alloc_coherent = calgary_alloc_coherent,
 	.map_single = calgary_map_single,
 	.unmap_single = calgary_unmap_single,
diff --git a/arch/x86/kernel/pci-dma.c b/arch/x86/kernel/pci-dma.c
index cbecb05551bb..37544123896d 100644
--- a/arch/x86/kernel/pci-dma.c
+++ b/arch/x86/kernel/pci-dma.c
@@ -11,7 +11,7 @@
 
 static int forbid_dac __read_mostly;
 
-const struct dma_mapping_ops *dma_ops;
+struct dma_mapping_ops *dma_ops;
 EXPORT_SYMBOL(dma_ops);
 
 static int iommu_sac_force __read_mostly;
@@ -312,6 +312,8 @@ static int dma_release_coherent(struct device *dev, int order, void *vaddr)
 
 int dma_supported(struct device *dev, u64 mask)
 {
+	struct dma_mapping_ops *ops = get_dma_ops(dev);
+
 #ifdef CONFIG_PCI
 	if (mask > 0xffffffff && forbid_dac > 0) {
 		dev_info(dev, "PCI: Disallowing DAC for device\n");
@@ -319,8 +321,8 @@ int dma_supported(struct device *dev, u64 mask)
 	}
 #endif
 
-	if (dma_ops->dma_supported)
-		return dma_ops->dma_supported(dev, mask);
+	if (ops->dma_supported)
+		return ops->dma_supported(dev, mask);
 
 	/* Copied from i386. Doesn't make much sense, because it will
 	   only work for pci_alloc_coherent.
@@ -367,6 +369,7 @@ void *
 dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
 		   gfp_t gfp)
 {
+	struct dma_mapping_ops *ops = get_dma_ops(dev);
 	void *memory = NULL;
 	struct page *page;
 	unsigned long dma_mask = 0;
@@ -435,8 +438,8 @@ dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
 			/* Let low level make its own zone decisions */
 			gfp &= ~(GFP_DMA32|GFP_DMA);
 
-			if (dma_ops->alloc_coherent)
-				return dma_ops->alloc_coherent(dev, size,
+			if (ops->alloc_coherent)
+				return ops->alloc_coherent(dev, size,
 							   dma_handle, gfp);
 			return NULL;
 		}
@@ -448,14 +451,14 @@ dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
 		}
 	}
 
-	if (dma_ops->alloc_coherent) {
+	if (ops->alloc_coherent) {
 		free_pages((unsigned long)memory, get_order(size));
 		gfp &= ~(GFP_DMA|GFP_DMA32);
-		return dma_ops->alloc_coherent(dev, size, dma_handle, gfp);
+		return ops->alloc_coherent(dev, size, dma_handle, gfp);
 	}
 
-	if (dma_ops->map_simple) {
-		*dma_handle = dma_ops->map_simple(dev, virt_to_phys(memory),
+	if (ops->map_simple) {
+		*dma_handle = ops->map_simple(dev, virt_to_phys(memory),
 						  size,
 						  PCI_DMA_BIDIRECTIONAL);
 		if (*dma_handle != bad_dma_address)
@@ -477,12 +480,14 @@ EXPORT_SYMBOL(dma_alloc_coherent);
 void dma_free_coherent(struct device *dev, size_t size,
 			 void *vaddr, dma_addr_t bus)
 {
+	struct dma_mapping_ops *ops = get_dma_ops(dev);
+
 	int order = get_order(size);
 	WARN_ON(irqs_disabled());	/* for portability */
 	if (dma_release_coherent(dev, order, vaddr))
 		return;
-	if (dma_ops->unmap_single)
-		dma_ops->unmap_single(dev, bus, size, 0);
+	if (ops->unmap_single)
+		ops->unmap_single(dev, bus, size, 0);
 	free_pages((unsigned long)vaddr, order);
 }
 EXPORT_SYMBOL(dma_free_coherent);
diff --git a/arch/x86/kernel/pci-gart_64.c b/arch/x86/kernel/pci-gart_64.c
index df5f142657d2..744126e64950 100644
--- a/arch/x86/kernel/pci-gart_64.c
+++ b/arch/x86/kernel/pci-gart_64.c
@@ -692,8 +692,7 @@ static __init int init_k8_gatt(struct agp_kern_info *info)
 
 extern int agp_amd64_init(void);
 
-static const struct dma_mapping_ops gart_dma_ops = {
-	.mapping_error = NULL,
+static struct dma_mapping_ops gart_dma_ops = {
 	.map_single = gart_map_single,
 	.map_simple = gart_map_simple,
 	.unmap_single = gart_unmap_single,
diff --git a/arch/x86/kernel/pci-nommu.c b/arch/x86/kernel/pci-nommu.c
index 792b9179eff3..3f91f71cdc3e 100644
--- a/arch/x86/kernel/pci-nommu.c
+++ b/arch/x86/kernel/pci-nommu.c
@@ -72,21 +72,9 @@ static int nommu_map_sg(struct device *hwdev, struct scatterlist *sg,
 	return nents;
 }
 
-/* Make sure we keep the same behaviour */
-static int nommu_mapping_error(dma_addr_t dma_addr)
-{
-#ifdef CONFIG_X86_32
-	return 0;
-#else
-	return (dma_addr == bad_dma_address);
-#endif
-}
-
-
-const struct dma_mapping_ops nommu_dma_ops = {
+struct dma_mapping_ops nommu_dma_ops = {
 	.map_single = nommu_map_single,
 	.map_sg = nommu_map_sg,
-	.mapping_error = nommu_mapping_error,
 	.is_phys = 1,
 };
 
diff --git a/arch/x86/kernel/pci-swiotlb_64.c b/arch/x86/kernel/pci-swiotlb_64.c
index 20df839b9c20..c4ce0332759e 100644
--- a/arch/x86/kernel/pci-swiotlb_64.c
+++ b/arch/x86/kernel/pci-swiotlb_64.c
@@ -18,7 +18,7 @@ swiotlb_map_single_phys(struct device *hwdev, phys_addr_t paddr, size_t size,
 	return swiotlb_map_single(hwdev, phys_to_virt(paddr), size, direction);
 }
 
-const struct dma_mapping_ops swiotlb_dma_ops = {
+struct dma_mapping_ops swiotlb_dma_ops = {
 	.mapping_error = swiotlb_dma_mapping_error,
 	.alloc_coherent = swiotlb_alloc_coherent,
 	.free_coherent = swiotlb_free_coherent,