Diffstat (limited to 'arch/x86/kernel/pci-dma.c')
 arch/x86/kernel/pci-dma.c | 50 ++++++++++++++++------------------------------
 1 file changed, 20 insertions(+), 30 deletions(-)
diff --git a/arch/x86/kernel/pci-dma.c b/arch/x86/kernel/pci-dma.c
index 8467ec2320f1..37544123896d 100644
--- a/arch/x86/kernel/pci-dma.c
+++ b/arch/x86/kernel/pci-dma.c
@@ -5,14 +5,13 @@
 
 #include <asm/proto.h>
 #include <asm/dma.h>
-#include <asm/gart.h>
+#include <asm/iommu.h>
 #include <asm/calgary.h>
 #include <asm/amd_iommu.h>
 
-int forbid_dac __read_mostly;
-EXPORT_SYMBOL(forbid_dac);
+static int forbid_dac __read_mostly;
 
-const struct dma_mapping_ops *dma_ops;
+struct dma_mapping_ops *dma_ops;
 EXPORT_SYMBOL(dma_ops);
 
 static int iommu_sac_force __read_mostly;
@@ -114,21 +113,15 @@ void __init pci_iommu_alloc(void)
 	 * The order of these functions is important for
 	 * fall-back/fail-over reasons
 	 */
-#ifdef CONFIG_GART_IOMMU
 	gart_iommu_hole_init();
-#endif
 
-#ifdef CONFIG_CALGARY_IOMMU
 	detect_calgary();
-#endif
 
 	detect_intel_iommu();
 
 	amd_iommu_detect();
 
-#ifdef CONFIG_SWIOTLB
 	pci_swiotlb_init();
-#endif
 }
 #endif
 
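The CONFIG_GART_IOMMU/CONFIG_CALGARY_IOMMU/CONFIG_SWIOTLB guards can only be dropped if every call still compiles when the option is off, which is presumably why the include above switches from asm/gart.h to the more general asm/iommu.h. A minimal sketch of the header-stub pattern this relies on; the stub shown is illustrative, not the exact kernel header:

#ifdef CONFIG_GART_IOMMU
extern void gart_iommu_hole_init(void);
#else
/* No-op stub when GART support is compiled out: callers need no
 * #ifdef guard, and the compiler eliminates the empty inline call. */
static inline void gart_iommu_hole_init(void)
{
}
#endif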
@@ -184,9 +177,7 @@ static __init int iommu_setup(char *p)
 		swiotlb = 1;
 #endif
 
-#ifdef CONFIG_GART_IOMMU
 	gart_parse_options(p);
-#endif
 
 #ifdef CONFIG_CALGARY_IOMMU
 	if (!strncmp(p, "calgary", 7))
@@ -321,16 +312,17 @@ static int dma_release_coherent(struct device *dev, int order, void *vaddr)
 
 int dma_supported(struct device *dev, u64 mask)
 {
+	struct dma_mapping_ops *ops = get_dma_ops(dev);
+
 #ifdef CONFIG_PCI
 	if (mask > 0xffffffff && forbid_dac > 0) {
-		printk(KERN_INFO "PCI: Disallowing DAC for device %s\n",
-				dev->bus_id);
+		dev_info(dev, "PCI: Disallowing DAC for device\n");
 		return 0;
 	}
 #endif
 
-	if (dma_ops->dma_supported)
-		return dma_ops->dma_supported(dev, mask);
+	if (ops->dma_supported)
+		return ops->dma_supported(dev, mask);
 
 	/* Copied from i386. Doesn't make much sense, because it will
 	   only work for pci_alloc_coherent.
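Each function now looks up its operations through get_dma_ops() instead of dereferencing the global dma_ops directly, which is what allows a per-device IOMMU to supply its own ops. A plausible sketch of that helper, assuming a per-device ops pointer in dev->archdata (illustrative, not the verbatim x86 header):

static inline struct dma_mapping_ops *get_dma_ops(struct device *dev)
{
	/* Fall back to the global ops when the device carries none. */
	if (unlikely(!dev) || !dev->archdata.dma_ops)
		return dma_ops;
	return dev->archdata.dma_ops;
}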
@@ -351,8 +343,7 @@ int dma_supported(struct device *dev, u64 mask)
 	   type. Normally this doesn't make any difference, but gives
 	   more gentle handling of IOMMU overflow. */
 	if (iommu_sac_force && (mask >= DMA_40BIT_MASK)) {
-		printk(KERN_INFO "%s: Force SAC with mask %Lx\n",
-			dev->bus_id, mask);
+		dev_info(dev, "Force SAC with mask %Lx\n", mask);
 		return 0;
 	}
 
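Both printk conversions follow the same pattern: dev_info() already prefixes the message with the driver and device name, so passing dev->bus_id by hand becomes redundant. A hypothetical before/after of the log output, with an invented device name for illustration:

/* old: printk(KERN_INFO "%s: Force SAC with mask %Lx\n", dev->bus_id, mask);
 *   -> "0000:00:1d.7: Force SAC with mask ffffffffff"
 * new: dev_info(dev, "Force SAC with mask %Lx\n", mask);
 *   -> roughly "pci 0000:00:1d.7: Force SAC with mask ffffffffff"
 */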
@@ -378,6 +369,7 @@ void *
 dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
 		   gfp_t gfp)
 {
+	struct dma_mapping_ops *ops = get_dma_ops(dev);
 	void *memory = NULL;
 	struct page *page;
 	unsigned long dma_mask = 0;
@@ -446,8 +438,8 @@ dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
 		/* Let low level make its own zone decisions */
 		gfp &= ~(GFP_DMA32|GFP_DMA);
 
-		if (dma_ops->alloc_coherent)
-			return dma_ops->alloc_coherent(dev, size,
+		if (ops->alloc_coherent)
+			return ops->alloc_coherent(dev, size,
 						   dma_handle, gfp);
 		return NULL;
 	}
@@ -459,14 +451,14 @@ dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
 		}
 	}
 
-	if (dma_ops->alloc_coherent) {
+	if (ops->alloc_coherent) {
 		free_pages((unsigned long)memory, get_order(size));
 		gfp &= ~(GFP_DMA|GFP_DMA32);
-		return dma_ops->alloc_coherent(dev, size, dma_handle, gfp);
+		return ops->alloc_coherent(dev, size, dma_handle, gfp);
 	}
 
-	if (dma_ops->map_simple) {
-		*dma_handle = dma_ops->map_simple(dev, virt_to_phys(memory),
+	if (ops->map_simple) {
+		*dma_handle = ops->map_simple(dev, virt_to_phys(memory),
 					      size,
 					      PCI_DMA_BIDIRECTIONAL);
 		if (*dma_handle != bad_dma_address)
@@ -488,29 +480,27 @@ EXPORT_SYMBOL(dma_alloc_coherent);
 void dma_free_coherent(struct device *dev, size_t size,
 			 void *vaddr, dma_addr_t bus)
 {
+	struct dma_mapping_ops *ops = get_dma_ops(dev);
+
 	int order = get_order(size);
 	WARN_ON(irqs_disabled());	/* for portability */
 	if (dma_release_coherent(dev, order, vaddr))
 		return;
-	if (dma_ops->unmap_single)
-		dma_ops->unmap_single(dev, bus, size, 0);
+	if (ops->unmap_single)
+		ops->unmap_single(dev, bus, size, 0);
 	free_pages((unsigned long)vaddr, order);
 }
 EXPORT_SYMBOL(dma_free_coherent);
 
 static int __init pci_iommu_init(void)
 {
-#ifdef CONFIG_CALGARY_IOMMU
 	calgary_iommu_init();
-#endif
 
 	intel_iommu_init();
 
 	amd_iommu_init();
 
-#ifdef CONFIG_GART_IOMMU
 	gart_iommu_init();
-#endif
 
 	no_iommu_init();
 	return 0;
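For context, this is the path a typical driver exercises: allocate a coherent buffer, hand the bus address to the device, and free it on teardown. A hypothetical caller, as it might appear in a PCI driver's probe path; the device, size, and error handling are illustrative, not from this patch:

/* Hypothetical usage of the reworked API: the ops chosen by
 * get_dma_ops() for this device decide how the buffer is mapped. */
dma_addr_t bus;
void *cpu = dma_alloc_coherent(&pdev->dev, PAGE_SIZE, &bus, GFP_KERNEL);

if (!cpu)
	return -ENOMEM;
/* ... program the device with 'bus', access the buffer via 'cpu' ... */
dma_free_coherent(&pdev->dev, PAGE_SIZE, cpu, bus);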