path: root/arch/x86/kernel/pci-dma.c
author		Ingo Molnar <mingo@elte.hu>	2008-07-28 18:07:55 -0400
committer	Ingo Molnar <mingo@elte.hu>	2008-07-28 18:07:55 -0400
commit		cb28a1bbdb4790378e7366d6c9ee1d2340b84f92 (patch)
tree		316436f77dac75335fd2c3ef5f109e71606c50d3 /arch/x86/kernel/pci-dma.c
parent		b6d4f7e3ef25beb8c658c97867d98883e69dc544 (diff)
parent		f934fb19ef34730263e6afc01e8ec27a8a71470f (diff)
Merge branch 'linus' into core/generic-dma-coherent
Conflicts:
	arch/x86/Kconfig

Signed-off-by: Ingo Molnar <mingo@elte.hu>
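Context for the diff below: most of the changes replace direct dereferences of the global dma_ops pointer with a per-device lookup, struct dma_mapping_ops *ops = get_dma_ops(dev). As a reading aid, here is a minimal sketch of such a helper, assuming the dev->archdata.dma_ops field that x86 carried around this time; the real helper lives in the dma-mapping header, not in this file:

static inline struct dma_mapping_ops *get_dma_ops(struct device *dev)
{
#ifdef CONFIG_X86_32
	return dma_ops;			/* 32-bit: a single global ops table */
#else
	/* Fall back to the global table when the device carries no
	 * IOMMU-specific ops of its own. */
	if (unlikely(!dev) || !dev->archdata.dma_ops)
		return dma_ops;
	return dev->archdata.dma_ops;
#endif
}

This is what lets dma_supported(), dma_alloc_coherent() and dma_free_coherent() below pick per-device IOMMU ops while keeping the global dma_ops as the default.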
Diffstat (limited to 'arch/x86/kernel/pci-dma.c')
-rw-r--r--	arch/x86/kernel/pci-dma.c	50
1 file changed, 20 insertions(+), 30 deletions(-)
diff --git a/arch/x86/kernel/pci-dma.c b/arch/x86/kernel/pci-dma.c
index b7dd70fda031..8dbffb846de9 100644
--- a/arch/x86/kernel/pci-dma.c
+++ b/arch/x86/kernel/pci-dma.c
@@ -5,14 +5,13 @@
 
 #include <asm/proto.h>
 #include <asm/dma.h>
-#include <asm/gart.h>
+#include <asm/iommu.h>
 #include <asm/calgary.h>
 #include <asm/amd_iommu.h>
 
-int forbid_dac __read_mostly;
-EXPORT_SYMBOL(forbid_dac);
+static int forbid_dac __read_mostly;
 
-const struct dma_mapping_ops *dma_ops;
+struct dma_mapping_ops *dma_ops;
 EXPORT_SYMBOL(dma_ops);
 
 static int iommu_sac_force __read_mostly;
@@ -114,21 +113,15 @@ void __init pci_iommu_alloc(void)
 	 * The order of these functions is important for
 	 * fall-back/fail-over reasons
 	 */
-#ifdef CONFIG_GART_IOMMU
 	gart_iommu_hole_init();
-#endif
 
-#ifdef CONFIG_CALGARY_IOMMU
 	detect_calgary();
-#endif
 
 	detect_intel_iommu();
 
 	amd_iommu_detect();
 
-#ifdef CONFIG_SWIOTLB
 	pci_swiotlb_init();
-#endif
 }
 #endif
 
@@ -184,9 +177,7 @@ static __init int iommu_setup(char *p)
 		swiotlb = 1;
 #endif
 
-#ifdef CONFIG_GART_IOMMU
 	gart_parse_options(p);
-#endif
 
 #ifdef CONFIG_CALGARY_IOMMU
 	if (!strncmp(p, "calgary", 7))
@@ -203,16 +194,17 @@ early_param("iommu", iommu_setup);
 
 int dma_supported(struct device *dev, u64 mask)
 {
+	struct dma_mapping_ops *ops = get_dma_ops(dev);
+
 #ifdef CONFIG_PCI
 	if (mask > 0xffffffff && forbid_dac > 0) {
-		printk(KERN_INFO "PCI: Disallowing DAC for device %s\n",
-				 dev->bus_id);
+		dev_info(dev, "PCI: Disallowing DAC for device\n");
 		return 0;
 	}
 #endif
 
-	if (dma_ops->dma_supported)
-		return dma_ops->dma_supported(dev, mask);
+	if (ops->dma_supported)
+		return ops->dma_supported(dev, mask);
 
 	/* Copied from i386. Doesn't make much sense, because it will
 	   only work for pci_alloc_coherent.
@@ -233,8 +225,7 @@ int dma_supported(struct device *dev, u64 mask)
 	   type. Normally this doesn't make any difference, but gives
 	   more gentle handling of IOMMU overflow. */
 	if (iommu_sac_force && (mask >= DMA_40BIT_MASK)) {
-		printk(KERN_INFO "%s: Force SAC with mask %Lx\n",
-			dev->bus_id, mask);
+		dev_info(dev, "Force SAC with mask %Lx\n", mask);
 		return 0;
 	}
 
@@ -260,6 +251,7 @@ void *
 dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
 		   gfp_t gfp)
 {
+	struct dma_mapping_ops *ops = get_dma_ops(dev);
 	void *memory = NULL;
 	struct page *page;
 	unsigned long dma_mask = 0;
@@ -328,8 +320,8 @@ dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
 		/* Let low level make its own zone decisions */
 		gfp &= ~(GFP_DMA32|GFP_DMA);
 
-		if (dma_ops->alloc_coherent)
-			return dma_ops->alloc_coherent(dev, size,
+		if (ops->alloc_coherent)
+			return ops->alloc_coherent(dev, size,
 						       dma_handle, gfp);
 		return NULL;
 	}
@@ -341,14 +333,14 @@ dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
 		}
 	}
 
-	if (dma_ops->alloc_coherent) {
+	if (ops->alloc_coherent) {
 		free_pages((unsigned long)memory, get_order(size));
 		gfp &= ~(GFP_DMA|GFP_DMA32);
-		return dma_ops->alloc_coherent(dev, size, dma_handle, gfp);
+		return ops->alloc_coherent(dev, size, dma_handle, gfp);
 	}
 
-	if (dma_ops->map_simple) {
-		*dma_handle = dma_ops->map_simple(dev, virt_to_phys(memory),
+	if (ops->map_simple) {
+		*dma_handle = ops->map_simple(dev, virt_to_phys(memory),
 					      size,
 					      PCI_DMA_BIDIRECTIONAL);
 		if (*dma_handle != bad_dma_address)
@@ -370,29 +362,27 @@ EXPORT_SYMBOL(dma_alloc_coherent);
 void dma_free_coherent(struct device *dev, size_t size,
 			 void *vaddr, dma_addr_t bus)
 {
+	struct dma_mapping_ops *ops = get_dma_ops(dev);
+
 	int order = get_order(size);
 	WARN_ON(irqs_disabled());	/* for portability */
 	if (dma_release_from_coherent(dev, order, vaddr))
 		return;
-	if (dma_ops->unmap_single)
-		dma_ops->unmap_single(dev, bus, size, 0);
+	if (ops->unmap_single)
+		ops->unmap_single(dev, bus, size, 0);
 	free_pages((unsigned long)vaddr, order);
 }
 EXPORT_SYMBOL(dma_free_coherent);
 
 static int __init pci_iommu_init(void)
 {
-#ifdef CONFIG_CALGARY_IOMMU
 	calgary_iommu_init();
-#endif
 
 	intel_iommu_init();
 
 	amd_iommu_init();
 
-#ifdef CONFIG_GART_IOMMU
 	gart_iommu_init();
-#endif
 
 	no_iommu_init();
 	return 0;
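A note on the #ifdef CONFIG_GART_IOMMU / CONFIG_CALGARY_IOMMU / CONFIG_SWIOTLB blocks dropped above: the calls can stay unconditional because the headers are expected to provide empty inline stubs when the option is compiled out. A sketch of that pattern for the GART case, assuming the asm/iommu.h layout of this era (not part of this diff):

#ifdef CONFIG_GART_IOMMU
extern void gart_iommu_init(void);
extern void gart_iommu_hole_init(void);
extern void gart_parse_options(char *);
#else
/* GART support compiled out: callers compile against no-op inline stubs */
static inline void gart_iommu_init(void) { }
static inline void gart_iommu_hole_init(void) { }
static inline void gart_parse_options(char *options) { }
#endif

The same approach keeps detect_calgary(), calgary_iommu_init() and pci_swiotlb_init() buildable in all configurations without cluttering pci-dma.c with preprocessor conditionals.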