author    Linus Torvalds <torvalds@g5.osdl.org>  2006-06-26 13:51:09 -0400
committer Linus Torvalds <torvalds@g5.osdl.org>  2006-06-26 13:51:09 -0400
commit    81a07d7588d376c530d006e24d7981304ce96e16 (patch)
tree      1608e094c88b9702c86cf2e6f65339aab9ea3f3f  /arch/x86_64/kernel/pci-dma.c
parent    8871e73fdbde07d0a41393f7ee30787b65387b36 (diff)
parent    8501a2fbe762b21d2504ed3aca3b52be61b5e6e4 (diff)
Merge branch 'x86-64'
* x86-64: (83 commits)
  [PATCH] x86_64: x86_64 stack usage debugging
  [PATCH] x86_64: (resend) x86_64 stack overflow debugging
  [PATCH] x86_64: msi_apic.c build fix
  [PATCH] x86_64: i386/x86-64 Add nmi watchdog support for new Intel CPUs
  [PATCH] x86_64: Avoid broadcasting NMI IPIs
  [PATCH] x86_64: fix apic error on bootup
  [PATCH] x86_64: enlarge window for stack growth
  [PATCH] x86_64: Minor string functions optimizations
  [PATCH] x86_64: Move export symbols to their C functions
  [PATCH] x86_64: Standardize i386/x86_64 handling of NMI_VECTOR
  [PATCH] x86_64: Fix modular pc speaker
  [PATCH] x86_64: remove sys32_ni_syscall()
  [PATCH] x86_64: Do not use -ffunction-sections for modules
  [PATCH] x86_64: Add cpu_relax to apic_wait_icr_idle
  [PATCH] x86_64: adjust kstack_depth_to_print default
  [PATCH] i386/x86-64: adjust /proc/interrupts column headings
  [PATCH] x86_64: Fix race in cpu_local_* on preemptible kernels
  [PATCH] x86_64: Fix fast check in safe_smp_processor_id
  [PATCH] x86_64: x86_64 setup.c - printing cmp related boottime information
  [PATCH] i386/x86-64/ia64: Move polling flag into thread_info_status
  ...

Manual resolve of trivial conflict in arch/i386/kernel/Makefile
Diffstat (limited to 'arch/x86_64/kernel/pci-dma.c')
-rw-r--r--  arch/x86_64/kernel/pci-dma.c | 55
1 file changed, 48 insertions(+), 7 deletions(-)
diff --git a/arch/x86_64/kernel/pci-dma.c b/arch/x86_64/kernel/pci-dma.c
index a9275c9557cf..9c44f4f2433d 100644
--- a/arch/x86_64/kernel/pci-dma.c
+++ b/arch/x86_64/kernel/pci-dma.c
@@ -9,6 +9,7 @@
 #include <linux/module.h>
 #include <asm/io.h>
 #include <asm/proto.h>
+#include <asm/calgary.h>
 
 int iommu_merge __read_mostly = 0;
 EXPORT_SYMBOL(iommu_merge);
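The new include makes the Calgary entry points visible to this file; they are called from pci_iommu_alloc() and pci_iommu_init() added at the bottom of this patch. A hedged sketch of the declarations one would expect <asm/calgary.h> to carry for those calls (signatures assumed, not copied from the tree):

	extern void detect_calgary(void);	/* assumed signature */
	extern int calgary_iommu_init(void);	/* assumed signature */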
@@ -33,12 +34,15 @@ int panic_on_overflow __read_mostly = 0;
 int force_iommu __read_mostly= 0;
 #endif
 
+/* Set this to 1 if there is a HW IOMMU in the system */
+int iommu_detected __read_mostly = 0;
+
 /* Dummy device used for NULL arguments (normally ISA). Better would
    be probably a smaller DMA mask, but this is bug-to-bug compatible
    to i386. */
 struct device fallback_dev = {
 	.bus_id = "fallback device",
-	.coherent_dma_mask = 0xffffffff,
+	.coherent_dma_mask = DMA_32BIT_MASK,
 	.dma_mask = &fallback_dev.coherent_dma_mask,
 };
 
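From here on the patch replaces bare hex constants with the named DMA masks from <linux/dma-mapping.h>. The values below are the standard ones of this era, though the header text is paraphrased:

	#define DMA_64BIT_MASK	0xffffffffffffffffULL
	#define DMA_40BIT_MASK	0x000000ffffffffffULL
	#define DMA_32BIT_MASK	0x00000000ffffffffULL
	#define DMA_24BIT_MASK	0x0000000000ffffffULL

The rename is purely mechanical: 0xffffffff becomes DMA_32BIT_MASK and so on, with no behavior change.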
@@ -77,7 +81,7 @@ dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
 		dev = &fallback_dev;
 	dma_mask = dev->coherent_dma_mask;
 	if (dma_mask == 0)
-		dma_mask = 0xffffffff;
+		dma_mask = DMA_32BIT_MASK;
 
 	/* Don't invoke OOM killer */
 	gfp |= __GFP_NORETRY;
@@ -90,7 +94,7 @@ dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
 	   larger than 16MB and in this case we have a chance of
 	   finding fitting memory in the next higher zone first. If
 	   not retry with true GFP_DMA. -AK */
-	if (dma_mask <= 0xffffffff)
+	if (dma_mask <= DMA_32BIT_MASK)
 		gfp |= GFP_DMA32;
 
  again:
@@ -111,7 +115,7 @@ dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
 
 		/* Don't use the 16MB ZONE_DMA unless absolutely
 		   needed. It's better to use remapping first. */
-		if (dma_mask < 0xffffffff && !(gfp & GFP_DMA)) {
+		if (dma_mask < DMA_32BIT_MASK && !(gfp & GFP_DMA)) {
 			gfp = (gfp & ~GFP_DMA32) | GFP_DMA;
 			goto again;
 		}
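Taken together, the three dma_alloc_coherent() hunks above sit inside a zone-fallback loop: allocate from ZONE_DMA32 first when the mask is at most 32 bits, and only fall back to the scarce 16MB ZONE_DMA when the allocation lands above the mask. A condensed sketch of that control flow, with the surrounding bookkeeping elided and the allocator call simplified (alloc_with_fallback is a hypothetical name, not the tree's):

	static void *alloc_with_fallback(size_t size, u64 dma_mask, gfp_t gfp)
	{
		void *memory;

		if (dma_mask <= DMA_32BIT_MASK)
			gfp |= GFP_DMA32;	/* prefer the <4GB zone */
	again:
		memory = (void *)__get_free_pages(gfp, get_order(size));
		if (memory == NULL)
			return NULL;

		if (virt_to_bus(memory) + size > dma_mask) {
			free_pages((unsigned long)memory, get_order(size));
			/* Landed above the mask: retry once from 16MB ZONE_DMA. */
			if (dma_mask < DMA_32BIT_MASK && !(gfp & GFP_DMA)) {
				gfp = (gfp & ~GFP_DMA32) | GFP_DMA;
				goto again;
			}
			return NULL;
		}
		return memory;
	}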
@@ -174,7 +178,7 @@ int dma_supported(struct device *dev, u64 mask)
 	/* Copied from i386. Doesn't make much sense, because it will
 	   only work for pci_alloc_coherent.
 	   The caller just has to use GFP_DMA in this case. */
-	if (mask < 0x00ffffff)
+	if (mask < DMA_24BIT_MASK)
 		return 0;
 
 	/* Tell the device to use SAC when IOMMU force is on. This
@@ -189,7 +193,7 @@ int dma_supported(struct device *dev, u64 mask)
 	   SAC for these. Assume all masks <= 40 bits are of this
 	   type. Normally this doesn't make any difference, but gives
 	   more gentle handling of IOMMU overflow. */
-	if (iommu_sac_force && (mask >= 0xffffffffffULL)) {
+	if (iommu_sac_force && (mask >= DMA_40BIT_MASK)) {
 		printk(KERN_INFO "%s: Force SAC with mask %Lx\n", dev->bus_id,mask);
 		return 0;
 	}
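Returning 0 here is less a failure than a nudge: with iommu_sac_force set, refusing a >= 40-bit mask pushes the driver down to a 32-bit mask, which the IOMMU can always satisfy with single-address-cycle transactions. That interacts with the standard driver probe idiom of the day (illustrative only; pdev is the device being probed, and pci_set_dma_mask() returns 0 on success):

	if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
		/* 64-bit DAC addressing available */
	} else if (!pci_set_dma_mask(pdev, DMA_32BIT_MASK)) {
		/* 32-bit SAC addressing -- always safe behind the IOMMU */
	} else {
		printk(KERN_ERR "no usable DMA mask\n");
		return -EIO;
	}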
@@ -266,7 +270,7 @@ __init int iommu_setup(char *p)
 	swiotlb = 1;
 #endif
 
-#ifdef CONFIG_GART_IOMMU
+#ifdef CONFIG_IOMMU
 	gart_parse_options(p);
 #endif
 
@@ -276,3 +280,40 @@ __init int iommu_setup(char *p)
 	}
 	return 1;
 }
+__setup("iommu=", iommu_setup);
+
+void __init pci_iommu_alloc(void)
+{
+	/*
+	 * The order of these functions is important for
+	 * fall-back/fail-over reasons
+	 */
+#ifdef CONFIG_IOMMU
+	iommu_hole_init();
+#endif
+
+#ifdef CONFIG_CALGARY_IOMMU
+	detect_calgary();
+#endif
+
+#ifdef CONFIG_SWIOTLB
+	pci_swiotlb_init();
+#endif
+}
+
+static int __init pci_iommu_init(void)
+{
+#ifdef CONFIG_CALGARY_IOMMU
+	calgary_iommu_init();
+#endif
+
+#ifdef CONFIG_IOMMU
+	gart_iommu_init();
+#endif
+
+	no_iommu_init();
+	return 0;
+}
+
+/* Must execute after PCI subsystem */
+fs_initcall(pci_iommu_init);
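fs_initcall() is what makes the closing comment hold: it places pci_iommu_init() at initcall level 5, one level after the subsys_initcall level where the PCI core is brought up and the bus is scanned. Paraphrased from <linux/init.h> of this era (level numbers are real; the comments are mine):

	#define subsys_initcall(fn)	__define_initcall("4", fn)	/* PCI bus scan runs at this level */
	#define fs_initcall(fn)		__define_initcall("5", fn)	/* runs after all level-4 calls */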