Diffstat (limited to 'arch/x86/kernel/pci-dma.c')
-rw-r--r--	arch/x86/kernel/pci-dma.c	524
1 file changed, 524 insertions, 0 deletions
diff --git a/arch/x86/kernel/pci-dma.c b/arch/x86/kernel/pci-dma.c
new file mode 100644
index 000000000000..388b113a7d88
--- /dev/null
+++ b/arch/x86/kernel/pci-dma.c
@@ -0,0 +1,524 @@
#include <linux/dma-mapping.h>
#include <linux/dmar.h>
#include <linux/bootmem.h>
#include <linux/pci.h>

#include <asm/proto.h>
#include <asm/dma.h>
#include <asm/gart.h>
#include <asm/calgary.h>

int forbid_dac __read_mostly;
EXPORT_SYMBOL(forbid_dac);

const struct dma_mapping_ops *dma_ops;
EXPORT_SYMBOL(dma_ops);

int iommu_sac_force __read_mostly = 0;

#ifdef CONFIG_IOMMU_DEBUG
int panic_on_overflow __read_mostly = 1;
int force_iommu __read_mostly = 1;
#else
int panic_on_overflow __read_mostly = 0;
int force_iommu __read_mostly = 0;
#endif

int iommu_merge __read_mostly = 0;

int no_iommu __read_mostly;
/* Set this to 1 if there is a HW IOMMU in the system */
int iommu_detected __read_mostly = 0;

/* This tells the BIO block layer to assume merging. Default to off
   because we cannot guarantee merging later. */
int iommu_bio_merge __read_mostly = 0;
EXPORT_SYMBOL(iommu_bio_merge);

dma_addr_t bad_dma_address __read_mostly = 0;
EXPORT_SYMBOL(bad_dma_address);

/* Dummy device used for NULL arguments (normally ISA). A smaller
   DMA mask would probably be better, but this is bug-to-bug
   compatible with older i386. */
struct device fallback_dev = {
	.bus_id = "fallback device",
	.coherent_dma_mask = DMA_32BIT_MASK,
	.dma_mask = &fallback_dev.coherent_dma_mask,
};

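/* Set the device's DMA mask, after checking that the new mask is supported. */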
int dma_set_mask(struct device *dev, u64 mask)
{
	if (!dev->dma_mask || !dma_supported(dev, mask))
		return -EIO;

	*dev->dma_mask = mask;

	return 0;
}
EXPORT_SYMBOL(dma_set_mask);

#ifdef CONFIG_X86_64
static __initdata void *dma32_bootmem_ptr;
static unsigned long dma32_bootmem_size __initdata = (128ULL<<20);

static int __init parse_dma32_size_opt(char *p)
{
	if (!p)
		return -EINVAL;
	dma32_bootmem_size = memparse(p, &p);
	return 0;
}
early_param("dma32_size", parse_dma32_size_opt);

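/*
 * Reserve a chunk of bootmem below 4G early in boot. It is released
 * again in pci_iommu_alloc(), so the IOMMU setup code can still find
 * a usable range below 4G after the bootmem allocator has run. Only
 * needed on machines with memory above 4G.
 */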
void __init dma32_reserve_bootmem(void)
{
	unsigned long size, align;
	if (end_pfn <= MAX_DMA32_PFN)
		return;

	align = 64ULL<<20;
	size = round_up(dma32_bootmem_size, align);
	dma32_bootmem_ptr = __alloc_bootmem_nopanic(size, align,
				__pa(MAX_DMA_ADDRESS));
	if (dma32_bootmem_ptr)
		dma32_bootmem_size = size;
	else
		dma32_bootmem_size = 0;
}
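
/* Release the early reservation so the IOMMU setup code can use the range. */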
static void __init dma32_free_bootmem(void)
{
	int node;

	if (end_pfn <= MAX_DMA32_PFN)
		return;

	if (!dma32_bootmem_ptr)
		return;

	for_each_online_node(node)
		free_bootmem_node(NODE_DATA(node), __pa(dma32_bootmem_ptr),
				  dma32_bootmem_size);

	dma32_bootmem_ptr = NULL;
	dma32_bootmem_size = 0;
}

void __init pci_iommu_alloc(void)
{
	/* free the range so the IOMMU can allocate a range below 4G */
	dma32_free_bootmem();
	/*
	 * The order of these functions is important for
	 * fall-back/fail-over reasons
	 */
#ifdef CONFIG_GART_IOMMU
	gart_iommu_hole_init();
#endif

#ifdef CONFIG_CALGARY_IOMMU
	detect_calgary();
#endif

	detect_intel_iommu();

#ifdef CONFIG_SWIOTLB
	pci_swiotlb_init();
#endif
}
#endif

/*
 * See <Documentation/x86_64/boot-options.txt> for the iommu kernel parameter
 * documentation.
 */
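/*
 * Options recognized here: off, force, noforce, biomerge, panic,
 * nopanic, merge, nomerge, forcesac, allowdac, nodac, usedac, soft
 * and calgary. Several may be combined, separated by commas, e.g.
 * "iommu=force,nomerge". Each token is also handed to
 * gart_parse_options() when the GART IOMMU is configured.
 */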
static __init int iommu_setup(char *p)
{
	iommu_merge = 1;

	if (!p)
		return -EINVAL;

	while (*p) {
		if (!strncmp(p, "off", 3))
			no_iommu = 1;
		/* gart_parse_options has more force support */
		if (!strncmp(p, "force", 5))
			force_iommu = 1;
		if (!strncmp(p, "noforce", 7)) {
			iommu_merge = 0;
			force_iommu = 0;
		}

		if (!strncmp(p, "biomerge", 8)) {
			iommu_bio_merge = 4096;
			iommu_merge = 1;
			force_iommu = 1;
		}
		if (!strncmp(p, "panic", 5))
			panic_on_overflow = 1;
		if (!strncmp(p, "nopanic", 7))
			panic_on_overflow = 0;
		if (!strncmp(p, "merge", 5)) {
			iommu_merge = 1;
			force_iommu = 1;
		}
		if (!strncmp(p, "nomerge", 7))
			iommu_merge = 0;
		if (!strncmp(p, "forcesac", 8))
			iommu_sac_force = 1;
		if (!strncmp(p, "allowdac", 8))
			forbid_dac = 0;
		if (!strncmp(p, "nodac", 5))
			forbid_dac = -1;
		if (!strncmp(p, "usedac", 6)) {
			forbid_dac = -1;
			return 1;
		}
#ifdef CONFIG_SWIOTLB
		if (!strncmp(p, "soft", 4))
			swiotlb = 1;
#endif

#ifdef CONFIG_GART_IOMMU
		gart_parse_options(p);
#endif

#ifdef CONFIG_CALGARY_IOMMU
		if (!strncmp(p, "calgary", 7))
			use_calgary = 1;
#endif /* CONFIG_CALGARY_IOMMU */

		p += strcspn(p, ",");
		if (*p == ',')
			++p;
	}
	return 0;
}
early_param("iommu", iommu_setup);

#ifdef CONFIG_X86_32
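/*
 * Declare a region of bus-address space as device-private coherent
 * memory: the region is ioremap()ed and a bitmap tracks which of its
 * pages are handed out by dma_alloc_coherent().
 */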
int dma_declare_coherent_memory(struct device *dev, dma_addr_t bus_addr,
				dma_addr_t device_addr, size_t size, int flags)
{
	void __iomem *mem_base = NULL;
	int pages = size >> PAGE_SHIFT;
	int bitmap_size = BITS_TO_LONGS(pages) * sizeof(long);

	if ((flags & (DMA_MEMORY_MAP | DMA_MEMORY_IO)) == 0)
		goto out;
	if (!size)
		goto out;
	if (dev->dma_mem)
		goto out;

	/* FIXME: this routine just ignores DMA_MEMORY_INCLUDES_CHILDREN */

	mem_base = ioremap(bus_addr, size);
	if (!mem_base)
		goto out;

	dev->dma_mem = kzalloc(sizeof(struct dma_coherent_mem), GFP_KERNEL);
	if (!dev->dma_mem)
		goto out;
	dev->dma_mem->bitmap = kzalloc(bitmap_size, GFP_KERNEL);
	if (!dev->dma_mem->bitmap)
		goto free1_out;

	dev->dma_mem->virt_base = mem_base;
	dev->dma_mem->device_base = device_addr;
	dev->dma_mem->size = pages;
	dev->dma_mem->flags = flags;

	if (flags & DMA_MEMORY_MAP)
		return DMA_MEMORY_MAP;

	return DMA_MEMORY_IO;

free1_out:
	kfree(dev->dma_mem);
out:
	if (mem_base)
		iounmap(mem_base);
	return 0;
}
EXPORT_SYMBOL(dma_declare_coherent_memory);

void dma_release_declared_memory(struct device *dev)
{
	struct dma_coherent_mem *mem = dev->dma_mem;

	if (!mem)
		return;
	dev->dma_mem = NULL;
	iounmap(mem->virt_base);
	kfree(mem->bitmap);
	kfree(mem);
}
EXPORT_SYMBOL(dma_release_declared_memory);

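/*
 * Mark a range of the device's declared coherent memory as occupied,
 * so dma_alloc_coherent() will no longer hand it out.
 */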
void *dma_mark_declared_memory_occupied(struct device *dev,
					dma_addr_t device_addr, size_t size)
{
	struct dma_coherent_mem *mem = dev->dma_mem;
	int pos, err;
	int pages = (size + (device_addr & ~PAGE_MASK) + PAGE_SIZE - 1);

	pages >>= PAGE_SHIFT;

	if (!mem)
		return ERR_PTR(-EINVAL);

	pos = (device_addr - mem->device_base) >> PAGE_SHIFT;
	err = bitmap_allocate_region(mem->bitmap, pos, get_order(pages));
	if (err != 0)
		return ERR_PTR(err);
	return mem->virt_base + (pos << PAGE_SHIFT);
}
EXPORT_SYMBOL(dma_mark_declared_memory_occupied);

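/*
 * Try to satisfy the allocation from the device's declared coherent
 * memory, if any. Returns nonzero if the device has such a region,
 * in which case *ret holds the zeroed allocation, or NULL if the
 * region is exhausted.
 */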
static int dma_alloc_from_coherent_mem(struct device *dev, ssize_t size,
				       dma_addr_t *dma_handle, void **ret)
{
	struct dma_coherent_mem *mem = dev ? dev->dma_mem : NULL;
	int order = get_order(size);

	if (mem) {
		int page = bitmap_find_free_region(mem->bitmap, mem->size,
						   order);
		if (page >= 0) {
			*dma_handle = mem->device_base + (page << PAGE_SHIFT);
			*ret = mem->virt_base + (page << PAGE_SHIFT);
			memset(*ret, 0, size);
		} else if (mem->flags & DMA_MEMORY_EXCLUSIVE)
			*ret = NULL;
	}
	return (mem != NULL);
}

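/*
 * Return the allocation to the device's declared coherent memory if
 * vaddr falls inside it; returns nonzero when it was handled here.
 */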
static int dma_release_coherent(struct device *dev, int order, void *vaddr)
{
	struct dma_coherent_mem *mem = dev ? dev->dma_mem : NULL;

	if (mem && vaddr >= mem->virt_base && vaddr <
		   (mem->virt_base + (mem->size << PAGE_SHIFT))) {
		int page = (vaddr - mem->virt_base) >> PAGE_SHIFT;

		bitmap_release_region(mem->bitmap, page, order);
		return 1;
	}
	return 0;
}
#else
#define dma_alloc_from_coherent_mem(dev, size, handle, ret) (0)
#define dma_release_coherent(dev, order, vaddr) (0)
#endif /* CONFIG_X86_32 */

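/*
 * Check whether the device can reach memory described by @mask,
 * applying the forbid_dac and iommu_sac_force policies in addition
 * to any dma_ops-specific check.
 */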
int dma_supported(struct device *dev, u64 mask)
{
#ifdef CONFIG_PCI
	if (mask > 0xffffffff && forbid_dac > 0) {
		printk(KERN_INFO "PCI: Disallowing DAC for device %s\n",
		       dev->bus_id);
		return 0;
	}
#endif

	if (dma_ops->dma_supported)
		return dma_ops->dma_supported(dev, mask);

	/* Copied from i386. Doesn't make much sense, because it will
	   only work for pci_alloc_coherent.
	   The caller just has to use GFP_DMA in this case. */
	if (mask < DMA_24BIT_MASK)
		return 0;

	/* Tell the device to use SAC when IOMMU force is on.  This
	   allows the driver to use cheaper accesses in some cases.

	   Problem with this is that if we overflow the IOMMU area and
	   return DAC as fallback address the device may not handle it
	   correctly.

	   As a special case some controllers have a 39bit address
	   mode that is as efficient as 32bit (aic79xx). Don't force
	   SAC for these.  Assume all masks <= 40 bits are of this
	   type. Normally this doesn't make any difference, but gives
	   more gentle handling of IOMMU overflow. */
	if (iommu_sac_force && (mask >= DMA_40BIT_MASK)) {
		printk(KERN_INFO "%s: Force SAC with mask %Lx\n",
		       dev->bus_id, mask);
		return 0;
	}

	return 1;
}
EXPORT_SYMBOL(dma_supported);

/* Allocate DMA memory on node near device */
noinline struct page *
dma_alloc_pages(struct device *dev, gfp_t gfp, unsigned order)
{
	int node;

	node = dev_to_node(dev);

	return alloc_pages_node(node, gfp, order);
}

/*
 * Allocate memory for a coherent mapping.
 */
void *
dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
		   gfp_t gfp)
{
	void *memory = NULL;
	struct page *page;
	unsigned long dma_mask = 0;
	dma_addr_t bus;

	/* ignore region specifiers */
	gfp &= ~(__GFP_DMA | __GFP_HIGHMEM | __GFP_DMA32);

	if (dma_alloc_from_coherent_mem(dev, size, dma_handle, &memory))
		return memory;

	if (!dev)
		dev = &fallback_dev;
	dma_mask = dev->coherent_dma_mask;
	if (dma_mask == 0)
		dma_mask = DMA_32BIT_MASK;

	/* Device not DMA-capable */
	if (dev->dma_mask == NULL)
		return NULL;

	/* Don't invoke the OOM killer */
	gfp |= __GFP_NORETRY;

#ifdef CONFIG_X86_64
	/* Why <=? Even when the mask is smaller than 4GB it is often
	   larger than 16MB and in this case we have a chance of
	   finding fitting memory in the next higher zone first. If
	   not retry with true GFP_DMA. -AK */
	if (dma_mask <= DMA_32BIT_MASK)
		gfp |= GFP_DMA32;
#endif

again:
	page = dma_alloc_pages(dev, gfp, get_order(size));
	if (page == NULL)
		return NULL;

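	/*
	 * "high" means the page landed above the device's DMA mask;
	 * "mmu" means an IOMMU mapping is needed, either because the
	 * page is too high or because IOMMU use is being forced.
	 */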
	{
		int high, mmu;
		bus = page_to_phys(page);
		memory = page_address(page);
		high = (bus + size) >= dma_mask;
		mmu = high;
		if (force_iommu && !(gfp & GFP_DMA))
			mmu = 1;
		else if (high) {
			free_pages((unsigned long)memory,
				   get_order(size));

			/* Don't use the 16MB ZONE_DMA unless absolutely
			   needed. It's better to use remapping first. */
			if (dma_mask < DMA_32BIT_MASK && !(gfp & GFP_DMA)) {
				gfp = (gfp & ~GFP_DMA32) | GFP_DMA;
				goto again;
			}

			/* Let low level make its own zone decisions */
			gfp &= ~(GFP_DMA32|GFP_DMA);

			if (dma_ops->alloc_coherent)
				return dma_ops->alloc_coherent(dev, size,
							       dma_handle, gfp);
			return NULL;
		}

		memset(memory, 0, size);
		if (!mmu) {
			*dma_handle = bus;
			return memory;
		}
	}

	if (dma_ops->alloc_coherent) {
		free_pages((unsigned long)memory, get_order(size));
		gfp &= ~(GFP_DMA|GFP_DMA32);
		return dma_ops->alloc_coherent(dev, size, dma_handle, gfp);
	}

	if (dma_ops->map_simple) {
		*dma_handle = dma_ops->map_simple(dev, virt_to_phys(memory),
						  size,
						  PCI_DMA_BIDIRECTIONAL);
		if (*dma_handle != bad_dma_address)
			return memory;
	}

	if (panic_on_overflow)
		panic("dma_alloc_coherent: IOMMU overflow by %lu bytes\n",
		      (unsigned long)size);
	free_pages((unsigned long)memory, get_order(size));
	return NULL;
}
EXPORT_SYMBOL(dma_alloc_coherent);

/*
 * Unmap coherent memory.
 * The caller must ensure that the device has finished accessing the mapping.
 */
void dma_free_coherent(struct device *dev, size_t size,
		       void *vaddr, dma_addr_t bus)
{
	int order = get_order(size);
	WARN_ON(irqs_disabled());	/* for portability */
	if (dma_release_coherent(dev, order, vaddr))
		return;
	if (dma_ops->unmap_single)
		dma_ops->unmap_single(dev, bus, size, 0);
	free_pages((unsigned long)vaddr, order);
}
EXPORT_SYMBOL(dma_free_coherent);

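/*
 * Initialize whichever hardware IOMMU was detected. no_iommu_init()
 * runs last and installs the fallback nommu ops if nothing else has
 * claimed dma_ops.
 */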
static int __init pci_iommu_init(void)
{
#ifdef CONFIG_CALGARY_IOMMU
	calgary_iommu_init();
#endif

	intel_iommu_init();

#ifdef CONFIG_GART_IOMMU
	gart_iommu_init();
#endif

	no_iommu_init();
	return 0;
}

void pci_iommu_shutdown(void)
{
	gart_iommu_shutdown();
}

/* Must execute after PCI subsystem */
fs_initcall(pci_iommu_init);

#ifdef CONFIG_PCI
/* Many VIA bridges seem to corrupt data for DAC. Disable it here */

static __devinit void via_no_dac(struct pci_dev *dev)
{
	if ((dev->class >> 8) == PCI_CLASS_BRIDGE_PCI && forbid_dac == 0) {
		printk(KERN_INFO "PCI: VIA PCI bridge detected. "
				 "Disabling DAC.\n");
		forbid_dac = 1;
	}
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, PCI_ANY_ID, via_no_dac);
#endif