author		Glauber Costa <gcosta@redhat.com>	2008-04-09 12:18:10 -0400
committer	Ingo Molnar <mingo@elte.hu>	2008-04-19 13:19:58 -0400
commit		098cb7f27ed69276e4db560a444b94b982e4bb8f (patch)
tree		6c6a26d9423d3320632e0fd029d9244a07e760da /arch/x86/kernel/pci-dma.c
parent		bb8ada95a7c11adf3dad4e8d5c55ef1650560592 (diff)
x86: integrate pci-dma.c
The code in pci-dma_{32,64}.c is now sufficiently close that the
two files can be merged into pci-dma.c.
Signed-off-by: Glauber Costa <gcosta@redhat.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Diffstat (limited to 'arch/x86/kernel/pci-dma.c')
-rw-r--r--	arch/x86/kernel/pci-dma.c	175
1 file changed, 175 insertions(+), 0 deletions(-)
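
For context before the diff: a typical consumer of the interface this
file now provides for both 32- and 64-bit looks roughly like the sketch
below. It is illustrative only and not part of the patch; my_probe(),
my_remove(), my_ring and BUF_SIZE are invented names.

	#include <linux/device.h>
	#include <linux/dma-mapping.h>
	#include <linux/errno.h>

	#define BUF_SIZE 4096	/* illustrative buffer size */

	static void *my_ring;		/* CPU virtual address */
	static dma_addr_t my_ring_bus;	/* bus address for the device */

	static int my_probe(struct device *dev)
	{
		/* On success the buffer is zeroed and my_ring_bus holds
		   the address the device should use. */
		my_ring = dma_alloc_coherent(dev, BUF_SIZE, &my_ring_bus,
					     GFP_KERNEL);
		if (!my_ring)
			return -ENOMEM;
		return 0;
	}

	static void my_remove(struct device *dev)
	{
		/* Must not be called with IRQs disabled; see the
		   WARN_ON(irqs_disabled()) in dma_free_coherent(). */
		dma_free_coherent(dev, BUF_SIZE, my_ring, my_ring_bus);
	}
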
diff --git a/arch/x86/kernel/pci-dma.c b/arch/x86/kernel/pci-dma.c
index 00527e74e49c..388b113a7d88 100644
--- a/arch/x86/kernel/pci-dma.c
+++ b/arch/x86/kernel/pci-dma.c
@@ -38,6 +38,15 @@ EXPORT_SYMBOL(iommu_bio_merge);
 dma_addr_t bad_dma_address __read_mostly = 0;
 EXPORT_SYMBOL(bad_dma_address);
 
+/* Dummy device used for NULL arguments (normally ISA). Better would
+   be probably a smaller DMA mask, but this is bug-to-bug compatible
+   to older i386. */
+struct device fallback_dev = {
+	.bus_id = "fallback device",
+	.coherent_dma_mask = DMA_32BIT_MASK,
+	.dma_mask = &fallback_dev.coherent_dma_mask,
+};
+
 int dma_set_mask(struct device *dev, u64 mask)
 {
 	if (!dev->dma_mask || !dma_supported(dev, mask))
@@ -267,6 +276,43 @@ void *dma_mark_declared_memory_occupied(struct device *dev,
 	return mem->virt_base + (pos << PAGE_SHIFT);
 }
 EXPORT_SYMBOL(dma_mark_declared_memory_occupied);
+
+static int dma_alloc_from_coherent_mem(struct device *dev, ssize_t size,
+				       dma_addr_t *dma_handle, void **ret)
+{
+	struct dma_coherent_mem *mem = dev ? dev->dma_mem : NULL;
+	int order = get_order(size);
+
+	if (mem) {
+		int page = bitmap_find_free_region(mem->bitmap, mem->size,
+						   order);
+		if (page >= 0) {
+			*dma_handle = mem->device_base + (page << PAGE_SHIFT);
+			*ret = mem->virt_base + (page << PAGE_SHIFT);
+			memset(*ret, 0, size);
+		}
+		if (mem->flags & DMA_MEMORY_EXCLUSIVE)
+			*ret = NULL;
+	}
+	return (mem != NULL);
+}
+
+static int dma_release_coherent(struct device *dev, int order, void *vaddr)
+{
+	struct dma_coherent_mem *mem = dev ? dev->dma_mem : NULL;
+
+	if (mem && vaddr >= mem->virt_base && vaddr <
+		   (mem->virt_base + (mem->size << PAGE_SHIFT))) {
+		int page = (vaddr - mem->virt_base) >> PAGE_SHIFT;
+
+		bitmap_release_region(mem->bitmap, page, order);
+		return 1;
+	}
+	return 0;
+}
+#else
+#define dma_alloc_from_coherent_mem(dev, size, handle, ret) (0)
+#define dma_release_coherent(dev, order, vaddr) (0)
 #endif /* CONFIG_X86_32 */
 
 int dma_supported(struct device *dev, u64 mask)
@@ -310,6 +356,135 @@ int dma_supported(struct device *dev, u64 mask)
 }
 EXPORT_SYMBOL(dma_supported);
 
+/* Allocate DMA memory on node near device */
+noinline struct page *
+dma_alloc_pages(struct device *dev, gfp_t gfp, unsigned order)
+{
+	int node;
+
+	node = dev_to_node(dev);
+
+	return alloc_pages_node(node, gfp, order);
+}
+
+/*
+ * Allocate memory for a coherent mapping.
+ */
+void *
+dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
+		   gfp_t gfp)
+{
+	void *memory = NULL;
+	struct page *page;
+	unsigned long dma_mask = 0;
+	dma_addr_t bus;
+
+	/* ignore region specifiers */
+	gfp &= ~(__GFP_DMA | __GFP_HIGHMEM | __GFP_DMA32);
+
+	if (dma_alloc_from_coherent_mem(dev, size, dma_handle, &memory))
+		return memory;
+
+	if (!dev)
+		dev = &fallback_dev;
+	dma_mask = dev->coherent_dma_mask;
+	if (dma_mask == 0)
+		dma_mask = DMA_32BIT_MASK;
+
+	/* Device not DMA able */
+	if (dev->dma_mask == NULL)
+		return NULL;
+
+	/* Don't invoke OOM killer */
+	gfp |= __GFP_NORETRY;
+
+#ifdef CONFIG_X86_64
+	/* Why <=? Even when the mask is smaller than 4GB it is often
+	   larger than 16MB and in this case we have a chance of
+	   finding fitting memory in the next higher zone first. If
+	   not retry with true GFP_DMA. -AK */
+	if (dma_mask <= DMA_32BIT_MASK)
+		gfp |= GFP_DMA32;
+#endif
+
+ again:
+	page = dma_alloc_pages(dev, gfp, get_order(size));
+	if (page == NULL)
+		return NULL;
+
+	{
+		int high, mmu;
+		bus = page_to_phys(page);
+		memory = page_address(page);
+		high = (bus + size) >= dma_mask;
+		mmu = high;
+		if (force_iommu && !(gfp & GFP_DMA))
+			mmu = 1;
+		else if (high) {
+			free_pages((unsigned long)memory,
+				   get_order(size));
+
+			/* Don't use the 16MB ZONE_DMA unless absolutely
+			   needed. It's better to use remapping first. */
+			if (dma_mask < DMA_32BIT_MASK && !(gfp & GFP_DMA)) {
+				gfp = (gfp & ~GFP_DMA32) | GFP_DMA;
+				goto again;
+			}
+
+			/* Let low level make its own zone decisions */
+			gfp &= ~(GFP_DMA32|GFP_DMA);
+
+			if (dma_ops->alloc_coherent)
+				return dma_ops->alloc_coherent(dev, size,
+							       dma_handle, gfp);
+			return NULL;
+		}
+
+		memset(memory, 0, size);
+		if (!mmu) {
+			*dma_handle = bus;
+			return memory;
+		}
+	}
+
+	if (dma_ops->alloc_coherent) {
+		free_pages((unsigned long)memory, get_order(size));
+		gfp &= ~(GFP_DMA|GFP_DMA32);
+		return dma_ops->alloc_coherent(dev, size, dma_handle, gfp);
+	}
+
+	if (dma_ops->map_simple) {
+		*dma_handle = dma_ops->map_simple(dev, virt_to_phys(memory),
+						  size,
+						  PCI_DMA_BIDIRECTIONAL);
+		if (*dma_handle != bad_dma_address)
+			return memory;
+	}
+
+	if (panic_on_overflow)
+		panic("dma_alloc_coherent: IOMMU overflow by %lu bytes\n",
+		      (unsigned long)size);
+	free_pages((unsigned long)memory, get_order(size));
+	return NULL;
+}
+EXPORT_SYMBOL(dma_alloc_coherent);
+
+/*
+ * Unmap coherent memory.
+ * The caller must ensure that the device has finished accessing the mapping.
+ */
+void dma_free_coherent(struct device *dev, size_t size,
+		       void *vaddr, dma_addr_t bus)
+{
+	int order = get_order(size);
+	WARN_ON(irqs_disabled());	/* for portability */
+	if (dma_release_coherent(dev, order, vaddr))
+		return;
+	if (dma_ops->unmap_single)
+		dma_ops->unmap_single(dev, bus, size, 0);
+	free_pages((unsigned long)vaddr, order);
+}
+EXPORT_SYMBOL(dma_free_coherent);
 
 static int __init pci_iommu_init(void)
 {
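
The trickiest part of the merged file is the zone fallback in
dma_alloc_coherent(): try ZONE_DMA32 first even for sub-4GB masks, retry
with true GFP_DMA only when the returned page lies above the mask, and
otherwise defer to dma_ops. The standalone program below distills just
that decision logic so it can be compiled and poked at in userspace; it
is an approximation under invented names (first_attempt, on_high_page),
not kernel code.

	#include <stdio.h>
	#include <stdint.h>

	#define MASK_24BIT 0x0000000000ffffffULL /* 16MB ZONE_DMA   */
	#define MASK_32BIT 0x00000000ffffffffULL /* 4GB  ZONE_DMA32 */

	static const char *first_attempt(uint64_t coherent_mask)
	{
		/* Mirrors the "<=" in the patch: even a sub-4GB mask is
		   usually above 16MB, so ZONE_DMA32 is tried before the
		   scarce 16MB ZONE_DMA. */
		if (coherent_mask <= MASK_32BIT)
			return "GFP_DMA32";
		return "no zone flag (any memory)";
	}

	static const char *on_high_page(uint64_t coherent_mask)
	{
		/* If the allocated page ends above the mask, retry once
		   with GFP_DMA when the mask is strictly below 4GB;
		   otherwise hand off to dma_ops->alloc_coherent. */
		if (coherent_mask < MASK_32BIT)
			return "retry with GFP_DMA";
		return "fall back to dma_ops->alloc_coherent";
	}

	int main(void)
	{
		uint64_t masks[] = { MASK_24BIT, MASK_32BIT, ~0ULL };
		unsigned i;

		for (i = 0; i < 3; i++)
			printf("mask %#llx: first %s, on miss %s\n",
			       (unsigned long long)masks[i],
			       first_attempt(masks[i]),
			       on_high_page(masks[i]));
		return 0;
	}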