Diffstat (limited to 'lib')
-rw-r--r--  lib/swiotlb.c  30
1 file changed, 12 insertions(+), 18 deletions(-)
diff --git a/lib/swiotlb.c b/lib/swiotlb.c
index 977edbdbc1de..b5f5d1133042 100644
--- a/lib/swiotlb.c
+++ b/lib/swiotlb.c
@@ -283,6 +283,11 @@ address_needs_mapping(struct device *hwdev, dma_addr_t addr)
 	return (addr & ~mask) != 0;
 }
 
+static int is_swiotlb_buffer(char *addr)
+{
+	return addr >= io_tlb_start && addr < io_tlb_end;
+}
+
 /*
  * Allocates bounce buffer and returns its kernel virtual address.
  */
@@ -467,13 +472,6 @@ swiotlb_alloc_coherent(struct device *hwdev, size_t size,
 	void *ret;
 	int order = get_order(size);
 
-	/*
-	 * XXX fix me: the DMA API should pass us an explicit DMA mask
-	 * instead, or use ZONE_DMA32 (ia64 overloads ZONE_DMA to be a ~32
-	 * bit range instead of a 16MB one).
-	 */
-	flags |= GFP_DMA;
-
 	ret = (void *)__get_free_pages(flags, order);
 	if (ret && address_needs_mapping(hwdev, virt_to_bus(ret))) {
 		/*
@@ -490,12 +488,9 @@ swiotlb_alloc_coherent(struct device *hwdev, size_t size,
 		 * swiotlb_map_single(), which will grab memory from
 		 * the lowest available address range.
 		 */
-		dma_addr_t handle;
-		handle = swiotlb_map_single(NULL, NULL, size, DMA_FROM_DEVICE);
-		if (swiotlb_dma_mapping_error(hwdev, handle))
+		ret = map_single(hwdev, NULL, size, DMA_FROM_DEVICE);
+		if (!ret)
 			return NULL;
-
-		ret = bus_to_virt(handle);
 	}
 
 	memset(ret, 0, size);
@@ -518,12 +513,11 @@ swiotlb_free_coherent(struct device *hwdev, size_t size, void *vaddr,
 		      dma_addr_t dma_handle)
 {
 	WARN_ON(irqs_disabled());
-	if (!(vaddr >= (void *)io_tlb_start
-	      && vaddr < (void *)io_tlb_end))
+	if (!is_swiotlb_buffer(vaddr))
 		free_pages((unsigned long) vaddr, get_order(size));
 	else
 		/* DMA_TO_DEVICE to avoid memcpy in unmap_single */
-		swiotlb_unmap_single (hwdev, dma_handle, size, DMA_TO_DEVICE);
+		unmap_single(hwdev, vaddr, size, DMA_TO_DEVICE);
 }
 
 static void
@@ -612,7 +606,7 @@ swiotlb_unmap_single_attrs(struct device *hwdev, dma_addr_t dev_addr,
 	char *dma_addr = bus_to_virt(dev_addr);
 
 	BUG_ON(dir == DMA_NONE);
-	if (dma_addr >= io_tlb_start && dma_addr < io_tlb_end)
+	if (is_swiotlb_buffer(dma_addr))
 		unmap_single(hwdev, dma_addr, size, dir);
 	else if (dir == DMA_FROM_DEVICE)
 		dma_mark_clean(dma_addr, size);
@@ -642,7 +636,7 @@ swiotlb_sync_single(struct device *hwdev, dma_addr_t dev_addr,
 	char *dma_addr = bus_to_virt(dev_addr);
 
 	BUG_ON(dir == DMA_NONE);
-	if (dma_addr >= io_tlb_start && dma_addr < io_tlb_end)
+	if (is_swiotlb_buffer(dma_addr))
 		sync_single(hwdev, dma_addr, size, dir, target);
 	else if (dir == DMA_FROM_DEVICE)
 		dma_mark_clean(dma_addr, size);
@@ -673,7 +667,7 @@ swiotlb_sync_single_range(struct device *hwdev, dma_addr_t dev_addr,
 	char *dma_addr = bus_to_virt(dev_addr) + offset;
 
 	BUG_ON(dir == DMA_NONE);
-	if (dma_addr >= io_tlb_start && dma_addr < io_tlb_end)
+	if (is_swiotlb_buffer(dma_addr))
 		sync_single(hwdev, dma_addr, size, dir, target);
 	else if (dir == DMA_FROM_DEVICE)
 		dma_mark_clean(dma_addr, size);
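
Note (not part of the commit): the patch replaces three open-coded range checks against io_tlb_start/io_tlb_end with the new is_swiotlb_buffer() helper, so the free, unmap, and sync paths can no longer drift apart, and swiotlb_alloc_coherent()'s fallback simplifies from a handle plus mapping-error check to a plain NULL test on map_single(). A minimal userspace sketch of the same bounds-check pattern follows; io_tlb_start and io_tlb_end here are ordinary pointers standing in for the kernel's bounce-pool globals, and the malloc'd region is only a stand-in for the real swiotlb pool.

/*
 * Userspace sketch only: io_tlb_start/io_tlb_end mimic the kernel
 * globals that bound the swiotlb bounce pool.  The helper is the same
 * one-line range check the commit introduces.
 */
#include <stdio.h>
#include <stdlib.h>

static char *io_tlb_start;
static char *io_tlb_end;

static int is_swiotlb_buffer(char *addr)
{
	return addr >= io_tlb_start && addr < io_tlb_end;
}

int main(void)
{
	char outside;

	/* Fake a 4 KiB "bounce pool" with an ordinary allocation. */
	io_tlb_start = malloc(4096);
	if (!io_tlb_start)
		return 1;
	io_tlb_end = io_tlb_start + 4096;

	printf("%d\n", is_swiotlb_buffer(io_tlb_start + 16)); /* prints 1 */
	printf("%d\n", is_swiotlb_buffer(&outside));          /* prints 0 */

	free(io_tlb_start);
	return 0;
}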