Diffstat (limited to 'lib')
-rw-r--r--   lib/iommu-helper.c    5
-rw-r--r--   lib/swiotlb.c        49
2 files changed, 22 insertions(+), 32 deletions(-)
diff --git a/lib/iommu-helper.c b/lib/iommu-helper.c
index a3b8d4c3f77a..5d90074dca75 100644
--- a/lib/iommu-helper.c
+++ b/lib/iommu-helper.c
@@ -30,8 +30,7 @@ again:
 	return index;
 }
 
-static inline void set_bit_area(unsigned long *map, unsigned long i,
-				int len)
+void iommu_area_reserve(unsigned long *map, unsigned long i, int len)
 {
 	unsigned long end = i + len;
 	while (i < end) {
@@ -64,7 +63,7 @@ again:
 			start = index + 1;
 			goto again;
 		}
-		set_bit_area(map, index, nr);
+		iommu_area_reserve(map, index, nr);
 	}
 	return index;
 }
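
For context on the rename above: the loop body shown in the hunk simply sets every bit in [i, i + len), turning the file-local set_bit_area() into the public iommu_area_reserve(). A minimal standalone sketch of that loop follows; area_reserve and LONG_BITS are illustrative names, not the kernel symbols, and the kernel version uses __set_bit() rather than open-coded bit arithmetic.

/* Illustrative sketch only (not the kernel implementation): mark bits
 * [i, i + len) as reserved in a word-array bitmap, mirroring the loop
 * the patch exposes as iommu_area_reserve(). */
#include <limits.h>

#define LONG_BITS	(sizeof(unsigned long) * CHAR_BIT)

static void area_reserve(unsigned long *map, unsigned long i, int len)
{
	unsigned long end = i + len;

	while (i < end) {
		map[i / LONG_BITS] |= 1UL << (i % LONG_BITS);
		i++;
	}
}
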
diff --git a/lib/swiotlb.c b/lib/swiotlb.c
index 8826fdf0f180..f8eebd489149 100644
--- a/lib/swiotlb.c
+++ b/lib/swiotlb.c
@@ -274,13 +274,14 @@ cleanup1:
 }
 
 static int
-address_needs_mapping(struct device *hwdev, dma_addr_t addr)
+address_needs_mapping(struct device *hwdev, dma_addr_t addr, size_t size)
 {
-	dma_addr_t mask = 0xffffffff;
-	/* If the device has a mask, use it, otherwise default to 32 bits */
-	if (hwdev && hwdev->dma_mask)
-		mask = *hwdev->dma_mask;
-	return (addr & ~mask) != 0;
+	return !is_buffer_dma_capable(dma_get_mask(hwdev), addr, size);
+}
+
+static int is_swiotlb_buffer(char *addr)
+{
+	return addr >= io_tlb_start && addr < io_tlb_end;
 }
 
 /*
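
A note on the rewritten check above: address_needs_mapping() now asks is_buffer_dma_capable(dma_get_mask(hwdev), addr, size) whether the whole buffer [addr, addr + size) sits below the device's DMA mask, instead of masking only the start address. The helper's body is not part of this diff; a minimal sketch, assuming it reduces to a single range comparison, would look like:

/* Assumed shape of the capability test delegated to above; the name and
 * body here are illustrative, not taken from this diff. */
static int buffer_dma_capable(unsigned long long mask,
			      unsigned long long addr, unsigned long size)
{
	/* Addressable only if the buffer's last byte is still under the mask. */
	return addr + size <= mask;
}
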
@@ -467,15 +468,8 @@ swiotlb_alloc_coherent(struct device *hwdev, size_t size,
 	void *ret;
 	int order = get_order(size);
 
-	/*
-	 * XXX fix me: the DMA API should pass us an explicit DMA mask
-	 * instead, or use ZONE_DMA32 (ia64 overloads ZONE_DMA to be a ~32
-	 * bit range instead of a 16MB one).
-	 */
-	flags |= GFP_DMA;
-
 	ret = (void *)__get_free_pages(flags, order);
-	if (ret && address_needs_mapping(hwdev, virt_to_bus(ret))) {
+	if (ret && address_needs_mapping(hwdev, virt_to_bus(ret), size)) {
 		/*
 		 * The allocated memory isn't reachable by the device.
 		 * Fall back on swiotlb_map_single().
@@ -490,19 +484,16 @@ swiotlb_alloc_coherent(struct device *hwdev, size_t size,
 		 * swiotlb_map_single(), which will grab memory from
 		 * the lowest available address range.
 		 */
-		dma_addr_t handle;
-		handle = swiotlb_map_single(hwdev, NULL, size, DMA_FROM_DEVICE);
-		if (swiotlb_dma_mapping_error(hwdev, handle))
+		ret = map_single(hwdev, NULL, size, DMA_FROM_DEVICE);
+		if (!ret)
 			return NULL;
-
-		ret = bus_to_virt(handle);
 	}
 
 	memset(ret, 0, size);
 	dev_addr = virt_to_bus(ret);
 
 	/* Confirm address can be DMA'd by device */
-	if (address_needs_mapping(hwdev, dev_addr)) {
+	if (address_needs_mapping(hwdev, dev_addr, size)) {
 		printk("hwdev DMA mask = 0x%016Lx, dev_addr = 0x%016Lx\n",
 		       (unsigned long long)*hwdev->dma_mask,
 		       (unsigned long long)dev_addr);
@@ -518,12 +509,11 @@ swiotlb_free_coherent(struct device *hwdev, size_t size, void *vaddr,
 		      dma_addr_t dma_handle)
 {
 	WARN_ON(irqs_disabled());
-	if (!(vaddr >= (void *)io_tlb_start
-	      && vaddr < (void *)io_tlb_end))
+	if (!is_swiotlb_buffer(vaddr))
 		free_pages((unsigned long) vaddr, get_order(size));
 	else
 		/* DMA_TO_DEVICE to avoid memcpy in unmap_single */
-		swiotlb_unmap_single(hwdev, dma_handle, size, DMA_TO_DEVICE);
+		unmap_single(hwdev, vaddr, size, DMA_TO_DEVICE);
 }
 
 static void
@@ -567,7 +557,7 @@ swiotlb_map_single_attrs(struct device *hwdev, void *ptr, size_t size,
 	 * we can safely return the device addr and not worry about bounce
 	 * buffering it.
 	 */
-	if (!address_needs_mapping(hwdev, dev_addr) && !swiotlb_force)
+	if (!address_needs_mapping(hwdev, dev_addr, size) && !swiotlb_force)
 		return dev_addr;
 
 	/*
@@ -584,7 +574,7 @@ swiotlb_map_single_attrs(struct device *hwdev, void *ptr, size_t size,
 	/*
 	 * Ensure that the address returned is DMA'ble
 	 */
-	if (address_needs_mapping(hwdev, dev_addr))
+	if (address_needs_mapping(hwdev, dev_addr, size))
 		panic("map_single: bounce buffer is not DMA'ble");
 
 	return dev_addr;
@@ -612,7 +602,7 @@ swiotlb_unmap_single_attrs(struct device *hwdev, dma_addr_t dev_addr,
 	char *dma_addr = bus_to_virt(dev_addr);
 
 	BUG_ON(dir == DMA_NONE);
-	if (dma_addr >= io_tlb_start && dma_addr < io_tlb_end)
+	if (is_swiotlb_buffer(dma_addr))
 		unmap_single(hwdev, dma_addr, size, dir);
 	else if (dir == DMA_FROM_DEVICE)
 		dma_mark_clean(dma_addr, size);
@@ -642,7 +632,7 @@ swiotlb_sync_single(struct device *hwdev, dma_addr_t dev_addr,
 	char *dma_addr = bus_to_virt(dev_addr);
 
 	BUG_ON(dir == DMA_NONE);
-	if (dma_addr >= io_tlb_start && dma_addr < io_tlb_end)
+	if (is_swiotlb_buffer(dma_addr))
 		sync_single(hwdev, dma_addr, size, dir, target);
 	else if (dir == DMA_FROM_DEVICE)
 		dma_mark_clean(dma_addr, size);
@@ -673,7 +663,7 @@ swiotlb_sync_single_range(struct device *hwdev, dma_addr_t dev_addr,
 	char *dma_addr = bus_to_virt(dev_addr) + offset;
 
 	BUG_ON(dir == DMA_NONE);
-	if (dma_addr >= io_tlb_start && dma_addr < io_tlb_end)
+	if (is_swiotlb_buffer(dma_addr))
 		sync_single(hwdev, dma_addr, size, dir, target);
 	else if (dir == DMA_FROM_DEVICE)
 		dma_mark_clean(dma_addr, size);
@@ -727,7 +717,8 @@ swiotlb_map_sg_attrs(struct device *hwdev, struct scatterlist *sgl, int nelems,
 	for_each_sg(sgl, sg, nelems, i) {
 		addr = SG_ENT_VIRT_ADDRESS(sg);
 		dev_addr = virt_to_bus(addr);
-		if (swiotlb_force || address_needs_mapping(hwdev, dev_addr)) {
+		if (swiotlb_force ||
+		    address_needs_mapping(hwdev, dev_addr, sg->length)) {
 			void *map = map_single(hwdev, addr, sg->length, dir);
 			if (!map) {
 				/* Don't panic here, we expect map_sg users