Diffstat (limited to 'lib'):

 lib/swiotlb.c | 45 ++++++++++++++++++++-------------------------
 1 file changed, 20 insertions(+), 25 deletions(-)
diff --git a/lib/swiotlb.c b/lib/swiotlb.c
index 8826fdf0f18..240a67c2c97 100644
--- a/lib/swiotlb.c
+++ b/lib/swiotlb.c
@@ -274,13 +274,18 @@ cleanup1:
 }
 
 static int
-address_needs_mapping(struct device *hwdev, dma_addr_t addr)
+address_needs_mapping(struct device *hwdev, dma_addr_t addr, size_t size)
 {
 	dma_addr_t mask = 0xffffffff;
 	/* If the device has a mask, use it, otherwise default to 32 bits */
 	if (hwdev && hwdev->dma_mask)
 		mask = *hwdev->dma_mask;
-	return (addr & ~mask) != 0;
+	return !is_buffer_dma_capable(mask, addr, size);
+}
+
+static int is_swiotlb_buffer(char *addr)
+{
+	return addr >= io_tlb_start && addr < io_tlb_end;
 }
 
 /*
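A note on the new check: is_buffer_dma_capable() is not defined in this diff; it comes from include/linux/dma-mapping.h. At the time it read roughly as the sketch below (treat the body as a best-recollection sketch, not verbatim). The substantive change is that the whole buffer must fit under the device's DMA mask; the old "(addr & ~mask) != 0" test only looked at the start address.

	/*
	 * Reference sketch only -- the real helper lives in
	 * include/linux/dma-mapping.h, not in lib/swiotlb.c. The buffer's
	 * last byte, addr + size - 1, must be reachable under the mask.
	 */
	static inline int is_buffer_dma_capable(u64 mask, dma_addr_t addr,
						size_t size)
	{
		return addr + size - 1 <= mask;
	}

For example, against a hypothetical 30-bit mask (0x3fffffff), a buffer at 0x3ffff000 with length 0x2000 passes the old test (0x3ffff000 & ~0x3fffffff == 0) yet ends at 0x40000fff, past the mask; the size-aware check correctly flags it for bouncing.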
@@ -467,15 +472,8 @@ swiotlb_alloc_coherent(struct device *hwdev, size_t size,
 	void *ret;
 	int order = get_order(size);
 
-	/*
-	 * XXX fix me: the DMA API should pass us an explicit DMA mask
-	 * instead, or use ZONE_DMA32 (ia64 overloads ZONE_DMA to be a ~32
-	 * bit range instead of a 16MB one).
-	 */
-	flags |= GFP_DMA;
-
 	ret = (void *)__get_free_pages(flags, order);
-	if (ret && address_needs_mapping(hwdev, virt_to_bus(ret))) {
+	if (ret && address_needs_mapping(hwdev, virt_to_bus(ret), size)) {
 		/*
 		 * The allocated memory isn't reachable by the device.
 		 * Fall back on swiotlb_map_single().
@@ -490,19 +488,16 @@ swiotlb_alloc_coherent(struct device *hwdev, size_t size,
 		 * swiotlb_map_single(), which will grab memory from
 		 * the lowest available address range.
 		 */
-		dma_addr_t handle;
-		handle = swiotlb_map_single(hwdev, NULL, size, DMA_FROM_DEVICE);
-		if (swiotlb_dma_mapping_error(hwdev, handle))
+		ret = map_single(hwdev, NULL, size, DMA_FROM_DEVICE);
+		if (!ret)
 			return NULL;
-
-		ret = bus_to_virt(handle);
 	}
 
 	memset(ret, 0, size);
 	dev_addr = virt_to_bus(ret);
 
 	/* Confirm address can be DMA'd by device */
-	if (address_needs_mapping(hwdev, dev_addr)) {
+	if (address_needs_mapping(hwdev, dev_addr, size)) {
 		printk("hwdev DMA mask = 0x%016Lx, dev_addr = 0x%016Lx\n",
 		       (unsigned long long)*hwdev->dma_mask,
 		       (unsigned long long)dev_addr);
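Taken together with the previous hunk, swiotlb_alloc_coherent() gets two behavioural changes: the unconditional "flags |= GFP_DMA" is gone, so the caller's gfp mask now flows through unchanged, and the fallback path calls the file-internal map_single() directly instead of round-tripping a bus address through swiotlb_map_single() and bus_to_virt(). A condensed paraphrase of the resulting flow (error handling between the hunks is trimmed, so this is a sketch, not the verbatim function):

	/* Try the page allocator first, honouring the caller's flags. */
	ret = (void *)__get_free_pages(flags, order);
	if (ret && address_needs_mapping(hwdev, virt_to_bus(ret), size)) {
		/* Pages landed above the device's mask: release them and
		 * carve a low buffer out of the swiotlb pool instead. */
		free_pages((unsigned long)ret, order);
		ret = map_single(hwdev, NULL, size, DMA_FROM_DEVICE);
		if (!ret)
			return NULL;
	}
	memset(ret, 0, size);	/* zero whichever buffer we ended up with */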
@@ -518,12 +513,11 @@ swiotlb_free_coherent(struct device *hwdev, size_t size, void *vaddr,
 		      dma_addr_t dma_handle)
 {
 	WARN_ON(irqs_disabled());
-	if (!(vaddr >= (void *)io_tlb_start
-	      && vaddr < (void *)io_tlb_end))
+	if (!is_swiotlb_buffer(vaddr))
 		free_pages((unsigned long) vaddr, get_order(size));
 	else
 		/* DMA_TO_DEVICE to avoid memcpy in unmap_single */
-		swiotlb_unmap_single (hwdev, dma_handle, size, DMA_TO_DEVICE);
+		unmap_single(hwdev, vaddr, size, DMA_TO_DEVICE);
 }
 
 static void
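The free path mirrors the allocation path: swiotlb_free_coherent() now asks the new is_swiotlb_buffer() helper whether the memory came from the bounce pool, and releases pool memory via the internal unmap_single(), which takes the kernel virtual address, instead of the swiotlb_unmap_single() wrapper, which expected the dma_addr_t handle. A hypothetical caller, just to show which value pair the free path inspects (device, size, and names are illustrative):

	dma_addr_t handle;
	/* May come from the page allocator or from the bounce pool... */
	void *buf = swiotlb_alloc_coherent(dev, 4096, &handle, GFP_KERNEL);
	/* ...but it is freed the same way in either case. */
	swiotlb_free_coherent(dev, 4096, buf, handle);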
@@ -567,7 +561,7 @@ swiotlb_map_single_attrs(struct device *hwdev, void *ptr, size_t size,
 	 * we can safely return the device addr and not worry about bounce
 	 * buffering it.
 	 */
-	if (!address_needs_mapping(hwdev, dev_addr) && !swiotlb_force)
+	if (!address_needs_mapping(hwdev, dev_addr, size) && !swiotlb_force)
 		return dev_addr;
 
 	/*
@@ -584,7 +578,7 @@ swiotlb_map_single_attrs(struct device *hwdev, void *ptr, size_t size,
 	/*
 	 * Ensure that the address returned is DMA'ble
 	 */
-	if (address_needs_mapping(hwdev, dev_addr))
+	if (address_needs_mapping(hwdev, dev_addr, size))
 		panic("map_single: bounce buffer is not DMA'ble");
 
 	return dev_addr;
@@ -612,7 +606,7 @@ swiotlb_unmap_single_attrs(struct device *hwdev, dma_addr_t dev_addr,
 	char *dma_addr = bus_to_virt(dev_addr);
 
 	BUG_ON(dir == DMA_NONE);
-	if (dma_addr >= io_tlb_start && dma_addr < io_tlb_end)
+	if (is_swiotlb_buffer(dma_addr))
 		unmap_single(hwdev, dma_addr, size, dir);
 	else if (dir == DMA_FROM_DEVICE)
 		dma_mark_clean(dma_addr, size);
@@ -642,7 +636,7 @@ swiotlb_sync_single(struct device *hwdev, dma_addr_t dev_addr,
 	char *dma_addr = bus_to_virt(dev_addr);
 
 	BUG_ON(dir == DMA_NONE);
-	if (dma_addr >= io_tlb_start && dma_addr < io_tlb_end)
+	if (is_swiotlb_buffer(dma_addr))
 		sync_single(hwdev, dma_addr, size, dir, target);
 	else if (dir == DMA_FROM_DEVICE)
 		dma_mark_clean(dma_addr, size);
@@ -673,7 +667,7 @@ swiotlb_sync_single_range(struct device *hwdev, dma_addr_t dev_addr,
 	char *dma_addr = bus_to_virt(dev_addr) + offset;
 
 	BUG_ON(dir == DMA_NONE);
-	if (dma_addr >= io_tlb_start && dma_addr < io_tlb_end)
+	if (is_swiotlb_buffer(dma_addr))
 		sync_single(hwdev, dma_addr, size, dir, target);
 	else if (dir == DMA_FROM_DEVICE)
 		dma_mark_clean(dma_addr, size);
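The three hunks above are the same mechanical substitution in swiotlb_unmap_single_attrs(), swiotlb_sync_single(), and swiotlb_sync_single_range(): each open-coded io_tlb_start/io_tlb_end comparison becomes a call to is_swiotlb_buffer(), so the pool bounds are tested in exactly one place. All three now share this dispatch shape (the range variant adds an offset to dma_addr):

	if (is_swiotlb_buffer(dma_addr))
		/* bounce buffer: sync/unmap through the swiotlb machinery */
		sync_single(hwdev, dma_addr, size, dir, target);
	else if (dir == DMA_FROM_DEVICE)
		/* ordinary memory: at most mark it clean (an ia64 concern) */
		dma_mark_clean(dma_addr, size);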
@@ -727,7 +721,8 @@ swiotlb_map_sg_attrs(struct device *hwdev, struct scatterlist *sgl, int nelems,
 	for_each_sg(sgl, sg, nelems, i) {
 		addr = SG_ENT_VIRT_ADDRESS(sg);
 		dev_addr = virt_to_bus(addr);
-		if (swiotlb_force || address_needs_mapping(hwdev, dev_addr)) {
+		if (swiotlb_force ||
+		    address_needs_mapping(hwdev, dev_addr, sg->length)) {
 			void *map = map_single(hwdev, addr, sg->length, dir);
 			if (!map) {
 				/* Don't panic here, we expect map_sg users
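In the scatter-gather path the per-entry length now reaches the check as well: address_needs_mapping() is passed sg->length, so only entries whose full extent is unreachable get bounced, and an entry that merely starts below the mask no longer slips through when its tail crosses it. A worked example against the function's default 32-bit mask (hypothetical numbers):

	/*
	 * sg entry: start 0xfffff000, length 0x2000, mask 0xffffffff.
	 *
	 *   old test: (0xfffff000 & ~0xffffffffULL) == 0  -> mapped directly
	 *   new test: 0xfffff000 + 0x2000 - 1 = 0x100000fff > mask -> bounced
	 */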