Diffstat (limited to 'lib/swiotlb.c')
-rw-r--r--  lib/swiotlb.c | 55 ++++++++++++++++++++++++-------------------------------
 1 file changed, 24 insertions(+), 31 deletions(-)
diff --git a/lib/swiotlb.c b/lib/swiotlb.c
index 8826fdf0f180..78330c37a61b 100644
--- a/lib/swiotlb.c
+++ b/lib/swiotlb.c
@@ -274,13 +274,14 @@ cleanup1:
 }
 
 static int
-address_needs_mapping(struct device *hwdev, dma_addr_t addr)
+address_needs_mapping(struct device *hwdev, dma_addr_t addr, size_t size)
 {
-        dma_addr_t mask = 0xffffffff;
-        /* If the device has a mask, use it, otherwise default to 32 bits */
-        if (hwdev && hwdev->dma_mask)
-                mask = *hwdev->dma_mask;
-        return (addr & ~mask) != 0;
+        return !is_buffer_dma_capable(dma_get_mask(hwdev), addr, size);
+}
+
+static int is_swiotlb_buffer(char *addr)
+{
+        return addr >= io_tlb_start && addr < io_tlb_end;
 }
 
 /*
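This first hunk replaces the open-coded mask test with the generic helpers and hoists the bounce-pool bounds check into is_swiotlb_buffer(). A minimal standalone sketch of the new reachability test, assuming the era's definitions of dma_get_mask() (the device's mask if set, else 32 bits) and is_buffer_dma_capable() (the whole buffer must fit under the mask); the userspace types and main() are illustrative only:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

typedef uint64_t dma_addr_t;

#define DMA_32BIT_MASK 0xffffffffULL

/* Sketch of dma_get_mask(): the device's mask if one is set, else 32 bits. */
static uint64_t dma_get_mask(const uint64_t *dev_dma_mask)
{
        if (dev_dma_mask && *dev_dma_mask)
                return *dev_dma_mask;
        return DMA_32BIT_MASK;
}

/* Sketch of is_buffer_dma_capable(): the whole buffer, not just its start,
 * must sit below the mask. */
static int is_buffer_dma_capable(uint64_t mask, dma_addr_t addr, size_t size)
{
        return addr + size <= mask;
}

int main(void)
{
        uint64_t mask32 = DMA_32BIT_MASK;

        /* Starts below 4 GiB but ends above it: the old start-only test
         * (addr & ~mask) would have passed this buffer; the new one
         * flags it as needing a bounce. */
        printf("%d\n", !is_buffer_dma_capable(dma_get_mask(&mask32),
                                              0xfffff000ULL, 8192));  /* 1 */
        printf("%d\n", !is_buffer_dma_capable(dma_get_mask(&mask32),
                                              0x10000000ULL, 4096));  /* 0 */
        return 0;
}

The substantive change is that the buffer length now participates in the check, which is why every caller below gains a size argument.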
@@ -467,15 +468,8 @@ swiotlb_alloc_coherent(struct device *hwdev, size_t size,
         void *ret;
         int order = get_order(size);
 
-        /*
-         * XXX fix me: the DMA API should pass us an explicit DMA mask
-         * instead, or use ZONE_DMA32 (ia64 overloads ZONE_DMA to be a ~32
-         * bit range instead of a 16MB one).
-         */
-        flags |= GFP_DMA;
-
         ret = (void *)__get_free_pages(flags, order);
-        if (ret && address_needs_mapping(hwdev, virt_to_bus(ret))) {
+        if (ret && address_needs_mapping(hwdev, virt_to_bus(ret), size)) {
                 /*
                  * The allocated memory isn't reachable by the device.
                  * Fall back on swiotlb_map_single().
@@ -490,24 +484,23 @@ swiotlb_alloc_coherent(struct device *hwdev, size_t size,
                  * swiotlb_map_single(), which will grab memory from
                  * the lowest available address range.
                  */
-                dma_addr_t handle;
-                handle = swiotlb_map_single(hwdev, NULL, size, DMA_FROM_DEVICE);
-                if (swiotlb_dma_mapping_error(hwdev, handle))
+                ret = map_single(hwdev, NULL, size, DMA_FROM_DEVICE);
+                if (!ret)
                         return NULL;
-
-                ret = bus_to_virt(handle);
         }
 
         memset(ret, 0, size);
         dev_addr = virt_to_bus(ret);
 
         /* Confirm address can be DMA'd by device */
-        if (address_needs_mapping(hwdev, dev_addr)) {
+        if (address_needs_mapping(hwdev, dev_addr, size)) {
                 printk("hwdev DMA mask = 0x%016Lx, dev_addr = 0x%016Lx\n",
                        (unsigned long long)*hwdev->dma_mask,
                        (unsigned long long)dev_addr);
-                panic("swiotlb_alloc_coherent: allocated memory is out of "
-                      "range for device");
+
+                /* DMA_TO_DEVICE to avoid memcpy in unmap_single */
+                unmap_single(hwdev, ret, size, DMA_TO_DEVICE);
+                return NULL;
         }
         *dma_handle = dev_addr;
         return ret;
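These two swiotlb_alloc_coherent() hunks change failure behavior: the fallback now calls the internal map_single() directly (which returns a virtual address or NULL, so the bus_to_virt() round-trip disappears), and an unreachable bounce buffer now unwinds and returns NULL instead of panicking. A hedged sketch of the resulting control flow; the helpers below are toy placeholders, not the kernel API, and the free-and-retry step is assumed from the lines elided between the two hunks:

#include <stdint.h>
#include <stdlib.h>
#include <string.h>

#define DEVICE_MASK 0xffffffffULL               /* pretend 32-bit device */

/* Toy stand-in for address_needs_mapping(). */
static int needs_mapping(void *buf, size_t size)
{
        uint64_t addr = (uint64_t)(uintptr_t)buf;
        return !(addr + size <= DEVICE_MASK);
}

static void *pool_map_single(size_t size) { return malloc(size); }
static void pool_unmap_single(void *buf)  { free(buf); }

static void *alloc_coherent_sketch(size_t size)
{
        void *ret = malloc(size);               /* __get_free_pages() */

        /* Unreachable page-allocator memory: free it and fall back to
         * the bounce pool, which lives in low memory (this retry sits
         * in the lines elided between the two hunks above). */
        if (ret && needs_mapping(ret, size)) {
                free(ret);
                ret = NULL;
        }
        if (!ret) {
                ret = pool_map_single(size);    /* map_single(hwdev, NULL, ...) */
                if (!ret)
                        return NULL;            /* soft failure, no panic */
        }

        memset(ret, 0, size);

        /* Even the bounce slot can be out of range for a tiny mask:
         * undo the mapping and report failure instead of panicking. */
        if (needs_mapping(ret, size)) {
                pool_unmap_single(ret);         /* DMA_TO_DEVICE: no copy-back */
                return NULL;
        }
        return ret;
}

int main(void)
{
        void *buf = alloc_coherent_sketch(4096);

        free(buf);      /* the kernel path is swiotlb_free_coherent() */
        return 0;
}

Dropping the unconditional GFP_DMA (and its XXX comment) is what makes the post-allocation reachability check load-bearing: the mask, not the zone, now decides whether the fallback runs.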
@@ -518,12 +511,11 @@ swiotlb_free_coherent(struct device *hwdev, size_t size, void *vaddr,
                       dma_addr_t dma_handle)
 {
         WARN_ON(irqs_disabled());
-        if (!(vaddr >= (void *)io_tlb_start
-              && vaddr < (void *)io_tlb_end))
+        if (!is_swiotlb_buffer(vaddr))
                 free_pages((unsigned long) vaddr, get_order(size));
         else
                 /* DMA_TO_DEVICE to avoid memcpy in unmap_single */
-                swiotlb_unmap_single (hwdev, dma_handle, size, DMA_TO_DEVICE);
+                unmap_single(hwdev, vaddr, size, DMA_TO_DEVICE);
 }
 
 static void
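Both free paths pass DMA_TO_DEVICE when tearing down a bounce mapping. The point of the repeated comment: unmap only copies the bounce slot back to the original buffer when the device may have written to it, so declaring the final unmap as to-device skips a useless memcpy. A sketch of that copy-back rule as assumed here, with the direction values matching the kernel's enum dma_data_direction:

enum dma_data_direction {
        DMA_BIDIRECTIONAL = 0,
        DMA_TO_DEVICE     = 1,
        DMA_FROM_DEVICE   = 2,
        DMA_NONE          = 3,
};

/* Sketch: unmap copies the bounce slot back to the original buffer only
 * when the device may have written data the CPU must see, so a
 * DMA_TO_DEVICE unmap performs no memcpy at all. */
int unmap_copies_back(enum dma_data_direction dir)
{
        return dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL;
}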
@@ -567,7 +559,7 @@ swiotlb_map_single_attrs(struct device *hwdev, void *ptr, size_t size,
          * we can safely return the device addr and not worry about bounce
          * buffering it.
          */
-        if (!address_needs_mapping(hwdev, dev_addr) && !swiotlb_force)
+        if (!address_needs_mapping(hwdev, dev_addr, size) && !swiotlb_force)
                 return dev_addr;
 
         /*
@@ -584,7 +576,7 @@ swiotlb_map_single_attrs(struct device *hwdev, void *ptr, size_t size,
         /*
          * Ensure that the address returned is DMA'ble
          */
-        if (address_needs_mapping(hwdev, dev_addr))
+        if (address_needs_mapping(hwdev, dev_addr, size))
                 panic("map_single: bounce buffer is not DMA'ble");
 
         return dev_addr;
@@ -612,7 +604,7 @@ swiotlb_unmap_single_attrs(struct device *hwdev, dma_addr_t dev_addr,
         char *dma_addr = bus_to_virt(dev_addr);
 
         BUG_ON(dir == DMA_NONE);
-        if (dma_addr >= io_tlb_start && dma_addr < io_tlb_end)
+        if (is_swiotlb_buffer(dma_addr))
                 unmap_single(hwdev, dma_addr, size, dir);
         else if (dir == DMA_FROM_DEVICE)
                 dma_mark_clean(dma_addr, size);
@@ -642,7 +634,7 @@ swiotlb_sync_single(struct device *hwdev, dma_addr_t dev_addr,
         char *dma_addr = bus_to_virt(dev_addr);
 
         BUG_ON(dir == DMA_NONE);
-        if (dma_addr >= io_tlb_start && dma_addr < io_tlb_end)
+        if (is_swiotlb_buffer(dma_addr))
                 sync_single(hwdev, dma_addr, size, dir, target);
         else if (dir == DMA_FROM_DEVICE)
                 dma_mark_clean(dma_addr, size);
@@ -673,7 +665,7 @@ swiotlb_sync_single_range(struct device *hwdev, dma_addr_t dev_addr,
         char *dma_addr = bus_to_virt(dev_addr) + offset;
 
         BUG_ON(dir == DMA_NONE);
-        if (dma_addr >= io_tlb_start && dma_addr < io_tlb_end)
+        if (is_swiotlb_buffer(dma_addr))
                 sync_single(hwdev, dma_addr, size, dir, target);
         else if (dir == DMA_FROM_DEVICE)
                 dma_mark_clean(dma_addr, size);
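The unmap and sync hunks above all swap the open-coded io_tlb_start/io_tlb_end comparison for the new is_swiotlb_buffer() predicate. A tiny runnable sketch of the predicate against a hypothetical pool; the static array stands in for the bounce memory the kernel reserves at boot:

#include <stdio.h>

/* Hypothetical pool standing in for the real bounce buffer bounded by
 * io_tlb_start/io_tlb_end. */
static char io_tlb[1 << 16];
static char *io_tlb_start = io_tlb;
static char *io_tlb_end   = io_tlb + sizeof(io_tlb);

/* The new predicate: does this virtual address fall inside the pool? */
static int is_swiotlb_buffer(char *addr)
{
        return addr >= io_tlb_start && addr < io_tlb_end;
}

int main(void)
{
        char local;

        printf("%d\n", is_swiotlb_buffer(io_tlb + 42)); /* 1: bounce slot */
        printf("%d\n", is_swiotlb_buffer(&local));      /* 0: dma_mark_clean path */
        return 0;
}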
@@ -727,7 +719,8 @@ swiotlb_map_sg_attrs(struct device *hwdev, struct scatterlist *sgl, int nelems,
         for_each_sg(sgl, sg, nelems, i) {
                 addr = SG_ENT_VIRT_ADDRESS(sg);
                 dev_addr = virt_to_bus(addr);
-                if (swiotlb_force || address_needs_mapping(hwdev, dev_addr)) {
+                if (swiotlb_force ||
+                    address_needs_mapping(hwdev, dev_addr, sg->length)) {
                         void *map = map_single(hwdev, addr, sg->length, dir);
                         if (!map) {
                                 /* Don't panic here, we expect map_sg users
