Diffstat (limited to 'lib/swiotlb.c')
 -rw-r--r--  lib/swiotlb.c | 169
 1 file changed, 82 insertions, 87 deletions
diff --git a/lib/swiotlb.c b/lib/swiotlb.c
index 437eedb5a53b..34e3082632d8 100644
--- a/lib/swiotlb.c
+++ b/lib/swiotlb.c
@@ -28,6 +28,7 @@
 #include <linux/types.h>
 #include <linux/ctype.h>
 #include <linux/highmem.h>
+#include <linux/gfp.h>
 
 #include <asm/io.h>
 #include <asm/dma.h>
@@ -49,19 +50,11 @@
  */
 #define IO_TLB_MIN_SLABS ((1<<20) >> IO_TLB_SHIFT)
 
-/*
- * Enumeration for sync targets
- */
-enum dma_sync_target {
-	SYNC_FOR_CPU = 0,
-	SYNC_FOR_DEVICE = 1,
-};
-
 int swiotlb_force;
 
 /*
- * Used to do a quick range check in unmap_single and
- * sync_single_*, to see if the memory was in fact allocated by this
+ * Used to do a quick range check in swiotlb_tbl_unmap_single and
+ * swiotlb_tbl_sync_single_*, to see if the memory was in fact allocated by this
  * API.
  */
 static char *io_tlb_start, *io_tlb_end;
@@ -139,28 +132,14 @@ void swiotlb_print_info(void)
	       (unsigned long long)pend);
 }
 
-/*
- * Statically reserve bounce buffer space and initialize bounce buffer data
- * structures for the software IO TLB used to implement the DMA API.
- */
-void __init
-swiotlb_init_with_default_size(size_t default_size, int verbose)
+void __init swiotlb_init_with_tbl(char *tlb, unsigned long nslabs, int verbose)
 {
 	unsigned long i, bytes;
 
-	if (!io_tlb_nslabs) {
-		io_tlb_nslabs = (default_size >> IO_TLB_SHIFT);
-		io_tlb_nslabs = ALIGN(io_tlb_nslabs, IO_TLB_SEGSIZE);
-	}
-
-	bytes = io_tlb_nslabs << IO_TLB_SHIFT;
+	bytes = nslabs << IO_TLB_SHIFT;
 
-	/*
-	 * Get IO TLB memory from the low pages
-	 */
-	io_tlb_start = alloc_bootmem_low_pages(bytes);
-	if (!io_tlb_start)
-		panic("Cannot allocate SWIOTLB buffer");
+	io_tlb_nslabs = nslabs;
+	io_tlb_start = tlb;
 	io_tlb_end = io_tlb_start + bytes;
 
 	/*
@@ -184,6 +163,32 @@ swiotlb_init_with_default_size(size_t default_size, int verbose)
 	swiotlb_print_info();
 }
 
+/*
+ * Statically reserve bounce buffer space and initialize bounce buffer data
+ * structures for the software IO TLB used to implement the DMA API.
+ */
+void __init
+swiotlb_init_with_default_size(size_t default_size, int verbose)
+{
+	unsigned long bytes;
+
+	if (!io_tlb_nslabs) {
+		io_tlb_nslabs = (default_size >> IO_TLB_SHIFT);
+		io_tlb_nslabs = ALIGN(io_tlb_nslabs, IO_TLB_SEGSIZE);
+	}
+
+	bytes = io_tlb_nslabs << IO_TLB_SHIFT;
+
+	/*
+	 * Get IO TLB memory from the low pages
+	 */
+	io_tlb_start = alloc_bootmem_low_pages(bytes);
+	if (!io_tlb_start)
+		panic("Cannot allocate SWIOTLB buffer");
+
+	swiotlb_init_with_tbl(io_tlb_start, io_tlb_nslabs, verbose);
+}
+
 void __init
 swiotlb_init(int verbose)
 {
@@ -322,8 +327,8 @@ static int is_swiotlb_buffer(phys_addr_t paddr)
 /*
  * Bounce: copy the swiotlb buffer back to the original dma location
  */
-static void swiotlb_bounce(phys_addr_t phys, char *dma_addr, size_t size,
-			   enum dma_data_direction dir)
+void swiotlb_bounce(phys_addr_t phys, char *dma_addr, size_t size,
+		    enum dma_data_direction dir)
 {
 	unsigned long pfn = PFN_DOWN(phys);
 
@@ -359,26 +364,25 @@ static void swiotlb_bounce(phys_addr_t phys, char *dma_addr, size_t size,
 		memcpy(phys_to_virt(phys), dma_addr, size);
 	}
 }
+EXPORT_SYMBOL_GPL(swiotlb_bounce);
 
-/*
- * Allocates bounce buffer and returns its kernel virtual address.
- */
-static void *
-map_single(struct device *hwdev, phys_addr_t phys, size_t size, int dir)
+void *swiotlb_tbl_map_single(struct device *hwdev, dma_addr_t tbl_dma_addr,
+			     phys_addr_t phys, size_t size,
+			     enum dma_data_direction dir)
 {
 	unsigned long flags;
 	char *dma_addr;
 	unsigned int nslots, stride, index, wrap;
 	int i;
-	unsigned long start_dma_addr;
 	unsigned long mask;
 	unsigned long offset_slots;
 	unsigned long max_slots;
 
 	mask = dma_get_seg_boundary(hwdev);
-	start_dma_addr = swiotlb_virt_to_bus(hwdev, io_tlb_start) & mask;
 
-	offset_slots = ALIGN(start_dma_addr, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT;
+	tbl_dma_addr &= mask;
+
+	offset_slots = ALIGN(tbl_dma_addr, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT;
 
 	/*
 	 * Carefully handle integer overflow which can occur when mask == ~0UL.
@@ -465,12 +469,27 @@ found:
 
 	return dma_addr;
 }
+EXPORT_SYMBOL_GPL(swiotlb_tbl_map_single);
+
+/*
+ * Allocates bounce buffer and returns its kernel virtual address.
+ */
+
+static void *
+map_single(struct device *hwdev, phys_addr_t phys, size_t size,
+	   enum dma_data_direction dir)
+{
+	dma_addr_t start_dma_addr = swiotlb_virt_to_bus(hwdev, io_tlb_start);
+
+	return swiotlb_tbl_map_single(hwdev, start_dma_addr, phys, size, dir);
+}
 
 /*
  * dma_addr is the kernel virtual address of the bounce buffer to unmap.
  */
-static void
-do_unmap_single(struct device *hwdev, char *dma_addr, size_t size, int dir)
+void
+swiotlb_tbl_unmap_single(struct device *hwdev, char *dma_addr, size_t size,
+			 enum dma_data_direction dir)
 {
 	unsigned long flags;
 	int i, count, nslots = ALIGN(size, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT;
@@ -508,10 +527,12 @@ do_unmap_single(struct device *hwdev, char *dma_addr, size_t size, int dir)
 	}
 	spin_unlock_irqrestore(&io_tlb_lock, flags);
 }
+EXPORT_SYMBOL_GPL(swiotlb_tbl_unmap_single);
 
-static void
-sync_single(struct device *hwdev, char *dma_addr, size_t size,
-	    int dir, int target)
+void
+swiotlb_tbl_sync_single(struct device *hwdev, char *dma_addr, size_t size,
+			enum dma_data_direction dir,
+			enum dma_sync_target target)
 {
 	int index = (dma_addr - io_tlb_start) >> IO_TLB_SHIFT;
 	phys_addr_t phys = io_tlb_orig_addr[index];
@@ -535,6 +556,7 @@ sync_single(struct device *hwdev, char *dma_addr, size_t size,
 		BUG();
 	}
 }
+EXPORT_SYMBOL_GPL(swiotlb_tbl_sync_single);
 
 void *
 swiotlb_alloc_coherent(struct device *hwdev, size_t size,
@@ -558,8 +580,8 @@ swiotlb_alloc_coherent(struct device *hwdev, size_t size,
 	}
 	if (!ret) {
 		/*
-		 * We are either out of memory or the device can't DMA
-		 * to GFP_DMA memory; fall back on map_single(), which
+		 * We are either out of memory or the device can't DMA to
+		 * GFP_DMA memory; fall back on map_single(), which
 		 * will grab memory from the lowest available address range.
 		 */
 		ret = map_single(hwdev, 0, size, DMA_FROM_DEVICE);
@@ -577,7 +599,7 @@
		       (unsigned long long)dev_addr);
 
 		/* DMA_TO_DEVICE to avoid memcpy in unmap_single */
-		do_unmap_single(hwdev, ret, size, DMA_TO_DEVICE);
+		swiotlb_tbl_unmap_single(hwdev, ret, size, DMA_TO_DEVICE);
 		return NULL;
 	}
 	*dma_handle = dev_addr;
@@ -595,13 +617,14 @@
 	if (!is_swiotlb_buffer(paddr))
 		free_pages((unsigned long)vaddr, get_order(size));
 	else
-		/* DMA_TO_DEVICE to avoid memcpy in unmap_single */
-		do_unmap_single(hwdev, vaddr, size, DMA_TO_DEVICE);
+		/* DMA_TO_DEVICE to avoid memcpy in swiotlb_tbl_unmap_single */
+		swiotlb_tbl_unmap_single(hwdev, vaddr, size, DMA_TO_DEVICE);
 }
 EXPORT_SYMBOL(swiotlb_free_coherent);
 
 static void
-swiotlb_full(struct device *dev, size_t size, int dir, int do_panic)
+swiotlb_full(struct device *dev, size_t size, enum dma_data_direction dir,
+	     int do_panic)
 {
 	/*
 	 * Ran out of IOMMU space for this operation. This is very bad.
@@ -679,14 +702,14 @@ EXPORT_SYMBOL_GPL(swiotlb_map_page);
  * whatever the device wrote there.
  */
 static void unmap_single(struct device *hwdev, dma_addr_t dev_addr,
-			 size_t size, int dir)
+			 size_t size, enum dma_data_direction dir)
 {
 	phys_addr_t paddr = dma_to_phys(hwdev, dev_addr);
 
 	BUG_ON(dir == DMA_NONE);
 
 	if (is_swiotlb_buffer(paddr)) {
-		do_unmap_single(hwdev, phys_to_virt(paddr), size, dir);
+		swiotlb_tbl_unmap_single(hwdev, phys_to_virt(paddr), size, dir);
 		return;
 	}
 
@@ -722,14 +745,16 @@ EXPORT_SYMBOL_GPL(swiotlb_unmap_page);
  */
 static void
 swiotlb_sync_single(struct device *hwdev, dma_addr_t dev_addr,
-		    size_t size, int dir, int target)
+		    size_t size, enum dma_data_direction dir,
+		    enum dma_sync_target target)
 {
 	phys_addr_t paddr = dma_to_phys(hwdev, dev_addr);
 
 	BUG_ON(dir == DMA_NONE);
 
 	if (is_swiotlb_buffer(paddr)) {
-		sync_single(hwdev, phys_to_virt(paddr), size, dir, target);
+		swiotlb_tbl_sync_single(hwdev, phys_to_virt(paddr), size, dir,
+					target);
 		return;
 	}
 
@@ -756,37 +781,6 @@ swiotlb_sync_single_for_device(struct device *hwdev, dma_addr_t dev_addr,
 EXPORT_SYMBOL(swiotlb_sync_single_for_device);
 
 /*
- * Same as above, but for a sub-range of the mapping.
- */
-static void
-swiotlb_sync_single_range(struct device *hwdev, dma_addr_t dev_addr,
-			  unsigned long offset, size_t size,
-			  int dir, int target)
-{
-	swiotlb_sync_single(hwdev, dev_addr + offset, size, dir, target);
-}
-
-void
-swiotlb_sync_single_range_for_cpu(struct device *hwdev, dma_addr_t dev_addr,
-				  unsigned long offset, size_t size,
-				  enum dma_data_direction dir)
-{
-	swiotlb_sync_single_range(hwdev, dev_addr, offset, size, dir,
-				  SYNC_FOR_CPU);
-}
-EXPORT_SYMBOL_GPL(swiotlb_sync_single_range_for_cpu);
-
-void
-swiotlb_sync_single_range_for_device(struct device *hwdev, dma_addr_t dev_addr,
-				     unsigned long offset, size_t size,
-				     enum dma_data_direction dir)
-{
-	swiotlb_sync_single_range(hwdev, dev_addr, offset, size, dir,
-				  SYNC_FOR_DEVICE);
-}
-EXPORT_SYMBOL_GPL(swiotlb_sync_single_range_for_device);
-
-/*
  * Map a set of buffers described by scatterlist in streaming mode for DMA.
  * This is the scatter-gather version of the above swiotlb_map_page
  * interface. Here the scatter gather list elements are each tagged with the
@@ -839,7 +833,7 @@ EXPORT_SYMBOL(swiotlb_map_sg_attrs);
 
 int
 swiotlb_map_sg(struct device *hwdev, struct scatterlist *sgl, int nelems,
-	       int dir)
+	       enum dma_data_direction dir)
 {
 	return swiotlb_map_sg_attrs(hwdev, sgl, nelems, dir, NULL);
 }
@@ -866,7 +860,7 @@ EXPORT_SYMBOL(swiotlb_unmap_sg_attrs);
 
 void
 swiotlb_unmap_sg(struct device *hwdev, struct scatterlist *sgl, int nelems,
-		 int dir)
+		 enum dma_data_direction dir)
 {
 	return swiotlb_unmap_sg_attrs(hwdev, sgl, nelems, dir, NULL);
 }
@@ -881,7 +875,8 @@ EXPORT_SYMBOL(swiotlb_unmap_sg);
  */
 static void
 swiotlb_sync_sg(struct device *hwdev, struct scatterlist *sgl,
-		int nelems, int dir, int target)
+		int nelems, enum dma_data_direction dir,
+		enum dma_sync_target target)
 {
 	struct scatterlist *sg;
 	int i;
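
Note (illustration, not part of the patch): exporting swiotlb_tbl_map_single() and swiotlb_tbl_unmap_single() with an explicit tbl_dma_addr argument lets an alternate swiotlb backend bounce through a pool whose bus address it computes itself, instead of relying on the io_tlb_start translation baked into map_single(). A minimal caller sketch, assuming a pool my_tlb previously registered via swiotlb_init_with_tbl() and a hypothetical my_virt_to_bus() translation (my_tlb, my_virt_to_bus, my_map_page and my_unmap_page are illustrative names, not kernel APIs):

static char *my_tlb;	/* bounce pool handed to swiotlb_init_with_tbl() */

/* Hypothetical stand-in for the caller's virtual-to-bus translation. */
static dma_addr_t my_virt_to_bus(struct device *hwdev, void *vaddr);

static dma_addr_t my_map_page(struct device *hwdev, phys_addr_t phys,
			      size_t size, enum dma_data_direction dir)
{
	/* Bus address of the pool start, as the device will see it. */
	dma_addr_t tbl_dma_addr = my_virt_to_bus(hwdev, my_tlb);
	void *map;

	/* Reserve a slot; for DMA_TO_DEVICE this also copies the data in. */
	map = swiotlb_tbl_map_single(hwdev, tbl_dma_addr, phys, size, dir);
	if (!map)
		return 0;	/* pool exhausted */

	return my_virt_to_bus(hwdev, map);
}

static void my_unmap_page(struct device *hwdev, void *map, size_t size,
			  enum dma_data_direction dir)
{
	/* For DMA_FROM_DEVICE this copies the data back, then frees the slot. */
	swiotlb_tbl_unmap_single(hwdev, map, size, dir);
}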
