path: root/lib/swiotlb.c
Diffstat (limited to 'lib/swiotlb.c')
-rw-r--r--   lib/swiotlb.c   187
1 file changed, 91 insertions, 96 deletions
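In short, this patch turns the SWIOTLB bounce-buffer engine into a reusable library: the slot allocator and its unmap/sync helpers are renamed swiotlb_tbl_map_single, swiotlb_tbl_unmap_single and swiotlb_tbl_sync_single and exported, swiotlb_bounce loses its static and is exported as well, swiotlb_init_with_tbl() accepts a caller-provided IO TLB area, the dma_sync_target enum definition leaves this file (its values are now needed by the exported prototypes, so it presumably moves to the swiotlb header, outside this diff), and the swiotlb_sync_single_range_* wrappers are removed. The sketch below is not part of the patch; it only illustrates, under stated assumptions, how an alternate DMA backend might drive the newly exported primitives. The my_backend_* names, my_io_tlb_start, and the virt_to_phys stand-in for a real bus-address translation are hypothetical.

/*
 * Hypothetical backend sketch (illustration only, not from this patch).
 */
#include <linux/swiotlb.h>
#include <linux/dma-mapping.h>
#include <asm/io.h>

static char *my_io_tlb_start;	/* bounce pool handed to swiotlb_init_with_tbl() */

/*
 * Placeholder: a real backend would return the device-visible bus address
 * of its pool; here the physical address stands in for it.
 */
static inline dma_addr_t my_backend_tbl_bus_addr(void)
{
	return virt_to_phys(my_io_tlb_start);
}

static void *my_backend_map(struct device *hwdev, phys_addr_t phys,
			    size_t size, enum dma_data_direction dir)
{
	/* Pick a free slot in the shared IO TLB and bounce the data in. */
	return swiotlb_tbl_map_single(hwdev, my_backend_tbl_bus_addr(),
				      phys, size, dir);
}

static void my_backend_unmap(struct device *hwdev, char *bounce_addr,
			     size_t size, enum dma_data_direction dir)
{
	/* Bounce the data back (for DMA_FROM_DEVICE) and free the slot. */
	swiotlb_tbl_unmap_single(hwdev, bounce_addr, size, dir);
}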
diff --git a/lib/swiotlb.c b/lib/swiotlb.c
index 437eedb5a53b..c47bbe11b804 100644
--- a/lib/swiotlb.c
+++ b/lib/swiotlb.c
@@ -28,6 +28,7 @@
 #include <linux/types.h>
 #include <linux/ctype.h>
 #include <linux/highmem.h>
+#include <linux/gfp.h>
 
 #include <asm/io.h>
 #include <asm/dma.h>
@@ -49,25 +50,17 @@
  */
 #define IO_TLB_MIN_SLABS ((1<<20) >> IO_TLB_SHIFT)
 
-/*
- * Enumeration for sync targets
- */
-enum dma_sync_target {
-	SYNC_FOR_CPU = 0,
-	SYNC_FOR_DEVICE = 1,
-};
-
 int swiotlb_force;
 
 /*
- * Used to do a quick range check in unmap_single and
- * sync_single_*, to see if the memory was in fact allocated by this
+ * Used to do a quick range check in swiotlb_tbl_unmap_single and
+ * swiotlb_tbl_sync_single_*, to see if the memory was in fact allocated by this
  * API.
  */
 static char *io_tlb_start, *io_tlb_end;
 
 /*
- * The number of IO TLB blocks (in groups of 64) betweeen io_tlb_start and
+ * The number of IO TLB blocks (in groups of 64) between io_tlb_start and
  * io_tlb_end. This is command line adjustable via setup_io_tlb_npages.
  */
 static unsigned long io_tlb_nslabs;
@@ -77,7 +70,7 @@ static unsigned long io_tlb_nslabs;
  */
 static unsigned long io_tlb_overflow = 32*1024;
 
-void *io_tlb_overflow_buffer;
+static void *io_tlb_overflow_buffer;
 
 /*
  * This is a free list describing the number of free entries available from
@@ -139,28 +132,14 @@ void swiotlb_print_info(void)
 	       (unsigned long long)pend);
 }
 
-/*
- * Statically reserve bounce buffer space and initialize bounce buffer data
- * structures for the software IO TLB used to implement the DMA API.
- */
-void __init
-swiotlb_init_with_default_size(size_t default_size, int verbose)
+void __init swiotlb_init_with_tbl(char *tlb, unsigned long nslabs, int verbose)
 {
 	unsigned long i, bytes;
 
-	if (!io_tlb_nslabs) {
-		io_tlb_nslabs = (default_size >> IO_TLB_SHIFT);
-		io_tlb_nslabs = ALIGN(io_tlb_nslabs, IO_TLB_SEGSIZE);
-	}
-
-	bytes = io_tlb_nslabs << IO_TLB_SHIFT;
+	bytes = nslabs << IO_TLB_SHIFT;
 
-	/*
-	 * Get IO TLB memory from the low pages
-	 */
-	io_tlb_start = alloc_bootmem_low_pages(bytes);
-	if (!io_tlb_start)
-		panic("Cannot allocate SWIOTLB buffer");
+	io_tlb_nslabs = nslabs;
+	io_tlb_start = tlb;
 	io_tlb_end = io_tlb_start + bytes;
 
 	/*
@@ -168,22 +147,48 @@ swiotlb_init_with_default_size(size_t default_size, int verbose)
 	 * to find contiguous free memory regions of size up to IO_TLB_SEGSIZE
 	 * between io_tlb_start and io_tlb_end.
 	 */
-	io_tlb_list = alloc_bootmem(io_tlb_nslabs * sizeof(int));
+	io_tlb_list = alloc_bootmem_pages(PAGE_ALIGN(io_tlb_nslabs * sizeof(int)));
 	for (i = 0; i < io_tlb_nslabs; i++)
 		io_tlb_list[i] = IO_TLB_SEGSIZE - OFFSET(i, IO_TLB_SEGSIZE);
 	io_tlb_index = 0;
-	io_tlb_orig_addr = alloc_bootmem(io_tlb_nslabs * sizeof(phys_addr_t));
+	io_tlb_orig_addr = alloc_bootmem_pages(PAGE_ALIGN(io_tlb_nslabs * sizeof(phys_addr_t)));
 
 	/*
 	 * Get the overflow emergency buffer
 	 */
-	io_tlb_overflow_buffer = alloc_bootmem_low(io_tlb_overflow);
+	io_tlb_overflow_buffer = alloc_bootmem_low_pages(PAGE_ALIGN(io_tlb_overflow));
 	if (!io_tlb_overflow_buffer)
 		panic("Cannot allocate SWIOTLB overflow buffer!\n");
 	if (verbose)
 		swiotlb_print_info();
 }
 
+/*
+ * Statically reserve bounce buffer space and initialize bounce buffer data
+ * structures for the software IO TLB used to implement the DMA API.
+ */
+void __init
+swiotlb_init_with_default_size(size_t default_size, int verbose)
+{
+	unsigned long bytes;
+
+	if (!io_tlb_nslabs) {
+		io_tlb_nslabs = (default_size >> IO_TLB_SHIFT);
+		io_tlb_nslabs = ALIGN(io_tlb_nslabs, IO_TLB_SEGSIZE);
+	}
+
+	bytes = io_tlb_nslabs << IO_TLB_SHIFT;
+
+	/*
+	 * Get IO TLB memory from the low pages
+	 */
+	io_tlb_start = alloc_bootmem_low_pages(PAGE_ALIGN(bytes));
+	if (!io_tlb_start)
+		panic("Cannot allocate SWIOTLB buffer");
+
+	swiotlb_init_with_tbl(io_tlb_start, io_tlb_nslabs, verbose);
+}
+
 void __init
 swiotlb_init(int verbose)
 {
@@ -303,13 +308,13 @@ void __init swiotlb_free(void)
 			   get_order(io_tlb_nslabs << IO_TLB_SHIFT));
 	} else {
 		free_bootmem_late(__pa(io_tlb_overflow_buffer),
-				  io_tlb_overflow);
+				  PAGE_ALIGN(io_tlb_overflow));
 		free_bootmem_late(__pa(io_tlb_orig_addr),
-				  io_tlb_nslabs * sizeof(phys_addr_t));
+				  PAGE_ALIGN(io_tlb_nslabs * sizeof(phys_addr_t)));
 		free_bootmem_late(__pa(io_tlb_list),
-				  io_tlb_nslabs * sizeof(int));
+				  PAGE_ALIGN(io_tlb_nslabs * sizeof(int)));
 		free_bootmem_late(__pa(io_tlb_start),
-				  io_tlb_nslabs << IO_TLB_SHIFT);
+				  PAGE_ALIGN(io_tlb_nslabs << IO_TLB_SHIFT));
 	}
 }
 
@@ -322,8 +327,8 @@ static int is_swiotlb_buffer(phys_addr_t paddr)
 /*
  * Bounce: copy the swiotlb buffer back to the original dma location
  */
-static void swiotlb_bounce(phys_addr_t phys, char *dma_addr, size_t size,
-			   enum dma_data_direction dir)
+void swiotlb_bounce(phys_addr_t phys, char *dma_addr, size_t size,
+		    enum dma_data_direction dir)
 {
 	unsigned long pfn = PFN_DOWN(phys);
 
@@ -359,26 +364,25 @@ static void swiotlb_bounce(phys_addr_t phys, char *dma_addr, size_t size,
 		memcpy(phys_to_virt(phys), dma_addr, size);
 	}
 }
+EXPORT_SYMBOL_GPL(swiotlb_bounce);
 
-/*
- * Allocates bounce buffer and returns its kernel virtual address.
- */
-static void *
-map_single(struct device *hwdev, phys_addr_t phys, size_t size, int dir)
+void *swiotlb_tbl_map_single(struct device *hwdev, dma_addr_t tbl_dma_addr,
+			     phys_addr_t phys, size_t size,
+			     enum dma_data_direction dir)
 {
 	unsigned long flags;
 	char *dma_addr;
 	unsigned int nslots, stride, index, wrap;
 	int i;
-	unsigned long start_dma_addr;
 	unsigned long mask;
 	unsigned long offset_slots;
 	unsigned long max_slots;
 
 	mask = dma_get_seg_boundary(hwdev);
-	start_dma_addr = swiotlb_virt_to_bus(hwdev, io_tlb_start) & mask;
 
-	offset_slots = ALIGN(start_dma_addr, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT;
+	tbl_dma_addr &= mask;
+
+	offset_slots = ALIGN(tbl_dma_addr, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT;
 
 	/*
 	 * Carefully handle integer overflow which can occur when mask == ~0UL.
@@ -465,12 +469,27 @@ found:
 
 	return dma_addr;
 }
+EXPORT_SYMBOL_GPL(swiotlb_tbl_map_single);
+
+/*
+ * Allocates bounce buffer and returns its kernel virtual address.
+ */
+
+static void *
+map_single(struct device *hwdev, phys_addr_t phys, size_t size,
+	   enum dma_data_direction dir)
+{
+	dma_addr_t start_dma_addr = swiotlb_virt_to_bus(hwdev, io_tlb_start);
+
+	return swiotlb_tbl_map_single(hwdev, start_dma_addr, phys, size, dir);
+}
 
 /*
  * dma_addr is the kernel virtual address of the bounce buffer to unmap.
  */
-static void
-do_unmap_single(struct device *hwdev, char *dma_addr, size_t size, int dir)
+void
+swiotlb_tbl_unmap_single(struct device *hwdev, char *dma_addr, size_t size,
+			 enum dma_data_direction dir)
 {
 	unsigned long flags;
 	int i, count, nslots = ALIGN(size, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT;
@@ -508,10 +527,12 @@ do_unmap_single(struct device *hwdev, char *dma_addr, size_t size, int dir)
 	}
 	spin_unlock_irqrestore(&io_tlb_lock, flags);
 }
+EXPORT_SYMBOL_GPL(swiotlb_tbl_unmap_single);
 
-static void
-sync_single(struct device *hwdev, char *dma_addr, size_t size,
-	    int dir, int target)
+void
+swiotlb_tbl_sync_single(struct device *hwdev, char *dma_addr, size_t size,
+			enum dma_data_direction dir,
+			enum dma_sync_target target)
 {
 	int index = (dma_addr - io_tlb_start) >> IO_TLB_SHIFT;
 	phys_addr_t phys = io_tlb_orig_addr[index];
@@ -535,6 +556,7 @@ sync_single(struct device *hwdev, char *dma_addr, size_t size,
 		BUG();
 	}
 }
+EXPORT_SYMBOL_GPL(swiotlb_tbl_sync_single);
 
 void *
 swiotlb_alloc_coherent(struct device *hwdev, size_t size,
@@ -558,8 +580,8 @@ swiotlb_alloc_coherent(struct device *hwdev, size_t size,
 	}
 	if (!ret) {
 		/*
-		 * We are either out of memory or the device can't DMA
-		 * to GFP_DMA memory; fall back on map_single(), which
+		 * We are either out of memory or the device can't DMA to
+		 * GFP_DMA memory; fall back on map_single(), which
 		 * will grab memory from the lowest available address range.
 		 */
 		ret = map_single(hwdev, 0, size, DMA_FROM_DEVICE);
@@ -577,7 +599,7 @@ swiotlb_alloc_coherent(struct device *hwdev, size_t size,
 		       (unsigned long long)dev_addr);
 
 		/* DMA_TO_DEVICE to avoid memcpy in unmap_single */
-		do_unmap_single(hwdev, ret, size, DMA_TO_DEVICE);
+		swiotlb_tbl_unmap_single(hwdev, ret, size, DMA_TO_DEVICE);
 		return NULL;
 	}
 	*dma_handle = dev_addr;
@@ -595,13 +617,14 @@ swiotlb_free_coherent(struct device *hwdev, size_t size, void *vaddr,
 	if (!is_swiotlb_buffer(paddr))
 		free_pages((unsigned long)vaddr, get_order(size));
 	else
-		/* DMA_TO_DEVICE to avoid memcpy in unmap_single */
-		do_unmap_single(hwdev, vaddr, size, DMA_TO_DEVICE);
+		/* DMA_TO_DEVICE to avoid memcpy in swiotlb_tbl_unmap_single */
+		swiotlb_tbl_unmap_single(hwdev, vaddr, size, DMA_TO_DEVICE);
 }
 EXPORT_SYMBOL(swiotlb_free_coherent);
 
 static void
-swiotlb_full(struct device *dev, size_t size, int dir, int do_panic)
+swiotlb_full(struct device *dev, size_t size, enum dma_data_direction dir,
+	     int do_panic)
 {
 	/*
 	 * Ran out of IOMMU space for this operation. This is very bad.
@@ -679,14 +702,14 @@ EXPORT_SYMBOL_GPL(swiotlb_map_page);
  * whatever the device wrote there.
  */
 static void unmap_single(struct device *hwdev, dma_addr_t dev_addr,
-			 size_t size, int dir)
+			 size_t size, enum dma_data_direction dir)
 {
 	phys_addr_t paddr = dma_to_phys(hwdev, dev_addr);
 
 	BUG_ON(dir == DMA_NONE);
 
 	if (is_swiotlb_buffer(paddr)) {
-		do_unmap_single(hwdev, phys_to_virt(paddr), size, dir);
+		swiotlb_tbl_unmap_single(hwdev, phys_to_virt(paddr), size, dir);
 		return;
 	}
 
@@ -722,14 +745,16 @@ EXPORT_SYMBOL_GPL(swiotlb_unmap_page);
  */
 static void
 swiotlb_sync_single(struct device *hwdev, dma_addr_t dev_addr,
-		    size_t size, int dir, int target)
+		    size_t size, enum dma_data_direction dir,
+		    enum dma_sync_target target)
 {
 	phys_addr_t paddr = dma_to_phys(hwdev, dev_addr);
 
 	BUG_ON(dir == DMA_NONE);
 
 	if (is_swiotlb_buffer(paddr)) {
-		sync_single(hwdev, phys_to_virt(paddr), size, dir, target);
+		swiotlb_tbl_sync_single(hwdev, phys_to_virt(paddr), size, dir,
+					target);
 		return;
 	}
 
@@ -756,37 +781,6 @@ swiotlb_sync_single_for_device(struct device *hwdev, dma_addr_t dev_addr,
 EXPORT_SYMBOL(swiotlb_sync_single_for_device);
 
 /*
- * Same as above, but for a sub-range of the mapping.
- */
-static void
-swiotlb_sync_single_range(struct device *hwdev, dma_addr_t dev_addr,
-			  unsigned long offset, size_t size,
-			  int dir, int target)
-{
-	swiotlb_sync_single(hwdev, dev_addr + offset, size, dir, target);
-}
-
-void
-swiotlb_sync_single_range_for_cpu(struct device *hwdev, dma_addr_t dev_addr,
-				  unsigned long offset, size_t size,
-				  enum dma_data_direction dir)
-{
-	swiotlb_sync_single_range(hwdev, dev_addr, offset, size, dir,
-				  SYNC_FOR_CPU);
-}
-EXPORT_SYMBOL_GPL(swiotlb_sync_single_range_for_cpu);
-
-void
-swiotlb_sync_single_range_for_device(struct device *hwdev, dma_addr_t dev_addr,
-				     unsigned long offset, size_t size,
-				     enum dma_data_direction dir)
-{
-	swiotlb_sync_single_range(hwdev, dev_addr, offset, size, dir,
-				  SYNC_FOR_DEVICE);
-}
-EXPORT_SYMBOL_GPL(swiotlb_sync_single_range_for_device);
-
-/*
  * Map a set of buffers described by scatterlist in streaming mode for DMA.
  * This is the scatter-gather version of the above swiotlb_map_page
  * interface. Here the scatter gather list elements are each tagged with the
@@ -839,7 +833,7 @@ EXPORT_SYMBOL(swiotlb_map_sg_attrs);
 
 int
 swiotlb_map_sg(struct device *hwdev, struct scatterlist *sgl, int nelems,
-	       int dir)
+	       enum dma_data_direction dir)
 {
 	return swiotlb_map_sg_attrs(hwdev, sgl, nelems, dir, NULL);
 }
@@ -866,7 +860,7 @@ EXPORT_SYMBOL(swiotlb_unmap_sg_attrs);
 
 void
 swiotlb_unmap_sg(struct device *hwdev, struct scatterlist *sgl, int nelems,
-		 int dir)
+		 enum dma_data_direction dir)
 {
 	return swiotlb_unmap_sg_attrs(hwdev, sgl, nelems, dir, NULL);
 }
@@ -881,7 +875,8 @@ EXPORT_SYMBOL(swiotlb_unmap_sg);
  */
 static void
 swiotlb_sync_sg(struct device *hwdev, struct scatterlist *sgl,
-		int nelems, int dir, int target)
+		int nelems, enum dma_data_direction dir,
+		enum dma_sync_target target)
 {
 	struct scatterlist *sg;
 	int i;