Diffstat (limited to 'lib')
-rw-r--r--	lib/swiotlb.c	137
1 file changed, 81 insertions, 56 deletions
diff --git a/lib/swiotlb.c b/lib/swiotlb.c
index a009055140ec..34e3082632d8 100644
--- a/lib/swiotlb.c
+++ b/lib/swiotlb.c
@@ -50,19 +50,11 @@
  */
 #define IO_TLB_MIN_SLABS ((1<<20) >> IO_TLB_SHIFT)
 
-/*
- * Enumeration for sync targets
- */
-enum dma_sync_target {
-	SYNC_FOR_CPU = 0,
-	SYNC_FOR_DEVICE = 1,
-};
-
 int swiotlb_force;
 
 /*
- * Used to do a quick range check in unmap_single and
- * sync_single_*, to see if the memory was in fact allocated by this
+ * Used to do a quick range check in swiotlb_tbl_unmap_single and
+ * swiotlb_tbl_sync_single_*, to see if the memory was in fact allocated by this
  * API.
  */
 static char *io_tlb_start, *io_tlb_end;
@@ -140,28 +132,14 @@ void swiotlb_print_info(void)
 	       (unsigned long long)pend);
 }
 
-/*
- * Statically reserve bounce buffer space and initialize bounce buffer data
- * structures for the software IO TLB used to implement the DMA API.
- */
-void __init
-swiotlb_init_with_default_size(size_t default_size, int verbose)
+void __init swiotlb_init_with_tbl(char *tlb, unsigned long nslabs, int verbose)
 {
 	unsigned long i, bytes;
 
-	if (!io_tlb_nslabs) {
-		io_tlb_nslabs = (default_size >> IO_TLB_SHIFT);
-		io_tlb_nslabs = ALIGN(io_tlb_nslabs, IO_TLB_SEGSIZE);
-	}
-
-	bytes = io_tlb_nslabs << IO_TLB_SHIFT;
+	bytes = nslabs << IO_TLB_SHIFT;
 
-	/*
-	 * Get IO TLB memory from the low pages
-	 */
-	io_tlb_start = alloc_bootmem_low_pages(bytes);
-	if (!io_tlb_start)
-		panic("Cannot allocate SWIOTLB buffer");
+	io_tlb_nslabs = nslabs;
+	io_tlb_start = tlb;
 	io_tlb_end = io_tlb_start + bytes;
 
 	/*
@@ -185,6 +163,32 @@ swiotlb_init_with_default_size(size_t default_size, int verbose)
 	swiotlb_print_info();
 }
 
+/*
+ * Statically reserve bounce buffer space and initialize bounce buffer data
+ * structures for the software IO TLB used to implement the DMA API.
+ */
+void __init
+swiotlb_init_with_default_size(size_t default_size, int verbose)
+{
+	unsigned long bytes;
+
+	if (!io_tlb_nslabs) {
+		io_tlb_nslabs = (default_size >> IO_TLB_SHIFT);
+		io_tlb_nslabs = ALIGN(io_tlb_nslabs, IO_TLB_SEGSIZE);
+	}
+
+	bytes = io_tlb_nslabs << IO_TLB_SHIFT;
+
+	/*
+	 * Get IO TLB memory from the low pages
+	 */
+	io_tlb_start = alloc_bootmem_low_pages(bytes);
+	if (!io_tlb_start)
+		panic("Cannot allocate SWIOTLB buffer");
+
+	swiotlb_init_with_tbl(io_tlb_start, io_tlb_nslabs, verbose);
+}
+
 void __init
 swiotlb_init(int verbose)
 {
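The split above is the point of the new entry path: swiotlb_init_with_tbl() takes a caller-provided buffer, while swiotlb_init_with_default_size() keeps the old bootmem allocation and now just feeds it in. A minimal sketch of an alternate backend handing in its own boot-time buffer; my_alloc_contiguous_low() is a hypothetical allocator standing in for whatever that backend uses to get DMA-able low memory:

/* Sketch only: my_alloc_contiguous_low() is hypothetical. */
void __init my_swiotlb_init(void)
{
	/* 64 MB pool, rounded to the segment size like the generic path. */
	unsigned long nslabs = ALIGN((64UL << 20) >> IO_TLB_SHIFT,
				     IO_TLB_SEGSIZE);
	char *tlb = my_alloc_contiguous_low(nslabs << IO_TLB_SHIFT);

	if (!tlb)
		panic("my_swiotlb: cannot allocate bounce buffer");

	/* The generic code takes over the bookkeeping for this buffer. */
	swiotlb_init_with_tbl(tlb, nslabs, 1 /* verbose */);
}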
@@ -323,8 +327,8 @@ static int is_swiotlb_buffer(phys_addr_t paddr)
 /*
  * Bounce: copy the swiotlb buffer back to the original dma location
  */
-static void swiotlb_bounce(phys_addr_t phys, char *dma_addr, size_t size,
-			   enum dma_data_direction dir)
+void swiotlb_bounce(phys_addr_t phys, char *dma_addr, size_t size,
+		    enum dma_data_direction dir)
 {
 	unsigned long pfn = PFN_DOWN(phys);
 
@@ -360,26 +364,25 @@ static void swiotlb_bounce(phys_addr_t phys, char *dma_addr, size_t size,
 		memcpy(phys_to_virt(phys), dma_addr, size);
 	}
 }
+EXPORT_SYMBOL_GPL(swiotlb_bounce);
 
-/*
- * Allocates bounce buffer and returns its kernel virtual address.
- */
-static void *
-map_single(struct device *hwdev, phys_addr_t phys, size_t size, int dir)
+void *swiotlb_tbl_map_single(struct device *hwdev, dma_addr_t tbl_dma_addr,
+			     phys_addr_t phys, size_t size,
+			     enum dma_data_direction dir)
 {
 	unsigned long flags;
 	char *dma_addr;
 	unsigned int nslots, stride, index, wrap;
 	int i;
-	unsigned long start_dma_addr;
 	unsigned long mask;
 	unsigned long offset_slots;
 	unsigned long max_slots;
 
 	mask = dma_get_seg_boundary(hwdev);
-	start_dma_addr = swiotlb_virt_to_bus(hwdev, io_tlb_start) & mask;
 
-	offset_slots = ALIGN(start_dma_addr, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT;
+	tbl_dma_addr &= mask;
+
+	offset_slots = ALIGN(tbl_dma_addr, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT;
 
 	/*
 	 * Carefully handle integer overflow which can occur when mask == ~0UL.
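With swiotlb_bounce() exported above, a backend can copy between an original page and its bounce slot on its own schedule instead of only through the sync entry points. A hedged sketch of a partial sync built on it; the phys/bounce pair is assumed to come from the caller's own bookkeeping of an earlier mapping:

/* Sketch only: bounce just the dirty tail of a mapped buffer back. */
static void my_bounce_tail(phys_addr_t orig_phys, char *bounce_vaddr,
			   size_t total, size_t dirty)
{
	size_t off = total - dirty;

	/* DMA_FROM_DEVICE: copy from the bounce slot back to orig_phys. */
	swiotlb_bounce(orig_phys + off, bounce_vaddr + off, dirty,
		       DMA_FROM_DEVICE);
}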
@@ -466,12 +469,27 @@ found:
 
 	return dma_addr;
 }
+EXPORT_SYMBOL_GPL(swiotlb_tbl_map_single);
+
+/*
+ * Allocates bounce buffer and returns its kernel virtual address.
+ */
+
+static void *
+map_single(struct device *hwdev, phys_addr_t phys, size_t size,
+	   enum dma_data_direction dir)
+{
+	dma_addr_t start_dma_addr = swiotlb_virt_to_bus(hwdev, io_tlb_start);
+
+	return swiotlb_tbl_map_single(hwdev, start_dma_addr, phys, size, dir);
+}
 
 /*
  * dma_addr is the kernel virtual address of the bounce buffer to unmap.
  */
-static void
-do_unmap_single(struct device *hwdev, char *dma_addr, size_t size, int dir)
+void
+swiotlb_tbl_unmap_single(struct device *hwdev, char *dma_addr, size_t size,
+			 enum dma_data_direction dir)
 {
 	unsigned long flags;
 	int i, count, nslots = ALIGN(size, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT;
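As the new map_single() wrapper shows, swiotlb_tbl_map_single() differs from the old internal helper only in taking the pool's bus address explicitly, so a backend with its own virt-to-bus translation can reuse the slot allocator. A sketch of a map/unmap round trip; my_virt_to_bus() and my_io_tlb_start (the backend's saved pointer to the buffer it passed to swiotlb_init_with_tbl()) are assumptions:

/*
 * Sketch under assumptions: my_virt_to_bus() is the backend's own
 * translation and my_io_tlb_start is its saved pointer to the pool.
 */
static dma_addr_t my_map(struct device *dev, phys_addr_t phys, size_t size,
			 enum dma_data_direction dir)
{
	dma_addr_t tbl = my_virt_to_bus(dev, my_io_tlb_start);
	char *vaddr = swiotlb_tbl_map_single(dev, tbl, phys, size, dir);

	return vaddr ? my_virt_to_bus(dev, vaddr) : 0;
}

static void my_unmap(struct device *dev, char *vaddr, size_t size,
		     enum dma_data_direction dir)
{
	/* Frees the slots and bounces back for DMA_FROM_DEVICE. */
	swiotlb_tbl_unmap_single(dev, vaddr, size, dir);
}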
@@ -509,10 +527,12 @@ do_unmap_single(struct device *hwdev, char *dma_addr, size_t size, int dir)
 	}
 	spin_unlock_irqrestore(&io_tlb_lock, flags);
 }
+EXPORT_SYMBOL_GPL(swiotlb_tbl_unmap_single);
 
-static void
-sync_single(struct device *hwdev, char *dma_addr, size_t size,
-	    int dir, int target)
+void
+swiotlb_tbl_sync_single(struct device *hwdev, char *dma_addr, size_t size,
+			enum dma_data_direction dir,
+			enum dma_sync_target target)
 {
 	int index = (dma_addr - io_tlb_start) >> IO_TLB_SHIFT;
 	phys_addr_t phys = io_tlb_orig_addr[index];
@@ -536,6 +556,7 @@ sync_single(struct device *hwdev, char *dma_addr, size_t size,
 		BUG();
 	}
 }
+EXPORT_SYMBOL_GPL(swiotlb_tbl_sync_single);
 
 void *
 swiotlb_alloc_coherent(struct device *hwdev, size_t size,
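The sync helper follows the same pattern, now typed with enum dma_sync_target; since its definition is removed from this file in the first hunk, it presumably moves to the swiotlb header as part of this series. A sketch of a backend's sync-for-CPU hook forwarding into it; my_bus_to_phys() is an assumed reverse translation:

/* Sketch only: my_bus_to_phys() is hypothetical. */
static void my_sync_single_for_cpu(struct device *dev, dma_addr_t dev_addr,
				   size_t size, enum dma_data_direction dir)
{
	char *vaddr = phys_to_virt(my_bus_to_phys(dev, dev_addr));

	/* Pull the device's writes out of the bounce slot for the CPU. */
	swiotlb_tbl_sync_single(dev, vaddr, size, dir, SYNC_FOR_CPU);
}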
@@ -559,8 +580,8 @@ swiotlb_alloc_coherent(struct device *hwdev, size_t size,
 	}
 	if (!ret) {
 		/*
-		 * We are either out of memory or the device can't DMA
-		 * to GFP_DMA memory; fall back on map_single(), which
+		 * We are either out of memory or the device can't DMA to
+		 * GFP_DMA memory; fall back on map_single(), which
 		 * will grab memory from the lowest available address range.
 		 */
 		ret = map_single(hwdev, 0, size, DMA_FROM_DEVICE);
@@ -578,7 +599,7 @@ swiotlb_alloc_coherent(struct device *hwdev, size_t size,
 		       (unsigned long long)dev_addr);
 
 		/* DMA_TO_DEVICE to avoid memcpy in unmap_single */
-		do_unmap_single(hwdev, ret, size, DMA_TO_DEVICE);
+		swiotlb_tbl_unmap_single(hwdev, ret, size, DMA_TO_DEVICE);
 		return NULL;
 	}
 	*dma_handle = dev_addr;
@@ -596,13 +617,14 @@ swiotlb_free_coherent(struct device *hwdev, size_t size, void *vaddr,
 	if (!is_swiotlb_buffer(paddr))
 		free_pages((unsigned long)vaddr, get_order(size));
 	else
-		/* DMA_TO_DEVICE to avoid memcpy in unmap_single */
-		do_unmap_single(hwdev, vaddr, size, DMA_TO_DEVICE);
+		/* DMA_TO_DEVICE to avoid memcpy in swiotlb_tbl_unmap_single */
+		swiotlb_tbl_unmap_single(hwdev, vaddr, size, DMA_TO_DEVICE);
 }
 EXPORT_SYMBOL(swiotlb_free_coherent);
 
 static void
-swiotlb_full(struct device *dev, size_t size, int dir, int do_panic)
+swiotlb_full(struct device *dev, size_t size, enum dma_data_direction dir,
+	     int do_panic)
 {
 	/*
 	 * Ran out of IOMMU space for this operation. This is very bad.
@@ -680,14 +702,14 @@ EXPORT_SYMBOL_GPL(swiotlb_map_page);
  * whatever the device wrote there.
  */
 static void unmap_single(struct device *hwdev, dma_addr_t dev_addr,
-			 size_t size, int dir)
+			 size_t size, enum dma_data_direction dir)
 {
 	phys_addr_t paddr = dma_to_phys(hwdev, dev_addr);
 
 	BUG_ON(dir == DMA_NONE);
 
 	if (is_swiotlb_buffer(paddr)) {
-		do_unmap_single(hwdev, phys_to_virt(paddr), size, dir);
+		swiotlb_tbl_unmap_single(hwdev, phys_to_virt(paddr), size, dir);
 		return;
 	}
 
@@ -723,14 +745,16 @@ EXPORT_SYMBOL_GPL(swiotlb_unmap_page);
  */
 static void
 swiotlb_sync_single(struct device *hwdev, dma_addr_t dev_addr,
-		    size_t size, int dir, int target)
+		    size_t size, enum dma_data_direction dir,
+		    enum dma_sync_target target)
 {
 	phys_addr_t paddr = dma_to_phys(hwdev, dev_addr);
 
 	BUG_ON(dir == DMA_NONE);
 
 	if (is_swiotlb_buffer(paddr)) {
-		sync_single(hwdev, phys_to_virt(paddr), size, dir, target);
+		swiotlb_tbl_sync_single(hwdev, phys_to_virt(paddr), size, dir,
+					target);
 		return;
 	}
 
@@ -809,7 +833,7 @@ EXPORT_SYMBOL(swiotlb_map_sg_attrs);
 
 int
 swiotlb_map_sg(struct device *hwdev, struct scatterlist *sgl, int nelems,
-	       int dir)
+	       enum dma_data_direction dir)
 {
 	return swiotlb_map_sg_attrs(hwdev, sgl, nelems, dir, NULL);
 }
@@ -836,7 +860,7 @@ EXPORT_SYMBOL(swiotlb_unmap_sg_attrs);
 
 void
 swiotlb_unmap_sg(struct device *hwdev, struct scatterlist *sgl, int nelems,
-		 int dir)
+		 enum dma_data_direction dir)
 {
 	return swiotlb_unmap_sg_attrs(hwdev, sgl, nelems, dir, NULL);
 }
@@ -851,7 +875,8 @@ EXPORT_SYMBOL(swiotlb_unmap_sg);
  */
 static void
 swiotlb_sync_sg(struct device *hwdev, struct scatterlist *sgl,
-		int nelems, int dir, int target)
+		int nelems, enum dma_data_direction dir,
+		enum dma_sync_target target)
 {
 	struct scatterlist *sg;
 	int i;