author		Becky Bruce <beckyb@kernel.crashing.org>	2008-12-22 13:26:09 -0500
committer	Ingo Molnar <mingo@elte.hu>	2008-12-28 04:06:47 -0500
commit		fb05a37929e0cd99016b4f5e5a5ef077fb10a947 (patch)
tree		154b9d0d3b984a03480f54d00f82f56fb3407d2f /lib
parent		bc40ac66988a7721f2a244b6df65f8c13d16479c (diff)
swiotlb: add support for systems with highmem
Impact: extend code for highmem - existing users unaffected

On highmem systems, the original dma buffer might not have a virtual
mapping - we need to kmap it in to perform the bounce. Extract the code
that does the actual copy into a function that does the kmap if highmem
is enabled, and default to the normal swiotlb memcpy if not.

[ ported by Jeremy Fitzhardinge <jeremy.fitzhardinge@citrix.com> ]

Signed-off-by: Becky Bruce <beckyb@kernel.crashing.org>
Signed-off-by: Jeremy Fitzhardinge <jeremy.fitzhardinge@citrix.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
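The per-page walk that the new swiotlb_bounce() performs (see the diff
below) can be summarized as: take the page frame number of the source
physical address, copy up to the next page boundary, then advance one
frame at a time with the in-page offset reset to zero. The following is
a minimal user-space sketch of that arithmetic, not kernel code:
PAGE_SIZE, the flat mem array standing in for pfn_to_page()/kmap_atomic(),
and the name bounce_to_device are all stand-ins for illustration.

#include <stddef.h>
#include <string.h>

#define PAGE_SIZE 4096UL
#define PAGE_MASK (~(PAGE_SIZE - 1))

/*
 * Copy "size" bytes starting at byte offset "phys" of "mem" into
 * "dma_addr", chunked at page boundaries the same way swiotlb_bounce()
 * chunks its kmap_atomic() windows.
 */
static void bounce_to_device(const char *mem, unsigned long phys,
			     char *dma_addr, size_t size)
{
	unsigned long pfn = phys / PAGE_SIZE;	/* PFN_DOWN(phys) */
	size_t offset = phys & ~PAGE_MASK;	/* offset within the first page */

	while (size) {
		/* sz = min(PAGE_SIZE - offset, size) */
		size_t sz = PAGE_SIZE - offset;
		if (sz > size)
			sz = size;

		/* kernel: buffer = kmap_atomic(pfn_to_page(pfn), KM_BOUNCE_READ) */
		const char *buffer = mem + pfn * PAGE_SIZE;
		memcpy(dma_addr, buffer + offset, sz);
		/* kernel: kunmap_atomic(buffer, KM_BOUNCE_READ) */

		size -= sz;
		pfn++;		/* next page frame of the source */
		dma_addr += sz;	/* the bounce buffer advances linearly */
		offset = 0;	/* later pages are copied from their start */
	}
}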
Diffstat (limited to 'lib')
-rw-r--r--	lib/swiotlb.c	68
1 file changed, 51 insertions, 17 deletions
diff --git a/lib/swiotlb.c b/lib/swiotlb.c
index 98a7a4450e02..785046e4c3ab 100644
--- a/lib/swiotlb.c
+++ b/lib/swiotlb.c
@@ -14,6 +14,7 @@
  * 04/07/.. ak		Better overflow handling. Assorted fixes.
  * 05/09/10 linville	Add support for syncing ranges, support syncing for
  *			DMA_BIDIRECTIONAL mappings, miscellaneous cleanup.
+ * 08/12/11 beckyb	Add highmem support
  */
 
 #include <linux/cache.h>
@@ -24,6 +25,7 @@
 #include <linux/swiotlb.h>
 #include <linux/string.h>
 #include <linux/swiotlb.h>
+#include <linux/pfn.h>
 #include <linux/types.h>
 #include <linux/ctype.h>
 #include <linux/highmem.h>
@@ -149,11 +151,6 @@ int __weak swiotlb_arch_range_needs_mapping(void *ptr, size_t size)
 	return 0;
 }
 
-static dma_addr_t swiotlb_sg_to_bus(struct device *hwdev, struct scatterlist *sg)
-{
-	return swiotlb_phys_to_bus(hwdev, page_to_phys(sg_page(sg)) + sg->offset);
-}
-
 static void swiotlb_print_info(unsigned long bytes)
 {
 	phys_addr_t pstart, pend;
@@ -330,6 +327,47 @@ static int is_swiotlb_buffer(char *addr)
 }
 
 /*
+ * Bounce: copy the swiotlb buffer back to the original dma location
+ */
+static void swiotlb_bounce(phys_addr_t phys, char *dma_addr, size_t size,
+			   enum dma_data_direction dir)
+{
+	unsigned long pfn = PFN_DOWN(phys);
+
+	if (PageHighMem(pfn_to_page(pfn))) {
+		/* The buffer does not have a mapping.  Map it in and copy */
+		unsigned int offset = phys & ~PAGE_MASK;
+		char *buffer;
+		unsigned int sz = 0;
+		unsigned long flags;
+
+		while (size) {
+			sz = min(PAGE_SIZE - offset, size);
+
+			local_irq_save(flags);
+			buffer = kmap_atomic(pfn_to_page(pfn),
+					     KM_BOUNCE_READ);
+			if (dir == DMA_TO_DEVICE)
+				memcpy(dma_addr, buffer + offset, sz);
+			else
+				memcpy(buffer + offset, dma_addr, sz);
+			kunmap_atomic(buffer, KM_BOUNCE_READ);
+			local_irq_restore(flags);
+
+			size -= sz;
+			pfn++;
+			dma_addr += sz;
+			offset = 0;
+		}
+	} else {
+		if (dir == DMA_TO_DEVICE)
+			memcpy(dma_addr, phys_to_virt(phys), size);
+		else
+			memcpy(phys_to_virt(phys), dma_addr, size);
+	}
+}
+
+/*
  * Allocates bounce buffer and returns its kernel virtual address.
  */
 static void *
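A note on the locking in the highmem branch above: kmap_atomic() with the
fixed KM_BOUNCE_READ slot hands out a per-CPU mapping, and swiotlb_bounce()
can run in interrupt context, so each per-page copy is bracketed with
local_irq_save()/local_irq_restore() to keep an interrupt on the same CPU
from reusing the slot mid-copy. Only one page can be mapped at a time,
hence the chunked loop; the bounce-buffer side (dma_addr) needs no mapping
because the swiotlb pool is allocated in low memory.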
@@ -430,7 +468,7 @@ found:
 	for (i = 0; i < nslots; i++)
 		io_tlb_orig_addr[index+i] = phys + (i << IO_TLB_SHIFT);
 	if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL)
-		memcpy(dma_addr, phys_to_virt(phys), size);
+		swiotlb_bounce(phys, dma_addr, size, DMA_TO_DEVICE);
 
 	return dma_addr;
 }
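Note that the direction passed to swiotlb_bounce() here is hard-coded to
DMA_TO_DEVICE rather than forwarded from the caller: this copy runs at map
time, and its only job is to prefill the bounce buffer with the original
buffer's contents before the device reads it, which applies to
DMA_BIDIRECTIONAL mappings as well.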
@@ -450,11 +488,7 @@ unmap_single(struct device *hwdev, char *dma_addr, size_t size, int dir)
 	 * First, sync the memory before unmapping the entry
 	 */
 	if (phys && ((dir == DMA_FROM_DEVICE) || (dir == DMA_BIDIRECTIONAL)))
-		/*
-		 * bounce... copy the data back into the original buffer * and
-		 * delete the bounce buffer.
-		 */
-		memcpy(phys_to_virt(phys), dma_addr, size);
+		swiotlb_bounce(phys, dma_addr, size, DMA_FROM_DEVICE);
 
 	/*
 	 * Return the buffer to the free list by setting the corresponding
@@ -494,13 +528,13 @@ sync_single(struct device *hwdev, char *dma_addr, size_t size,
 	switch (target) {
 	case SYNC_FOR_CPU:
 		if (likely(dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL))
-			memcpy(phys_to_virt(phys), dma_addr, size);
+			swiotlb_bounce(phys, dma_addr, size, DMA_FROM_DEVICE);
 		else
 			BUG_ON(dir != DMA_TO_DEVICE);
 		break;
 	case SYNC_FOR_DEVICE:
 		if (likely(dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL))
-			memcpy(dma_addr, phys_to_virt(phys), size);
+			swiotlb_bounce(phys, dma_addr, size, DMA_TO_DEVICE);
 		else
 			BUG_ON(dir != DMA_FROM_DEVICE);
 		break;
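For context on when these sync paths run, here is a hedged sketch of a
typical streaming-DMA sequence in a driver; dev, buf, and len are
hypothetical, and the dma_* calls are the generic DMA API that reaches
sync_single() on swiotlb-backed systems:

	/* map: may redirect buf into the bounce pool */
	dma_addr_t handle = dma_map_single(dev, buf, len, DMA_FROM_DEVICE);

	/* ... the device DMAs into the (possibly bounced) buffer ... */

	/* SYNC_FOR_CPU with DMA_FROM_DEVICE: bounce device data back */
	dma_sync_single_for_cpu(dev, handle, len, DMA_FROM_DEVICE);

	/* CPU examines buf, then hands the buffer back to the device;
	 * SYNC_FOR_DEVICE with DMA_FROM_DEVICE copies nothing (see above) */
	dma_sync_single_for_device(dev, handle, len, DMA_FROM_DEVICE);

	dma_unmap_single(dev, handle, len, DMA_FROM_DEVICE);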
@@ -817,11 +851,11 @@ swiotlb_unmap_sg_attrs(struct device *hwdev, struct scatterlist *sgl,
 	BUG_ON(dir == DMA_NONE);
 
 	for_each_sg(sgl, sg, nelems, i) {
-		if (sg->dma_address != swiotlb_sg_to_bus(hwdev, sg))
+		if (sg->dma_address != swiotlb_virt_to_bus(hwdev, sg_virt(sg)))
 			unmap_single(hwdev, swiotlb_bus_to_virt(sg->dma_address),
 				     sg->dma_length, dir);
 		else if (dir == DMA_FROM_DEVICE)
-			dma_mark_clean(swiotlb_bus_to_virt(sg->dma_address), sg->dma_length);
+			dma_mark_clean(sg_virt(sg), sg->dma_length);
 	}
 }
 EXPORT_SYMBOL(swiotlb_unmap_sg_attrs);
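The comparison sg->dma_address != swiotlb_virt_to_bus(hwdev, sg_virt(sg))
in these scatter-gather loops is the test for whether an entry was bounced
at map time: if the stored dma_address still matches the bus address of
the original buffer, the entry was mapped directly, while a mismatch means
it points into the bounce pool and must go through unmap_single() or
sync_single(). The removed swiotlb_sg_to_bus() helper computed an
equivalent reference address from the page and offset via page_to_phys().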
@@ -850,11 +884,11 @@ swiotlb_sync_sg(struct device *hwdev, struct scatterlist *sgl,
 	BUG_ON(dir == DMA_NONE);
 
 	for_each_sg(sgl, sg, nelems, i) {
-		if (sg->dma_address != swiotlb_sg_to_bus(hwdev, sg))
+		if (sg->dma_address != swiotlb_virt_to_bus(hwdev, sg_virt(sg)))
 			sync_single(hwdev, swiotlb_bus_to_virt(sg->dma_address),
 				    sg->dma_length, dir, target);
 		else if (dir == DMA_FROM_DEVICE)
-			dma_mark_clean(swiotlb_bus_to_virt(sg->dma_address), sg->dma_length);
+			dma_mark_clean(sg_virt(sg), sg->dma_length);
 	}
 }
 