-rw-r--r--  include/linux/swiotlb.h  |  3
-rw-r--r--  lib/swiotlb.c            | 35
2 files changed, 16 insertions, 22 deletions
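
In short: swiotlb_bounce() loses its extern prototype in include/linux/swiotlb.h and its EXPORT_SYMBOL_GPL, becomes static in lib/swiotlb.c, and now takes the bounce-buffer slot as a phys_addr_t tlb_addr instead of a pre-translated char *dma_addr, so the phys_to_virt() conversion moves inside the helper. A minimal before/after sketch of the interface, drawn from the hunks below (illustration only, not part of the patch; surrounding code omitted):

    /* Before: each caller translated the slot address itself. */
    extern void swiotlb_bounce(phys_addr_t phys, char *dma_addr, size_t size,
                               enum dma_data_direction dir);
    swiotlb_bounce(orig_addr, phys_to_virt(tlb_addr), size, DMA_TO_DEVICE);

    /* After: the translation happens inside the now-static helper. */
    static void swiotlb_bounce(phys_addr_t orig_addr, phys_addr_t tlb_addr,
                               size_t size, enum dma_data_direction dir);
    swiotlb_bounce(orig_addr, tlb_addr, size, DMA_TO_DEVICE);
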
diff --git a/include/linux/swiotlb.h b/include/linux/swiotlb.h
index e0ac98fd81a9..071d62c214a6 100644
--- a/include/linux/swiotlb.h
+++ b/include/linux/swiotlb.h
@@ -53,9 +53,6 @@ extern void swiotlb_tbl_sync_single(struct device *hwdev,
 			enum dma_sync_target target);
 
 /* Accessory functions. */
-extern void swiotlb_bounce(phys_addr_t phys, char *dma_addr, size_t size,
-			   enum dma_data_direction dir);
-
 extern void
 *swiotlb_alloc_coherent(struct device *hwdev, size_t size,
 			dma_addr_t *dma_handle, gfp_t flags);
diff --git a/lib/swiotlb.c b/lib/swiotlb.c
index 16a548dc91ac..196b06984dec 100644
--- a/lib/swiotlb.c
+++ b/lib/swiotlb.c
@@ -355,14 +355,15 @@ static int is_swiotlb_buffer(phys_addr_t paddr)
 /*
  * Bounce: copy the swiotlb buffer back to the original dma location
  */
-void swiotlb_bounce(phys_addr_t phys, char *dma_addr, size_t size,
-		    enum dma_data_direction dir)
+static void swiotlb_bounce(phys_addr_t orig_addr, phys_addr_t tlb_addr,
+			   size_t size, enum dma_data_direction dir)
 {
-	unsigned long pfn = PFN_DOWN(phys);
+	unsigned long pfn = PFN_DOWN(orig_addr);
+	unsigned char *vaddr = phys_to_virt(tlb_addr);
 
 	if (PageHighMem(pfn_to_page(pfn))) {
 		/* The buffer does not have a mapping. Map it in and copy */
-		unsigned int offset = phys & ~PAGE_MASK;
+		unsigned int offset = orig_addr & ~PAGE_MASK;
 		char *buffer;
 		unsigned int sz = 0;
 		unsigned long flags;
@@ -373,25 +374,23 @@ void swiotlb_bounce(phys_addr_t phys, char *dma_addr, size_t size,
 			local_irq_save(flags);
 			buffer = kmap_atomic(pfn_to_page(pfn));
 			if (dir == DMA_TO_DEVICE)
-				memcpy(dma_addr, buffer + offset, sz);
+				memcpy(vaddr, buffer + offset, sz);
 			else
-				memcpy(buffer + offset, dma_addr, sz);
+				memcpy(buffer + offset, vaddr, sz);
 			kunmap_atomic(buffer);
 			local_irq_restore(flags);
 
 			size -= sz;
 			pfn++;
-			dma_addr += sz;
+			vaddr += sz;
 			offset = 0;
 		}
+	} else if (dir == DMA_TO_DEVICE) {
+		memcpy(vaddr, phys_to_virt(orig_addr), size);
 	} else {
-		if (dir == DMA_TO_DEVICE)
-			memcpy(dma_addr, phys_to_virt(phys), size);
-		else
-			memcpy(phys_to_virt(phys), dma_addr, size);
+		memcpy(phys_to_virt(orig_addr), vaddr, size);
 	}
 }
-EXPORT_SYMBOL_GPL(swiotlb_bounce);
 
 phys_addr_t swiotlb_tbl_map_single(struct device *hwdev,
 				   dma_addr_t tbl_dma_addr,
@@ -493,8 +492,7 @@ found:
 	for (i = 0; i < nslots; i++)
 		io_tlb_orig_addr[index+i] = orig_addr + (i << IO_TLB_SHIFT);
 	if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL)
-		swiotlb_bounce(orig_addr, phys_to_virt(tlb_addr), size,
-			       DMA_TO_DEVICE);
+		swiotlb_bounce(orig_addr, tlb_addr, size, DMA_TO_DEVICE);
 
 	return tlb_addr;
 }
@@ -526,9 +524,8 @@ void swiotlb_tbl_unmap_single(struct device *hwdev, phys_addr_t tlb_addr,
 	/*
 	 * First, sync the memory before unmapping the entry
 	 */
-	if (phys && ((dir == DMA_FROM_DEVICE) || (dir == DMA_BIDIRECTIONAL)))
-		swiotlb_bounce(orig_addr, phys_to_virt(tlb_addr),
-			       size, DMA_FROM_DEVICE);
+	if (orig_addr && ((dir == DMA_FROM_DEVICE) || (dir == DMA_BIDIRECTIONAL)))
+		swiotlb_bounce(orig_addr, tlb_addr, size, DMA_FROM_DEVICE);
 
 	/*
 	 * Return the buffer to the free list by setting the corresponding
@@ -569,14 +566,14 @@ void swiotlb_tbl_sync_single(struct device *hwdev, phys_addr_t tlb_addr,
 	switch (target) {
 	case SYNC_FOR_CPU:
 		if (likely(dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL))
-			swiotlb_bounce(orig_addr, phys_to_virt(tlb_addr),
+			swiotlb_bounce(orig_addr, tlb_addr,
 				       size, DMA_FROM_DEVICE);
 		else
 			BUG_ON(dir != DMA_TO_DEVICE);
 		break;
 	case SYNC_FOR_DEVICE:
 		if (likely(dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL))
-			swiotlb_bounce(orig_addr, phys_to_virt(tlb_addr),
+			swiotlb_bounce(orig_addr, tlb_addr,
 				       size, DMA_TO_DEVICE);
 		else
 			BUG_ON(dir != DMA_FROM_DEVICE);