author    Alexander Duyck <alexander.h.duyck@intel.com>     2012-10-15 13:19:55 -0400
committer Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>    2012-10-30 09:32:07 -0400
commit    af51a9f1848ff50079a10def56a2c064f326af22
tree      c20814b1488a23a9f861f2f91bb74abcd30977bc
parent    fbfda893eb570bbe9e9ad9128b6e9cf2a1e48c87
swiotlb: Do not export swiotlb_bounce since there are no external consumers
Currently swiotlb is the only consumer for swiotlb_bounce. Since that is the case it doesn't make much sense to be exporting it, so make it a static function only.

In addition we can save a few more lines of code by making it accept the DMA address as a physical address instead of a virtual one. This is the last piece in essentially pushing all of the DMA address values to use physical addresses in swiotlb.

In order to clarify things, since we now have two physical addresses in use inside of swiotlb_bounce, I am renaming phys to orig_addr and dma_addr to tlb_addr. This way it should be clear that orig_addr is an address contained within io_tlb_orig_addr and tlb_addr is an address within the io_tlb bounce buffer.

Signed-off-by: Alexander Duyck <alexander.h.duyck@intel.com>
Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
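In miniature, the interface change looks like this (condensed from the diff below; shown only to illustrate the new calling convention, not as a complete listing):

	/* Before: exported, and every caller had to convert the bounce
	 * buffer address to a virtual pointer itself. */
	void swiotlb_bounce(phys_addr_t phys, char *dma_addr, size_t size,
			    enum dma_data_direction dir);

	swiotlb_bounce(orig_addr, phys_to_virt(tlb_addr), size, DMA_TO_DEVICE);

	/* After: static, taking two physical addresses; the single
	 * phys_to_virt() conversion happens once, inside the function. */
	static void swiotlb_bounce(phys_addr_t orig_addr, phys_addr_t tlb_addr,
				   size_t size, enum dma_data_direction dir);

	swiotlb_bounce(orig_addr, tlb_addr, size, DMA_TO_DEVICE);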
Diffstat (limited to 'lib/swiotlb.c')
-rw-r--r--	lib/swiotlb.c	35
1 file changed, 16 insertions(+), 19 deletions(-)
diff --git a/lib/swiotlb.c b/lib/swiotlb.c
index 16a548dc91ac..196b06984dec 100644
--- a/lib/swiotlb.c
+++ b/lib/swiotlb.c
@@ -355,14 +355,15 @@ static int is_swiotlb_buffer(phys_addr_t paddr)
 /*
  * Bounce: copy the swiotlb buffer back to the original dma location
  */
-void swiotlb_bounce(phys_addr_t phys, char *dma_addr, size_t size,
-		    enum dma_data_direction dir)
+static void swiotlb_bounce(phys_addr_t orig_addr, phys_addr_t tlb_addr,
+			   size_t size, enum dma_data_direction dir)
 {
-	unsigned long pfn = PFN_DOWN(phys);
+	unsigned long pfn = PFN_DOWN(orig_addr);
+	unsigned char *vaddr = phys_to_virt(tlb_addr);
 
 	if (PageHighMem(pfn_to_page(pfn))) {
 		/* The buffer does not have a mapping.  Map it in and copy */
-		unsigned int offset = phys & ~PAGE_MASK;
+		unsigned int offset = orig_addr & ~PAGE_MASK;
 		char *buffer;
 		unsigned int sz = 0;
 		unsigned long flags;
@@ -373,25 +374,23 @@ void swiotlb_bounce(phys_addr_t phys, char *dma_addr, size_t size,
 			local_irq_save(flags);
 			buffer = kmap_atomic(pfn_to_page(pfn));
 			if (dir == DMA_TO_DEVICE)
-				memcpy(dma_addr, buffer + offset, sz);
+				memcpy(vaddr, buffer + offset, sz);
 			else
-				memcpy(buffer + offset, dma_addr, sz);
+				memcpy(buffer + offset, vaddr, sz);
 			kunmap_atomic(buffer);
 			local_irq_restore(flags);
 
 			size -= sz;
 			pfn++;
-			dma_addr += sz;
+			vaddr += sz;
 			offset = 0;
 		}
+	} else if (dir == DMA_TO_DEVICE) {
+		memcpy(vaddr, phys_to_virt(orig_addr), size);
 	} else {
-		if (dir == DMA_TO_DEVICE)
-			memcpy(dma_addr, phys_to_virt(phys), size);
-		else
-			memcpy(phys_to_virt(phys), dma_addr, size);
+		memcpy(phys_to_virt(orig_addr), vaddr, size);
 	}
 }
-EXPORT_SYMBOL_GPL(swiotlb_bounce);
 
 phys_addr_t swiotlb_tbl_map_single(struct device *hwdev,
 				   dma_addr_t tbl_dma_addr,
@@ -493,8 +492,7 @@ found:
 	for (i = 0; i < nslots; i++)
 		io_tlb_orig_addr[index+i] = orig_addr + (i << IO_TLB_SHIFT);
 	if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL)
-		swiotlb_bounce(orig_addr, phys_to_virt(tlb_addr), size,
-			       DMA_TO_DEVICE);
+		swiotlb_bounce(orig_addr, tlb_addr, size, DMA_TO_DEVICE);
 
 	return tlb_addr;
 }
@@ -526,9 +524,8 @@ void swiotlb_tbl_unmap_single(struct device *hwdev, phys_addr_t tlb_addr,
 	/*
 	 * First, sync the memory before unmapping the entry
 	 */
-	if (phys && ((dir == DMA_FROM_DEVICE) || (dir == DMA_BIDIRECTIONAL)))
-		swiotlb_bounce(orig_addr, phys_to_virt(tlb_addr),
-			       size, DMA_FROM_DEVICE);
+	if (orig_addr && ((dir == DMA_FROM_DEVICE) || (dir == DMA_BIDIRECTIONAL)))
+		swiotlb_bounce(orig_addr, tlb_addr, size, DMA_FROM_DEVICE);
 
 	/*
 	 * Return the buffer to the free list by setting the corresponding
@@ -569,14 +566,14 @@ void swiotlb_tbl_sync_single(struct device *hwdev, phys_addr_t tlb_addr,
 	switch (target) {
 	case SYNC_FOR_CPU:
 		if (likely(dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL))
-			swiotlb_bounce(orig_addr, phys_to_virt(tlb_addr),
+			swiotlb_bounce(orig_addr, tlb_addr,
 				       size, DMA_FROM_DEVICE);
 		else
 			BUG_ON(dir != DMA_TO_DEVICE);
 		break;
 	case SYNC_FOR_DEVICE:
 		if (likely(dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL))
-			swiotlb_bounce(orig_addr, phys_to_virt(tlb_addr),
+			swiotlb_bounce(orig_addr, tlb_addr,
 				       size, DMA_TO_DEVICE);
 		else
 			BUG_ON(dir != DMA_FROM_DEVICE);