author    Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>  2010-05-10 15:54:20 -0400
committer Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>  2010-06-07 11:59:26 -0400
commit    bfc5501f6d816082274e10cd45a2d5f32603b328
tree      c7b0e2cdfd8b98562c4090bbc63f41aeaed098d3  /lib/swiotlb.c
parent    abbceff7d7a884968e876e52578da1db4a4f6b54
swiotlb: Make internal bookkeeping functions have 'swiotlb_tbl' prefix.
The functions that operate on io_tlb_list/io_tlb_start/io_tlb_orig_addr
now carry the 'swiotlb_tbl' prefix.

See "swiotlb: add swiotlb_tbl_map_single library function" for the full
description of the patchset.
Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
Acked-by: FUJITA Tomonori <fujita.tomonori@lab.ntt.co.jp>
Tested-by: Albert Herranz <albert_herranz@yahoo.es>
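
[Editor's note] The "DMA_TO_DEVICE to avoid memcpy" comments touched in the hunks
below rely on swiotlb's bounce-copy rule: on unmap, data is copied from the
bounce buffer back to the caller's original buffer only for DMA_FROM_DEVICE and
DMA_BIDIRECTIONAL mappings, so tearing a mapping down with DMA_TO_DEVICE skips
that copy. A minimal userspace sketch of the rule, assuming simplified stand-ins
for the kernel's dma_data_direction values and helpers (nothing below is code
from lib/swiotlb.c):

/*
 * Minimal userspace sketch of swiotlb's direction-dependent bounce
 * copying. The enum values and helper name are simplified stand-ins,
 * not the kernel's definitions.
 */
#include <stdio.h>
#include <string.h>

enum dir { DMA_BIDIRECTIONAL, DMA_TO_DEVICE, DMA_FROM_DEVICE };

/* On unmap, only device-written data needs copying back to the caller. */
static void bounce_unmap(char *orig, const char *bounce, size_t size, enum dir d)
{
        if (d == DMA_FROM_DEVICE || d == DMA_BIDIRECTIONAL)
                memcpy(orig, bounce, size);
        /* DMA_TO_DEVICE: the device only read the buffer; skip the memcpy. */
}

int main(void)
{
        char orig[8] = "caller", bounce[8] = "device";

        /* Freeing a coherent buffer: no device data worth preserving, so
         * the swiotlb code passes DMA_TO_DEVICE and the copy is skipped. */
        bounce_unmap(orig, bounce, sizeof(orig), DMA_TO_DEVICE);
        printf("after TO_DEVICE unmap:   %s\n", orig);   /* caller */

        bounce_unmap(orig, bounce, sizeof(orig), DMA_FROM_DEVICE);
        printf("after FROM_DEVICE unmap: %s\n", orig);   /* device */
        return 0;
}

This is why both swiotlb_free_coherent() and the alloc_coherent error path in
the hunks below can unmap with DMA_TO_DEVICE even though the buffer was
originally mapped DMA_FROM_DEVICE.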
Diffstat (limited to 'lib/swiotlb.c')
 lib/swiotlb.c | 24 +++++++++++++-----------
 1 file changed, 13 insertions(+), 11 deletions(-)
diff --git a/lib/swiotlb.c b/lib/swiotlb.c
index ec61e1507d0a..1fc15bf63945 100644
--- a/lib/swiotlb.c
+++ b/lib/swiotlb.c
@@ -61,8 +61,8 @@ enum dma_sync_target {
 int swiotlb_force;
 
 /*
- * Used to do a quick range check in unmap_single and
- * sync_single_*, to see if the memory was in fact allocated by this
+ * Used to do a quick range check in swiotlb_tbl_unmap_single and
+ * swiotlb_tbl_sync_single_*, to see if the memory was in fact allocated by this
  * API.
  */
 static char *io_tlb_start, *io_tlb_end;
@@ -492,7 +492,8 @@ map_single(struct device *hwdev, phys_addr_t phys, size_t size, int dir)
  * dma_addr is the kernel virtual address of the bounce buffer to unmap.
  */
 static void
-do_unmap_single(struct device *hwdev, char *dma_addr, size_t size, int dir)
+swiotlb_tbl_unmap_single(struct device *hwdev, char *dma_addr, size_t size,
+                         int dir)
 {
         unsigned long flags;
         int i, count, nslots = ALIGN(size, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT;
@@ -532,7 +533,7 @@ do_unmap_single(struct device *hwdev, char *dma_addr, size_t size, int dir)
 }
 
 static void
-sync_single(struct device *hwdev, char *dma_addr, size_t size,
+swiotlb_tbl_sync_single(struct device *hwdev, char *dma_addr, size_t size,
             int dir, int target)
 {
         int index = (dma_addr - io_tlb_start) >> IO_TLB_SHIFT;
@@ -580,8 +581,8 @@ swiotlb_alloc_coherent(struct device *hwdev, size_t size,
         }
         if (!ret) {
                 /*
-                 * We are either out of memory or the device can't DMA
-                 * to GFP_DMA memory; fall back on map_single(), which
+                 * We are either out of memory or the device can't DMA to
+                 * GFP_DMA memory; fall back on map_single(), which
                  * will grab memory from the lowest available address range.
                  */
                 ret = map_single(hwdev, 0, size, DMA_FROM_DEVICE);
@@ -599,7 +600,7 @@ swiotlb_alloc_coherent(struct device *hwdev, size_t size,
                        (unsigned long long)dev_addr);
 
                 /* DMA_TO_DEVICE to avoid memcpy in unmap_single */
-                do_unmap_single(hwdev, ret, size, DMA_TO_DEVICE);
+                swiotlb_tbl_unmap_single(hwdev, ret, size, DMA_TO_DEVICE);
                 return NULL;
         }
         *dma_handle = dev_addr;
@@ -617,8 +618,8 @@ swiotlb_free_coherent(struct device *hwdev, size_t size, void *vaddr,
         if (!is_swiotlb_buffer(paddr))
                 free_pages((unsigned long)vaddr, get_order(size));
         else
-                /* DMA_TO_DEVICE to avoid memcpy in unmap_single */
-                do_unmap_single(hwdev, vaddr, size, DMA_TO_DEVICE);
+                /* DMA_TO_DEVICE to avoid memcpy in swiotlb_tbl_unmap_single */
+                swiotlb_tbl_unmap_single(hwdev, vaddr, size, DMA_TO_DEVICE);
 }
 EXPORT_SYMBOL(swiotlb_free_coherent);
 
@@ -708,7 +709,7 @@ static void unmap_single(struct device *hwdev, dma_addr_t dev_addr,
         BUG_ON(dir == DMA_NONE);
 
         if (is_swiotlb_buffer(paddr)) {
-                do_unmap_single(hwdev, phys_to_virt(paddr), size, dir);
+                swiotlb_tbl_unmap_single(hwdev, phys_to_virt(paddr), size, dir);
                 return;
         }
 
@@ -751,7 +752,8 @@ swiotlb_sync_single(struct device *hwdev, dma_addr_t dev_addr,
         BUG_ON(dir == DMA_NONE);
 
         if (is_swiotlb_buffer(paddr)) {
-                sync_single(hwdev, phys_to_virt(paddr), size, dir, target);
+                swiotlb_tbl_sync_single(hwdev, phys_to_virt(paddr), size, dir,
+                                        target);
                 return;
         }
 
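
[Editor's note] The bookkeeping shared by the renamed helpers is plain slot
arithmetic over the bounce-buffer aperture, visible above in the nslots and
index computations of swiotlb_tbl_unmap_single() and swiotlb_tbl_sync_single().
A standalone sketch of that math, assuming the in-tree slot size of 2 KiB
(IO_TLB_SHIFT == 11); the aperture base, bounce address, and mapping size are
made-up sample values:

/*
 * Standalone illustration of the swiotlb slot arithmetic used above.
 * IO_TLB_SHIFT mirrors the kernel's 2 KiB slot size; the addresses
 * and size are hypothetical.
 */
#include <stdio.h>
#include <stdint.h>

#define IO_TLB_SHIFT 11
#define ALIGN(x, a)  (((x) + (a) - 1) & ~((uintptr_t)(a) - 1))

int main(void)
{
        uintptr_t io_tlb_start = 0x100000;   /* hypothetical aperture base  */
        uintptr_t dma_addr     = 0x101800;   /* hypothetical bounce address */
        size_t    size         = 5000;       /* hypothetical mapping size   */

        /* How many 2 KiB slots the mapping occupies, rounded up. */
        size_t nslots = ALIGN(size, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT;

        /* Which io_tlb_list slot the bounce address falls in. */
        size_t index = (dma_addr - io_tlb_start) >> IO_TLB_SHIFT;

        printf("nslots=%zu index=%zu\n", nslots, index);   /* nslots=3 index=3 */
        return 0;
}

With 2 KiB slots, a 5000-byte mapping rounds up to three slots, and a bounce
address 0x1800 bytes into the aperture lands in slot 3.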