author	Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>	2010-05-28 11:37:10 -0400
committer	Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>	2010-06-07 11:59:27 -0400
commit	d7ef1533a90f432615d25729c2477bac9e72051d (patch)
tree	1cb27e7e3e7341b870ca94bcd38e9da1115ef11b /lib
parent	22d48269984fc93a71f65a54aa422aacf5fdb926 (diff)
swiotlb: Make swiotlb bookkeeping functions visible in the header file.
We put the functions dealing with operations on the SWIOTLB buffer in the header, make them non-static, and export them via EXPORT_SYMBOL_GPL.

See "swiotlb: swiotlb: add swiotlb_tbl_map_single library function" for the full description of the patchset.

[v2: swiotlb_sync_single_range_for_* no more. Remove usage.]

Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
Acked-by: FUJITA Tomonori <fujita.tomonori@lab.ntt.co.jp>
Tested-by: Albert Herranz <albert_herranz@yahoo.es>
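For reference, a minimal sketch of the declarations the linux/swiotlb.h header is expected to carry once these functions are no longer static. This is an assumption (the header hunk lives outside lib/ and is not part of this diff); the signatures are taken from the hunks below.

/* Sync direction for swiotlb_tbl_sync_single(); previously private to lib/swiotlb.c. */
enum dma_sync_target {
	SYNC_FOR_CPU = 0,
	SYNC_FOR_DEVICE = 1,
};

/* Copy data between the original buffer and its bounce-buffer slot. */
extern void swiotlb_bounce(phys_addr_t phys, char *dma_addr, size_t size,
			   enum dma_data_direction dir);

/* Reserve a slot in the bounce pool and (for writes) copy the data in. */
extern void *swiotlb_tbl_map_single(struct device *hwdev,
				    dma_addr_t tbl_dma_addr,
				    phys_addr_t phys, size_t size,
				    enum dma_data_direction dir);

/* Copy back (for reads) and release a previously mapped slot. */
extern void swiotlb_tbl_unmap_single(struct device *hwdev, char *dma_addr,
				     size_t size, enum dma_data_direction dir);

/* Partial sync of a mapped slot for CPU or device access. */
extern void swiotlb_tbl_sync_single(struct device *hwdev, char *dma_addr,
				    size_t size, enum dma_data_direction dir,
				    enum dma_sync_target target);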
Diffstat (limited to 'lib')
-rw-r--r--	lib/swiotlb.c	29
1 file changed, 14 insertions, 15 deletions
diff --git a/lib/swiotlb.c b/lib/swiotlb.c
index 5f60157f31d8..34e3082632d8 100644
--- a/lib/swiotlb.c
+++ b/lib/swiotlb.c
@@ -50,14 +50,6 @@
  */
 #define IO_TLB_MIN_SLABS ((1<<20) >> IO_TLB_SHIFT)
 
-/*
- * Enumeration for sync targets
- */
-enum dma_sync_target {
-	SYNC_FOR_CPU = 0,
-	SYNC_FOR_DEVICE = 1,
-};
-
 int swiotlb_force;
 
 /*
@@ -335,8 +327,8 @@ static int is_swiotlb_buffer(phys_addr_t paddr)
 /*
  * Bounce: copy the swiotlb buffer back to the original dma location
  */
-static void swiotlb_bounce(phys_addr_t phys, char *dma_addr, size_t size,
-			   enum dma_data_direction dir)
+void swiotlb_bounce(phys_addr_t phys, char *dma_addr, size_t size,
+		    enum dma_data_direction dir)
 {
 	unsigned long pfn = PFN_DOWN(phys);
 
@@ -372,6 +364,7 @@ static void swiotlb_bounce(phys_addr_t phys, char *dma_addr, size_t size,
 		memcpy(phys_to_virt(phys), dma_addr, size);
 	}
 }
+EXPORT_SYMBOL_GPL(swiotlb_bounce);
 
 void *swiotlb_tbl_map_single(struct device *hwdev, dma_addr_t tbl_dma_addr,
 			     phys_addr_t phys, size_t size,
@@ -476,6 +469,7 @@ found:
 
 	return dma_addr;
 }
+EXPORT_SYMBOL_GPL(swiotlb_tbl_map_single);
 
 /*
  * Allocates bounce buffer and returns its kernel virtual address.
@@ -493,7 +487,7 @@ map_single(struct device *hwdev, phys_addr_t phys, size_t size,
 /*
  * dma_addr is the kernel virtual address of the bounce buffer to unmap.
  */
-static void
+void
 swiotlb_tbl_unmap_single(struct device *hwdev, char *dma_addr, size_t size,
 			enum dma_data_direction dir)
 {
@@ -533,10 +527,12 @@ swiotlb_tbl_unmap_single(struct device *hwdev, char *dma_addr, size_t size,
 	}
 	spin_unlock_irqrestore(&io_tlb_lock, flags);
 }
+EXPORT_SYMBOL_GPL(swiotlb_tbl_unmap_single);
 
-static void
+void
 swiotlb_tbl_sync_single(struct device *hwdev, char *dma_addr, size_t size,
-			enum dma_data_direction dir, int target)
+			enum dma_data_direction dir,
+			enum dma_sync_target target)
 {
 	int index = (dma_addr - io_tlb_start) >> IO_TLB_SHIFT;
 	phys_addr_t phys = io_tlb_orig_addr[index];
@@ -560,6 +556,7 @@ swiotlb_tbl_sync_single(struct device *hwdev, char *dma_addr, size_t size,
 		BUG();
 	}
 }
+EXPORT_SYMBOL_GPL(swiotlb_tbl_sync_single);
 
 void *
 swiotlb_alloc_coherent(struct device *hwdev, size_t size,
@@ -748,7 +745,8 @@ EXPORT_SYMBOL_GPL(swiotlb_unmap_page);
  */
 static void
 swiotlb_sync_single(struct device *hwdev, dma_addr_t dev_addr,
-		    size_t size, enum dma_data_direction dir, int target)
+		    size_t size, enum dma_data_direction dir,
+		    enum dma_sync_target target)
 {
 	phys_addr_t paddr = dma_to_phys(hwdev, dev_addr);
 
@@ -877,7 +875,8 @@ EXPORT_SYMBOL(swiotlb_unmap_sg);
  */
 static void
 swiotlb_sync_sg(struct device *hwdev, struct scatterlist *sgl,
-		int nelems, enum dma_data_direction dir, int target)
+		int nelems, enum dma_data_direction dir,
+		enum dma_sync_target target)
 {
 	struct scatterlist *sg;
 	int i;
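With the bookkeeping helpers exported, another SWIOTLB backend (the broader patchset is aimed at reuse outside lib/swiotlb.c, e.g. by a Xen SWIOTLB layer) can drive the bounce-buffer life cycle itself. Below is a hypothetical, illustrative caller only, not part of this patch; example_bounce_cycle and start_dma_addr are made-up names, and the caller is assumed to already know the bus address of the bounce pool.

/* Assumes <linux/swiotlb.h> and <linux/dma-mapping.h>. Illustration only. */
static void example_bounce_cycle(struct device *hwdev, dma_addr_t start_dma_addr,
				 phys_addr_t phys, size_t size)
{
	/* Reserve a bounce slot for a device-to-memory transfer. */
	char *vaddr = swiotlb_tbl_map_single(hwdev, start_dma_addr, phys, size,
					     DMA_FROM_DEVICE);
	if (!vaddr)
		return;

	/* ... hand the bus address of vaddr to the device and let it DMA ... */

	/* Make the device's writes visible to the CPU before inspecting them. */
	swiotlb_tbl_sync_single(hwdev, vaddr, size, DMA_FROM_DEVICE, SYNC_FOR_CPU);

	/* Copy back to the original buffer and release the slot. */
	swiotlb_tbl_unmap_single(hwdev, vaddr, size, DMA_FROM_DEVICE);
}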