author		FUJITA Tomonori <fujita.tomonori@lab.ntt.co.jp>	2010-05-10 15:14:54 -0400
committer	Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>	2010-06-07 11:59:13 -0400
commit		eb605a5754d050a25a9f00d718fb173f24c486ef (patch)
tree		32676152d490e886576206c7de51457560bfbd3b /lib/swiotlb.c
parent		ad8456361fa19068cf49b50a4f98e41b73c08e76 (diff)
swiotlb: add swiotlb_tbl_map_single library function
swiotlb_tbl_map_single() takes the dma address of the iotlb instead of
using swiotlb_virt_to_bus().

[v2: changed swiotlb_tlb to swiotlb_tbl]
[v3: changed u64 to dma_addr_t]

This patch:

This is a set of patches that separate the address translation
(virt_to_phys, virt_to_bus, etc.) and the allocation of the SWIOTLB buffer
from the SWIOTLB library.

The idea behind this set of patches is to make it possible to have separate
mechanisms for translating virtual to physical or virtual to DMA addresses
on platforms which need an SWIOTLB, and where physical != PCI bus address,
and also to allocate the core IOTLB memory outside SWIOTLB.

One customer of this is the pv-ops project, which can switch between
different modes of operation depending on the environment it is running in:
bare-metal or virtualized (Xen for now). Another is the Wii DMA, used to
implement the MEM2 DMA facility needed by its EHCI controller (for details:
http://lkml.org/lkml/2010/5/18/303).

On bare-metal, SWIOTLB is used when there is no hardware IOMMU. In a
virtualized environment it is used when PCI pass-through is enabled for
the guest. The problem with PCI pass-through is that the guest's idea of
PFNs is not the real thing. To fix that, there is a translation layer for
PFN -> machine frame number and vice versa. To bubble that up to the
SWIOTLB layer there are two possible solutions.

One solution has been to wholesale copy the SWIOTLB, stick it in
arch/x86/xen/swiotlb.c and modify the virt_to_phys, phys_to_virt and other
functions to use the Xen address translation functions. Unfortunately,
since the kernel can also run on bare-metal, there would be a big code
overlap with the real SWIOTLB.
(git://git.kernel.org/pub/scm/linux/kernel/git/jeremy/xen.git xen/dom0/swiotlb-new)

Another approach, which this set of patches explores, is to abstract the
address translation and address determination functions away from the
SWIOTLB book-keeping functions. This way the core SWIOTLB library functions
are present in one place, while the address-related functions are in a
separate library that can be loaded when running on a non-bare-metal
platform.

Changelog:
Since the last posting [v8.2] Konrad has done:
- Added this changelog to the patch and referenced this description in the
  other patches.
- Changed 'enum dma_data_direction direction' to 'enum dma.. dir' so as to
  be unified.

[v8-v8.2 changes:]
- Rolled up the last two patches into one.
- Rebased against Linus' latest. That meant dealing with the
  swiotlb_sync_single_range_* changes.
- Added Acked-by: Fujita Tomonori and Tested-by: Albert Herranz.

[v7-v8 changes:]
- Minimized the list of exported functions.
- Integrated Fujita's patches and changed "swiotlb_tlb" to "swiotlb_tbl"
  in them.

[v6-v7 changes:]
- Minimized the number of exported functions/variables, using the prefix
  "swiotlb_tbl".
- Made the usage of 'int dir' be 'enum dma_data_direction'.

[v5-v6 changes:]
- Made the exported functions/variables have the 'swiotlb_bk' prefix.
- Dropped the checkpatch/other reworks.

Signed-off-by: FUJITA Tomonori <fujita.tomonori@lab.ntt.co.jp>
Reviewed-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
Tested-by: Albert Herranz <albert_herranz@yahoo.es>
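For illustration only, a minimal sketch of the calling convention this hook
is meant to enable: a platform where physical != bus address computes the
IOTLB's bus address with its own translation helper and hands it to
swiotlb_tbl_map_single(), instead of the core calling swiotlb_virt_to_bus()
itself. The names xen_map_single() and xen_phys_to_bus() are hypothetical,
and direct access to io_tlb_start from outside lib/swiotlb.c is assumed for
brevity; none of this code is part of the commit.

/*
 * Hypothetical platform-side wrapper (not from this commit).  The platform
 * supplies its own physical-to-bus translation for the IOTLB start address
 * and lets swiotlb_tbl_map_single() do the slot book-keeping.
 */
static void *
xen_map_single(struct device *hwdev, phys_addr_t phys, size_t size, int dir)
{
	/* xen_phys_to_bus() is an assumed helper, not defined here. */
	dma_addr_t tbl_dma_addr = xen_phys_to_bus(virt_to_phys(io_tlb_start));

	return swiotlb_tbl_map_single(hwdev, tbl_dma_addr, phys, size, dir);
}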
Diffstat (limited to 'lib/swiotlb.c')
-rw-r--r--	lib/swiotlb.c	25
1 file changed, 17 insertions(+), 8 deletions(-)
diff --git a/lib/swiotlb.c b/lib/swiotlb.c
index a009055140ec..783aff00024c 100644
--- a/lib/swiotlb.c
+++ b/lib/swiotlb.c
@@ -361,25 +361,22 @@ static void swiotlb_bounce(phys_addr_t phys, char *dma_addr, size_t size,
 	}
 }
 
-/*
- * Allocates bounce buffer and returns its kernel virtual address.
- */
-static void *
-map_single(struct device *hwdev, phys_addr_t phys, size_t size, int dir)
+void *swiotlb_tbl_map_single(struct device *hwdev, dma_addr_t tbl_dma_addr,
+			     phys_addr_t phys, size_t size, int dir)
 {
 	unsigned long flags;
 	char *dma_addr;
 	unsigned int nslots, stride, index, wrap;
 	int i;
-	unsigned long start_dma_addr;
 	unsigned long mask;
 	unsigned long offset_slots;
 	unsigned long max_slots;
 
 	mask = dma_get_seg_boundary(hwdev);
-	start_dma_addr = swiotlb_virt_to_bus(hwdev, io_tlb_start) & mask;
 
-	offset_slots = ALIGN(start_dma_addr, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT;
+	tbl_dma_addr &= mask;
+
+	offset_slots = ALIGN(tbl_dma_addr, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT;
 
 	/*
 	 * Carefully handle integer overflow which can occur when mask == ~0UL.
@@ -468,6 +465,18 @@ found:
 }
 
 /*
+ * Allocates bounce buffer and returns its kernel virtual address.
+ */
+
+static void *
+map_single(struct device *hwdev, phys_addr_t phys, size_t size, int dir)
+{
+	dma_addr_t start_dma_addr = swiotlb_virt_to_bus(hwdev, io_tlb_start);
+
+	return swiotlb_tbl_map_single(hwdev, start_dma_addr, phys, size, dir);
+}
+
+/*
  * dma_addr is the kernel virtual address of the bounce buffer to unmap.
  */
 static void
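As a side note, a standalone user-space sketch of the slot-offset arithmetic
performed in the first hunk above: the caller-supplied IOTLB bus address is
masked by the device's DMA segment boundary and converted into an offset in
2 KiB slots (IO_TLB_SHIFT is 11 in the kernel). The address and mask below
are made-up example values, not taken from the commit, and the ALIGN macro
is a local stand-in for the kernel's.

/* Standalone sketch of the offset_slots computation above. */
#include <stdio.h>
#include <stdint.h>

#define IO_TLB_SHIFT	11	/* 2 KiB per IOTLB slot, as in the kernel */
#define ALIGN(x, a)	(((x) + (a) - 1) & ~((uint64_t)(a) - 1))

int main(void)
{
	uint64_t tbl_dma_addr = 0x12345678ULL;	/* example IOTLB bus address */
	uint64_t mask = 0xffffffffULL;		/* example dma_get_seg_boundary() value */
	uint64_t offset_slots;

	tbl_dma_addr &= mask;
	offset_slots = ALIGN(tbl_dma_addr, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT;

	printf("offset_slots = %llu\n", (unsigned long long)offset_slots);
	return 0;
}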