Diffstat (limited to 'arch/powerpc/kernel/dma-swiotlb.c')
-rw-r--r--  arch/powerpc/kernel/dma-swiotlb.c | 48 +----------------------------------------------
1 file changed, 1 insertion(+), 47 deletions(-)
diff --git a/arch/powerpc/kernel/dma-swiotlb.c b/arch/powerpc/kernel/dma-swiotlb.c
index 68ccf11e4f1..e8a57de85bc 100644
--- a/arch/powerpc/kernel/dma-swiotlb.c
+++ b/arch/powerpc/kernel/dma-swiotlb.c
@@ -24,50 +24,12 @@
 int swiotlb __read_mostly;
 unsigned int ppc_swiotlb_enable;
 
-void *swiotlb_bus_to_virt(struct device *hwdev, dma_addr_t addr)
-{
-	unsigned long pfn = PFN_DOWN(swiotlb_bus_to_phys(hwdev, addr));
-	void *pageaddr = page_address(pfn_to_page(pfn));
-
-	if (pageaddr != NULL)
-		return pageaddr + (addr % PAGE_SIZE);
-	return NULL;
-}
-
-dma_addr_t swiotlb_phys_to_bus(struct device *hwdev, phys_addr_t paddr)
-{
-	return paddr + get_dma_direct_offset(hwdev);
-}
-
-phys_addr_t swiotlb_bus_to_phys(struct device *hwdev, dma_addr_t baddr)
-
-{
-	return baddr - get_dma_direct_offset(hwdev);
-}
-
-/*
- * Determine if an address needs bounce buffering via swiotlb.
- * Going forward I expect the swiotlb code to generalize on using
- * a dma_ops->addr_needs_map, and this function will move from here to the
- * generic swiotlb code.
- */
-int
-swiotlb_arch_address_needs_mapping(struct device *hwdev, dma_addr_t addr,
-				   size_t size)
-{
-	struct dma_mapping_ops *dma_ops = get_dma_ops(hwdev);
-
-	BUG_ON(!dma_ops);
-	return dma_ops->addr_needs_map(hwdev, addr, size);
-}
-
 /*
  * Determine if an address is reachable by a pci device, or if we must bounce.
  */
 static int
 swiotlb_pci_addr_needs_map(struct device *hwdev, dma_addr_t addr, size_t size)
 {
-	u64 mask = dma_get_mask(hwdev);
 	dma_addr_t max;
 	struct pci_controller *hose;
 	struct pci_dev *pdev = to_pci_dev(hwdev);
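
Annotation: the helpers deleted in the hunk above implement a linear translation between CPU physical and bus addresses, shifted by a constant per-device offset. A minimal standalone sketch of that scheme, with dma_offset standing in for the kernel's get_dma_direct_offset() (the name and the example value are illustrative assumptions, not kernel API):

#include <stdint.h>

typedef uint64_t phys_addr_t;	/* CPU physical address */
typedef uint64_t dma_addr_t;	/* bus address as seen by the device */

/* Hypothetical per-device constant offset, in place of
 * get_dma_direct_offset(hwdev). */
static const uint64_t dma_offset = 0x80000000ULL;

static dma_addr_t phys_to_bus(phys_addr_t paddr)
{
	return paddr + dma_offset;	/* forward translation */
}

static phys_addr_t bus_to_phys(dma_addr_t baddr)
{
	return baddr - dma_offset;	/* exact inverse */
}
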
@@ -79,16 +41,9 @@ swiotlb_pci_addr_needs_map(struct device *hwdev, dma_addr_t addr, size_t size)
 	if ((addr + size > max) | (addr < hose->dma_window_base_cur))
 		return 1;
 
-	return !is_buffer_dma_capable(mask, addr, size);
-}
-
-static int
-swiotlb_addr_needs_map(struct device *hwdev, dma_addr_t addr, size_t size)
-{
-	return !is_buffer_dma_capable(dma_get_mask(hwdev), addr, size);
+	return 0;
 }
 
-
 /*
  * At the moment, all platforms that use this code only require
  * swiotlb to be used if we're operating on HIGHMEM. Since
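
Annotation: the hunk above strips the DMA-mask test out of the PCI hook. After this change, swiotlb_pci_addr_needs_map() only answers whether the buffer lies inside the controller's DMA window; the mask check is left to the generic swiotlb code. A self-contained sketch of the remaining range test, assuming illustrative window_base/window_size parameters in place of the pci_controller fields:

#include <stdbool.h>
#include <stdint.h>

/* Returns true when [addr, addr + size) falls outside the DMA window
 * [window_base, window_base + window_size) and must be bounced. */
static bool outside_dma_window(uint64_t addr, uint64_t size,
			       uint64_t window_base, uint64_t window_size)
{
	return addr < window_base || addr + size > window_base + window_size;
}
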
@@ -104,7 +59,6 @@ struct dma_mapping_ops swiotlb_dma_ops = {
 	.dma_supported = swiotlb_dma_supported,
 	.map_page = swiotlb_map_page,
 	.unmap_page = swiotlb_unmap_page,
-	.addr_needs_map = swiotlb_addr_needs_map,
 	.sync_single_range_for_cpu = swiotlb_sync_single_range_for_cpu,
 	.sync_single_range_for_device = swiotlb_sync_single_range_for_device,
 	.sync_sg_for_cpu = swiotlb_sync_sg_for_cpu,
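
Annotation: for context, the deleted swiotlb_addr_needs_map() reduced to negating is_buffer_dma_capable(), which compared the end of the buffer against the device's DMA mask; the generic swiotlb code performs that check itself, which is what makes the per-arch copy and the .addr_needs_map hook redundant. A sketch of the predicate, under the assumption that the contemporary helper was essentially this comparison:

#include <stdbool.h>
#include <stdint.h>

/* A buffer is directly addressable by the device when its last byte's
 * bus address does not exceed the device's DMA mask. */
static bool buffer_dma_capable(uint64_t mask, uint64_t addr, uint64_t size)
{
	return addr + size <= mask;
}
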