author    Becky Bruce <becky.bruce@freescale.com>  2008-09-12 06:34:46 -0400
committer Kumar Gala <galak@kernel.crashing.org>   2008-09-24 17:26:45 -0400
commit    4fc665b88a79a45bae8bbf3a05563c27c7337c3d (patch)
tree      ca668c2fab7c3a4d62b92174f4a5fcae2625cdd1 /arch/powerpc/kernel/dma.c
parent    8fae0353247530d2124b2419052fa6120462fa99 (diff)
powerpc: Merge 32 and 64-bit dma code
We essentially adopt the 64-bit dma code, with some changes to support 32-bit systems, including HIGHMEM. dma functions on 32-bit are now invoked via accessor functions which call the correct op for a device based on archdata dma_ops. If there is no archdata dma_ops, this defaults to dma_direct_ops.

In addition, the dma_map/unmap_page functions are added to dma_ops because we can't just fall back on map/unmap_single when HIGHMEM is enabled. In the case of dma_direct_*, we stop using map/unmap_single and just use the page version - this saves a lot of ugly ifdeffing. We leave map/unmap_single in the dma_ops definition, though, because they are needed by the iommu code, which does not implement map/unmap_page. Ideally, going forward, we will completely eliminate map/unmap_single and just have map/unmap_page, if it's workable for 64-bit.

Signed-off-by: Becky Bruce <becky.bruce@freescale.com>
Signed-off-by: Kumar Gala <galak@kernel.crashing.org>
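The accessor dispatch described above has roughly the following shape. This is a minimal sketch, assuming the era's dev_archdata fields (dma_ops, dma_data) and simplifying the real dma-mapping.h header; the wrapper body and the NULL attrs argument are illustrative, not the exact code from this series:

/* Sketch: pick the per-device ops, falling back to dma_direct_ops
 * when the platform has not installed anything in archdata. */
static inline struct dma_mapping_ops *get_dma_ops(struct device *dev)
{
        if (dev && dev->archdata.dma_ops)
                return dev->archdata.dma_ops;
        return &dma_direct_ops;         /* the 32-bit default described above */
}

static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
                                      unsigned long offset, size_t size,
                                      enum dma_data_direction dir)
{
        /* A (page, offset) pair stays valid even for HIGHMEM pages that
         * have no permanent kernel virtual address, which is why map_page
         * cannot simply be implemented on top of map_single. */
        return get_dma_ops(dev)->map_page(dev, page, offset, size, dir, NULL);
}

Platforms that need an IOMMU install their own ops in archdata instead, and those ops still provide map/unmap_single, as the message above notes.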
Diffstat (limited to 'arch/powerpc/kernel/dma.c')
-rw-r--r--  arch/powerpc/kernel/dma.c | 69
1 file changed, 46 insertions(+), 23 deletions(-)
diff --git a/arch/powerpc/kernel/dma.c b/arch/powerpc/kernel/dma.c
index 124f86743bde..41fdd48bf433 100644
--- a/arch/powerpc/kernel/dma.c
+++ b/arch/powerpc/kernel/dma.c
@@ -16,21 +16,30 @@
  * This implementation supports a per-device offset that can be applied if
  * the address at which memory is visible to devices is not 0. Platform code
  * can set archdata.dma_data to an unsigned long holding the offset. By
- * default the offset is zero.
+ * default the offset is PCI_DRAM_OFFSET.
  */
 
 static unsigned long get_dma_direct_offset(struct device *dev)
 {
-	return (unsigned long)dev->archdata.dma_data;
+	if (dev)
+		return (unsigned long)dev->archdata.dma_data;
+
+	return PCI_DRAM_OFFSET;
 }
 
-static void *dma_direct_alloc_coherent(struct device *dev, size_t size,
-				       dma_addr_t *dma_handle, gfp_t flag)
+void *dma_direct_alloc_coherent(struct device *dev, size_t size,
+				dma_addr_t *dma_handle, gfp_t flag)
 {
+#ifdef CONFIG_NOT_COHERENT_CACHE
+	return __dma_alloc_coherent(size, dma_handle, flag);
+#else
 	struct page *page;
 	void *ret;
 	int node = dev_to_node(dev);
 
+	/* ignore region specifiers */
+	flag &= ~(__GFP_HIGHMEM);
+
 	page = alloc_pages_node(node, flag, get_order(size));
 	if (page == NULL)
 		return NULL;
@@ -39,27 +48,17 @@ static void *dma_direct_alloc_coherent(struct device *dev, size_t size,
 	*dma_handle = virt_to_abs(ret) + get_dma_direct_offset(dev);
 
 	return ret;
+#endif
 }
 
-static void dma_direct_free_coherent(struct device *dev, size_t size,
-				     void *vaddr, dma_addr_t dma_handle)
+void dma_direct_free_coherent(struct device *dev, size_t size,
+			      void *vaddr, dma_addr_t dma_handle)
 {
+#ifdef CONFIG_NOT_COHERENT_CACHE
+	__dma_free_coherent(size, vaddr);
+#else
 	free_pages((unsigned long)vaddr, get_order(size));
-}
-
-static dma_addr_t dma_direct_map_single(struct device *dev, void *ptr,
-					size_t size,
-					enum dma_data_direction direction,
-					struct dma_attrs *attrs)
-{
-	return virt_to_abs(ptr) + get_dma_direct_offset(dev);
-}
-
-static void dma_direct_unmap_single(struct device *dev, dma_addr_t dma_addr,
-				    size_t size,
-				    enum dma_data_direction direction,
-				    struct dma_attrs *attrs)
-{
+#endif
 }
 
 static int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl,
@@ -85,20 +84,44 @@ static void dma_direct_unmap_sg(struct device *dev, struct scatterlist *sg,
 
 static int dma_direct_dma_supported(struct device *dev, u64 mask)
 {
+#ifdef CONFIG_PPC64
 	/* Could be improved to check for memory though it better be
 	 * done via some global so platforms can set the limit in case
 	 * they have limited DMA windows
 	 */
 	return mask >= DMA_32BIT_MASK;
+#else
+	return 1;
+#endif
+}
+
+static inline dma_addr_t dma_direct_map_page(struct device *dev,
+					     struct page *page,
+					     unsigned long offset,
+					     size_t size,
+					     enum dma_data_direction dir,
+					     struct dma_attrs *attrs)
+{
+	BUG_ON(dir == DMA_NONE);
+	__dma_sync_page(page, offset, size, dir);
+	return page_to_phys(page) + offset + get_dma_direct_offset(dev);
+}
+
+static inline void dma_direct_unmap_page(struct device *dev,
+					 dma_addr_t dma_address,
+					 size_t size,
+					 enum dma_data_direction direction,
+					 struct dma_attrs *attrs)
+{
 }
 
 struct dma_mapping_ops dma_direct_ops = {
 	.alloc_coherent	= dma_direct_alloc_coherent,
 	.free_coherent	= dma_direct_free_coherent,
-	.map_single	= dma_direct_map_single,
-	.unmap_single	= dma_direct_unmap_single,
 	.map_sg		= dma_direct_map_sg,
 	.unmap_sg	= dma_direct_unmap_sg,
 	.dma_supported	= dma_direct_dma_supported,
+	.map_page	= dma_direct_map_page,
+	.unmap_page	= dma_direct_unmap_page,
 };
 EXPORT_SYMBOL(dma_direct_ops);
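As the header comment in the first hunk notes, platform code supplies the per-device offset through archdata.dma_data. A hypothetical platform fixup (the function name, the device hookup, and the 0x80000000 offset are all invented for illustration) might look like:

/* Hypothetical: on this platform, bus addresses are CPU physical
 * addresses plus 0x80000000. get_dma_direct_offset() returns this
 * value, and the dma_direct_* ops add it to every address they
 * hand back to a driver. */
static void myplat_setup_dma(struct device *dev)
{
        dev->archdata.dma_data = (void *)0x80000000ul;
        dev->archdata.dma_ops  = &dma_direct_ops;
}

Note that after this commit, a NULL device makes get_dma_direct_offset() return PCI_DRAM_OFFSET instead of dereferencing dev unconditionally.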