author     David Daney <ddaney@caviumnetworks.com>  2010-10-01 16:27:32 -0400
committer  Ralf Baechle <ralf@linux-mips.org>       2010-10-29 14:08:31 -0400
commit     48e1fd5a81416a037f5a48120bf281102f2584e2 (patch)
tree       c5c4bd344f50493bb1d1c36d485300e9061c5aa2 /arch
parent     43e4f7ae4b4a96b5e84f6e1592d2e9353138e88c (diff)
MIPS: Convert DMA to use dma-mapping-common.h
Use asm-generic/dma-mapping-common.h to handle all DMA mapping operations
and establish a default get_dma_ops() that forwards all operations to the
existing code.

Augment dev_archdata to carry a pointer to the struct dma_map_ops, allowing
DMA operations to be overridden on a per device basis. Currently this is
never filled in, so the default dma_map_ops are used. A follow-on patch
sets this for Octeon PCI devices.

Also initialize the dma_debug system, as it is now used if it is configured.

Includes fixes by Kevin Cernekee <cernekee@gmail.com>.

Signed-off-by: David Daney <ddaney@caviumnetworks.com>
Patchwork: http://patchwork.linux-mips.org/patch/1637/
Patchwork: http://patchwork.linux-mips.org/patch/1678/
Signed-off-by: Ralf Baechle <ralf@linux-mips.org>
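[Editor's note: for illustration, this is roughly how a platform could use the
new dev_archdata hook to override the DMA operations for a single device. The
ops table and function names below are hypothetical, sketched for this note;
the real Octeon hookup only arrives in the follow-on patch.]

/* Hypothetical sketch: install custom DMA ops on one device so that
 * get_dma_ops() returns them instead of the mips_dma_map_ops default.
 * octeon_pci_dma_map_ops and example_pci_dev_init() are illustrative
 * names, not part of this patch. */
#include <linux/device.h>
#include <linux/dma-mapping.h>

extern struct dma_map_ops octeon_pci_dma_map_ops;

static void example_pci_dev_init(struct device *dev)
{
	/* Every other device keeps using the default mips_dma_map_ops. */
	dev->archdata.dma_ops = &octeon_pci_dma_map_ops;
}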
Diffstat (limited to 'arch')
-rw-r--r--  arch/mips/Kconfig                                           2
-rw-r--r--  arch/mips/include/asm/device.h                             15
-rw-r--r--  arch/mips/include/asm/dma-mapping.h                        96
-rw-r--r--  arch/mips/include/asm/mach-cavium-octeon/dma-coherence.h    2
-rw-r--r--  arch/mips/include/asm/mach-ip27/dma-coherence.h             3
-rw-r--r--  arch/mips/include/asm/mach-ip32/dma-coherence.h             3
-rw-r--r--  arch/mips/include/asm/mach-jazz/dma-coherence.h             3
-rw-r--r--  arch/mips/mm/dma-default.c                                165
8 files changed, 136 insertions, 153 deletions
diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
index 4d4744f07e5..535a08ad69b 100644
--- a/arch/mips/Kconfig
+++ b/arch/mips/Kconfig
@@ -14,6 +14,8 @@ config MIPS
 	select HAVE_KRETPROBES
 	select RTC_LIB if !MACH_LOONGSON
 	select GENERIC_ATOMIC64 if !64BIT
+	select HAVE_DMA_ATTRS
+	select HAVE_DMA_API_DEBUG
 
 menu "Machine selection"
 
diff --git a/arch/mips/include/asm/device.h b/arch/mips/include/asm/device.h
index 06746c5e809..c94fafba9e6 100644
--- a/arch/mips/include/asm/device.h
+++ b/arch/mips/include/asm/device.h
@@ -3,4 +3,17 @@
  *
  * This file is released under the GPLv2
  */
-#include <asm-generic/device.h>
+#ifndef _ASM_MIPS_DEVICE_H
+#define _ASM_MIPS_DEVICE_H
+
+struct dma_map_ops;
+
+struct dev_archdata {
+	/* DMA operations on that device */
+	struct dma_map_ops *dma_ops;
+};
+
+struct pdev_archdata {
+};
+
+#endif /* _ASM_MIPS_DEVICE_H*/
diff --git a/arch/mips/include/asm/dma-mapping.h b/arch/mips/include/asm/dma-mapping.h
index 18fbf7af8e9..655f849bd08 100644
--- a/arch/mips/include/asm/dma-mapping.h
+++ b/arch/mips/include/asm/dma-mapping.h
@@ -5,51 +5,41 @@
 #include <asm/cache.h>
 #include <asm-generic/dma-coherent.h>
 
-void *dma_alloc_noncoherent(struct device *dev, size_t size,
-	dma_addr_t *dma_handle, gfp_t flag);
+#include <dma-coherence.h>
 
-void dma_free_noncoherent(struct device *dev, size_t size,
-	void *vaddr, dma_addr_t dma_handle);
+extern struct dma_map_ops *mips_dma_map_ops;
 
-void *dma_alloc_coherent(struct device *dev, size_t size,
-	dma_addr_t *dma_handle, gfp_t flag);
+static inline struct dma_map_ops *get_dma_ops(struct device *dev)
+{
+	if (dev && dev->archdata.dma_ops)
+		return dev->archdata.dma_ops;
+	else
+		return mips_dma_map_ops;
+}
 
-void dma_free_coherent(struct device *dev, size_t size,
-	void *vaddr, dma_addr_t dma_handle);
+static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
+{
+	if (!dev->dma_mask)
+		return 0;
 
-extern dma_addr_t dma_map_single(struct device *dev, void *ptr, size_t size,
-	enum dma_data_direction direction);
-extern void dma_unmap_single(struct device *dev, dma_addr_t dma_addr,
-	size_t size, enum dma_data_direction direction);
-extern int dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
-	enum dma_data_direction direction);
-extern dma_addr_t dma_map_page(struct device *dev, struct page *page,
-	unsigned long offset, size_t size, enum dma_data_direction direction);
-
-static inline void dma_unmap_page(struct device *dev, dma_addr_t dma_address,
-	size_t size, enum dma_data_direction direction)
+	return addr + size <= *dev->dma_mask;
+}
+
+static inline void dma_mark_clean(void *addr, size_t size) {}
+
+#include <asm-generic/dma-mapping-common.h>
+
+static inline int dma_supported(struct device *dev, u64 mask)
 {
-	dma_unmap_single(dev, dma_address, size, direction);
+	struct dma_map_ops *ops = get_dma_ops(dev);
+	return ops->dma_supported(dev, mask);
 }
 
-extern void dma_unmap_sg(struct device *dev, struct scatterlist *sg,
-	int nhwentries, enum dma_data_direction direction);
-extern void dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle,
-	size_t size, enum dma_data_direction direction);
-extern void dma_sync_single_for_device(struct device *dev,
-	dma_addr_t dma_handle, size_t size, enum dma_data_direction direction);
-extern void dma_sync_single_range_for_cpu(struct device *dev,
-	dma_addr_t dma_handle, unsigned long offset, size_t size,
-	enum dma_data_direction direction);
-extern void dma_sync_single_range_for_device(struct device *dev,
-	dma_addr_t dma_handle, unsigned long offset, size_t size,
-	enum dma_data_direction direction);
-extern void dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
-	int nelems, enum dma_data_direction direction);
-extern void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
-	int nelems, enum dma_data_direction direction);
-extern int dma_mapping_error(struct device *dev, dma_addr_t dma_addr);
-extern int dma_supported(struct device *dev, u64 mask);
+static inline int dma_mapping_error(struct device *dev, u64 mask)
+{
+	struct dma_map_ops *ops = get_dma_ops(dev);
+	return ops->mapping_error(dev, mask);
+}
 
 static inline int
 dma_set_mask(struct device *dev, u64 mask)
@@ -65,4 +55,34 @@ dma_set_mask(struct device *dev, u64 mask)
 extern void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
 	enum dma_data_direction direction);
 
+static inline void *dma_alloc_coherent(struct device *dev, size_t size,
+	dma_addr_t *dma_handle, gfp_t gfp)
+{
+	void *ret;
+	struct dma_map_ops *ops = get_dma_ops(dev);
+
+	ret = ops->alloc_coherent(dev, size, dma_handle, gfp);
+
+	debug_dma_alloc_coherent(dev, size, *dma_handle, ret);
+
+	return ret;
+}
+
+static inline void dma_free_coherent(struct device *dev, size_t size,
+	void *vaddr, dma_addr_t dma_handle)
+{
+	struct dma_map_ops *ops = get_dma_ops(dev);
+
+	ops->free_coherent(dev, size, vaddr, dma_handle);
+
+	debug_dma_free_coherent(dev, size, vaddr, dma_handle);
+}
+
+
+void *dma_alloc_noncoherent(struct device *dev, size_t size,
+	dma_addr_t *dma_handle, gfp_t flag);
+
+void dma_free_noncoherent(struct device *dev, size_t size,
+	void *vaddr, dma_addr_t dma_handle);
+
 #endif /* _ASM_DMA_MAPPING_H */
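[Editor's note: with get_dma_ops(), dma_capable() and dma_mark_clean() in
place, asm-generic/dma-mapping-common.h supplies the dma_map_single(),
dma_map_page() and sync entry points. Roughly, its dma_map_single() wrapper
has the following shape -- a simplified paraphrase of the generic header of
that era, not code added by this patch.]

/* Simplified paraphrase of the asm-generic dma_map_single() wrapper:
 * resolve the ops table, map through ->map_page(), and report the
 * result to the dma-debug layer. */
static inline dma_addr_t dma_map_single_sketch(struct device *dev,
		void *ptr, size_t size, enum dma_data_direction dir)
{
	struct dma_map_ops *ops = get_dma_ops(dev);
	dma_addr_t addr;

	BUG_ON(!valid_dma_direction(dir));
	addr = ops->map_page(dev, virt_to_page(ptr),
			     (unsigned long)ptr & ~PAGE_MASK, size,
			     dir, NULL);
	debug_dma_map_page(dev, virt_to_page(ptr),
			   (unsigned long)ptr & ~PAGE_MASK, size,
			   dir, addr, true);
	return addr;
}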
diff --git a/arch/mips/include/asm/mach-cavium-octeon/dma-coherence.h b/arch/mips/include/asm/mach-cavium-octeon/dma-coherence.h
index 17d579471ec..f768f6fe712 100644
--- a/arch/mips/include/asm/mach-cavium-octeon/dma-coherence.h
+++ b/arch/mips/include/asm/mach-cavium-octeon/dma-coherence.h
@@ -27,7 +27,7 @@ static inline dma_addr_t plat_map_dma_mem(struct device *dev, void *addr,
 static inline dma_addr_t plat_map_dma_mem_page(struct device *dev,
 	struct page *page)
 {
-	return octeon_map_dma_mem(dev, page_address(page), PAGE_SIZE);
+	BUG();
 }
 
 static inline unsigned long plat_dma_addr_to_phys(struct device *dev,
diff --git a/arch/mips/include/asm/mach-ip27/dma-coherence.h b/arch/mips/include/asm/mach-ip27/dma-coherence.h
index 7aa5ef9c19b..016d0989b14 100644
--- a/arch/mips/include/asm/mach-ip27/dma-coherence.h
+++ b/arch/mips/include/asm/mach-ip27/dma-coherence.h
@@ -26,7 +26,8 @@ static inline dma_addr_t plat_map_dma_mem(struct device *dev, void *addr,
 	return pa;
 }
 
-static inline dma_addr_t plat_map_dma_mem_page(struct device *dev, struct page *page)
+static inline dma_addr_t plat_map_dma_mem_page(struct device *dev,
+	struct page *page)
 {
 	dma_addr_t pa = dev_to_baddr(dev, page_to_phys(page));
 
diff --git a/arch/mips/include/asm/mach-ip32/dma-coherence.h b/arch/mips/include/asm/mach-ip32/dma-coherence.h
index 55123fc0b2f..c8fb5aacf50 100644
--- a/arch/mips/include/asm/mach-ip32/dma-coherence.h
+++ b/arch/mips/include/asm/mach-ip32/dma-coherence.h
@@ -37,7 +37,8 @@ static inline dma_addr_t plat_map_dma_mem(struct device *dev, void *addr,
 	return pa;
 }
 
-static inline dma_addr_t plat_map_dma_mem_page(struct device *dev, struct page *page)
+static inline dma_addr_t plat_map_dma_mem_page(struct device *dev,
+	struct page *page)
 {
 	dma_addr_t pa;
 
diff --git a/arch/mips/include/asm/mach-jazz/dma-coherence.h b/arch/mips/include/asm/mach-jazz/dma-coherence.h
index 2a10920473e..302101b54ac 100644
--- a/arch/mips/include/asm/mach-jazz/dma-coherence.h
+++ b/arch/mips/include/asm/mach-jazz/dma-coherence.h
@@ -17,7 +17,8 @@ static inline dma_addr_t plat_map_dma_mem(struct device *dev, void *addr, size_t
 	return vdma_alloc(virt_to_phys(addr), size);
 }
 
-static inline dma_addr_t plat_map_dma_mem_page(struct device *dev, struct page *page)
+static inline dma_addr_t plat_map_dma_mem_page(struct device *dev,
+	struct page *page)
 {
 	return vdma_alloc(page_to_phys(page), PAGE_SIZE);
 }
diff --git a/arch/mips/mm/dma-default.c b/arch/mips/mm/dma-default.c
index 469d4019f79..4fc1a0fbe00 100644
--- a/arch/mips/mm/dma-default.c
+++ b/arch/mips/mm/dma-default.c
@@ -95,10 +95,9 @@ void *dma_alloc_noncoherent(struct device *dev, size_t size,
 
 	return ret;
 }
-
 EXPORT_SYMBOL(dma_alloc_noncoherent);
 
-void *dma_alloc_coherent(struct device *dev, size_t size,
+static void *mips_dma_alloc_coherent(struct device *dev, size_t size,
 	dma_addr_t * dma_handle, gfp_t gfp)
 {
 	void *ret;
@@ -123,7 +122,6 @@ void *dma_alloc_coherent(struct device *dev, size_t size,
 	return ret;
 }
 
-EXPORT_SYMBOL(dma_alloc_coherent);
 
 void dma_free_noncoherent(struct device *dev, size_t size, void *vaddr,
 	dma_addr_t dma_handle)
@@ -131,10 +129,9 @@ void dma_free_noncoherent(struct device *dev, size_t size, void *vaddr,
 	plat_unmap_dma_mem(dev, dma_handle, size, DMA_BIDIRECTIONAL);
 	free_pages((unsigned long) vaddr, get_order(size));
 }
-
 EXPORT_SYMBOL(dma_free_noncoherent);
 
-void dma_free_coherent(struct device *dev, size_t size, void *vaddr,
+static void mips_dma_free_coherent(struct device *dev, size_t size, void *vaddr,
 	dma_addr_t dma_handle)
 {
 	unsigned long addr = (unsigned long) vaddr;
@@ -151,8 +148,6 @@ void dma_free_coherent(struct device *dev, size_t size, void *vaddr,
 	free_pages(addr, get_order(size));
 }
 
-EXPORT_SYMBOL(dma_free_coherent);
-
 static inline void __dma_sync(unsigned long addr, size_t size,
 	enum dma_data_direction direction)
 {
@@ -174,21 +169,8 @@ static inline void __dma_sync(unsigned long addr, size_t size,
 	}
 }
 
-dma_addr_t dma_map_single(struct device *dev, void *ptr, size_t size,
-	enum dma_data_direction direction)
-{
-	unsigned long addr = (unsigned long) ptr;
-
-	if (!plat_device_is_coherent(dev))
-		__dma_sync(addr, size, direction);
-
-	return plat_map_dma_mem(dev, ptr, size);
-}
-
-EXPORT_SYMBOL(dma_map_single);
-
-void dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
-	enum dma_data_direction direction)
+static void mips_dma_unmap_page(struct device *dev, dma_addr_t dma_addr,
+	size_t size, enum dma_data_direction direction, struct dma_attrs *attrs)
 {
 	if (cpu_is_noncoherent_r10000(dev))
 		__dma_sync(dma_addr_to_virt(dev, dma_addr), size,
@@ -197,15 +179,11 @@ void dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
 	plat_unmap_dma_mem(dev, dma_addr, size, direction);
 }
 
-EXPORT_SYMBOL(dma_unmap_single);
-
-int dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
-	enum dma_data_direction direction)
+static int mips_dma_map_sg(struct device *dev, struct scatterlist *sg,
+	int nents, enum dma_data_direction direction, struct dma_attrs *attrs)
 {
 	int i;
 
-	BUG_ON(direction == DMA_NONE);
-
 	for (i = 0; i < nents; i++, sg++) {
 		unsigned long addr;
 
@@ -219,33 +197,27 @@ int dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
 	return nents;
 }
 
-EXPORT_SYMBOL(dma_map_sg);
-
-dma_addr_t dma_map_page(struct device *dev, struct page *page,
-	unsigned long offset, size_t size, enum dma_data_direction direction)
+static dma_addr_t mips_dma_map_page(struct device *dev, struct page *page,
+	unsigned long offset, size_t size, enum dma_data_direction direction,
+	struct dma_attrs *attrs)
 {
-	BUG_ON(direction == DMA_NONE);
+	unsigned long addr;
 
-	if (!plat_device_is_coherent(dev)) {
-		unsigned long addr;
+	addr = (unsigned long) page_address(page) + offset;
 
-		addr = (unsigned long) page_address(page) + offset;
+	if (!plat_device_is_coherent(dev))
 		__dma_sync(addr, size, direction);
-	}
 
-	return plat_map_dma_mem_page(dev, page) + offset;
+	return plat_map_dma_mem(dev, (void *)addr, size);
 }
 
-EXPORT_SYMBOL(dma_map_page);
-
-void dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nhwentries,
-	enum dma_data_direction direction)
+static void mips_dma_unmap_sg(struct device *dev, struct scatterlist *sg,
+	int nhwentries, enum dma_data_direction direction,
+	struct dma_attrs *attrs)
 {
 	unsigned long addr;
 	int i;
 
-	BUG_ON(direction == DMA_NONE);
-
 	for (i = 0; i < nhwentries; i++, sg++) {
 		if (!plat_device_is_coherent(dev) &&
 		    direction != DMA_TO_DEVICE) {
@@ -257,13 +229,9 @@ void dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nhwentries,
 	}
 }
 
-EXPORT_SYMBOL(dma_unmap_sg);
-
-void dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle,
-	size_t size, enum dma_data_direction direction)
+static void mips_dma_sync_single_for_cpu(struct device *dev,
+	dma_addr_t dma_handle, size_t size, enum dma_data_direction direction)
 {
-	BUG_ON(direction == DMA_NONE);
-
 	if (cpu_is_noncoherent_r10000(dev)) {
 		unsigned long addr;
 
@@ -272,13 +240,9 @@ void dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle,
 	}
 }
 
-EXPORT_SYMBOL(dma_sync_single_for_cpu);
-
-void dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle,
-	size_t size, enum dma_data_direction direction)
+static void mips_dma_sync_single_for_device(struct device *dev,
+	dma_addr_t dma_handle, size_t size, enum dma_data_direction direction)
 {
-	BUG_ON(direction == DMA_NONE);
-
 	plat_extra_sync_for_device(dev);
 	if (!plat_device_is_coherent(dev)) {
 		unsigned long addr;
@@ -288,46 +252,11 @@ void dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle,
 	}
 }
 
-EXPORT_SYMBOL(dma_sync_single_for_device);
-
-void dma_sync_single_range_for_cpu(struct device *dev, dma_addr_t dma_handle,
-	unsigned long offset, size_t size, enum dma_data_direction direction)
-{
-	BUG_ON(direction == DMA_NONE);
-
-	if (cpu_is_noncoherent_r10000(dev)) {
-		unsigned long addr;
-
-		addr = dma_addr_to_virt(dev, dma_handle);
-		__dma_sync(addr + offset, size, direction);
-	}
-}
-
-EXPORT_SYMBOL(dma_sync_single_range_for_cpu);
-
-void dma_sync_single_range_for_device(struct device *dev, dma_addr_t dma_handle,
-	unsigned long offset, size_t size, enum dma_data_direction direction)
-{
-	BUG_ON(direction == DMA_NONE);
-
-	plat_extra_sync_for_device(dev);
-	if (!plat_device_is_coherent(dev)) {
-		unsigned long addr;
-
-		addr = dma_addr_to_virt(dev, dma_handle);
-		__dma_sync(addr + offset, size, direction);
-	}
-}
-
-EXPORT_SYMBOL(dma_sync_single_range_for_device);
-
-void dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nelems,
-	enum dma_data_direction direction)
+static void mips_dma_sync_sg_for_cpu(struct device *dev,
+	struct scatterlist *sg, int nelems, enum dma_data_direction direction)
 {
 	int i;
 
-	BUG_ON(direction == DMA_NONE);
-
 	/* Make sure that gcc doesn't leave the empty loop body.  */
 	for (i = 0; i < nelems; i++, sg++) {
 		if (cpu_is_noncoherent_r10000(dev))
@@ -336,15 +265,11 @@ void dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nelems,
 	}
 }
 
-EXPORT_SYMBOL(dma_sync_sg_for_cpu);
-
-void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nelems,
-	enum dma_data_direction direction)
+static void mips_dma_sync_sg_for_device(struct device *dev,
	struct scatterlist *sg, int nelems, enum dma_data_direction direction)
 {
 	int i;
 
-	BUG_ON(direction == DMA_NONE);
-
 	/* Make sure that gcc doesn't leave the empty loop body.  */
 	for (i = 0; i < nelems; i++, sg++) {
 		if (!plat_device_is_coherent(dev))
@@ -353,24 +278,18 @@ void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nele
 	}
 }
 
-EXPORT_SYMBOL(dma_sync_sg_for_device);
-
-int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
+int mips_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
 {
 	return plat_dma_mapping_error(dev, dma_addr);
 }
 
-EXPORT_SYMBOL(dma_mapping_error);
-
-int dma_supported(struct device *dev, u64 mask)
+int mips_dma_supported(struct device *dev, u64 mask)
 {
 	return plat_dma_supported(dev, mask);
 }
 
-EXPORT_SYMBOL(dma_supported);
-
-void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
+void mips_dma_cache_sync(struct device *dev, void *vaddr, size_t size,
 	enum dma_data_direction direction)
 {
 	BUG_ON(direction == DMA_NONE);
 
@@ -379,4 +298,30 @@ void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
 		__dma_sync((unsigned long)vaddr, size, direction);
 }
 
-EXPORT_SYMBOL(dma_cache_sync);
+static struct dma_map_ops mips_default_dma_map_ops = {
+	.alloc_coherent = mips_dma_alloc_coherent,
+	.free_coherent = mips_dma_free_coherent,
+	.map_page = mips_dma_map_page,
+	.unmap_page = mips_dma_unmap_page,
+	.map_sg = mips_dma_map_sg,
+	.unmap_sg = mips_dma_unmap_sg,
+	.sync_single_for_cpu = mips_dma_sync_single_for_cpu,
+	.sync_single_for_device = mips_dma_sync_single_for_device,
+	.sync_sg_for_cpu = mips_dma_sync_sg_for_cpu,
+	.sync_sg_for_device = mips_dma_sync_sg_for_device,
+	.mapping_error = mips_dma_mapping_error,
+	.dma_supported = mips_dma_supported
+};
+
+struct dma_map_ops *mips_dma_map_ops = &mips_default_dma_map_ops;
+EXPORT_SYMBOL(mips_dma_map_ops);
+
+#define PREALLOC_DMA_DEBUG_ENTRIES (1 << 16)
+
+static int __init mips_dma_init(void)
+{
+	dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES);
+
+	return 0;
+}
+fs_initcall(mips_dma_init);
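[Editor's note: taken together, drivers keep calling the generic API
unchanged; only the plumbing underneath moves. A minimal, hypothetical
driver sequence under the converted code -- example_dma_roundtrip() is an
illustrative name, not part of this patch.]

/* Hypothetical driver snippet: dma_map_single() now comes from
 * asm-generic/dma-mapping-common.h, resolves to mips_dma_map_ops
 * (absent a per-device override in dev_archdata), lands in
 * mips_dma_map_page(), and is tracked by dma-debug thanks to the
 * new mips_dma_init() initcall. */
#include <linux/dma-mapping.h>

static int example_dma_roundtrip(struct device *dev, void *buf, size_t len)
{
	dma_addr_t handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);

	if (dma_mapping_error(dev, handle))
		return -ENOMEM;

	/* ... device performs DMA on buf here ... */

	dma_unmap_single(dev, handle, len, DMA_TO_DEVICE);
	return 0;
}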