author		David Daney <ddaney@caviumnetworks.com>	2008-12-11 18:33:36 -0500
committer	Ralf Baechle <ralf@linux-mips.org>	2009-01-11 04:57:24 -0500
commit		843aef4930b9953c9ca624a990b201440304b56f (patch)
tree		9debbaa7d9caa8c73db65ea2674e7ed26e285893 /arch/mips
parent		ec454d8c4fee3b2feb87e594d806c0987c5dd538 (diff)
MIPS: Adjust the dma-common.c platform hooks.

We add a dev parameter to plat_unmap_dma_mem(), and hooks for
plat_dma_supported() and plat_extra_sync_for_device(), which should be
no-op changes for all existing targets.

Signed-off-by: David Daney <ddaney@caviumnetworks.com>
Signed-off-by: Ralf Baechle <ralf@linux-mips.org>
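For orientation, here is a minimal sketch of what a platform port's dma-coherence.h could look like once it actually uses the new hooks. The hook names and signatures are taken from the diff below; everything else (the "foo" platform, the 40-bit limit, the wmb() policy) is invented for illustration and is not part of this patch. Like the real mach headers, the fragment assumes the declarations (dma_addr_t, struct device, DMA_BIT_MASK) that <linux/dma-mapping.h> has already pulled in by the time it is included.

/*
 * Hypothetical <asm/mach-foo/dma-coherence.h> fragment.  Hook names and
 * signatures match this patch; the policies inside are assumptions.
 */

static inline void plat_unmap_dma_mem(struct device *dev, dma_addr_t dma_addr)
{
	/* The new dev argument lets a port consult per-device state here,
	 * e.g. a bounce-buffer pool, when tearing down a mapping. */
}

static inline int plat_dma_supported(struct device *dev, u64 mask)
{
	/* Assumption: this imaginary bus can only address 40 bits. */
	if (mask < DMA_BIT_MASK(40))
		return 0;
	return 1;
}

static inline void plat_extra_sync_for_device(struct device *dev)
{
	wmb();	/* assumed ordering barrier; a nop on all existing targets */
}

static inline int plat_dma_mapping_error(struct device *dev,
	dma_addr_t dma_addr)
{
	return 0;	/* this sketch defines no error encoding */
}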
Diffstat (limited to 'arch/mips')
-rw-r--r--  arch/mips/include/asm/mach-generic/dma-coherence.h  | 26
-rw-r--r--  arch/mips/include/asm/mach-ip27/dma-coherence.h     | 26
-rw-r--r--  arch/mips/include/asm/mach-ip32/dma-coherence.h     | 26
-rw-r--r--  arch/mips/include/asm/mach-jazz/dma-coherence.h     | 26
-rw-r--r--  arch/mips/include/asm/mach-lemote/dma-coherence.h   | 26
-rw-r--r--  arch/mips/mm/dma-default.c                          | 25
6 files changed, 135 insertions(+), 20 deletions(-)
diff --git a/arch/mips/include/asm/mach-generic/dma-coherence.h b/arch/mips/include/asm/mach-generic/dma-coherence.h
index 76e04e7feb84..36c611b6c597 100644
--- a/arch/mips/include/asm/mach-generic/dma-coherence.h
+++ b/arch/mips/include/asm/mach-generic/dma-coherence.h
@@ -28,10 +28,34 @@ static inline unsigned long plat_dma_addr_to_phys(dma_addr_t dma_addr)
 	return dma_addr;
 }
 
-static inline void plat_unmap_dma_mem(dma_addr_t dma_addr)
+static inline void plat_unmap_dma_mem(struct device *dev, dma_addr_t dma_addr)
 {
 }
 
+static inline int plat_dma_supported(struct device *dev, u64 mask)
+{
+	/*
+	 * we fall back to GFP_DMA when the mask isn't all 1s,
+	 * so we can't guarantee allocations that must be
+	 * within a tighter range than GFP_DMA..
+	 */
+	if (mask < DMA_BIT_MASK(24))
+		return 0;
+
+	return 1;
+}
+
+static inline void plat_extra_sync_for_device(struct device *dev)
+{
+	return;
+}
+
+static inline int plat_dma_mapping_error(struct device *dev,
+	dma_addr_t dma_addr)
+{
+	return 0;
+}
+
 static inline int plat_device_is_coherent(struct device *dev)
 {
 #ifdef CONFIG_DMA_COHERENT
diff --git a/arch/mips/include/asm/mach-ip27/dma-coherence.h b/arch/mips/include/asm/mach-ip27/dma-coherence.h
index ed7e6222dc15..4c21bfca10c3 100644
--- a/arch/mips/include/asm/mach-ip27/dma-coherence.h
+++ b/arch/mips/include/asm/mach-ip27/dma-coherence.h
@@ -38,10 +38,34 @@ static unsigned long plat_dma_addr_to_phys(dma_addr_t dma_addr)
 	return dma_addr & ~(0xffUL << 56);
 }
 
-static inline void plat_unmap_dma_mem(dma_addr_t dma_addr)
+static inline void plat_unmap_dma_mem(struct device *dev, dma_addr_t dma_addr)
 {
 }
 
+static inline int plat_dma_supported(struct device *dev, u64 mask)
+{
+	/*
+	 * we fall back to GFP_DMA when the mask isn't all 1s,
+	 * so we can't guarantee allocations that must be
+	 * within a tighter range than GFP_DMA..
+	 */
+	if (mask < DMA_BIT_MASK(24))
+		return 0;
+
+	return 1;
+}
+
+static inline void plat_extra_sync_for_device(struct device *dev)
+{
+	return;
+}
+
+static inline int plat_dma_mapping_error(struct device *dev,
+	dma_addr_t dma_addr)
+{
+	return 0;
+}
+
 static inline int plat_device_is_coherent(struct device *dev)
 {
 	return 1;	/* IP27 non-cohernet mode is unsupported */
diff --git a/arch/mips/include/asm/mach-ip32/dma-coherence.h b/arch/mips/include/asm/mach-ip32/dma-coherence.h
index a5511ebb2d53..7ae40f4b1c80 100644
--- a/arch/mips/include/asm/mach-ip32/dma-coherence.h
+++ b/arch/mips/include/asm/mach-ip32/dma-coherence.h
@@ -60,10 +60,34 @@ static unsigned long plat_dma_addr_to_phys(dma_addr_t dma_addr)
 	return addr;
 }
 
-static inline void plat_unmap_dma_mem(dma_addr_t dma_addr)
+static inline void plat_unmap_dma_mem(struct device *dev, dma_addr_t dma_addr)
 {
 }
 
+static inline int plat_dma_supported(struct device *dev, u64 mask)
+{
+	/*
+	 * we fall back to GFP_DMA when the mask isn't all 1s,
+	 * so we can't guarantee allocations that must be
+	 * within a tighter range than GFP_DMA..
+	 */
+	if (mask < DMA_BIT_MASK(24))
+		return 0;
+
+	return 1;
+}
+
+static inline void plat_extra_sync_for_device(struct device *dev)
+{
+	return;
+}
+
+static inline int plat_dma_mapping_error(struct device *dev,
+	dma_addr_t dma_addr)
+{
+	return 0;
+}
+
 static inline int plat_device_is_coherent(struct device *dev)
 {
 	return 0;	/* IP32 is non-cohernet */
diff --git a/arch/mips/include/asm/mach-jazz/dma-coherence.h b/arch/mips/include/asm/mach-jazz/dma-coherence.h
index d66979a124a8..1c7cd27efa7b 100644
--- a/arch/mips/include/asm/mach-jazz/dma-coherence.h
+++ b/arch/mips/include/asm/mach-jazz/dma-coherence.h
@@ -27,11 +27,35 @@ static unsigned long plat_dma_addr_to_phys(dma_addr_t dma_addr)
 	return vdma_log2phys(dma_addr);
 }
 
-static void plat_unmap_dma_mem(dma_addr_t dma_addr)
+static void plat_unmap_dma_mem(struct device *dev, dma_addr_t dma_addr)
 {
 	vdma_free(dma_addr);
 }
 
+static inline int plat_dma_supported(struct device *dev, u64 mask)
+{
+	/*
+	 * we fall back to GFP_DMA when the mask isn't all 1s,
+	 * so we can't guarantee allocations that must be
+	 * within a tighter range than GFP_DMA..
+	 */
+	if (mask < DMA_BIT_MASK(24))
+		return 0;
+
+	return 1;
+}
+
+static inline void plat_extra_sync_for_device(struct device *dev)
+{
+	return;
+}
+
+static inline int plat_dma_mapping_error(struct device *dev,
+	dma_addr_t dma_addr)
+{
+	return 0;
+}
+
 static inline int plat_device_is_coherent(struct device *dev)
 {
 	return 0;
diff --git a/arch/mips/include/asm/mach-lemote/dma-coherence.h b/arch/mips/include/asm/mach-lemote/dma-coherence.h
index 7e914777ebc4..38fad7dfe7da 100644
--- a/arch/mips/include/asm/mach-lemote/dma-coherence.h
+++ b/arch/mips/include/asm/mach-lemote/dma-coherence.h
@@ -30,10 +30,34 @@ static inline unsigned long plat_dma_addr_to_phys(dma_addr_t dma_addr)
 	return dma_addr & 0x7fffffff;
 }
 
-static inline void plat_unmap_dma_mem(dma_addr_t dma_addr)
+static inline void plat_unmap_dma_mem(struct device *dev, dma_addr_t dma_addr)
 {
 }
 
+static inline int plat_dma_supported(struct device *dev, u64 mask)
+{
+	/*
+	 * we fall back to GFP_DMA when the mask isn't all 1s,
+	 * so we can't guarantee allocations that must be
+	 * within a tighter range than GFP_DMA..
+	 */
+	if (mask < DMA_BIT_MASK(24))
+		return 0;
+
+	return 1;
+}
+
+static inline void plat_extra_sync_for_device(struct device *dev)
+{
+	return;
+}
+
+static inline int plat_dma_mapping_error(struct device *dev,
+	dma_addr_t dma_addr)
+{
+	return 0;
+}
+
 static inline int plat_device_is_coherent(struct device *dev)
 {
 	return 0;
diff --git a/arch/mips/mm/dma-default.c b/arch/mips/mm/dma-default.c
index e6708b3ad343..546e6977d4ff 100644
--- a/arch/mips/mm/dma-default.c
+++ b/arch/mips/mm/dma-default.c
@@ -111,7 +111,7 @@ EXPORT_SYMBOL(dma_alloc_coherent);
 void dma_free_noncoherent(struct device *dev, size_t size, void *vaddr,
 	dma_addr_t dma_handle)
 {
-	plat_unmap_dma_mem(dma_handle);
+	plat_unmap_dma_mem(dev, dma_handle);
 	free_pages((unsigned long) vaddr, get_order(size));
 }
 
@@ -122,7 +122,7 @@ void dma_free_coherent(struct device *dev, size_t size, void *vaddr,
 {
 	unsigned long addr = (unsigned long) vaddr;
 
-	plat_unmap_dma_mem(dma_handle);
+	plat_unmap_dma_mem(dev, dma_handle);
 
 	if (!plat_device_is_coherent(dev))
 		addr = CAC_ADDR(addr);
@@ -173,7 +173,7 @@ void dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
 		__dma_sync(dma_addr_to_virt(dma_addr), size,
 		           direction);
 
-	plat_unmap_dma_mem(dma_addr);
+	plat_unmap_dma_mem(dev, dma_addr);
 }
 
 EXPORT_SYMBOL(dma_unmap_single);
@@ -229,7 +229,7 @@ void dma_unmap_page(struct device *dev, dma_addr_t dma_address, size_t size,
 		dma_cache_wback_inv(addr, size);
 	}
 
-	plat_unmap_dma_mem(dma_address);
+	plat_unmap_dma_mem(dev, dma_address);
 }
 
 EXPORT_SYMBOL(dma_unmap_page);
@@ -249,7 +249,7 @@ void dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nhwentries,
 			if (addr)
 				__dma_sync(addr, sg->length, direction);
 		}
-		plat_unmap_dma_mem(sg->dma_address);
+		plat_unmap_dma_mem(dev, sg->dma_address);
 	}
 }
 
@@ -275,6 +275,7 @@ void dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle,
 {
 	BUG_ON(direction == DMA_NONE);
 
+	plat_extra_sync_for_device(dev);
 	if (!plat_device_is_coherent(dev)) {
 		unsigned long addr;
 
@@ -305,6 +306,7 @@ void dma_sync_single_range_for_device(struct device *dev, dma_addr_t dma_handle,
 {
 	BUG_ON(direction == DMA_NONE);
 
+	plat_extra_sync_for_device(dev);
 	if (!plat_device_is_coherent(dev)) {
 		unsigned long addr;
 
@@ -351,22 +353,14 @@ EXPORT_SYMBOL(dma_sync_sg_for_device);
 
 int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
 {
-	return 0;
+	return plat_dma_mapping_error(dev, dma_addr);
 }
 
 EXPORT_SYMBOL(dma_mapping_error);
 
 int dma_supported(struct device *dev, u64 mask)
 {
-	/*
-	 * we fall back to GFP_DMA when the mask isn't all 1s,
-	 * so we can't guarantee allocations that must be
-	 * within a tighter range than GFP_DMA..
-	 */
-	if (mask < DMA_BIT_MASK(24))
-		return 0;
-
-	return 1;
+	return plat_dma_supported(dev, mask);
 }
 
 EXPORT_SYMBOL(dma_supported);
@@ -383,6 +377,7 @@ void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
 {
 	BUG_ON(direction == DMA_NONE);
 
+	plat_extra_sync_for_device(dev);
 	if (!plat_device_is_coherent(dev))
 		__dma_sync((unsigned long)vaddr, size, direction);
 }