author		David Daney <ddaney@caviumnetworks.com>	2008-12-11 18:33:36 -0500
committer	Ralf Baechle <ralf@linux-mips.org>		2009-01-11 04:57:24 -0500
commit		843aef4930b9953c9ca624a990b201440304b56f (patch)
tree		9debbaa7d9caa8c73db65ea2674e7ed26e285893 /arch/mips/mm
parent		ec454d8c4fee3b2feb87e594d806c0987c5dd538 (diff)
MIPS: Adjust the dma-common.c platform hooks.
We add a dev parameter to plat_unmap_dma_mem(), and add hooks for
plat_dma_supported(), plat_extra_sync_for_device() and
plat_dma_mapping_error(); these should be no-op changes for all
existing targets.
Signed-off-by: David Daney <ddaney@caviumnetworks.com>
Signed-off-by: Ralf Baechle <ralf@linux-mips.org>
Diffstat (limited to 'arch/mips/mm')
-rw-r--r--	arch/mips/mm/dma-default.c	| 25 ++++++++++---------------
1 file changed, 10 insertions(+), 15 deletions(-)
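For existing targets the new hooks are meant to be no-ops. A minimal
sketch of what the generic fallback implementations could look like is
below; the header location (asm/mach-generic/dma-coherence.h) and the
exact bodies are assumptions inferred from the commit message, since
this view is limited to arch/mips/mm:

static inline void plat_unmap_dma_mem(struct device *dev,
	dma_addr_t dma_addr)
{
	/*
	 * Generic targets have nothing to tear down; the new dev
	 * argument simply gives platforms that need it access to
	 * the device.
	 */
}

static inline void plat_extra_sync_for_device(struct device *dev)
{
	/* No additional sync work needed on conventional targets. */
}

static inline int plat_dma_mapping_error(struct device *dev,
	dma_addr_t dma_addr)
{
	/* Matches the old dma_mapping_error(), which always returned 0. */
	return 0;
}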
diff --git a/arch/mips/mm/dma-default.c b/arch/mips/mm/dma-default.c
index e6708b3ad343..546e6977d4ff 100644
--- a/arch/mips/mm/dma-default.c
+++ b/arch/mips/mm/dma-default.c
@@ -111,7 +111,7 @@ EXPORT_SYMBOL(dma_alloc_coherent);
 void dma_free_noncoherent(struct device *dev, size_t size, void *vaddr,
 	dma_addr_t dma_handle)
 {
-	plat_unmap_dma_mem(dma_handle);
+	plat_unmap_dma_mem(dev, dma_handle);
 	free_pages((unsigned long) vaddr, get_order(size));
 }
 
@@ -122,7 +122,7 @@ void dma_free_coherent(struct device *dev, size_t size, void *vaddr,
 {
 	unsigned long addr = (unsigned long) vaddr;
 
-	plat_unmap_dma_mem(dma_handle);
+	plat_unmap_dma_mem(dev, dma_handle);
 
 	if (!plat_device_is_coherent(dev))
 		addr = CAC_ADDR(addr);
@@ -173,7 +173,7 @@ void dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
 		__dma_sync(dma_addr_to_virt(dma_addr), size,
 		           direction);
 
-	plat_unmap_dma_mem(dma_addr);
+	plat_unmap_dma_mem(dev, dma_addr);
 }
 
 EXPORT_SYMBOL(dma_unmap_single);
@@ -229,7 +229,7 @@ void dma_unmap_page(struct device *dev, dma_addr_t dma_address, size_t size,
 		dma_cache_wback_inv(addr, size);
 	}
 
-	plat_unmap_dma_mem(dma_address);
+	plat_unmap_dma_mem(dev, dma_address);
 }
 
 EXPORT_SYMBOL(dma_unmap_page);
@@ -249,7 +249,7 @@ void dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nhwentries,
 			if (addr)
 				__dma_sync(addr, sg->length, direction);
 		}
-		plat_unmap_dma_mem(sg->dma_address);
+		plat_unmap_dma_mem(dev, sg->dma_address);
 	}
 }
 
@@ -275,6 +275,7 @@ void dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle,
 {
 	BUG_ON(direction == DMA_NONE);
 
+	plat_extra_sync_for_device(dev);
 	if (!plat_device_is_coherent(dev)) {
 		unsigned long addr;
 
@@ -305,6 +306,7 @@ void dma_sync_single_range_for_device(struct device *dev, dma_addr_t dma_handle,
 {
 	BUG_ON(direction == DMA_NONE);
 
+	plat_extra_sync_for_device(dev);
 	if (!plat_device_is_coherent(dev)) {
 		unsigned long addr;
 
@@ -351,22 +353,14 @@ EXPORT_SYMBOL(dma_sync_sg_for_device);
 
 int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
 {
-	return 0;
+	return plat_dma_mapping_error(dev, dma_addr);
 }
 
 EXPORT_SYMBOL(dma_mapping_error);
 
 int dma_supported(struct device *dev, u64 mask)
 {
-	/*
-	 * we fall back to GFP_DMA when the mask isn't all 1s,
-	 * so we can't guarantee allocations that must be
-	 * within a tighter range than GFP_DMA..
-	 */
-	if (mask < DMA_BIT_MASK(24))
-		return 0;
-
-	return 1;
+	return plat_dma_supported(dev, mask);
 }
 
 EXPORT_SYMBOL(dma_supported);
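Note that the GFP_DMA mask check deleted above is not lost: for this to
be a no-op change on existing targets, the generic plat_dma_supported()
would carry the same logic, roughly as follows (again a sketch under the
same header assumption):

static inline int plat_dma_supported(struct device *dev, u64 mask)
{
	/*
	 * We fall back to GFP_DMA when the mask isn't all 1s, so we
	 * can't guarantee allocations that must be within a tighter
	 * range than GFP_DMA.
	 */
	if (mask < DMA_BIT_MASK(24))
		return 0;

	return 1;
}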
@@ -383,6 +377,7 @@ void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
 {
 	BUG_ON(direction == DMA_NONE);
 
+	plat_extra_sync_for_device(dev);
 	if (!plat_device_is_coherent(dev))
 		__dma_sync((unsigned long)vaddr, size, direction);
 }