author     Magnus Damm <magnus.damm@gmail.com>   2008-01-24 22:42:48 -0500
committer  Paul Mundt <lethal@linux-sh.org>      2008-02-14 00:22:07 -0500
commit     2a3eeba88f935b200245d1536b99cd4b7eec1d4a
tree       ac201b3def115a6c695b317a968743a28bf86681 /arch/sh/mm
parent     e760e716d47b48caf98da348368fd41b4a9b9e7e
sh: declared coherent memory support V2 fix
This patch fixes the recently introduced declared coherent memory support.
Without this fix, dma_alloc_coherent() returns a cached memory area unless
dma_declare_coherent_memory() has set up a separate area.
This patch makes sure an uncached memory area is returned. With this patch
it is now possible to ping through an rtl8139 interface on r2d-plus.
Signed-off-by: Magnus Damm <damm@igel.co.jp>
Signed-off-by: Paul Mundt <lethal@linux-sh.org>
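
For context: dma_declare_coherent_memory() lets board code hand a device its
own dedicated coherent region, and dma_alloc_coherent() then carves
allocations out of that area instead of going through the page allocator, so
that path was unaffected by the bug. Below is a minimal, hypothetical sketch
of such a setup against the 2.6.24-era API; the device, the bus address
0xab000000, and the 1 MiB size are illustrative assumptions, not values from
this patch.

    #include <linux/dma-mapping.h>

    /*
     * Hypothetical board setup: declare a dedicated coherent area for a
     * device so dma_alloc_coherent() never falls back to the page
     * allocator.  Address and size are made up for this sketch.
     */
    static int board_declare_dma_area(struct device *dev)
    {
    	/* returns nonzero on success, 0 on failure in this era */
    	if (!dma_declare_coherent_memory(dev, 0xab000000, 0xab000000,
    					 1 << 20, DMA_MEMORY_MAP))
    		return -ENOMEM;

    	return 0;
    }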
Diffstat (limited to 'arch/sh/mm')
 arch/sh/mm/consistent.c | 32 ++++++++++++++++++++------------
 1 file changed, 20 insertions(+), 12 deletions(-)
diff --git a/arch/sh/mm/consistent.c b/arch/sh/mm/consistent.c
index 7b2131c9eeda..d3c33fc5b1c2 100644
--- a/arch/sh/mm/consistent.c
+++ b/arch/sh/mm/consistent.c
@@ -26,7 +26,7 @@ struct dma_coherent_mem {
 void *dma_alloc_coherent(struct device *dev, size_t size,
 			   dma_addr_t *dma_handle, gfp_t gfp)
 {
-	void *ret;
+	void *ret, *ret_nocache;
 	struct dma_coherent_mem *mem = dev ? dev->dma_mem : NULL;
 	int order = get_order(size);
 
@@ -44,17 +44,24 @@ void *dma_alloc_coherent(struct device *dev, size_t size,
 	}
 
 	ret = (void *)__get_free_pages(gfp, order);
-
-	if (ret != NULL) {
-		memset(ret, 0, size);
-		/*
-		 * Pages from the page allocator may have data present in
-		 * cache. So flush the cache before using uncached memory.
-		 */
-		dma_cache_sync(NULL, ret, size, DMA_BIDIRECTIONAL);
-		*dma_handle = virt_to_phys(ret);
+	if (!ret)
+		return NULL;
+
+	memset(ret, 0, size);
+	/*
+	 * Pages from the page allocator may have data present in
+	 * cache. So flush the cache before using uncached memory.
+	 */
+	dma_cache_sync(dev, ret, size, DMA_BIDIRECTIONAL);
+
+	ret_nocache = ioremap_nocache(virt_to_phys(ret), size);
+	if (!ret_nocache) {
+		free_pages((unsigned long)ret, order);
+		return NULL;
 	}
-	return ret;
+
+	*dma_handle = virt_to_phys(ret);
+	return ret_nocache;
 }
 EXPORT_SYMBOL(dma_alloc_coherent);
 
@@ -71,7 +78,8 @@ void dma_free_coherent(struct device *dev, size_t size,
 	} else {
 		WARN_ON(irqs_disabled());	/* for portability */
 		BUG_ON(mem && mem->flags & DMA_MEMORY_EXCLUSIVE);
-		free_pages((unsigned long)vaddr, order);
+		free_pages((unsigned long)phys_to_virt(dma_handle), order);
+		iounmap(vaddr);
 	}
 }
 EXPORT_SYMBOL(dma_free_coherent);
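
After this change, the pointer handed back by dma_alloc_coherent() is an
ioremap_nocache() mapping rather than the page allocator's cached virtual
address, which is why dma_free_coherent() now recovers the backing pages from
dma_handle via phys_to_virt() and iounmap()s vaddr. The caller-visible
contract is unchanged; here is a driver-side sketch of that contract, with
all names and sizes hypothetical:

    #include <linux/dma-mapping.h>

    #define EXAMPLE_RING_BYTES 4096		/* illustrative size */

    static void *ring;			/* CPU view, now uncached */
    static dma_addr_t ring_dma;		/* device/bus view */

    /* Allocate a DMA descriptor ring; CPU stores are visible to the
     * device without explicit cache flushes. */
    static int example_ring_alloc(struct device *dev)
    {
    	ring = dma_alloc_coherent(dev, EXAMPLE_RING_BYTES, &ring_dma,
    				  GFP_KERNEL);
    	if (!ring)
    		return -ENOMEM;

    	return 0;
    }

    /* Pass back the same dma_handle: the SH implementation uses it to
     * locate the original page allocation before unmapping vaddr. */
    static void example_ring_free(struct device *dev)
    {
    	dma_free_coherent(dev, EXAMPLE_RING_BYTES, ring, ring_dma);
    }

Passing the same dma_handle back to dma_free_coherent() matters here: with
this patch, vaddr no longer points into the page allocator's address range,
so the handle is the only way to find the pages to free.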