aboutsummaryrefslogtreecommitdiffstats
path: root/arch/microblaze/kernel
diff options
context:
space:
mode:
authorMichal Simek <monstr@monstr.eu>2010-03-11 08:15:48 -0500
committerMichal Simek <monstr@monstr.eu>2010-03-11 08:15:48 -0500
commit1be53e084a5bd8f59850348e1066d25aa0200031 (patch)
treebd8c92ae855b4624872e364c1279bf4252dddeba /arch/microblaze/kernel
parent3a0d7a4dd5b3a6545e5764735b48ab84e64af723 (diff)
microblaze: Fix dma alloc and free coherent dma functions
We have to use the consistent-mapping code to implement the coherent DMA functions; the consistent code uses cache-inhibited page mappings. Xilinx reported a bug in MicroBlaze affecting the WB and d-cache_always configuration options. MicroBlaze 7.30.a should be the first version in which this bug is fixed. Signed-off-by: Michal Simek <monstr@monstr.eu>
Diffstat (limited to 'arch/microblaze/kernel')
-rw-r--r--arch/microblaze/kernel/dma.c17
1 file changed, 13 insertions, 4 deletions
diff --git a/arch/microblaze/kernel/dma.c b/arch/microblaze/kernel/dma.c
index fbe1e8184eff..b1084974fccd 100644
--- a/arch/microblaze/kernel/dma.c
+++ b/arch/microblaze/kernel/dma.c
@@ -43,9 +43,14 @@ static unsigned long get_dma_direct_offset(struct device *dev)
43 return PCI_DRAM_OFFSET; /* FIXME Not sure if is correct */ 43 return PCI_DRAM_OFFSET; /* FIXME Not sure if is correct */
44} 44}
45 45
46void *dma_direct_alloc_coherent(struct device *dev, size_t size, 46#define NOT_COHERENT_CACHE
47
48static void *dma_direct_alloc_coherent(struct device *dev, size_t size,
47 dma_addr_t *dma_handle, gfp_t flag) 49 dma_addr_t *dma_handle, gfp_t flag)
48{ 50{
51#ifdef NOT_COHERENT_CACHE
52 return consistent_alloc(flag, size, dma_handle);
53#else
49 void *ret; 54 void *ret;
50 struct page *page; 55 struct page *page;
51 int node = dev_to_node(dev); 56 int node = dev_to_node(dev);
@@ -61,12 +66,17 @@ void *dma_direct_alloc_coherent(struct device *dev, size_t size,
61 *dma_handle = virt_to_phys(ret) + get_dma_direct_offset(dev); 66 *dma_handle = virt_to_phys(ret) + get_dma_direct_offset(dev);
62 67
63 return ret; 68 return ret;
69#endif
64} 70}
65 71
66void dma_direct_free_coherent(struct device *dev, size_t size, 72static void dma_direct_free_coherent(struct device *dev, size_t size,
67 void *vaddr, dma_addr_t dma_handle) 73 void *vaddr, dma_addr_t dma_handle)
68{ 74{
75#ifdef NOT_COHERENT_CACHE
76 consistent_free(vaddr);
77#else
69 free_pages((unsigned long)vaddr, get_order(size)); 78 free_pages((unsigned long)vaddr, get_order(size));
79#endif
70} 80}
71 81
72static int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl, 82static int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl,
@@ -105,7 +115,6 @@ static inline dma_addr_t dma_direct_map_page(struct device *dev,
105 enum dma_data_direction direction, 115 enum dma_data_direction direction,
106 struct dma_attrs *attrs) 116 struct dma_attrs *attrs)
107{ 117{
108 BUG_ON(direction == DMA_NONE);
109 __dma_sync_page(page_to_phys(page), offset, size, direction); 118 __dma_sync_page(page_to_phys(page), offset, size, direction);
110 return page_to_phys(page) + offset + get_dma_direct_offset(dev); 119 return page_to_phys(page) + offset + get_dma_direct_offset(dev);
111} 120}
@@ -121,7 +130,7 @@ static inline void dma_direct_unmap_page(struct device *dev,
121 * phys_to_virt is here because in __dma_sync_page is __virt_to_phys and 130 * phys_to_virt is here because in __dma_sync_page is __virt_to_phys and
122 * dma_address is physical address 131 * dma_address is physical address
123 */ 132 */
124 __dma_sync_page((void *)dma_address, 0 , size, direction); 133 __dma_sync_page(dma_address, 0 , size, direction);
125} 134}
126 135
127struct dma_map_ops dma_direct_ops = { 136struct dma_map_ops dma_direct_ops = {