Diffstat (limited to 'arch/microblaze/kernel/dma.c')
-rw-r--r--   arch/microblaze/kernel/dma.c   82
1 files changed, 63 insertions, 19 deletions
diff --git a/arch/microblaze/kernel/dma.c b/arch/microblaze/kernel/dma.c
index b159b8a847d6..65a4af4cbbbe 100644
--- a/arch/microblaze/kernel/dma.c
+++ b/arch/microblaze/kernel/dma.c
@@ -12,7 +12,6 @@
 #include <linux/dma-debug.h>
 #include <linux/export.h>
 #include <asm/bug.h>
-#include <asm/cacheflush.h>
 
 /*
  * Generic direct DMA implementation
@@ -22,21 +21,6 @@
  * can set archdata.dma_data to an unsigned long holding the offset. By
  * default the offset is PCI_DRAM_OFFSET.
  */
-static inline void __dma_sync_page(unsigned long paddr, unsigned long offset,
-			      size_t size, enum dma_data_direction direction)
-{
-	switch (direction) {
-	case DMA_TO_DEVICE:
-	case DMA_BIDIRECTIONAL:
-		flush_dcache_range(paddr + offset, paddr + offset + size);
-		break;
-	case DMA_FROM_DEVICE:
-		invalidate_dcache_range(paddr + offset, paddr + offset + size);
-		break;
-	default:
-		BUG();
-	}
-}
 
 static unsigned long get_dma_direct_offset(struct device *dev)
 {
@@ -92,7 +76,7 @@ static int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl,
 	/* FIXME this part of code is untested */
 	for_each_sg(sgl, sg, nents, i) {
 		sg->dma_address = sg_phys(sg) + get_dma_direct_offset(dev);
-		__dma_sync_page(page_to_phys(sg_page(sg)), sg->offset,
+		__dma_sync(page_to_phys(sg_page(sg)) + sg->offset,
 							sg->length, direction);
 	}
 
@@ -117,7 +101,7 @@ static inline dma_addr_t dma_direct_map_page(struct device *dev,
 					     enum dma_data_direction direction,
 					     struct dma_attrs *attrs)
 {
-	__dma_sync_page(page_to_phys(page), offset, size, direction);
+	__dma_sync(page_to_phys(page) + offset, size, direction);
 	return page_to_phys(page) + offset + get_dma_direct_offset(dev);
 }
 
@@ -132,7 +116,63 @@ static inline void dma_direct_unmap_page(struct device *dev,
  * phys_to_virt is here because in __dma_sync_page is __virt_to_phys and
  * dma_address is physical address
  */
-	__dma_sync_page(dma_address, 0 , size, direction);
+	__dma_sync(dma_address, size, direction);
+}
+
+static inline void
+dma_direct_sync_single_for_cpu(struct device *dev,
+			       dma_addr_t dma_handle, size_t size,
+			       enum dma_data_direction direction)
+{
+	/*
+	 * It's pointless to flush the cache as the memory segment
+	 * is given to the CPU
+	 */
+
+	if (direction == DMA_FROM_DEVICE)
+		__dma_sync(dma_handle, size, direction);
+}
+
+static inline void
+dma_direct_sync_single_for_device(struct device *dev,
+			       dma_addr_t dma_handle, size_t size,
+			       enum dma_data_direction direction)
+{
+	/*
+	 * It's pointless to invalidate the cache if the device isn't
+	 * supposed to write to the relevant region
+	 */
+
+	if (direction == DMA_TO_DEVICE)
+		__dma_sync(dma_handle, size, direction);
+}
+
+static inline void
+dma_direct_sync_sg_for_cpu(struct device *dev,
+			   struct scatterlist *sgl, int nents,
+			   enum dma_data_direction direction)
+{
+	struct scatterlist *sg;
+	int i;
+
+	/* FIXME this part of code is untested */
+	if (direction == DMA_FROM_DEVICE)
+		for_each_sg(sgl, sg, nents, i)
+			__dma_sync(sg->dma_address, sg->length, direction);
+}
+
+static inline void
+dma_direct_sync_sg_for_device(struct device *dev,
+			   struct scatterlist *sgl, int nents,
+			   enum dma_data_direction direction)
+{
+	struct scatterlist *sg;
+	int i;
+
+	/* FIXME this part of code is untested */
+	if (direction == DMA_TO_DEVICE)
+		for_each_sg(sgl, sg, nents, i)
+			__dma_sync(sg->dma_address, sg->length, direction);
 }
 
 struct dma_map_ops dma_direct_ops = {
@@ -143,6 +183,10 @@ struct dma_map_ops dma_direct_ops = {
 	.dma_supported = dma_direct_dma_supported,
 	.map_page = dma_direct_map_page,
 	.unmap_page = dma_direct_unmap_page,
+	.sync_single_for_cpu = dma_direct_sync_single_for_cpu,
+	.sync_single_for_device = dma_direct_sync_single_for_device,
+	.sync_sg_for_cpu = dma_direct_sync_sg_for_cpu,
+	.sync_sg_for_device = dma_direct_sync_sg_for_device,
 };
 EXPORT_SYMBOL(dma_direct_ops);
 
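For context, not part of the commit: a minimal sketch of how a driver would exercise the sync callbacks registered above through the generic streaming-DMA API. The device pointer, buffer, and function name below are hypothetical placeholders; the assumption is only that dma_sync_single_for_cpu() dispatches to dma_direct_ops.sync_single_for_cpu, which on this port invalidates the cache only for DMA_FROM_DEVICE transfers.

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>

/*
 * Hypothetical receive path: map a kernel buffer for device writes,
 * let the device fill it, then hand it back to the CPU.  The
 * dma_sync_single_for_cpu() call ends up in
 * dma_direct_sync_single_for_cpu() above, which invalidates the cached
 * lines so the CPU sees the data the device wrote.
 */
static int example_rx(struct device *dev, void *buf, size_t len)
{
	dma_addr_t handle;

	handle = dma_map_single(dev, buf, len, DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, handle))
		return -ENOMEM;

	/* ... program the device with 'handle' and wait for completion ... */

	dma_sync_single_for_cpu(dev, handle, len, DMA_FROM_DEVICE);
	/* ... the CPU may now read the received data in 'buf' ... */

	dma_unmap_single(dev, handle, len, DMA_FROM_DEVICE);
	return 0;
}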