author     Eli Billauer <eli.billauer@gmail.com>    2011-09-11 15:43:07 -0400
committer  Michal Simek <monstr@monstr.eu>          2011-10-14 06:24:30 -0400
commit     0fb2a6f283f25731217841f961cefa0a19bd449d (patch)
tree       ed3712d2ebc0595a13dad5889eb9de8775aa9916
parent     cf560c1801e518abfe0951008c4f2df4bbb3f5e8 (diff)
microblaze: Added DMA sync operations
Added support for dma_direct_sync_single_for_*() and dma_direct_sync_sg_for_*().

Signed-off-by: Eli Billauer <eli.billauer@gmail.com>
Signed-off-by: Michal Simek <monstr@monstr.eu>
 arch/microblaze/kernel/dma.c | 60 ++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 60 insertions(+), 0 deletions(-)
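A minimal usage sketch (illustrative only, not part of this commit): a
driver lets the device fill a streaming buffer, hands it to the CPU,
then returns ownership to the device. The generic dma_sync_single_*
wrappers shown here are what dispatch to the dma_direct_sync_single_*
handlers added below; the function name and parameters are hypothetical.

    #include <linux/dma-mapping.h>

    /* Hypothetical RX-poll fragment; dev/handle/len are assumed to come
     * from an earlier dma_map_single(dev, buf, len, DMA_FROM_DEVICE). */
    static void example_rx_poll(struct device *dev, dma_addr_t handle,
                                void *buf, size_t len)
    {
            /* Hand the buffer to the CPU: the FROM_DEVICE sync makes the
             * device's writes visible to the CPU. */
            dma_sync_single_for_cpu(dev, handle, len, DMA_FROM_DEVICE);

            /* ... CPU may now read buf ... */

            /* Return ownership to the device before the next transfer. */
            dma_sync_single_for_device(dev, handle, len, DMA_FROM_DEVICE);
    }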
diff --git a/arch/microblaze/kernel/dma.c b/arch/microblaze/kernel/dma.c
index 595130bceadd..dc6416d265d6 100644
--- a/arch/microblaze/kernel/dma.c
+++ b/arch/microblaze/kernel/dma.c
@@ -118,6 +118,62 @@ static inline void dma_direct_unmap_page(struct device *dev,
 		__dma_sync(dma_address, size, direction);
 }
 
+static inline void
+dma_direct_sync_single_for_cpu(struct device *dev,
+			       dma_addr_t dma_handle, size_t size,
+			       enum dma_data_direction direction)
+{
+	/*
+	 * It's pointless to flush the cache as the memory segment
+	 * is given to the CPU
+	 */
+
+	if (direction == DMA_FROM_DEVICE)
+		__dma_sync(dma_handle, size, direction);
+}
+
+static inline void
+dma_direct_sync_single_for_device(struct device *dev,
+				  dma_addr_t dma_handle, size_t size,
+				  enum dma_data_direction direction)
+{
+	/*
+	 * It's pointless to invalidate the cache if the device isn't
+	 * supposed to write to the relevant region
+	 */
+
+	if (direction == DMA_TO_DEVICE)
+		__dma_sync(dma_handle, size, direction);
+}
+
+static inline void
+dma_direct_sync_sg_for_cpu(struct device *dev,
+			   struct scatterlist *sgl, int nents,
+			   enum dma_data_direction direction)
+{
+	struct scatterlist *sg;
+	int i;
+
+	/* FIXME this part of code is untested */
+	if (direction == DMA_FROM_DEVICE)
+		for_each_sg(sgl, sg, nents, i)
+			__dma_sync(sg->dma_address, sg->length, direction);
+}
+
+static inline void
+dma_direct_sync_sg_for_device(struct device *dev,
+			      struct scatterlist *sgl, int nents,
+			      enum dma_data_direction direction)
+{
+	struct scatterlist *sg;
+	int i;
+
+	/* FIXME this part of code is untested */
+	if (direction == DMA_TO_DEVICE)
+		for_each_sg(sgl, sg, nents, i)
+			__dma_sync(sg->dma_address, sg->length, direction);
+}
+
 struct dma_map_ops dma_direct_ops = {
 	.alloc_coherent		= dma_direct_alloc_coherent,
 	.free_coherent		= dma_direct_free_coherent,
@@ -126,6 +182,10 @@ struct dma_map_ops dma_direct_ops = {
 	.dma_supported		= dma_direct_dma_supported,
 	.map_page		= dma_direct_map_page,
 	.unmap_page		= dma_direct_unmap_page,
+	.sync_single_for_cpu		= dma_direct_sync_single_for_cpu,
+	.sync_single_for_device		= dma_direct_sync_single_for_device,
+	.sync_sg_for_cpu		= dma_direct_sync_sg_for_cpu,
+	.sync_sg_for_device		= dma_direct_sync_sg_for_device,
 };
 EXPORT_SYMBOL(dma_direct_ops);
 
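For the scatter-gather side, a similar hedged sketch (again illustrative,
with a hypothetical handler name): syncing a mapped sg list once a receive
completes, via the generic wrapper that lands in
dma_direct_sync_sg_for_cpu() on microblaze.

    #include <linux/dma-mapping.h>
    #include <linux/scatterlist.h>

    /* Hypothetical completion handler; sgl/nents are assumed to come
     * from an earlier dma_map_sg(dev, sgl, nents, DMA_FROM_DEVICE). */
    static void example_rx_complete(struct device *dev,
                                    struct scatterlist *sgl, int nents)
    {
            dma_sync_sg_for_cpu(dev, sgl, nents, DMA_FROM_DEVICE);
            /* The CPU may now safely read every segment in sgl. */
    }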