Diffstat (limited to 'arch/powerpc/include/asm/dma-mapping.h')
-rw-r--r--  arch/powerpc/include/asm/dma-mapping.h | 156 ++++++++++++++----------
 1 file changed, 95 insertions(+), 61 deletions(-)
diff --git a/arch/powerpc/include/asm/dma-mapping.h b/arch/powerpc/include/asm/dma-mapping.h
index fddb229bd74f..86cef7ddc8d5 100644
--- a/arch/powerpc/include/asm/dma-mapping.h
+++ b/arch/powerpc/include/asm/dma-mapping.h
@@ -60,12 +60,6 @@ struct dma_mapping_ops {
 				dma_addr_t *dma_handle, gfp_t flag);
 	void		(*free_coherent)(struct device *dev, size_t size,
 				void *vaddr, dma_addr_t dma_handle);
-	dma_addr_t	(*map_single)(struct device *dev, void *ptr,
-				size_t size, enum dma_data_direction direction,
-				struct dma_attrs *attrs);
-	void		(*unmap_single)(struct device *dev, dma_addr_t dma_addr,
-				size_t size, enum dma_data_direction direction,
-				struct dma_attrs *attrs);
 	int		(*map_sg)(struct device *dev, struct scatterlist *sg,
 				int nents, enum dma_data_direction direction,
 				struct dma_attrs *attrs);
@@ -82,6 +76,22 @@ struct dma_mapping_ops {
 				dma_addr_t dma_address, size_t size,
 				enum dma_data_direction direction,
 				struct dma_attrs *attrs);
+#ifdef CONFIG_PPC_NEED_DMA_SYNC_OPS
+	void		(*sync_single_range_for_cpu)(struct device *hwdev,
+				dma_addr_t dma_handle, unsigned long offset,
+				size_t size,
+				enum dma_data_direction direction);
+	void		(*sync_single_range_for_device)(struct device *hwdev,
+				dma_addr_t dma_handle, unsigned long offset,
+				size_t size,
+				enum dma_data_direction direction);
+	void		(*sync_sg_for_cpu)(struct device *hwdev,
+				struct scatterlist *sg, int nelems,
+				enum dma_data_direction direction);
+	void		(*sync_sg_for_device)(struct device *hwdev,
+				struct scatterlist *sg, int nelems,
+				enum dma_data_direction direction);
+#endif
 };
 
 /*
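The sync hooks added above exist only when CONFIG_PPC_NEED_DMA_SYNC_OPS is set, i.e. on platforms whose DMA is not cache-coherent and which must flush or invalidate caches around each transfer. As a rough, hypothetical sketch of what one such hook could look like on a non-coherent platform, reusing the __dma_sync()/bus_to_virt() helpers that the old inline implementations further down in this diff called directly (the my_* names are illustrative only, not part of this commit):

#include <linux/dma-mapping.h>
#include <asm/io.h>

/* Flush/invalidate only the requested window of a streaming mapping. */
static void my_sync_single_range_for_cpu(struct device *hwdev,
		dma_addr_t dma_handle, unsigned long offset, size_t size,
		enum dma_data_direction direction)
{
	__dma_sync(bus_to_virt(dma_handle) + offset, size, direction);
}

static struct dma_mapping_ops my_dma_ops = {
	/* .alloc_coherent, .map_page, .map_sg, ... elided for brevity */
#ifdef CONFIG_PPC_NEED_DMA_SYNC_OPS
	.sync_single_range_for_cpu = my_sync_single_range_for_cpu,
	/* the _for_device and the two sg hooks follow the same pattern */
#endif
};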
@@ -149,10 +159,9 @@ static inline int dma_set_mask(struct device *dev, u64 dma_mask)
 }
 
 /*
- * TODO: map_/unmap_single will ideally go away, to be completely
- * replaced by map/unmap_page.  Until then, we allow dma_ops to have
- * one or the other, or both by checking to see if the specific
- * function requested exists; and if not, falling back on the other set.
+ * map_/unmap_single actually call through to map/unmap_page now that all the
+ * dma_mapping_ops have been converted over.  We just have to get the page and
+ * offset to pass through to map_page
  */
 static inline dma_addr_t dma_map_single_attrs(struct device *dev,
 					      void *cpu_addr,
@@ -164,10 +173,6 @@ static inline dma_addr_t dma_map_single_attrs(struct device *dev,
 
 	BUG_ON(!dma_ops);
 
-	if (dma_ops->map_single)
-		return dma_ops->map_single(dev, cpu_addr, size, direction,
-					   attrs);
-
 	return dma_ops->map_page(dev, virt_to_page(cpu_addr),
 				 (unsigned long)cpu_addr % PAGE_SIZE, size,
 				 direction, attrs);
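With the map_single fallback gone, every dma_map_single_attrs() call is expressed as map_page on the page backing cpu_addr plus the address's offset within that page, as the code above shows. A small hypothetical illustration of that decomposition (assuming a lowmem buffer, where virt_to_page() is valid):

#include <linux/mm.h>
#include <linux/dma-mapping.h>

/* E.g. for cpu_addr = 0xc0012345 with 4K pages, virt_to_page() yields
 * the page covering 0xc0012000 and the intra-page offset is 0x345. */
static dma_addr_t example_map(struct device *dev,
			      struct dma_mapping_ops *ops,
			      void *cpu_addr, size_t size)
{
	struct page *page = virt_to_page(cpu_addr);
	unsigned long offset = (unsigned long)cpu_addr % PAGE_SIZE;

	return ops->map_page(dev, page, offset, size, DMA_TO_DEVICE, NULL);
}

dma_unmap_single_attrs() below takes the same route through unmap_page, which needs no translation at all since it already holds the dma_addr_t.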
@@ -183,11 +188,6 @@ static inline void dma_unmap_single_attrs(struct device *dev,
 
 	BUG_ON(!dma_ops);
 
-	if (dma_ops->unmap_single) {
-		dma_ops->unmap_single(dev, dma_addr, size, direction, attrs);
-		return;
-	}
-
 	dma_ops->unmap_page(dev, dma_addr, size, direction, attrs);
 }
 
@@ -201,12 +201,7 @@ static inline dma_addr_t dma_map_page_attrs(struct device *dev,
 
 	BUG_ON(!dma_ops);
 
-	if (dma_ops->map_page)
-		return dma_ops->map_page(dev, page, offset, size, direction,
-					 attrs);
-
-	return dma_ops->map_single(dev, page_address(page) + offset, size,
-				   direction, attrs);
+	return dma_ops->map_page(dev, page, offset, size, direction, attrs);
 }
 
 static inline void dma_unmap_page_attrs(struct device *dev,
@@ -219,12 +214,7 @@ static inline void dma_unmap_page_attrs(struct device *dev,
 
 	BUG_ON(!dma_ops);
 
-	if (dma_ops->unmap_page) {
-		dma_ops->unmap_page(dev, dma_address, size, direction, attrs);
-		return;
-	}
-
-	dma_ops->unmap_single(dev, dma_address, size, direction, attrs);
+	dma_ops->unmap_page(dev, dma_address, size, direction, attrs);
 }
 
 static inline int dma_map_sg_attrs(struct device *dev, struct scatterlist *sg,
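Note that after this change map_page and unmap_page are the only single-buffer entry points: an ops structure registered without them will now oops at the calls above rather than fall back to map_single. A hypothetical registration-time check a platform could add (illustrative only, not part of this commit):

/* Reject dma_mapping_ops that lack the now-mandatory page hooks. */
static int my_check_dma_ops(struct dma_mapping_ops *ops)
{
	if (!ops->map_page || !ops->unmap_page)
		return -EINVAL;
	return 0;
}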
@@ -308,47 +298,107 @@ static inline void dma_unmap_sg(struct device *dev, struct scatterlist *sg,
 	dma_unmap_sg_attrs(dev, sg, nhwentries, direction, NULL);
 }
 
+#ifdef CONFIG_PPC_NEED_DMA_SYNC_OPS
 static inline void dma_sync_single_for_cpu(struct device *dev,
 		dma_addr_t dma_handle, size_t size,
 		enum dma_data_direction direction)
 {
-	BUG_ON(direction == DMA_NONE);
-	__dma_sync(bus_to_virt(dma_handle), size, direction);
+	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);
+
+	BUG_ON(!dma_ops);
+	dma_ops->sync_single_range_for_cpu(dev, dma_handle, 0,
+					   size, direction);
 }
 
 static inline void dma_sync_single_for_device(struct device *dev,
 		dma_addr_t dma_handle, size_t size,
 		enum dma_data_direction direction)
 {
-	BUG_ON(direction == DMA_NONE);
-	__dma_sync(bus_to_virt(dma_handle), size, direction);
+	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);
+
+	BUG_ON(!dma_ops);
+	dma_ops->sync_single_range_for_device(dev, dma_handle,
+					      0, size, direction);
 }
 
 static inline void dma_sync_sg_for_cpu(struct device *dev,
 		struct scatterlist *sgl, int nents,
 		enum dma_data_direction direction)
 {
-	struct scatterlist *sg;
-	int i;
+	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);
 
-	BUG_ON(direction == DMA_NONE);
+	BUG_ON(!dma_ops);
+	dma_ops->sync_sg_for_cpu(dev, sgl, nents, direction);
+}
+
+static inline void dma_sync_sg_for_device(struct device *dev,
+		struct scatterlist *sgl, int nents,
+		enum dma_data_direction direction)
+{
+	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);
+
+	BUG_ON(!dma_ops);
+	dma_ops->sync_sg_for_device(dev, sgl, nents, direction);
+}
+
+static inline void dma_sync_single_range_for_cpu(struct device *dev,
+		dma_addr_t dma_handle, unsigned long offset, size_t size,
+		enum dma_data_direction direction)
+{
+	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);
 
-	for_each_sg(sgl, sg, nents, i)
-		__dma_sync_page(sg_page(sg), sg->offset, sg->length, direction);
+	BUG_ON(!dma_ops);
+	dma_ops->sync_single_range_for_cpu(dev, dma_handle,
+					   offset, size, direction);
+}
+
+static inline void dma_sync_single_range_for_device(struct device *dev,
+		dma_addr_t dma_handle, unsigned long offset, size_t size,
+		enum dma_data_direction direction)
+{
+	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);
+
+	BUG_ON(!dma_ops);
+	dma_ops->sync_single_range_for_device(dev, dma_handle, offset,
+					      size, direction);
+}
+#else /* CONFIG_PPC_NEED_DMA_SYNC_OPS */
+static inline void dma_sync_single_for_cpu(struct device *dev,
+		dma_addr_t dma_handle, size_t size,
+		enum dma_data_direction direction)
+{
+}
+
+static inline void dma_sync_single_for_device(struct device *dev,
+		dma_addr_t dma_handle, size_t size,
+		enum dma_data_direction direction)
+{
+}
+
+static inline void dma_sync_sg_for_cpu(struct device *dev,
+		struct scatterlist *sgl, int nents,
+		enum dma_data_direction direction)
+{
 }
 
 static inline void dma_sync_sg_for_device(struct device *dev,
 		struct scatterlist *sgl, int nents,
 		enum dma_data_direction direction)
 {
-	struct scatterlist *sg;
-	int i;
+}
 
-	BUG_ON(direction == DMA_NONE);
+static inline void dma_sync_single_range_for_cpu(struct device *dev,
+	dma_addr_t dma_handle, unsigned long offset, size_t size,
+	enum dma_data_direction direction)
+{
+}
 
-	for_each_sg(sgl, sg, nents, i)
-		__dma_sync_page(sg_page(sg), sg->offset, sg->length, direction);
+static inline void dma_sync_single_range_for_device(struct device *dev,
+	dma_addr_t dma_handle, unsigned long offset, size_t size,
+	enum dma_data_direction direction)
+{
 }
+#endif
 
 static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
 {
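Every dma_sync_* entry point now dispatches through the per-device ops instead of hard-coding __dma_sync(bus_to_virt(...)), which only worked where the DMA address was a simple bus offset. Driver-side usage is unchanged; a hypothetical receive path, for illustration (my_rx_complete is an invented name):

#include <linux/dma-mapping.h>

/* The device DMAs into buf; make its writes visible before the CPU reads,
 * then hand the buffer back to the device for the next transfer. */
static void my_rx_complete(struct device *dev, dma_addr_t handle,
			   unsigned char *buf, size_t len)
{
	dma_sync_single_for_cpu(dev, handle, len, DMA_FROM_DEVICE);

	/* ... CPU reads buf here ... */

	dma_sync_single_for_device(dev, handle, len, DMA_FROM_DEVICE);
}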
@@ -382,22 +432,6 @@ static inline int dma_get_cache_alignment(void)
 #endif
 }
 
-static inline void dma_sync_single_range_for_cpu(struct device *dev,
-	dma_addr_t dma_handle, unsigned long offset, size_t size,
-	enum dma_data_direction direction)
-{
-	/* just sync everything for now */
-	dma_sync_single_for_cpu(dev, dma_handle, offset + size, direction);
-}
-
-static inline void dma_sync_single_range_for_device(struct device *dev,
-	dma_addr_t dma_handle, unsigned long offset, size_t size,
-	enum dma_data_direction direction)
-{
-	/* just sync everything for now */
-	dma_sync_single_for_device(dev, dma_handle, offset + size, direction);
-}
-
 static inline void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
 		enum dma_data_direction direction)
 {
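The two range helpers deleted above were stop-gaps: as their comments admitted, they synced offset + size bytes from the start of the mapping because the old single-shot sync had no offset parameter. The per-ops hooks added earlier receive the offset directly, so a non-coherent implementation can now touch only the bytes that need it. The difference, sketched with the same __dma_sync() helper the old code used (function names invented for the comparison):

#include <asm/io.h>

/* What the deleted fallback did: sync [handle, handle + offset + size). */
static void old_range_sync(dma_addr_t handle, unsigned long offset,
			   size_t size, enum dma_data_direction dir)
{
	__dma_sync(bus_to_virt(handle), offset + size, dir);
}

/* What a hook can do now: sync only [handle + offset, handle + offset + size). */
static void new_range_sync(dma_addr_t handle, unsigned long offset,
			   size_t size, enum dma_data_direction dir)
{
	__dma_sync(bus_to_virt(handle) + offset, size, dir);
}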