author		Becky Bruce <becky.bruce@freescale.com>	2008-11-20 01:49:16 -0500
committer	Paul Mackerras <paulus@samba.org>	2008-12-03 04:46:36 -0500
commit		15e09c0ecaab4a1b4a7ed69db536c38948b92279 (patch)
tree		9cf8f2cbfe002e220db4d001d08fbc50bfaf4433 /arch/powerpc/include
parent		c4d04be11f99cc9ce4e3801a5da235727db704a9 (diff)
powerpc: Add sync_*_for_* to dma_ops
We need to swap these out once we start using swiotlb, so add them to dma_ops. Create CONFIG_PPC_NEED_DMA_SYNC_OPS Kconfig option; this is currently enabled automatically if we're CONFIG_NOT_COHERENT_CACHE. In the future, this will also be enabled for builds that need swiotlb.

If PPC_NEED_DMA_SYNC_OPS is not defined, the dma_sync_*_for_* ops compile to nothing. Otherwise, they access the dma_ops pointers for the sync ops.

This patch also changes dma_sync_single_range_* to actually sync the range - previously it was using a generous dma_sync_single. dma_sync_single_* is now implemented as a dma_sync_single_range with an offset of 0.

Signed-off-by: Becky Bruce <becky.bruce@freescale.com>
Signed-off-by: Paul Mackerras <paulus@samba.org>
Diffstat (limited to 'arch/powerpc/include')
-rw-r--r--	arch/powerpc/include/asm/dma-mapping.h	93
1 file changed, 62 insertions(+), 31 deletions(-)
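
For orientation before the diff: with these hooks added to dma_mapping_ops, a platform backend that needs explicit synchronization (a non-coherent-cache implementation today, swiotlb later) can supply its own sync callbacks instead of the previously hard-wired __dma_sync() path. The following is a minimal sketch, not part of this patch; the backend and the my_sync_* callback names are hypothetical, and only the four sync members this patch introduces are shown.

/* Hypothetical backend sketch -- illustration only.  Assumes a
 * CONFIG_PPC_NEED_DMA_SYNC_OPS=y build so the sync members exist. */
#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>

static void my_sync_range_for_cpu(struct device *hwdev, dma_addr_t dma_handle,
		unsigned long offset, size_t size,
		enum dma_data_direction direction)
{
	/* Make the [offset, offset + size) window of the mapping safe for
	 * CPU reads, e.g. by invalidating the relevant cache lines. */
}

static void my_sync_range_for_device(struct device *hwdev, dma_addr_t dma_handle,
		unsigned long offset, size_t size,
		enum dma_data_direction direction)
{
	/* Push CPU-side writes in the same window out so the device sees them. */
}

static void my_sync_sg_for_cpu(struct device *hwdev, struct scatterlist *sg,
		int nelems, enum dma_data_direction direction)
{
	/* Walk the scatterlist and sync each element for CPU access. */
}

static void my_sync_sg_for_device(struct device *hwdev, struct scatterlist *sg,
		int nelems, enum dma_data_direction direction)
{
	/* Walk the scatterlist and sync each element for device access. */
}

static struct dma_mapping_ops my_dma_ops = {
	/* allocation and mapping callbacks omitted for brevity */
	.sync_single_range_for_cpu	= my_sync_range_for_cpu,
	.sync_single_range_for_device	= my_sync_range_for_device,
	.sync_sg_for_cpu		= my_sync_sg_for_cpu,
	.sync_sg_for_device		= my_sync_sg_for_device,
};
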
diff --git a/arch/powerpc/include/asm/dma-mapping.h b/arch/powerpc/include/asm/dma-mapping.h
index 3c4a2c21d606..9063184fa6fe 100644
--- a/arch/powerpc/include/asm/dma-mapping.h
+++ b/arch/powerpc/include/asm/dma-mapping.h
@@ -76,6 +76,22 @@ struct dma_mapping_ops {
 				dma_addr_t dma_address, size_t size,
 				enum dma_data_direction direction,
 				struct dma_attrs *attrs);
+#ifdef CONFIG_PPC_NEED_DMA_SYNC_OPS
+	void		(*sync_single_range_for_cpu)(struct device *hwdev,
+				dma_addr_t dma_handle, unsigned long offset,
+				size_t size,
+				enum dma_data_direction direction);
+	void		(*sync_single_range_for_device)(struct device *hwdev,
+				dma_addr_t dma_handle, unsigned long offset,
+				size_t size,
+				enum dma_data_direction direction);
+	void		(*sync_sg_for_cpu)(struct device *hwdev,
+				struct scatterlist *sg, int nelems,
+				enum dma_data_direction direction);
+	void		(*sync_sg_for_device)(struct device *hwdev,
+				struct scatterlist *sg, int nelems,
+				enum dma_data_direction direction);
+#endif
 };
 
 /*
@@ -282,47 +298,78 @@ static inline void dma_unmap_sg(struct device *dev, struct scatterlist *sg,
 	dma_unmap_sg_attrs(dev, sg, nhwentries, direction, NULL);
 }
 
+#ifdef CONFIG_PPC_NEED_DMA_SYNC_OPS
 static inline void dma_sync_single_for_cpu(struct device *dev,
 		dma_addr_t dma_handle, size_t size,
 		enum dma_data_direction direction)
 {
-	BUG_ON(direction == DMA_NONE);
-	__dma_sync(bus_to_virt(dma_handle), size, direction);
+	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);
+
+	BUG_ON(!dma_ops);
+	dma_ops->sync_single_range_for_cpu(dev, dma_handle, 0,
+					   size, direction);
 }
 
 static inline void dma_sync_single_for_device(struct device *dev,
 		dma_addr_t dma_handle, size_t size,
 		enum dma_data_direction direction)
 {
-	BUG_ON(direction == DMA_NONE);
-	__dma_sync(bus_to_virt(dma_handle), size, direction);
+	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);
+
+	BUG_ON(!dma_ops);
+	dma_ops->sync_single_range_for_device(dev, dma_handle,
+					      0, size, direction);
 }
 
 static inline void dma_sync_sg_for_cpu(struct device *dev,
 		struct scatterlist *sgl, int nents,
 		enum dma_data_direction direction)
 {
-	struct scatterlist *sg;
-	int i;
-
-	BUG_ON(direction == DMA_NONE);
+	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);
 
-	for_each_sg(sgl, sg, nents, i)
-		__dma_sync_page(sg_page(sg), sg->offset, sg->length, direction);
+	BUG_ON(!dma_ops);
+	dma_ops->sync_sg_for_cpu(dev, sgl, nents, direction);
 }
 
 static inline void dma_sync_sg_for_device(struct device *dev,
 		struct scatterlist *sgl, int nents,
 		enum dma_data_direction direction)
 {
-	struct scatterlist *sg;
-	int i;
+	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);
 
-	BUG_ON(direction == DMA_NONE);
+	BUG_ON(!dma_ops);
+	dma_ops->sync_sg_for_device(dev, sgl, nents, direction);
+}
+
+static inline void dma_sync_single_range_for_cpu(struct device *dev,
+		dma_addr_t dma_handle, unsigned long offset, size_t size,
+		enum dma_data_direction direction)
+{
+	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);
 
-	for_each_sg(sgl, sg, nents, i)
-		__dma_sync_page(sg_page(sg), sg->offset, sg->length, direction);
+	BUG_ON(!dma_ops);
+	dma_ops->sync_single_range_for_cpu(dev, dma_handle,
+			offset, size, direction);
+}
+
+static inline void dma_sync_single_range_for_device(struct device *dev,
+		dma_addr_t dma_handle, unsigned long offset, size_t size,
+		enum dma_data_direction direction)
+{
+	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);
+
+	BUG_ON(!dma_ops);
+	dma_ops->sync_single_range_for_device(dev, dma_handle, offset,
+			size, direction);
 }
+#else /* CONFIG_PPC_NEED_DMA_SYNC_OPS */
+#define dma_sync_single_for_cpu(d, h, s, dir) ((void)0)
+#define dma_sync_single_for_device(d, h, s, dir) ((void)0)
+#define dma_sync_single_range_for_cpu(d, h, o, s, dir) ((void)0)
+#define dma_sync_single_range_for_device(d, h, o, s, dir) ((void)0)
+#define dma_sync_sg_for_cpu(d, s, n, dir) ((void)0)
+#define dma_sync_sg_for_device(d, s, n, dir) ((void)0)
+#endif
 
 static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
 {
@@ -356,22 +403,6 @@ static inline int dma_get_cache_alignment(void)
 #endif
 }
 
-static inline void dma_sync_single_range_for_cpu(struct device *dev,
-		dma_addr_t dma_handle, unsigned long offset, size_t size,
-		enum dma_data_direction direction)
-{
-	/* just sync everything for now */
-	dma_sync_single_for_cpu(dev, dma_handle, offset + size, direction);
-}
-
-static inline void dma_sync_single_range_for_device(struct device *dev,
-		dma_addr_t dma_handle, unsigned long offset, size_t size,
-		enum dma_data_direction direction)
-{
-	/* just sync everything for now */
-	dma_sync_single_for_device(dev, dma_handle, offset + size, direction);
-}
-
 static inline void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
 		enum dma_data_direction direction)
 {
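
As a hedged illustration of the driver-visible effect of the last hunk: dma_sync_single_range_for_cpu()/..._for_device() now sync exactly the requested window rather than "just sync everything", and the plain dma_sync_single_* calls are the offset-0 case. The fragment below is hypothetical (the device, buffer, and header-window names are made up) and only demonstrates the calling pattern.

/* Hypothetical driver fragment -- calling pattern only. */
#include <linux/dma-mapping.h>

static void my_rx_peek_header(struct device *dev, dma_addr_t buf_dma,
		void *buf_cpu, unsigned long hdr_off, size_t hdr_len)
{
	/* Claim just the header window for the CPU; with this patch only
	 * [hdr_off, hdr_off + hdr_len) is synced, not the whole buffer. */
	dma_sync_single_range_for_cpu(dev, buf_dma, hdr_off, hdr_len,
				      DMA_FROM_DEVICE);

	/* ... inspect the header at buf_cpu + hdr_off ... */

	/* Return the same window to the device before it writes again. */
	dma_sync_single_range_for_device(dev, buf_dma, hdr_off, hdr_len,
					 DMA_FROM_DEVICE);
}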