author     Becky Bruce <becky.bruce@freescale.com>   2008-11-20 01:49:16 -0500
committer  Paul Mackerras <paulus@samba.org>         2008-12-03 04:46:36 -0500
commit     15e09c0ecaab4a1b4a7ed69db536c38948b92279 (patch)
tree       9cf8f2cbfe002e220db4d001d08fbc50bfaf4433 /arch/powerpc
parent     c4d04be11f99cc9ce4e3801a5da235727db704a9 (diff)
powerpc: Add sync_*_for_* to dma_ops
We need to swap these out once we start using swiotlb, so add
them to dma_ops. Create a CONFIG_PPC_NEED_DMA_SYNC_OPS Kconfig
option; it is currently enabled automatically for
CONFIG_NOT_COHERENT_CACHE builds. In the future, it will also
be enabled for builds that need swiotlb. If CONFIG_PPC_NEED_DMA_SYNC_OPS
is not set, the dma_sync_*_for_* ops compile to nothing;
otherwise, they dispatch through the sync ops in dma_ops.
This patch also changes dma_sync_single_range_* to actually
sync only the requested range - previously it synced generously
from the start of the mapping via dma_sync_single.
dma_sync_single_* is now implemented as a dma_sync_single_range
with an offset of 0.
Signed-off-by: Becky Bruce <becky.bruce@freescale.com>
Signed-off-by: Paul Mackerras <paulus@samba.org>
Diffstat (limited to 'arch/powerpc')
-rw-r--r--  arch/powerpc/Kconfig                    |  4
-rw-r--r--  arch/powerpc/include/asm/dma-mapping.h  | 93
-rw-r--r--  arch/powerpc/kernel/dma.c               | 26
3 files changed, 92 insertions(+), 31 deletions(-)
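
For context, the dma_sync_*_for_* calls being rerouted by this patch are the
standard streaming-DMA ownership handoffs. A minimal sketch of the usual
driver-side pattern follows; the function, dev, buf, len, and process() are
hypothetical names for illustration, not part of this patch:

/* Illustrative only: reusing one streaming mapping for repeated
 * DMA_FROM_DEVICE transfers. */
static void example_rx(struct device *dev, void *buf, size_t len)
{
	dma_addr_t handle;

	handle = dma_map_single(dev, buf, len, DMA_FROM_DEVICE);

	/* ... device DMAs into the buffer ... */

	/* Give the buffer to the CPU: on a noncoherent cache this
	 * invalidates its cache lines so the CPU sees the DMA data. */
	dma_sync_single_for_cpu(dev, handle, len, DMA_FROM_DEVICE);
	process(buf);

	/* Hand the buffer back to the device for the next transfer. */
	dma_sync_single_for_device(dev, handle, len, DMA_FROM_DEVICE);

	dma_unmap_single(dev, handle, len, DMA_FROM_DEVICE);
}

After this patch, both sync calls dispatch through the device's dma_ops
rather than calling __dma_sync() directly, so a swiotlb backend can later
intercept them without driver changes.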
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index 525c13a4de93..be4f99b7cbbb 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -285,6 +285,10 @@ config IOMMU_VMERGE
 config IOMMU_HELPER
 	def_bool PPC64
 
+config PPC_NEED_DMA_SYNC_OPS
+	def_bool y
+	depends on NOT_COHERENT_CACHE
+
 config HOTPLUG_CPU
 	bool "Support for enabling/disabling CPUs"
 	depends on SMP && HOTPLUG && EXPERIMENTAL && (PPC_PSERIES || PPC_PMAC)
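
As the commit message notes, this symbol is expected to grow beyond
NOT_COHERENT_CACHE once swiotlb support lands. A plausible future form,
purely illustrative (the SWIOTLB dependency is an assumption, not part of
this patch):

config PPC_NEED_DMA_SYNC_OPS
	def_bool y
	depends on NOT_COHERENT_CACHE || SWIOTLB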
diff --git a/arch/powerpc/include/asm/dma-mapping.h b/arch/powerpc/include/asm/dma-mapping.h
index 3c4a2c21d606..9063184fa6fe 100644
--- a/arch/powerpc/include/asm/dma-mapping.h
+++ b/arch/powerpc/include/asm/dma-mapping.h
@@ -76,6 +76,22 @@ struct dma_mapping_ops {
 			dma_addr_t dma_address, size_t size,
 			enum dma_data_direction direction,
 			struct dma_attrs *attrs);
+#ifdef CONFIG_PPC_NEED_DMA_SYNC_OPS
+	void		(*sync_single_range_for_cpu)(struct device *hwdev,
+			dma_addr_t dma_handle, unsigned long offset,
+			size_t size,
+			enum dma_data_direction direction);
+	void		(*sync_single_range_for_device)(struct device *hwdev,
+			dma_addr_t dma_handle, unsigned long offset,
+			size_t size,
+			enum dma_data_direction direction);
+	void		(*sync_sg_for_cpu)(struct device *hwdev,
+			struct scatterlist *sg, int nelems,
+			enum dma_data_direction direction);
+	void		(*sync_sg_for_device)(struct device *hwdev,
+			struct scatterlist *sg, int nelems,
+			enum dma_data_direction direction);
+#endif
 };
 
 /*
@@ -282,47 +298,78 @@ static inline void dma_unmap_sg(struct device *dev, struct scatterlist *sg,
 	dma_unmap_sg_attrs(dev, sg, nhwentries, direction, NULL);
 }
 
+#ifdef CONFIG_PPC_NEED_DMA_SYNC_OPS
 static inline void dma_sync_single_for_cpu(struct device *dev,
 		dma_addr_t dma_handle, size_t size,
 		enum dma_data_direction direction)
 {
-	BUG_ON(direction == DMA_NONE);
-	__dma_sync(bus_to_virt(dma_handle), size, direction);
+	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);
+
+	BUG_ON(!dma_ops);
+	dma_ops->sync_single_range_for_cpu(dev, dma_handle, 0,
+					   size, direction);
 }
 
 static inline void dma_sync_single_for_device(struct device *dev,
 		dma_addr_t dma_handle, size_t size,
 		enum dma_data_direction direction)
 {
-	BUG_ON(direction == DMA_NONE);
-	__dma_sync(bus_to_virt(dma_handle), size, direction);
+	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);
+
+	BUG_ON(!dma_ops);
+	dma_ops->sync_single_range_for_device(dev, dma_handle,
+					      0, size, direction);
 }
 
 static inline void dma_sync_sg_for_cpu(struct device *dev,
 		struct scatterlist *sgl, int nents,
 		enum dma_data_direction direction)
 {
-	struct scatterlist *sg;
-	int i;
-
-	BUG_ON(direction == DMA_NONE);
+	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);
 
-	for_each_sg(sgl, sg, nents, i)
-		__dma_sync_page(sg_page(sg), sg->offset, sg->length, direction);
+	BUG_ON(!dma_ops);
+	dma_ops->sync_sg_for_cpu(dev, sgl, nents, direction);
 }
 
 static inline void dma_sync_sg_for_device(struct device *dev,
 		struct scatterlist *sgl, int nents,
 		enum dma_data_direction direction)
 {
-	struct scatterlist *sg;
-	int i;
+	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);
 
-	BUG_ON(direction == DMA_NONE);
+	BUG_ON(!dma_ops);
+	dma_ops->sync_sg_for_device(dev, sgl, nents, direction);
+}
+
+static inline void dma_sync_single_range_for_cpu(struct device *dev,
+		dma_addr_t dma_handle, unsigned long offset, size_t size,
+		enum dma_data_direction direction)
+{
+	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);
 
-	for_each_sg(sgl, sg, nents, i)
-		__dma_sync_page(sg_page(sg), sg->offset, sg->length, direction);
+	BUG_ON(!dma_ops);
+	dma_ops->sync_single_range_for_cpu(dev, dma_handle,
+					   offset, size, direction);
+}
+
+static inline void dma_sync_single_range_for_device(struct device *dev,
+		dma_addr_t dma_handle, unsigned long offset, size_t size,
+		enum dma_data_direction direction)
+{
+	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);
+
+	BUG_ON(!dma_ops);
+	dma_ops->sync_single_range_for_device(dev, dma_handle, offset,
+					      size, direction);
 }
+#else /* CONFIG_PPC_NEED_DMA_SYNC_OPS */
+#define dma_sync_single_for_cpu(d, h, s, dir) ((void)0)
+#define dma_sync_single_for_device(d, h, s, dir) ((void)0)
+#define dma_sync_single_range_for_cpu(d, h, o, s, dir) ((void)0)
+#define dma_sync_single_range_for_device(d, h, o, s, dir) ((void)0)
+#define dma_sync_sg_for_cpu(d, s, n, dir) ((void)0)
+#define dma_sync_sg_for_device(d, s, n, dir) ((void)0)
+#endif
 
 static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
 {
@@ -356,22 +403,6 @@ static inline int dma_get_cache_alignment(void)
 #endif
 }
 
-static inline void dma_sync_single_range_for_cpu(struct device *dev,
-		dma_addr_t dma_handle, unsigned long offset, size_t size,
-		enum dma_data_direction direction)
-{
-	/* just sync everything for now */
-	dma_sync_single_for_cpu(dev, dma_handle, offset + size, direction);
-}
-
-static inline void dma_sync_single_range_for_device(struct device *dev,
-		dma_addr_t dma_handle, unsigned long offset, size_t size,
-		enum dma_data_direction direction)
-{
-	/* just sync everything for now */
-	dma_sync_single_for_device(dev, dma_handle, offset + size, direction);
-}
-
 static inline void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
 		enum dma_data_direction direction)
 {
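
The removed fallbacks above are where the "generous" ranged sync lived: they
passed offset + size as the length, so the flush always started at the
beginning of the mapping. A worked illustration with hypothetical numbers
(dev and handle are placeholder names):

/* Sync 256 bytes at offset 2048 within one streaming mapping: */
dma_sync_single_range_for_cpu(dev, handle, 2048, 256, DMA_FROM_DEVICE);

/* Before: __dma_sync(bus_to_virt(handle), 2048 + 256, dir)
 *         -> flushes bytes [0, 2304) of the mapping.
 * After:  __dma_sync(bus_to_virt(handle + 2048), 256, dir)
 *         -> flushes only the requested bytes [2048, 2304). */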
diff --git a/arch/powerpc/kernel/dma.c b/arch/powerpc/kernel/dma.c
index 3a6eaa876ee1..1c5c8a6fc129 100644
--- a/arch/powerpc/kernel/dma.c
+++ b/arch/powerpc/kernel/dma.c
@@ -120,6 +120,26 @@ static inline void dma_direct_unmap_page(struct device *dev,
 {
 }
 
+#ifdef CONFIG_NOT_COHERENT_CACHE
+static inline void dma_direct_sync_sg(struct device *dev,
+		struct scatterlist *sgl, int nents,
+		enum dma_data_direction direction)
+{
+	struct scatterlist *sg;
+	int i;
+
+	for_each_sg(sgl, sg, nents, i)
+		__dma_sync_page(sg_page(sg), sg->offset, sg->length, direction);
+}
+
+static inline void dma_direct_sync_single_range(struct device *dev,
+		dma_addr_t dma_handle, unsigned long offset, size_t size,
+		enum dma_data_direction direction)
+{
+	__dma_sync(bus_to_virt(dma_handle+offset), size, direction);
+}
+#endif
+
 struct dma_mapping_ops dma_direct_ops = {
 	.alloc_coherent	= dma_direct_alloc_coherent,
 	.free_coherent	= dma_direct_free_coherent,
@@ -128,5 +148,11 @@ struct dma_mapping_ops dma_direct_ops = {
 	.dma_supported	= dma_direct_dma_supported,
 	.map_page	= dma_direct_map_page,
 	.unmap_page	= dma_direct_unmap_page,
+#ifdef CONFIG_NOT_COHERENT_CACHE
+	.sync_single_range_for_cpu	= dma_direct_sync_single_range,
+	.sync_single_range_for_device	= dma_direct_sync_single_range,
+	.sync_sg_for_cpu		= dma_direct_sync_sg,
+	.sync_sg_for_device		= dma_direct_sync_sg,
+#endif
 };
 EXPORT_SYMBOL(dma_direct_ops);
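
To see why the indirection pays off, here is a rough sketch of the follow-on
the commit message anticipates: a second ops table whose sync hooks
bounce-buffer through swiotlb instead of flushing caches. All swiotlb_*
names below are hypothetical placeholders, not an API this patch introduces:

/* Hypothetical sketch only; callback names are assumptions. */
struct dma_mapping_ops swiotlb_dma_ops = {
	/* ... alloc/free/map callbacks ... */
	.sync_single_range_for_cpu	= swiotlb_sync_single_range_for_cpu,
	.sync_single_range_for_device	= swiotlb_sync_single_range_for_device,
	.sync_sg_for_cpu		= swiotlb_sync_sg_for_cpu,
	.sync_sg_for_device		= swiotlb_sync_sg_for_device,
};

A device behind an address-limited bus would then get this table from
get_dma_ops(), and the same dma_sync_* wrappers in dma-mapping.h would do
the right thing with no driver changes.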