author		Becky Bruce <becky.bruce@freescale.com>	2008-11-20 01:49:16 -0500
committer	Paul Mackerras <paulus@samba.org>	2008-12-03 04:46:36 -0500
commit		15e09c0ecaab4a1b4a7ed69db536c38948b92279 (patch)
tree		9cf8f2cbfe002e220db4d001d08fbc50bfaf4433 /arch/powerpc/kernel
parent		c4d04be11f99cc9ce4e3801a5da235727db704a9 (diff)
powerpc: Add sync_*_for_* to dma_ops
We need to swap these out once we start using swiotlb, so add
them to dma_ops.  Create CONFIG_PPC_NEED_DMA_SYNC_OPS Kconfig
option; this is currently enabled automatically if we're
CONFIG_NOT_COHERENT_CACHE.  In the future, this will also be
enabled for builds that need swiotlb.

If PPC_NEED_DMA_SYNC_OPS is not defined, the dma_sync_*_for_*
ops compile to nothing.  Otherwise, they access the dma_ops
pointers for the sync ops.

This patch also changes dma_sync_single_range_* to actually
sync the range - previously it was using a generous
dma_sync_single.  dma_sync_single_* is now implemented as a
dma_sync_single_range with an offset of 0.

Signed-off-by: Becky Bruce <becky.bruce@freescale.com>
Signed-off-by: Paul Mackerras <paulus@samba.org>
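For illustration, a minimal sketch of the header-side dma_sync_*_for_* wrappers this message describes. The real header change lives outside arch/powerpc/kernel and is not part of the diffstat below; the get_dma_ops() helper and the exact set of wrappers shown here are assumptions, not the patch's code.

/* Sketch only -- not the patch's actual header code.  Assumes a
 * get_dma_ops(dev) helper returning struct dma_mapping_ops *. */
#ifdef CONFIG_PPC_NEED_DMA_SYNC_OPS
static inline void dma_sync_single_for_cpu(struct device *dev,
		dma_addr_t dma_handle, size_t size,
		enum dma_data_direction direction)
{
	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);

	/* dma_sync_single_* is a range sync starting at offset 0 */
	dma_ops->sync_single_range_for_cpu(dev, dma_handle, 0, size,
					   direction);
}

static inline void dma_sync_single_range_for_device(struct device *dev,
		dma_addr_t dma_handle, unsigned long offset, size_t size,
		enum dma_data_direction direction)
{
	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);

	/* Sync only the requested sub-range of the mapping */
	dma_ops->sync_single_range_for_device(dev, dma_handle, offset,
					      size, direction);
}
#else
/* Cache-coherent builds without swiotlb: the sync ops compile away */
static inline void dma_sync_single_for_cpu(struct device *dev,
		dma_addr_t dma_handle, size_t size,
		enum dma_data_direction direction)
{
}
#endif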
Diffstat (limited to 'arch/powerpc/kernel')
-rw-r--r--  arch/powerpc/kernel/dma.c  26
1 file changed, 26 insertions, 0 deletions
diff --git a/arch/powerpc/kernel/dma.c b/arch/powerpc/kernel/dma.c
index 3a6eaa876ee1..1c5c8a6fc129 100644
--- a/arch/powerpc/kernel/dma.c
+++ b/arch/powerpc/kernel/dma.c
@@ -120,6 +120,26 @@ static inline void dma_direct_unmap_page(struct device *dev,
 {
 }
 
+#ifdef CONFIG_NOT_COHERENT_CACHE
+static inline void dma_direct_sync_sg(struct device *dev,
+		struct scatterlist *sgl, int nents,
+		enum dma_data_direction direction)
+{
+	struct scatterlist *sg;
+	int i;
+
+	for_each_sg(sgl, sg, nents, i)
+		__dma_sync_page(sg_page(sg), sg->offset, sg->length, direction);
+}
+
+static inline void dma_direct_sync_single_range(struct device *dev,
+		dma_addr_t dma_handle, unsigned long offset, size_t size,
+		enum dma_data_direction direction)
+{
+	__dma_sync(bus_to_virt(dma_handle+offset), size, direction);
+}
+#endif
+
 struct dma_mapping_ops dma_direct_ops = {
 	.alloc_coherent = dma_direct_alloc_coherent,
 	.free_coherent = dma_direct_free_coherent,
@@ -128,5 +148,11 @@ struct dma_mapping_ops dma_direct_ops = {
 	.dma_supported = dma_direct_dma_supported,
 	.map_page = dma_direct_map_page,
 	.unmap_page = dma_direct_unmap_page,
+#ifdef CONFIG_NOT_COHERENT_CACHE
+	.sync_single_range_for_cpu = dma_direct_sync_single_range,
+	.sync_single_range_for_device = dma_direct_sync_single_range,
+	.sync_sg_for_cpu = dma_direct_sync_sg,
+	.sync_sg_for_device = dma_direct_sync_sg,
+#endif
 };
 EXPORT_SYMBOL(dma_direct_ops);
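As a driver-side usage sketch (hypothetical function and buffer names, not part of the patch): on a CONFIG_NOT_COHERENT_CACHE build these calls now reach dma_direct_sync_single_range above and flush or invalidate only the requested range instead of the whole mapping.

/* Hypothetical example -- for illustration only. */
static void example_rx_complete(struct device *dev, void *cpu_addr,
				dma_addr_t dma_handle, unsigned long offset,
				size_t len)
{
	/* Make the device-written bytes visible to the CPU */
	dma_sync_single_range_for_cpu(dev, dma_handle, offset, len,
				      DMA_FROM_DEVICE);

	/* ... inspect the data at cpu_addr + offset ... */

	/* Hand the buffer back to the device for reuse */
	dma_sync_single_range_for_device(dev, dma_handle, offset, len,
					 DMA_FROM_DEVICE);
}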