author		FUJITA Tomonori <fujita.tomonori@lab.ntt.co.jp>	2009-08-04 15:08:26 -0400
committer	Benjamin Herrenschmidt <benh@kernel.crashing.org>	2009-08-28 00:24:10 -0400
commit		46bab4e4b45ec522ecd5fa4a0e2b4a6e6d1f153a (patch)
tree		e73698c33f76704a8be1bda4e28bfc1de0af9fd5 /arch
parent		45223c549273bbb2c6e1bc6e3629174e8765ad01 (diff)
powerpc: Use asm-generic/dma-mapping-common.h
Signed-off-by: FUJITA Tomonori <fujita.tomonori@lab.ntt.co.jp>
Acked-by: Becky Bruce <beckyb@kernel.crashing.org>
Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
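
In short, powerpc's hand-rolled dma_map_*/dma_unmap_*/dma_sync_* inline wrappers duplicated logic that now lives in asm-generic/dma-mapping-common.h, so the local copies are deleted and the generic header is included instead. The generic wrappers dispatch through the same struct dma_map_ops and additionally call the dma-debug hooks, which is why <linux/dma-debug.h> is newly included. For reference, the generic replacement for the removed dma_map_single_attrs() looks roughly like this (a paraphrase of the asm-generic header of this era, not a verbatim quote):

/* Sketch of the generic wrapper powerpc switches to; paraphrased,
 * not quoted verbatim from asm-generic/dma-mapping-common.h. */
static inline dma_addr_t dma_map_single_attrs(struct device *dev, void *ptr,
					      size_t size,
					      enum dma_data_direction dir,
					      struct dma_attrs *attrs)
{
	struct dma_map_ops *ops = get_dma_ops(dev);
	dma_addr_t addr;

	/* Same page/offset dispatch as the powerpc copy removed below. */
	addr = ops->map_page(dev, virt_to_page(ptr),
			     (unsigned long)ptr & ~PAGE_MASK, size,
			     dir, attrs);
	/* Plus the dma-debug hook, hence the new <linux/dma-debug.h>
	 * include in this patch. */
	debug_dma_map_page(dev, virt_to_page(ptr),
			   (unsigned long)ptr & ~PAGE_MASK, size,
			   dir, addr, true);
	return addr;
}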
Diffstat (limited to 'arch')
-rw-r--r--	arch/powerpc/Kconfig			  6
-rw-r--r--	arch/powerpc/include/asm/dma-mapping.h	242
2 files changed, 7 insertions(+), 241 deletions(-)
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index 4c0747e8ed74..6078253c6d76 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -120,7 +120,7 @@ config PPC
 	select HAVE_KRETPROBES
 	select HAVE_ARCH_TRACEHOOK
 	select HAVE_LMB
-	select HAVE_DMA_ATTRS if PPC64
+	select HAVE_DMA_ATTRS
 	select USE_GENERIC_SMP_HELPERS if SMP
 	select HAVE_OPROFILE
 	select HAVE_SYSCALL_WRAPPERS if PPC64
@@ -307,10 +307,6 @@ config SWIOTLB
 	  platforms where the size of a physical address is larger
 	  than the bus address. Not all platforms support this.
 
-config PPC_NEED_DMA_SYNC_OPS
-	def_bool y
-	depends on (NOT_COHERENT_CACHE || SWIOTLB)
-
 config HOTPLUG_CPU
 	bool "Support for enabling/disabling CPUs"
 	depends on SMP && HOTPLUG && EXPERIMENTAL && (PPC_PSERIES || PPC_PMAC)
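
Both Kconfig changes fall out of the switch to the generic header. HAVE_DMA_ATTRS is now selected on 32-bit as well, because asm-generic/dma-mapping-common.h supplies the *_attrs variants for every configuration. PPC_NEED_DMA_SYNC_OPS disappears because the generic dma_sync_* wrappers exist unconditionally: they dispatch through dma_map_ops and skip NULL sync hooks, so cache-coherent platforms keep the no-op behaviour that the old #else branch open-coded. The generic wrappers of this period also end with a call to flush_write_buffers(), an x86-ism, which is why the next file adds an empty powerpc definition tagged "this will be removed soon". Roughly (again a paraphrase of the generic header, not a verbatim quote):

/* Sketch of the generic sync wrapper that replaces powerpc's copies
 * (paraphrased from asm-generic/dma-mapping-common.h of this period). */
static inline void dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr,
					   size_t size,
					   enum dma_data_direction dir)
{
	struct dma_map_ops *ops = get_dma_ops(dev);

	if (ops->sync_single_for_cpu)		/* NULL on coherent platforms */
		ops->sync_single_for_cpu(dev, addr, size, dir);
	debug_dma_sync_single_for_cpu(dev, addr, size, dir);
	flush_write_buffers();			/* empty macro on powerpc */
}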
diff --git a/arch/powerpc/include/asm/dma-mapping.h b/arch/powerpc/include/asm/dma-mapping.h
index 8ca2b5183c56..91217e4a0bfc 100644
--- a/arch/powerpc/include/asm/dma-mapping.h
+++ b/arch/powerpc/include/asm/dma-mapping.h
@@ -14,6 +14,7 @@
 #include <linux/mm.h>
 #include <linux/scatterlist.h>
 #include <linux/dma-attrs.h>
+#include <linux/dma-debug.h>
 #include <asm/io.h>
 #include <asm/swiotlb.h>
 
@@ -89,6 +90,11 @@ static inline void set_dma_ops(struct device *dev, struct dma_map_ops *ops)
 	dev->archdata.dma_ops = ops;
 }
 
+/* this will be removed soon */
+#define flush_write_buffers()
+
+#include <asm-generic/dma-mapping-common.h>
+
 static inline int dma_supported(struct device *dev, u64 mask)
 {
 	struct dma_map_ops *dma_ops = get_dma_ops(dev);
@@ -117,87 +123,6 @@ static inline int dma_set_mask(struct device *dev, u64 dma_mask)
 	return 0;
 }
 
-/*
- * map_/unmap_single actually call through to map/unmap_page now that all the
- * dma_map_ops have been converted over. We just have to get the page and
- * offset to pass through to map_page
- */
-static inline dma_addr_t dma_map_single_attrs(struct device *dev,
-					      void *cpu_addr,
-					      size_t size,
-					      enum dma_data_direction direction,
-					      struct dma_attrs *attrs)
-{
-	struct dma_map_ops *dma_ops = get_dma_ops(dev);
-
-	BUG_ON(!dma_ops);
-
-	return dma_ops->map_page(dev, virt_to_page(cpu_addr),
-				 (unsigned long)cpu_addr % PAGE_SIZE, size,
-				 direction, attrs);
-}
-
-static inline void dma_unmap_single_attrs(struct device *dev,
-					  dma_addr_t dma_addr,
-					  size_t size,
-					  enum dma_data_direction direction,
-					  struct dma_attrs *attrs)
-{
-	struct dma_map_ops *dma_ops = get_dma_ops(dev);
-
-	BUG_ON(!dma_ops);
-
-	dma_ops->unmap_page(dev, dma_addr, size, direction, attrs);
-}
-
-static inline dma_addr_t dma_map_page_attrs(struct device *dev,
-					    struct page *page,
-					    unsigned long offset, size_t size,
-					    enum dma_data_direction direction,
-					    struct dma_attrs *attrs)
-{
-	struct dma_map_ops *dma_ops = get_dma_ops(dev);
-
-	BUG_ON(!dma_ops);
-
-	return dma_ops->map_page(dev, page, offset, size, direction, attrs);
-}
-
-static inline void dma_unmap_page_attrs(struct device *dev,
-					dma_addr_t dma_address,
-					size_t size,
-					enum dma_data_direction direction,
-					struct dma_attrs *attrs)
-{
-	struct dma_map_ops *dma_ops = get_dma_ops(dev);
-
-	BUG_ON(!dma_ops);
-
-	dma_ops->unmap_page(dev, dma_address, size, direction, attrs);
-}
-
-static inline int dma_map_sg_attrs(struct device *dev, struct scatterlist *sg,
-				   int nents, enum dma_data_direction direction,
-				   struct dma_attrs *attrs)
-{
-	struct dma_map_ops *dma_ops = get_dma_ops(dev);
-
-	BUG_ON(!dma_ops);
-	return dma_ops->map_sg(dev, sg, nents, direction, attrs);
-}
-
-static inline void dma_unmap_sg_attrs(struct device *dev,
-				      struct scatterlist *sg,
-				      int nhwentries,
-				      enum dma_data_direction direction,
-				      struct dma_attrs *attrs)
-{
-	struct dma_map_ops *dma_ops = get_dma_ops(dev);
-
-	BUG_ON(!dma_ops);
-	dma_ops->unmap_sg(dev, sg, nhwentries, direction, attrs);
-}
-
 static inline void *dma_alloc_coherent(struct device *dev, size_t size,
 				       dma_addr_t *dma_handle, gfp_t flag)
 {
@@ -216,161 +141,6 @@ static inline void dma_free_coherent(struct device *dev, size_t size,
 	dma_ops->free_coherent(dev, size, cpu_addr, dma_handle);
 }
 
-static inline dma_addr_t dma_map_single(struct device *dev, void *cpu_addr,
-					size_t size,
-					enum dma_data_direction direction)
-{
-	return dma_map_single_attrs(dev, cpu_addr, size, direction, NULL);
-}
-
-static inline void dma_unmap_single(struct device *dev, dma_addr_t dma_addr,
-				    size_t size,
-				    enum dma_data_direction direction)
-{
-	dma_unmap_single_attrs(dev, dma_addr, size, direction, NULL);
-}
-
-static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
-				      unsigned long offset, size_t size,
-				      enum dma_data_direction direction)
-{
-	return dma_map_page_attrs(dev, page, offset, size, direction, NULL);
-}
-
-static inline void dma_unmap_page(struct device *dev, dma_addr_t dma_address,
-				  size_t size,
-				  enum dma_data_direction direction)
-{
-	dma_unmap_page_attrs(dev, dma_address, size, direction, NULL);
-}
-
-static inline int dma_map_sg(struct device *dev, struct scatterlist *sg,
-			     int nents, enum dma_data_direction direction)
-{
-	return dma_map_sg_attrs(dev, sg, nents, direction, NULL);
-}
-
-static inline void dma_unmap_sg(struct device *dev, struct scatterlist *sg,
-				int nhwentries,
-				enum dma_data_direction direction)
-{
-	dma_unmap_sg_attrs(dev, sg, nhwentries, direction, NULL);
-}
-
-#ifdef CONFIG_PPC_NEED_DMA_SYNC_OPS
-static inline void dma_sync_single_for_cpu(struct device *dev,
-		dma_addr_t dma_handle, size_t size,
-		enum dma_data_direction direction)
-{
-	struct dma_map_ops *dma_ops = get_dma_ops(dev);
-
-	BUG_ON(!dma_ops);
-
-	if (dma_ops->sync_single_range_for_cpu)
-		dma_ops->sync_single_range_for_cpu(dev, dma_handle, 0,
-						   size, direction);
-}
-
-static inline void dma_sync_single_for_device(struct device *dev,
-		dma_addr_t dma_handle, size_t size,
-		enum dma_data_direction direction)
-{
-	struct dma_map_ops *dma_ops = get_dma_ops(dev);
-
-	BUG_ON(!dma_ops);
-
-	if (dma_ops->sync_single_range_for_device)
-		dma_ops->sync_single_range_for_device(dev, dma_handle,
-						      0, size, direction);
-}
-
-static inline void dma_sync_sg_for_cpu(struct device *dev,
-		struct scatterlist *sgl, int nents,
-		enum dma_data_direction direction)
-{
-	struct dma_map_ops *dma_ops = get_dma_ops(dev);
-
-	BUG_ON(!dma_ops);
-
-	if (dma_ops->sync_sg_for_cpu)
-		dma_ops->sync_sg_for_cpu(dev, sgl, nents, direction);
-}
-
-static inline void dma_sync_sg_for_device(struct device *dev,
-		struct scatterlist *sgl, int nents,
-		enum dma_data_direction direction)
-{
-	struct dma_map_ops *dma_ops = get_dma_ops(dev);
-
-	BUG_ON(!dma_ops);
-
-	if (dma_ops->sync_sg_for_device)
-		dma_ops->sync_sg_for_device(dev, sgl, nents, direction);
-}
-
-static inline void dma_sync_single_range_for_cpu(struct device *dev,
-		dma_addr_t dma_handle, unsigned long offset, size_t size,
-		enum dma_data_direction direction)
-{
-	struct dma_map_ops *dma_ops = get_dma_ops(dev);
-
-	BUG_ON(!dma_ops);
-
-	if (dma_ops->sync_single_range_for_cpu)
-		dma_ops->sync_single_range_for_cpu(dev, dma_handle,
-						   offset, size, direction);
-}
-
-static inline void dma_sync_single_range_for_device(struct device *dev,
-		dma_addr_t dma_handle, unsigned long offset, size_t size,
-		enum dma_data_direction direction)
-{
-	struct dma_map_ops *dma_ops = get_dma_ops(dev);
-
-	BUG_ON(!dma_ops);
-
-	if (dma_ops->sync_single_range_for_device)
-		dma_ops->sync_single_range_for_device(dev, dma_handle, offset,
-						      size, direction);
-}
-#else /* CONFIG_PPC_NEED_DMA_SYNC_OPS */
-static inline void dma_sync_single_for_cpu(struct device *dev,
-		dma_addr_t dma_handle, size_t size,
-		enum dma_data_direction direction)
-{
-}
-
-static inline void dma_sync_single_for_device(struct device *dev,
-		dma_addr_t dma_handle, size_t size,
-		enum dma_data_direction direction)
-{
-}
-
-static inline void dma_sync_sg_for_cpu(struct device *dev,
-		struct scatterlist *sgl, int nents,
-		enum dma_data_direction direction)
-{
-}
-
-static inline void dma_sync_sg_for_device(struct device *dev,
-		struct scatterlist *sgl, int nents,
-		enum dma_data_direction direction)
-{
-}
-
-static inline void dma_sync_single_range_for_cpu(struct device *dev,
-		dma_addr_t dma_handle, unsigned long offset, size_t size,
-		enum dma_data_direction direction)
-{
-}
-
-static inline void dma_sync_single_range_for_device(struct device *dev,
-		dma_addr_t dma_handle, unsigned long offset, size_t size,
-		enum dma_data_direction direction)
-{
-}
-#endif
-
 static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
 {
 #ifdef CONFIG_PPC64
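
Driver-facing behaviour is unchanged by the patch: the same calls simply resolve to the shared asm-generic wrappers instead of powerpc's private copies. An illustrative caller (hypothetical driver code, not part of this patch):

/* Hypothetical driver snippet; the API it uses is exactly what this
 * patch reroutes to the generic wrappers. */
void *buf = kmalloc(len, GFP_KERNEL);
dma_addr_t handle;

handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
if (dma_mapping_error(dev, handle))
	goto fail;			/* no usable DMA address */
/* ... start device DMA and wait for it to complete ... */
dma_unmap_single(dev, handle, len, DMA_TO_DEVICE);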