Diffstat (limited to 'arch/powerpc/include/asm/dma-mapping.h')
-rw-r--r--  arch/powerpc/include/asm/dma-mapping.h  318
1 file changed, 30 insertions, 288 deletions
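For orientation before the patch body: this change deletes powerpc's private struct dma_mapping_ops and the hand-rolled dma_map_*/dma_sync_* inline wrappers, and instead uses the kernel-wide struct dma_map_ops together with the generic wrappers pulled in by asm-generic/dma-mapping-common.h. The standalone sketch below only models the dispatch pattern involved (a per-device table of function pointers that thin generic wrappers call through); it is not kernel code, and every name in it is a simplified stand-in for illustration.

/*
 * Toy model of the dma_map_ops dispatch pattern (illustration only, not the
 * kernel API): each device carries a pointer to a table of function pointers,
 * and a generic wrapper looks up that table and calls through it.
 */
#include <stdio.h>
#include <stddef.h>
#include <stdint.h>

typedef uint64_t dma_addr_t;

struct device;

struct dma_map_ops {                    /* simplified stand-in */
        dma_addr_t (*map_page)(struct device *dev, void *page,
                               unsigned long offset, size_t size);
};

struct device {
        struct dma_map_ops *dma_ops;    /* akin to dev->archdata.dma_ops */
};

/* "Direct" backend: bus address == CPU address, as a placeholder policy. */
static dma_addr_t direct_map_page(struct device *dev, void *page,
                                  unsigned long offset, size_t size)
{
        (void)dev;
        (void)size;
        return (dma_addr_t)(uintptr_t)page + offset;
}

static struct dma_map_ops dma_direct_ops = { .map_page = direct_map_page };

/* Generic wrapper in the spirit of dma-mapping-common.h: pure dispatch. */
static dma_addr_t dma_map_single(struct device *dev, void *cpu_addr, size_t size)
{
        struct dma_map_ops *ops = dev->dma_ops;

        return ops->map_page(dev, cpu_addr, 0, size);
}

int main(void)
{
        char buf[64];
        struct device dev = { .dma_ops = &dma_direct_ops };

        printf("bus address: %#llx\n",
               (unsigned long long)dma_map_single(&dev, buf, sizeof(buf)));
        return 0;
}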
diff --git a/arch/powerpc/include/asm/dma-mapping.h b/arch/powerpc/include/asm/dma-mapping.h
index 0c34371ec49c..cb2ca41dd526 100644
--- a/arch/powerpc/include/asm/dma-mapping.h
+++ b/arch/powerpc/include/asm/dma-mapping.h
@@ -14,6 +14,7 @@
 #include <linux/mm.h>
 #include <linux/scatterlist.h>
 #include <linux/dma-attrs.h>
+#include <linux/dma-debug.h>
 #include <asm/io.h>
 #include <asm/swiotlb.h>
 
@@ -64,58 +65,14 @@ static inline unsigned long device_to_mask(struct device *dev)
 }
 
 /*
- * DMA operations are abstracted for G5 vs. i/pSeries, PCI vs. VIO
- */
-struct dma_mapping_ops {
-        void * (*alloc_coherent)(struct device *dev, size_t size,
-                        dma_addr_t *dma_handle, gfp_t flag);
-        void (*free_coherent)(struct device *dev, size_t size,
-                        void *vaddr, dma_addr_t dma_handle);
-        int (*map_sg)(struct device *dev, struct scatterlist *sg,
-                        int nents, enum dma_data_direction direction,
-                        struct dma_attrs *attrs);
-        void (*unmap_sg)(struct device *dev, struct scatterlist *sg,
-                        int nents, enum dma_data_direction direction,
-                        struct dma_attrs *attrs);
-        int (*dma_supported)(struct device *dev, u64 mask);
-        int (*set_dma_mask)(struct device *dev, u64 dma_mask);
-        dma_addr_t (*map_page)(struct device *dev, struct page *page,
-                        unsigned long offset, size_t size,
-                        enum dma_data_direction direction,
-                        struct dma_attrs *attrs);
-        void (*unmap_page)(struct device *dev,
-                        dma_addr_t dma_address, size_t size,
-                        enum dma_data_direction direction,
-                        struct dma_attrs *attrs);
-        int (*addr_needs_map)(struct device *dev, dma_addr_t addr,
-                        size_t size);
-#ifdef CONFIG_PPC_NEED_DMA_SYNC_OPS
-        void (*sync_single_range_for_cpu)(struct device *hwdev,
-                        dma_addr_t dma_handle, unsigned long offset,
-                        size_t size,
-                        enum dma_data_direction direction);
-        void (*sync_single_range_for_device)(struct device *hwdev,
-                        dma_addr_t dma_handle, unsigned long offset,
-                        size_t size,
-                        enum dma_data_direction direction);
-        void (*sync_sg_for_cpu)(struct device *hwdev,
-                        struct scatterlist *sg, int nelems,
-                        enum dma_data_direction direction);
-        void (*sync_sg_for_device)(struct device *hwdev,
-                        struct scatterlist *sg, int nelems,
-                        enum dma_data_direction direction);
-#endif
-};
-
-/*
  * Available generic sets of operations
  */
 #ifdef CONFIG_PPC64
-extern struct dma_mapping_ops dma_iommu_ops;
+extern struct dma_map_ops dma_iommu_ops;
 #endif
-extern struct dma_mapping_ops dma_direct_ops;
+extern struct dma_map_ops dma_direct_ops;
 
-static inline struct dma_mapping_ops *get_dma_ops(struct device *dev)
+static inline struct dma_map_ops *get_dma_ops(struct device *dev)
 {
         /* We don't handle the NULL dev case for ISA for now. We could
          * do it via an out of line call but it is not needed for now. The
@@ -128,14 +85,19 @@ static inline struct dma_mapping_ops *get_dma_ops(struct device *dev)
         return dev->archdata.dma_ops;
 }
 
-static inline void set_dma_ops(struct device *dev, struct dma_mapping_ops *ops)
+static inline void set_dma_ops(struct device *dev, struct dma_map_ops *ops)
 {
         dev->archdata.dma_ops = ops;
 }
 
+/* this will be removed soon */
+#define flush_write_buffers()
+
+#include <asm-generic/dma-mapping-common.h>
+
 static inline int dma_supported(struct device *dev, u64 mask)
 {
-        struct dma_mapping_ops *dma_ops = get_dma_ops(dev);
+        struct dma_map_ops *dma_ops = get_dma_ops(dev);
 
         if (unlikely(dma_ops == NULL))
                 return 0;
@@ -149,7 +111,7 @@ static inline int dma_supported(struct device *dev, u64 mask)
 
 static inline int dma_set_mask(struct device *dev, u64 dma_mask)
 {
-        struct dma_mapping_ops *dma_ops = get_dma_ops(dev);
+        struct dma_map_ops *dma_ops = get_dma_ops(dev);
 
         if (unlikely(dma_ops == NULL))
                 return -EIO;
@@ -161,262 +123,40 @@ static inline int dma_set_mask(struct device *dev, u64 dma_mask)
         return 0;
 }
 
-/*
- * map_/unmap_single actually call through to map/unmap_page now that all the
- * dma_mapping_ops have been converted over. We just have to get the page and
- * offset to pass through to map_page
- */
-static inline dma_addr_t dma_map_single_attrs(struct device *dev,
-                                              void *cpu_addr,
-                                              size_t size,
-                                              enum dma_data_direction direction,
-                                              struct dma_attrs *attrs)
-{
-        struct dma_mapping_ops *dma_ops = get_dma_ops(dev);
-
-        BUG_ON(!dma_ops);
-
-        return dma_ops->map_page(dev, virt_to_page(cpu_addr),
-                                 (unsigned long)cpu_addr % PAGE_SIZE, size,
-                                 direction, attrs);
-}
-
-static inline void dma_unmap_single_attrs(struct device *dev,
-                                          dma_addr_t dma_addr,
-                                          size_t size,
-                                          enum dma_data_direction direction,
-                                          struct dma_attrs *attrs)
-{
-        struct dma_mapping_ops *dma_ops = get_dma_ops(dev);
-
-        BUG_ON(!dma_ops);
-
-        dma_ops->unmap_page(dev, dma_addr, size, direction, attrs);
-}
-
-static inline dma_addr_t dma_map_page_attrs(struct device *dev,
-                                            struct page *page,
-                                            unsigned long offset, size_t size,
-                                            enum dma_data_direction direction,
-                                            struct dma_attrs *attrs)
-{
-        struct dma_mapping_ops *dma_ops = get_dma_ops(dev);
-
-        BUG_ON(!dma_ops);
-
-        return dma_ops->map_page(dev, page, offset, size, direction, attrs);
-}
-
-static inline void dma_unmap_page_attrs(struct device *dev,
-                                        dma_addr_t dma_address,
-                                        size_t size,
-                                        enum dma_data_direction direction,
-                                        struct dma_attrs *attrs)
-{
-        struct dma_mapping_ops *dma_ops = get_dma_ops(dev);
-
-        BUG_ON(!dma_ops);
-
-        dma_ops->unmap_page(dev, dma_address, size, direction, attrs);
-}
-
-static inline int dma_map_sg_attrs(struct device *dev, struct scatterlist *sg,
-                                   int nents, enum dma_data_direction direction,
-                                   struct dma_attrs *attrs)
-{
-        struct dma_mapping_ops *dma_ops = get_dma_ops(dev);
-
-        BUG_ON(!dma_ops);
-        return dma_ops->map_sg(dev, sg, nents, direction, attrs);
-}
-
-static inline void dma_unmap_sg_attrs(struct device *dev,
-                                      struct scatterlist *sg,
-                                      int nhwentries,
-                                      enum dma_data_direction direction,
-                                      struct dma_attrs *attrs)
-{
-        struct dma_mapping_ops *dma_ops = get_dma_ops(dev);
-
-        BUG_ON(!dma_ops);
-        dma_ops->unmap_sg(dev, sg, nhwentries, direction, attrs);
-}
-
 static inline void *dma_alloc_coherent(struct device *dev, size_t size,
                                        dma_addr_t *dma_handle, gfp_t flag)
 {
-        struct dma_mapping_ops *dma_ops = get_dma_ops(dev);
-
-        BUG_ON(!dma_ops);
-        return dma_ops->alloc_coherent(dev, size, dma_handle, flag);
-}
-
-static inline void dma_free_coherent(struct device *dev, size_t size,
-                                     void *cpu_addr, dma_addr_t dma_handle)
-{
-        struct dma_mapping_ops *dma_ops = get_dma_ops(dev);
-
-        BUG_ON(!dma_ops);
-        dma_ops->free_coherent(dev, size, cpu_addr, dma_handle);
-}
-
-static inline dma_addr_t dma_map_single(struct device *dev, void *cpu_addr,
-                                        size_t size,
-                                        enum dma_data_direction direction)
-{
-        return dma_map_single_attrs(dev, cpu_addr, size, direction, NULL);
-}
-
-static inline void dma_unmap_single(struct device *dev, dma_addr_t dma_addr,
-                                    size_t size,
-                                    enum dma_data_direction direction)
-{
-        dma_unmap_single_attrs(dev, dma_addr, size, direction, NULL);
-}
-
-static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
-                                      unsigned long offset, size_t size,
-                                      enum dma_data_direction direction)
-{
-        return dma_map_page_attrs(dev, page, offset, size, direction, NULL);
-}
-
-static inline void dma_unmap_page(struct device *dev, dma_addr_t dma_address,
-                                  size_t size,
-                                  enum dma_data_direction direction)
-{
-        dma_unmap_page_attrs(dev, dma_address, size, direction, NULL);
-}
-
-static inline int dma_map_sg(struct device *dev, struct scatterlist *sg,
-                             int nents, enum dma_data_direction direction)
-{
-        return dma_map_sg_attrs(dev, sg, nents, direction, NULL);
-}
-
-static inline void dma_unmap_sg(struct device *dev, struct scatterlist *sg,
-                                int nhwentries,
-                                enum dma_data_direction direction)
-{
-        dma_unmap_sg_attrs(dev, sg, nhwentries, direction, NULL);
-}
-
-#ifdef CONFIG_PPC_NEED_DMA_SYNC_OPS
-static inline void dma_sync_single_for_cpu(struct device *dev,
-                dma_addr_t dma_handle, size_t size,
-                enum dma_data_direction direction)
-{
-        struct dma_mapping_ops *dma_ops = get_dma_ops(dev);
-
-        BUG_ON(!dma_ops);
-
-        if (dma_ops->sync_single_range_for_cpu)
-                dma_ops->sync_single_range_for_cpu(dev, dma_handle, 0,
-                                size, direction);
-}
-
-static inline void dma_sync_single_for_device(struct device *dev,
-                dma_addr_t dma_handle, size_t size,
-                enum dma_data_direction direction)
-{
-        struct dma_mapping_ops *dma_ops = get_dma_ops(dev);
-
-        BUG_ON(!dma_ops);
-
-        if (dma_ops->sync_single_range_for_device)
-                dma_ops->sync_single_range_for_device(dev, dma_handle,
-                                0, size, direction);
-}
-
-static inline void dma_sync_sg_for_cpu(struct device *dev,
-                struct scatterlist *sgl, int nents,
-                enum dma_data_direction direction)
-{
-        struct dma_mapping_ops *dma_ops = get_dma_ops(dev);
+        struct dma_map_ops *dma_ops = get_dma_ops(dev);
+        void *cpu_addr;
 
         BUG_ON(!dma_ops);
 
-        if (dma_ops->sync_sg_for_cpu)
-                dma_ops->sync_sg_for_cpu(dev, sgl, nents, direction);
-}
-
-static inline void dma_sync_sg_for_device(struct device *dev,
-                struct scatterlist *sgl, int nents,
-                enum dma_data_direction direction)
-{
-        struct dma_mapping_ops *dma_ops = get_dma_ops(dev);
-
-        BUG_ON(!dma_ops);
-
-        if (dma_ops->sync_sg_for_device)
-                dma_ops->sync_sg_for_device(dev, sgl, nents, direction);
-}
-
-static inline void dma_sync_single_range_for_cpu(struct device *dev,
-                dma_addr_t dma_handle, unsigned long offset, size_t size,
-                enum dma_data_direction direction)
-{
-        struct dma_mapping_ops *dma_ops = get_dma_ops(dev);
+        cpu_addr = dma_ops->alloc_coherent(dev, size, dma_handle, flag);
 
-        BUG_ON(!dma_ops);
+        debug_dma_alloc_coherent(dev, size, *dma_handle, cpu_addr);
 
-        if (dma_ops->sync_single_range_for_cpu)
-                dma_ops->sync_single_range_for_cpu(dev, dma_handle,
-                                offset, size, direction);
+        return cpu_addr;
 }
 
-static inline void dma_sync_single_range_for_device(struct device *dev,
-                dma_addr_t dma_handle, unsigned long offset, size_t size,
-                enum dma_data_direction direction)
+static inline void dma_free_coherent(struct device *dev, size_t size,
+                                     void *cpu_addr, dma_addr_t dma_handle)
 {
-        struct dma_mapping_ops *dma_ops = get_dma_ops(dev);
+        struct dma_map_ops *dma_ops = get_dma_ops(dev);
 
         BUG_ON(!dma_ops);
 
-        if (dma_ops->sync_single_range_for_device)
-                dma_ops->sync_single_range_for_device(dev, dma_handle, offset,
-                                size, direction);
-}
-#else /* CONFIG_PPC_NEED_DMA_SYNC_OPS */
-static inline void dma_sync_single_for_cpu(struct device *dev,
-                dma_addr_t dma_handle, size_t size,
-                enum dma_data_direction direction)
-{
-}
-
-static inline void dma_sync_single_for_device(struct device *dev,
-                dma_addr_t dma_handle, size_t size,
-                enum dma_data_direction direction)
-{
-}
-
-static inline void dma_sync_sg_for_cpu(struct device *dev,
-                struct scatterlist *sgl, int nents,
-                enum dma_data_direction direction)
-{
-}
+        debug_dma_free_coherent(dev, size, cpu_addr, dma_handle);
 
-static inline void dma_sync_sg_for_device(struct device *dev,
-                struct scatterlist *sgl, int nents,
-                enum dma_data_direction direction)
-{
+        dma_ops->free_coherent(dev, size, cpu_addr, dma_handle);
 }
 
-static inline void dma_sync_single_range_for_cpu(struct device *dev,
-                dma_addr_t dma_handle, unsigned long offset, size_t size,
-                enum dma_data_direction direction)
+static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
 {
-}
+        struct dma_map_ops *dma_ops = get_dma_ops(dev);
 
-static inline void dma_sync_single_range_for_device(struct device *dev,
-                dma_addr_t dma_handle, unsigned long offset, size_t size,
-                enum dma_data_direction direction)
-{
-}
-#endif
+        if (dma_ops->mapping_error)
+                return dma_ops->mapping_error(dev, dma_addr);
 
-static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
-{
 #ifdef CONFIG_PPC64
         return (dma_addr == DMA_ERROR_CODE);
 #else
@@ -426,10 +166,12 @@ static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
 
 static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
 {
-        struct dma_mapping_ops *ops = get_dma_ops(dev);
+#ifdef CONFIG_SWIOTLB
+        struct dev_archdata *sd = &dev->archdata;
 
-        if (ops->addr_needs_map && ops->addr_needs_map(dev, addr, size))
+        if (sd->max_direct_dma_addr && addr + size > sd->max_direct_dma_addr)
                 return 0;
+#endif
 
         if (!dev->dma_mask)
                 return 0;