Diffstat (limited to 'arch/powerpc/include/asm/dma-mapping.h')
-rw-r--r--	arch/powerpc/include/asm/dma-mapping.h	| 187
1 file changed, 61 insertions(+), 126 deletions(-)
diff --git a/arch/powerpc/include/asm/dma-mapping.h b/arch/powerpc/include/asm/dma-mapping.h
index c7ca45f97dd2..fddb229bd74f 100644
--- a/arch/powerpc/include/asm/dma-mapping.h
+++ b/arch/powerpc/include/asm/dma-mapping.h
@@ -44,8 +44,6 @@ extern void __dma_sync_page(struct page *page, unsigned long offset,
 
 #endif /* ! CONFIG_NOT_COHERENT_CACHE */
 
-#ifdef CONFIG_PPC64
-
 static inline unsigned long device_to_mask(struct device *dev)
 {
 	if (dev->dma_mask && *dev->dma_mask)
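Dropping the CONFIG_PPC64 guard above makes device_to_mask() visible to 32-bit code as well. As a minimal sketch of a caller, hedged since example_window_top is a hypothetical name and not part of this patch:

static unsigned long example_window_top(struct device *dev)
{
	/* device_to_mask() reports the device's DMA mask, falling back
	 * to a 32-bit mask when none is set. */
	return device_to_mask(dev);
}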
@@ -76,8 +74,24 @@ struct dma_mapping_ops {
 			struct dma_attrs *attrs);
 	int		(*dma_supported)(struct device *dev, u64 mask);
 	int		(*set_dma_mask)(struct device *dev, u64 dma_mask);
+	dma_addr_t	(*map_page)(struct device *dev, struct page *page,
+			unsigned long offset, size_t size,
+			enum dma_data_direction direction,
+			struct dma_attrs *attrs);
+	void		(*unmap_page)(struct device *dev,
+			dma_addr_t dma_address, size_t size,
+			enum dma_data_direction direction,
+			struct dma_attrs *attrs);
 };
 
+/*
+ * Available generic sets of operations
+ */
+#ifdef CONFIG_PPC64
+extern struct dma_mapping_ops dma_iommu_ops;
+#endif
+extern struct dma_mapping_ops dma_direct_ops;
+
 static inline struct dma_mapping_ops *get_dma_ops(struct device *dev)
 {
 	/* We don't handle the NULL dev case for ISA for now. We could
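The two new hooks make page-based mapping a first-class alternative to map_single/unmap_single. Below is a minimal sketch of a trivially direct-mapped bus that supplies only the page-based pair and leans on the fallbacks added further down for the _single paths; example_dma_ops and the example_* functions are hypothetical names, and page_to_bus() is borrowed from the 32-bit code this patch removes:

static dma_addr_t example_map_page(struct device *dev, struct page *page,
				   unsigned long offset, size_t size,
				   enum dma_data_direction direction,
				   struct dma_attrs *attrs)
{
	/* Direct mapping: the bus address is the page's bus address
	 * plus the intra-page offset. */
	return page_to_bus(page) + offset;
}

static void example_unmap_page(struct device *dev, dma_addr_t dma_address,
			       size_t size, enum dma_data_direction direction,
			       struct dma_attrs *attrs)
{
	/* Nothing to tear down for a direct mapping. */
}

static struct dma_mapping_ops example_dma_ops = {
	.map_page	= example_map_page,
	.unmap_page	= example_unmap_page,
};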
@@ -85,8 +99,19 @@ static inline struct dma_mapping_ops *get_dma_ops(struct device *dev)
 	 * only ISA DMA device we support is the floppy and we have a hack
 	 * in the floppy driver directly to get a device for us.
 	 */
-	if (unlikely(dev == NULL || dev->archdata.dma_ops == NULL))
+
+	if (unlikely(dev == NULL) || dev->archdata.dma_ops == NULL) {
+#ifdef CONFIG_PPC64
 		return NULL;
+#else
+		/* Use default on 32-bit if dma_ops is not set up */
+		/* TODO: Long term, we should fix drivers so that dev and
+		 * archdata dma_ops are set up for all buses.
+		 */
+		return &dma_direct_ops;
+#endif
+	}
+
 	return dev->archdata.dma_ops;
 }
 
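The practical effect: on 64-bit an unconfigured device still yields NULL (and the wrappers below BUG_ON it), while on 32-bit it now resolves to &dma_direct_ops. A small hypothetical illustration (example_has_ops is not part of the patch):

static bool example_has_ops(struct device *dev)
{
	/* With dev->archdata.dma_ops unset, this is false on ppc64
	 * and true on ppc32, where &dma_direct_ops is returned. */
	return get_dma_ops(dev) != NULL;
}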
@@ -123,6 +148,12 @@ static inline int dma_set_mask(struct device *dev, u64 dma_mask)
 	return 0;
 }
 
+/*
+ * TODO: map_/unmap_single will ideally go away, to be completely
+ * replaced by map/unmap_page.  Until then, we allow dma_ops to have
+ * one or the other, or both by checking to see if the specific
+ * function requested exists; and if not, falling back on the other set.
+ */
 static inline dma_addr_t dma_map_single_attrs(struct device *dev,
 					      void *cpu_addr,
 					      size_t size,
@@ -132,7 +163,14 @@ static inline dma_addr_t dma_map_single_attrs(struct device *dev,
 	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);
 
 	BUG_ON(!dma_ops);
-	return dma_ops->map_single(dev, cpu_addr, size, direction, attrs);
+
+	if (dma_ops->map_single)
+		return dma_ops->map_single(dev, cpu_addr, size, direction,
+					   attrs);
+
+	return dma_ops->map_page(dev, virt_to_page(cpu_addr),
+				 (unsigned long)cpu_addr % PAGE_SIZE, size,
+				 direction, attrs);
 }
 
 static inline void dma_unmap_single_attrs(struct device *dev,
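The fallback works because a lowmem kernel virtual address decomposes into its page plus an intra-page offset, with page_address() inverting virt_to_page() there. A sketch of that decomposition, assuming the hypothetical helper name example_split_addr:

static inline struct page *example_split_addr(void *cpu_addr,
					      unsigned long *offset)
{
	/* For lowmem, page_address(virt_to_page(p)) + (p % PAGE_SIZE) == p,
	 * which is what lets map_single be expressed via map_page. */
	*offset = (unsigned long)cpu_addr % PAGE_SIZE;
	return virt_to_page(cpu_addr);
}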
@@ -144,7 +182,13 @@ static inline void dma_unmap_single_attrs(struct device *dev,
 	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);
 
 	BUG_ON(!dma_ops);
-	dma_ops->unmap_single(dev, dma_addr, size, direction, attrs);
+
+	if (dma_ops->unmap_single) {
+		dma_ops->unmap_single(dev, dma_addr, size, direction, attrs);
+		return;
+	}
+
+	dma_ops->unmap_page(dev, dma_addr, size, direction, attrs);
 }
 
 static inline dma_addr_t dma_map_page_attrs(struct device *dev,
@@ -156,8 +200,13 @@ static inline dma_addr_t dma_map_page_attrs(struct device *dev,
 	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);
 
 	BUG_ON(!dma_ops);
+
+	if (dma_ops->map_page)
+		return dma_ops->map_page(dev, page, offset, size, direction,
+					 attrs);
+
 	return dma_ops->map_single(dev, page_address(page) + offset, size,
 				   direction, attrs);
 }
 
 static inline void dma_unmap_page_attrs(struct device *dev,
@@ -169,6 +218,12 @@ static inline void dma_unmap_page_attrs(struct device *dev,
 	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);
 
 	BUG_ON(!dma_ops);
+
+	if (dma_ops->unmap_page) {
+		dma_ops->unmap_page(dev, dma_address, size, direction, attrs);
+		return;
+	}
+
 	dma_ops->unmap_single(dev, dma_address, size, direction, attrs);
 }
 
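With all four wrappers dispatching on whichever hook exists, callers are unaffected by which set a bus implements. A hypothetical driver-side view (example_dev_tx is not from this patch):

static dma_addr_t example_dev_tx(struct device *dev, struct page *page)
{
	/* Identical regardless of whether the underlying bus supplies
	 * map_page, map_single, or both. */
	return dma_map_page_attrs(dev, page, 0, PAGE_SIZE,
				  DMA_TO_DEVICE, NULL);
}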
@@ -253,126 +308,6 @@ static inline void dma_unmap_sg(struct device *dev, struct scatterlist *sg,
 	dma_unmap_sg_attrs(dev, sg, nhwentries, direction, NULL);
 }
 
-/*
- * Available generic sets of operations
- */
-extern struct dma_mapping_ops dma_iommu_ops;
-extern struct dma_mapping_ops dma_direct_ops;
-
-#else /* CONFIG_PPC64 */
-
-#define dma_supported(dev, mask)	(1)
-
-static inline int dma_set_mask(struct device *dev, u64 dma_mask)
-{
-	if (!dev->dma_mask || !dma_supported(dev, mask))
-		return -EIO;
-
-	*dev->dma_mask = dma_mask;
-
-	return 0;
-}
-
-static inline void *dma_alloc_coherent(struct device *dev, size_t size,
-				       dma_addr_t * dma_handle,
-				       gfp_t gfp)
-{
-#ifdef CONFIG_NOT_COHERENT_CACHE
-	return __dma_alloc_coherent(size, dma_handle, gfp);
-#else
-	void *ret;
-	/* ignore region specifiers */
-	gfp &= ~(__GFP_DMA | __GFP_HIGHMEM);
-
-	if (dev == NULL || dev->coherent_dma_mask < 0xffffffff)
-		gfp |= GFP_DMA;
-
-	ret = (void *)__get_free_pages(gfp, get_order(size));
-
-	if (ret != NULL) {
-		memset(ret, 0, size);
-		*dma_handle = virt_to_bus(ret);
-	}
-
-	return ret;
-#endif
-}
-
-static inline void
-dma_free_coherent(struct device *dev, size_t size, void *vaddr,
-		  dma_addr_t dma_handle)
-{
-#ifdef CONFIG_NOT_COHERENT_CACHE
-	__dma_free_coherent(size, vaddr);
-#else
-	free_pages((unsigned long)vaddr, get_order(size));
-#endif
-}
-
-static inline dma_addr_t
-dma_map_single(struct device *dev, void *ptr, size_t size,
-	       enum dma_data_direction direction)
-{
-	BUG_ON(direction == DMA_NONE);
-
-	__dma_sync(ptr, size, direction);
-
-	return virt_to_bus(ptr);
-}
-
-static inline void dma_unmap_single(struct device *dev, dma_addr_t dma_addr,
-				    size_t size,
-				    enum dma_data_direction direction)
-{
-	/* We do nothing. */
-}
-
-static inline dma_addr_t
-dma_map_page(struct device *dev, struct page *page,
-	     unsigned long offset, size_t size,
-	     enum dma_data_direction direction)
-{
-	BUG_ON(direction == DMA_NONE);
-
-	__dma_sync_page(page, offset, size, direction);
-
-	return page_to_bus(page) + offset;
-}
-
-static inline void dma_unmap_page(struct device *dev, dma_addr_t dma_address,
-				  size_t size,
-				  enum dma_data_direction direction)
-{
-	/* We do nothing. */
-}
-
-static inline int
-dma_map_sg(struct device *dev, struct scatterlist *sgl, int nents,
-	   enum dma_data_direction direction)
-{
-	struct scatterlist *sg;
-	int i;
-
-	BUG_ON(direction == DMA_NONE);
-
-	for_each_sg(sgl, sg, nents, i) {
-		BUG_ON(!sg_page(sg));
-		__dma_sync_page(sg_page(sg), sg->offset, sg->length, direction);
-		sg->dma_address = page_to_bus(sg_page(sg)) + sg->offset;
-	}
-
-	return nents;
-}
-
-static inline void dma_unmap_sg(struct device *dev, struct scatterlist *sg,
-				int nhwentries,
-				enum dma_data_direction direction)
-{
-	/* We don't do anything here. */
-}
-
-#endif /* CONFIG_PPC64 */
-
 static inline void dma_sync_single_for_cpu(struct device *dev,
 		dma_addr_t dma_handle, size_t size,
 		enum dma_data_direction direction)