Diffstat (limited to 'include/asm-x86/dma-mapping.h')
-rw-r--r--  include/asm-x86/dma-mapping.h  87
1 file changed, 71 insertions(+), 16 deletions(-)
diff --git a/include/asm-x86/dma-mapping.h b/include/asm-x86/dma-mapping.h
index 5d200e78bd81..219c33d6361c 100644
--- a/include/asm-x86/dma-mapping.h
+++ b/include/asm-x86/dma-mapping.h
@@ -9,12 +9,12 @@
 #include <linux/scatterlist.h>
 #include <asm/io.h>
 #include <asm/swiotlb.h>
+#include <asm-generic/dma-coherent.h>
 
 extern dma_addr_t bad_dma_address;
 extern int iommu_merge;
-extern struct device fallback_dev;
+extern struct device x86_dma_fallback_dev;
 extern int panic_on_overflow;
-extern int force_iommu;
 
 struct dma_mapping_ops {
 	int (*mapping_error)(struct device *dev,
@@ -25,9 +25,6 @@ struct dma_mapping_ops {
 				void *vaddr, dma_addr_t dma_handle);
 	dma_addr_t (*map_single)(struct device *hwdev, phys_addr_t ptr,
 				 size_t size, int direction);
-	/* like map_single, but doesn't check the device mask */
-	dma_addr_t (*map_simple)(struct device *hwdev, phys_addr_t ptr,
-				 size_t size, int direction);
 	void (*unmap_single)(struct device *dev, dma_addr_t addr,
 			     size_t size, int direction);
 	void (*sync_single_for_cpu)(struct device *hwdev,
@@ -68,7 +65,7 @@ static inline struct dma_mapping_ops *get_dma_ops(struct device *dev)
 		return dma_ops;
 	else
 		return dev->archdata.dma_ops;
-#endif
+#endif /* ASM_X86__DMA_MAPPING_H */
 }
 
 /* Make sure we keep the same behaviour */
@@ -87,17 +84,14 @@ static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
 
 #define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
 #define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
-
-void *dma_alloc_coherent(struct device *dev, size_t size,
-			 dma_addr_t *dma_handle, gfp_t flag);
-
-void dma_free_coherent(struct device *dev, size_t size,
-		       void *vaddr, dma_addr_t dma_handle);
-
+#define dma_is_consistent(d, h)	(1)
 
 extern int dma_supported(struct device *hwdev, u64 mask);
 extern int dma_set_mask(struct device *dev, u64 mask);
 
+extern void *dma_generic_alloc_coherent(struct device *dev, size_t size,
+					dma_addr_t *dma_addr, gfp_t flag);
+
 static inline dma_addr_t
 dma_map_single(struct device *hwdev, void *ptr, size_t size,
 	       int direction)
@@ -247,7 +241,68 @@ static inline int dma_get_cache_alignment(void)
 	return boot_cpu_data.x86_clflush_size;
 }
 
-#define dma_is_consistent(d, h)	(1)
+static inline unsigned long dma_alloc_coherent_mask(struct device *dev,
+						    gfp_t gfp)
+{
+	unsigned long dma_mask = 0;
 
-#include <asm-generic/dma-coherent.h>
-#endif /* ASM_X86__DMA_MAPPING_H */
+	dma_mask = dev->coherent_dma_mask;
+	if (!dma_mask)
+		dma_mask = (gfp & GFP_DMA) ? DMA_24BIT_MASK : DMA_32BIT_MASK;
+
+	return dma_mask;
+}
+
+static inline gfp_t dma_alloc_coherent_gfp_flags(struct device *dev, gfp_t gfp)
+{
+#ifdef CONFIG_X86_64
+	unsigned long dma_mask = dma_alloc_coherent_mask(dev, gfp);
+
+	if (dma_mask <= DMA_32BIT_MASK && !(gfp & GFP_DMA))
+		gfp |= GFP_DMA32;
+#endif
+	return gfp;
+}
+
+static inline void *
+dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
+		gfp_t gfp)
+{
+	struct dma_mapping_ops *ops = get_dma_ops(dev);
+	void *memory;
+
+	gfp &= ~(__GFP_DMA | __GFP_HIGHMEM | __GFP_DMA32);
+
+	if (dma_alloc_from_coherent(dev, size, dma_handle, &memory))
+		return memory;
+
+	if (!dev) {
+		dev = &x86_dma_fallback_dev;
+		gfp |= GFP_DMA;
+	}
+
+	if (!is_device_dma_capable(dev))
+		return NULL;
+
+	if (!ops->alloc_coherent)
+		return NULL;
+
+	return ops->alloc_coherent(dev, size, dma_handle,
+				   dma_alloc_coherent_gfp_flags(dev, gfp));
+}
+
+static inline void dma_free_coherent(struct device *dev, size_t size,
+				     void *vaddr, dma_addr_t bus)
+{
+	struct dma_mapping_ops *ops = get_dma_ops(dev);
+
+	WARN_ON(irqs_disabled());	/* for portability */
+
+	if (dma_release_from_coherent(dev, get_order(size), vaddr))
+		return;
+
+	if (ops->free_coherent)
+		ops->free_coherent(dev, size, vaddr, bus);
+}
+
+#endif
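
With this change, dma_alloc_coherent() and dma_free_coherent() are no longer
out-of-line x86 functions but inline wrappers that dispatch through the
per-device dma_mapping_ops. As a rough illustration of how a driver consumes
the interface after this patch, here is a minimal sketch; example_setup_dma()
and BUF_SIZE are hypothetical names invented for the example, and only
dma_alloc_coherent()/dma_free_coherent() themselves come from the patch:

/*
 * Hypothetical driver fragment (not part of the patch): allocate and
 * free a DMA-coherent buffer through the inlined helpers added above.
 */
#include <linux/dma-mapping.h>
#include <linux/device.h>
#include <linux/errno.h>

#define BUF_SIZE 4096	/* made-up buffer size for illustration */

static int example_setup_dma(struct device *dev)
{
	dma_addr_t dma_handle;
	void *cpu_addr;

	/*
	 * Ends up in ops->alloc_coherent(); zone modifiers in the passed
	 * gfp are masked off first, and on x86_64 GFP_DMA32 is added when
	 * the device's coherent mask does not reach above 4GB.
	 */
	cpu_addr = dma_alloc_coherent(dev, BUF_SIZE, &dma_handle, GFP_KERNEL);
	if (!cpu_addr)
		return -ENOMEM;

	/* ... hand dma_handle to the device, touch cpu_addr from the CPU ... */

	/* Must not be called with interrupts disabled (see the WARN_ON). */
	dma_free_coherent(dev, BUF_SIZE, cpu_addr, dma_handle);
	return 0;
}

Note the design choice in dma_alloc_coherent(): caller-supplied __GFP_DMA,
__GFP_DMA32 and __GFP_HIGHMEM bits are cleared, so zone placement is derived
from dev->coherent_dma_mask alone via dma_alloc_coherent_gfp_flags(). The
DMA_24BIT_MASK case can only be reached through the NULL-dev fallback path,
which substitutes x86_dma_fallback_dev and forces GFP_DMA back on.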