about summary refs log tree commit diff stats
path: root/include/linux/dma-mapping.h
diff options
context:
space:
mode:
Diffstat (limited to 'include/linux/dma-mapping.h')
-rw-r--r--  include/linux/dma-mapping.h  42
1 file changed, 21 insertions(+), 21 deletions(-)
diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h
index 10c5a17b1f51..f1da68b82c63 100644
--- a/include/linux/dma-mapping.h
+++ b/include/linux/dma-mapping.h
@@ -127,7 +127,7 @@ struct dma_map_ops {
127 int is_phys; 127 int is_phys;
128}; 128};
129 129
130extern struct dma_map_ops dma_noop_ops; 130extern const struct dma_map_ops dma_noop_ops;
131 131
132#define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL<<(n))-1)) 132#define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL<<(n))-1))
133 133
@@ -170,8 +170,8 @@ int dma_mmap_from_coherent(struct device *dev, struct vm_area_struct *vma,
170 * dma dependent code. Code that depends on the dma-mapping 170 * dma dependent code. Code that depends on the dma-mapping
171 * API needs to set 'depends on HAS_DMA' in its Kconfig 171 * API needs to set 'depends on HAS_DMA' in its Kconfig
172 */ 172 */
173extern struct dma_map_ops bad_dma_ops; 173extern const struct dma_map_ops bad_dma_ops;
174static inline struct dma_map_ops *get_dma_ops(struct device *dev) 174static inline const struct dma_map_ops *get_dma_ops(struct device *dev)
175{ 175{
176 return &bad_dma_ops; 176 return &bad_dma_ops;
177} 177}
@@ -182,7 +182,7 @@ static inline dma_addr_t dma_map_single_attrs(struct device *dev, void *ptr,
182 enum dma_data_direction dir, 182 enum dma_data_direction dir,
183 unsigned long attrs) 183 unsigned long attrs)
184{ 184{
185 struct dma_map_ops *ops = get_dma_ops(dev); 185 const struct dma_map_ops *ops = get_dma_ops(dev);
186 dma_addr_t addr; 186 dma_addr_t addr;
187 187
188 kmemcheck_mark_initialized(ptr, size); 188 kmemcheck_mark_initialized(ptr, size);
@@ -201,7 +201,7 @@ static inline void dma_unmap_single_attrs(struct device *dev, dma_addr_t addr,
201 enum dma_data_direction dir, 201 enum dma_data_direction dir,
202 unsigned long attrs) 202 unsigned long attrs)
203{ 203{
204 struct dma_map_ops *ops = get_dma_ops(dev); 204 const struct dma_map_ops *ops = get_dma_ops(dev);
205 205
206 BUG_ON(!valid_dma_direction(dir)); 206 BUG_ON(!valid_dma_direction(dir));
207 if (ops->unmap_page) 207 if (ops->unmap_page)
@@ -217,7 +217,7 @@ static inline int dma_map_sg_attrs(struct device *dev, struct scatterlist *sg,
217 int nents, enum dma_data_direction dir, 217 int nents, enum dma_data_direction dir,
218 unsigned long attrs) 218 unsigned long attrs)
219{ 219{
220 struct dma_map_ops *ops = get_dma_ops(dev); 220 const struct dma_map_ops *ops = get_dma_ops(dev);
221 int i, ents; 221 int i, ents;
222 struct scatterlist *s; 222 struct scatterlist *s;
223 223
@@ -235,7 +235,7 @@ static inline void dma_unmap_sg_attrs(struct device *dev, struct scatterlist *sg
235 int nents, enum dma_data_direction dir, 235 int nents, enum dma_data_direction dir,
236 unsigned long attrs) 236 unsigned long attrs)
237{ 237{
238 struct dma_map_ops *ops = get_dma_ops(dev); 238 const struct dma_map_ops *ops = get_dma_ops(dev);
239 239
240 BUG_ON(!valid_dma_direction(dir)); 240 BUG_ON(!valid_dma_direction(dir));
241 debug_dma_unmap_sg(dev, sg, nents, dir); 241 debug_dma_unmap_sg(dev, sg, nents, dir);
@@ -249,7 +249,7 @@ static inline dma_addr_t dma_map_page_attrs(struct device *dev,
249 enum dma_data_direction dir, 249 enum dma_data_direction dir,
250 unsigned long attrs) 250 unsigned long attrs)
251{ 251{
252 struct dma_map_ops *ops = get_dma_ops(dev); 252 const struct dma_map_ops *ops = get_dma_ops(dev);
253 dma_addr_t addr; 253 dma_addr_t addr;
254 254
255 kmemcheck_mark_initialized(page_address(page) + offset, size); 255 kmemcheck_mark_initialized(page_address(page) + offset, size);
@@ -265,7 +265,7 @@ static inline void dma_unmap_page_attrs(struct device *dev,
265 enum dma_data_direction dir, 265 enum dma_data_direction dir,
266 unsigned long attrs) 266 unsigned long attrs)
267{ 267{
268 struct dma_map_ops *ops = get_dma_ops(dev); 268 const struct dma_map_ops *ops = get_dma_ops(dev);
269 269
270 BUG_ON(!valid_dma_direction(dir)); 270 BUG_ON(!valid_dma_direction(dir));
271 if (ops->unmap_page) 271 if (ops->unmap_page)
@@ -279,7 +279,7 @@ static inline dma_addr_t dma_map_resource(struct device *dev,
279 enum dma_data_direction dir, 279 enum dma_data_direction dir,
280 unsigned long attrs) 280 unsigned long attrs)
281{ 281{
282 struct dma_map_ops *ops = get_dma_ops(dev); 282 const struct dma_map_ops *ops = get_dma_ops(dev);
283 dma_addr_t addr; 283 dma_addr_t addr;
284 284
285 BUG_ON(!valid_dma_direction(dir)); 285 BUG_ON(!valid_dma_direction(dir));
@@ -300,7 +300,7 @@ static inline void dma_unmap_resource(struct device *dev, dma_addr_t addr,
300 size_t size, enum dma_data_direction dir, 300 size_t size, enum dma_data_direction dir,
301 unsigned long attrs) 301 unsigned long attrs)
302{ 302{
303 struct dma_map_ops *ops = get_dma_ops(dev); 303 const struct dma_map_ops *ops = get_dma_ops(dev);
304 304
305 BUG_ON(!valid_dma_direction(dir)); 305 BUG_ON(!valid_dma_direction(dir));
306 if (ops->unmap_resource) 306 if (ops->unmap_resource)
@@ -312,7 +312,7 @@ static inline void dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr,
312 size_t size, 312 size_t size,
313 enum dma_data_direction dir) 313 enum dma_data_direction dir)
314{ 314{
315 struct dma_map_ops *ops = get_dma_ops(dev); 315 const struct dma_map_ops *ops = get_dma_ops(dev);
316 316
317 BUG_ON(!valid_dma_direction(dir)); 317 BUG_ON(!valid_dma_direction(dir));
318 if (ops->sync_single_for_cpu) 318 if (ops->sync_single_for_cpu)
@@ -324,7 +324,7 @@ static inline void dma_sync_single_for_device(struct device *dev,
324 dma_addr_t addr, size_t size, 324 dma_addr_t addr, size_t size,
325 enum dma_data_direction dir) 325 enum dma_data_direction dir)
326{ 326{
327 struct dma_map_ops *ops = get_dma_ops(dev); 327 const struct dma_map_ops *ops = get_dma_ops(dev);
328 328
329 BUG_ON(!valid_dma_direction(dir)); 329 BUG_ON(!valid_dma_direction(dir));
330 if (ops->sync_single_for_device) 330 if (ops->sync_single_for_device)
@@ -364,7 +364,7 @@ static inline void
364dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, 364dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
365 int nelems, enum dma_data_direction dir) 365 int nelems, enum dma_data_direction dir)
366{ 366{
367 struct dma_map_ops *ops = get_dma_ops(dev); 367 const struct dma_map_ops *ops = get_dma_ops(dev);
368 368
369 BUG_ON(!valid_dma_direction(dir)); 369 BUG_ON(!valid_dma_direction(dir));
370 if (ops->sync_sg_for_cpu) 370 if (ops->sync_sg_for_cpu)
@@ -376,7 +376,7 @@ static inline void
376dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, 376dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
377 int nelems, enum dma_data_direction dir) 377 int nelems, enum dma_data_direction dir)
378{ 378{
379 struct dma_map_ops *ops = get_dma_ops(dev); 379 const struct dma_map_ops *ops = get_dma_ops(dev);
380 380
381 BUG_ON(!valid_dma_direction(dir)); 381 BUG_ON(!valid_dma_direction(dir));
382 if (ops->sync_sg_for_device) 382 if (ops->sync_sg_for_device)
@@ -421,7 +421,7 @@ static inline int
421dma_mmap_attrs(struct device *dev, struct vm_area_struct *vma, void *cpu_addr, 421dma_mmap_attrs(struct device *dev, struct vm_area_struct *vma, void *cpu_addr,
422 dma_addr_t dma_addr, size_t size, unsigned long attrs) 422 dma_addr_t dma_addr, size_t size, unsigned long attrs)
423{ 423{
424 struct dma_map_ops *ops = get_dma_ops(dev); 424 const struct dma_map_ops *ops = get_dma_ops(dev);
425 BUG_ON(!ops); 425 BUG_ON(!ops);
426 if (ops->mmap) 426 if (ops->mmap)
427 return ops->mmap(dev, vma, cpu_addr, dma_addr, size, attrs); 427 return ops->mmap(dev, vma, cpu_addr, dma_addr, size, attrs);
@@ -439,7 +439,7 @@ dma_get_sgtable_attrs(struct device *dev, struct sg_table *sgt, void *cpu_addr,
439 dma_addr_t dma_addr, size_t size, 439 dma_addr_t dma_addr, size_t size,
440 unsigned long attrs) 440 unsigned long attrs)
441{ 441{
442 struct dma_map_ops *ops = get_dma_ops(dev); 442 const struct dma_map_ops *ops = get_dma_ops(dev);
443 BUG_ON(!ops); 443 BUG_ON(!ops);
444 if (ops->get_sgtable) 444 if (ops->get_sgtable)
445 return ops->get_sgtable(dev, sgt, cpu_addr, dma_addr, size, 445 return ops->get_sgtable(dev, sgt, cpu_addr, dma_addr, size,
@@ -457,7 +457,7 @@ static inline void *dma_alloc_attrs(struct device *dev, size_t size,
457 dma_addr_t *dma_handle, gfp_t flag, 457 dma_addr_t *dma_handle, gfp_t flag,
458 unsigned long attrs) 458 unsigned long attrs)
459{ 459{
460 struct dma_map_ops *ops = get_dma_ops(dev); 460 const struct dma_map_ops *ops = get_dma_ops(dev);
461 void *cpu_addr; 461 void *cpu_addr;
462 462
463 BUG_ON(!ops); 463 BUG_ON(!ops);
@@ -479,7 +479,7 @@ static inline void dma_free_attrs(struct device *dev, size_t size,
479 void *cpu_addr, dma_addr_t dma_handle, 479 void *cpu_addr, dma_addr_t dma_handle,
480 unsigned long attrs) 480 unsigned long attrs)
481{ 481{
482 struct dma_map_ops *ops = get_dma_ops(dev); 482 const struct dma_map_ops *ops = get_dma_ops(dev);
483 483
484 BUG_ON(!ops); 484 BUG_ON(!ops);
485 WARN_ON(irqs_disabled()); 485 WARN_ON(irqs_disabled());
@@ -537,7 +537,7 @@ static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
537#ifndef HAVE_ARCH_DMA_SUPPORTED 537#ifndef HAVE_ARCH_DMA_SUPPORTED
538static inline int dma_supported(struct device *dev, u64 mask) 538static inline int dma_supported(struct device *dev, u64 mask)
539{ 539{
540 struct dma_map_ops *ops = get_dma_ops(dev); 540 const struct dma_map_ops *ops = get_dma_ops(dev);
541 541
542 if (!ops) 542 if (!ops)
543 return 0; 543 return 0;
@@ -550,7 +550,7 @@ static inline int dma_supported(struct device *dev, u64 mask)
550#ifndef HAVE_ARCH_DMA_SET_MASK 550#ifndef HAVE_ARCH_DMA_SET_MASK
551static inline int dma_set_mask(struct device *dev, u64 mask) 551static inline int dma_set_mask(struct device *dev, u64 mask)
552{ 552{
553 struct dma_map_ops *ops = get_dma_ops(dev); 553 const struct dma_map_ops *ops = get_dma_ops(dev);
554 554
555 if (ops->set_dma_mask) 555 if (ops->set_dma_mask)
556 return ops->set_dma_mask(dev, mask); 556 return ops->set_dma_mask(dev, mask);