author     Linus Torvalds <torvalds@linux-foundation.org>  2017-02-25 16:45:43 -0500
committer  Linus Torvalds <torvalds@linux-foundation.org>  2017-02-25 16:45:43 -0500
commit     ac1820fb286b552b6885d40ab34f1e59b815f1f1
tree       b9b4e6dc5df8574e6875e4e2f5f27105addc7812 /include/linux/dma-mapping.h
parent     edccb59429657b09806146339e2b27594c1d1da0
parent     0bbb3b7496eabb6779962a998a8a91f4a8e589ff
Merge tag 'for-next-dma_ops' of git://git.kernel.org/pub/scm/linux/kernel/git/dledford/rdma
Pull rdma DMA mapping updates from Doug Ledford:
 "Drop IB DMA mapping code and use core DMA code instead.

  Bart Van Assche noted that the IB DMA mapping code was similar enough
  to the core DMA mapping code that, with a few changes, it was possible
  to remove the IB DMA mapping code entirely and switch the RDMA stack
  to use the core DMA mapping code.

  This resulted in a nice set of cleanups, but touched the entire tree
  and has been kept separate for that reason."

* tag 'for-next-dma_ops' of git://git.kernel.org/pub/scm/linux/kernel/git/dledford/rdma: (37 commits)
  IB/rxe, IB/rdmavt: Use dma_virt_ops instead of duplicating it
  IB/core: Remove ib_device.dma_device
  nvme-rdma: Switch from dma_device to dev.parent
  RDS: net: Switch from dma_device to dev.parent
  IB/srpt: Modify a debug statement
  IB/srp: Switch from dma_device to dev.parent
  IB/iser: Switch from dma_device to dev.parent
  IB/IPoIB: Switch from dma_device to dev.parent
  IB/rxe: Switch from dma_device to dev.parent
  IB/vmw_pvrdma: Switch from dma_device to dev.parent
  IB/usnic: Switch from dma_device to dev.parent
  IB/qib: Switch from dma_device to dev.parent
  IB/qedr: Switch from dma_device to dev.parent
  IB/ocrdma: Switch from dma_device to dev.parent
  IB/nes: Remove a superfluous assignment statement
  IB/mthca: Switch from dma_device to dev.parent
  IB/mlx5: Switch from dma_device to dev.parent
  IB/mlx4: Switch from dma_device to dev.parent
  IB/i40iw: Remove a superfluous assignment statement
  IB/hns: Switch from dma_device to dev.parent
  ...
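The driver-facing half of the series is mechanical. Below is a minimal
sketch of the substitution, assuming a hypothetical helper name
(example_map_buf); the real commits each make the equivalent one-line
change in their own driver:

#include <linux/dma-mapping.h>
#include <rdma/ib_verbs.h>

/* Hedged sketch of the "dma_device to dev.parent" switch listed above.
 * Before this series a ULP mapped against ibdev->dma_device; with that
 * field removed from struct ib_device, it maps against the parent of
 * the device's embedded struct device instead.
 */
static dma_addr_t example_map_buf(struct ib_device *ibdev, void *buf,
				  size_t len)
{
	/* was: dma_map_single(ibdev->dma_device, buf, len, DMA_TO_DEVICE); */
	return dma_map_single(ibdev->dev.parent, buf, len, DMA_TO_DEVICE);
}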
Diffstat (limited to 'include/linux/dma-mapping.h')
-rw-r--r--  include/linux/dma-mapping.h  55
1 file changed, 34 insertions(+), 21 deletions(-)
diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h
index c24721a33b4c..0977317c6835 100644
--- a/include/linux/dma-mapping.h
+++ b/include/linux/dma-mapping.h
@@ -134,7 +134,8 @@ struct dma_map_ops {
 	int is_phys;
 };
 
-extern struct dma_map_ops dma_noop_ops;
+extern const struct dma_map_ops dma_noop_ops;
+extern const struct dma_map_ops dma_virt_ops;
 
 #define DMA_BIT_MASK(n)	(((n) == 64) ? ~0ULL : ((1ULL<<(n))-1))
 
@@ -171,14 +172,26 @@ int dma_mmap_from_coherent(struct device *dev, struct vm_area_struct *vma,
 
 #ifdef CONFIG_HAS_DMA
 #include <asm/dma-mapping.h>
+static inline const struct dma_map_ops *get_dma_ops(struct device *dev)
+{
+	if (dev && dev->dma_ops)
+		return dev->dma_ops;
+	return get_arch_dma_ops(dev ? dev->bus : NULL);
+}
+
+static inline void set_dma_ops(struct device *dev,
+			       const struct dma_map_ops *dma_ops)
+{
+	dev->dma_ops = dma_ops;
+}
 #else
 /*
  * Define the dma api to allow compilation but not linking of
  * dma dependent code. Code that depends on the dma-mapping
  * API needs to set 'depends on HAS_DMA' in its Kconfig
  */
-extern struct dma_map_ops bad_dma_ops;
-static inline struct dma_map_ops *get_dma_ops(struct device *dev)
+extern const struct dma_map_ops bad_dma_ops;
+static inline const struct dma_map_ops *get_dma_ops(struct device *dev)
 {
 	return &bad_dma_ops;
 }
@@ -189,7 +202,7 @@ static inline dma_addr_t dma_map_single_attrs(struct device *dev, void *ptr,
 					      enum dma_data_direction dir,
 					      unsigned long attrs)
 {
-	struct dma_map_ops *ops = get_dma_ops(dev);
+	const struct dma_map_ops *ops = get_dma_ops(dev);
 	dma_addr_t addr;
 
 	kmemcheck_mark_initialized(ptr, size);
@@ -208,7 +221,7 @@ static inline void dma_unmap_single_attrs(struct device *dev, dma_addr_t addr,
 					  enum dma_data_direction dir,
 					  unsigned long attrs)
 {
-	struct dma_map_ops *ops = get_dma_ops(dev);
+	const struct dma_map_ops *ops = get_dma_ops(dev);
 
 	BUG_ON(!valid_dma_direction(dir));
 	if (ops->unmap_page)
@@ -224,7 +237,7 @@ static inline int dma_map_sg_attrs(struct device *dev, struct scatterlist *sg,
 				   int nents, enum dma_data_direction dir,
 				   unsigned long attrs)
 {
-	struct dma_map_ops *ops = get_dma_ops(dev);
+	const struct dma_map_ops *ops = get_dma_ops(dev);
 	int i, ents;
 	struct scatterlist *s;
 
@@ -242,7 +255,7 @@ static inline void dma_unmap_sg_attrs(struct device *dev, struct scatterlist *sg
 				      int nents, enum dma_data_direction dir,
 				      unsigned long attrs)
 {
-	struct dma_map_ops *ops = get_dma_ops(dev);
+	const struct dma_map_ops *ops = get_dma_ops(dev);
 
 	BUG_ON(!valid_dma_direction(dir));
 	debug_dma_unmap_sg(dev, sg, nents, dir);
@@ -256,7 +269,7 @@ static inline dma_addr_t dma_map_page_attrs(struct device *dev,
 					    enum dma_data_direction dir,
 					    unsigned long attrs)
 {
-	struct dma_map_ops *ops = get_dma_ops(dev);
+	const struct dma_map_ops *ops = get_dma_ops(dev);
 	dma_addr_t addr;
 
 	kmemcheck_mark_initialized(page_address(page) + offset, size);
@@ -272,7 +285,7 @@ static inline void dma_unmap_page_attrs(struct device *dev,
 					enum dma_data_direction dir,
 					unsigned long attrs)
 {
-	struct dma_map_ops *ops = get_dma_ops(dev);
+	const struct dma_map_ops *ops = get_dma_ops(dev);
 
 	BUG_ON(!valid_dma_direction(dir));
 	if (ops->unmap_page)
@@ -286,7 +299,7 @@ static inline dma_addr_t dma_map_resource(struct device *dev,
 					  enum dma_data_direction dir,
 					  unsigned long attrs)
 {
-	struct dma_map_ops *ops = get_dma_ops(dev);
+	const struct dma_map_ops *ops = get_dma_ops(dev);
 	dma_addr_t addr;
 
 	BUG_ON(!valid_dma_direction(dir));
@@ -307,7 +320,7 @@ static inline void dma_unmap_resource(struct device *dev, dma_addr_t addr,
 				      size_t size, enum dma_data_direction dir,
 				      unsigned long attrs)
 {
-	struct dma_map_ops *ops = get_dma_ops(dev);
+	const struct dma_map_ops *ops = get_dma_ops(dev);
 
 	BUG_ON(!valid_dma_direction(dir));
 	if (ops->unmap_resource)
@@ -319,7 +332,7 @@ static inline void dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr,
 					   size_t size,
 					   enum dma_data_direction dir)
 {
-	struct dma_map_ops *ops = get_dma_ops(dev);
+	const struct dma_map_ops *ops = get_dma_ops(dev);
 
 	BUG_ON(!valid_dma_direction(dir));
 	if (ops->sync_single_for_cpu)
@@ -331,7 +344,7 @@ static inline void dma_sync_single_for_device(struct device *dev,
 					      dma_addr_t addr, size_t size,
 					      enum dma_data_direction dir)
 {
-	struct dma_map_ops *ops = get_dma_ops(dev);
+	const struct dma_map_ops *ops = get_dma_ops(dev);
 
 	BUG_ON(!valid_dma_direction(dir));
 	if (ops->sync_single_for_device)
@@ -371,7 +384,7 @@ static inline void
 dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
 		    int nelems, enum dma_data_direction dir)
 {
-	struct dma_map_ops *ops = get_dma_ops(dev);
+	const struct dma_map_ops *ops = get_dma_ops(dev);
 
 	BUG_ON(!valid_dma_direction(dir));
 	if (ops->sync_sg_for_cpu)
@@ -383,7 +396,7 @@ static inline void
 dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
 		       int nelems, enum dma_data_direction dir)
 {
-	struct dma_map_ops *ops = get_dma_ops(dev);
+	const struct dma_map_ops *ops = get_dma_ops(dev);
 
 	BUG_ON(!valid_dma_direction(dir));
 	if (ops->sync_sg_for_device)
@@ -428,7 +441,7 @@ static inline int
 dma_mmap_attrs(struct device *dev, struct vm_area_struct *vma, void *cpu_addr,
 	       dma_addr_t dma_addr, size_t size, unsigned long attrs)
 {
-	struct dma_map_ops *ops = get_dma_ops(dev);
+	const struct dma_map_ops *ops = get_dma_ops(dev);
 	BUG_ON(!ops);
 	if (ops->mmap)
 		return ops->mmap(dev, vma, cpu_addr, dma_addr, size, attrs);
@@ -446,7 +459,7 @@ dma_get_sgtable_attrs(struct device *dev, struct sg_table *sgt, void *cpu_addr,
 		      dma_addr_t dma_addr, size_t size,
 		      unsigned long attrs)
 {
-	struct dma_map_ops *ops = get_dma_ops(dev);
+	const struct dma_map_ops *ops = get_dma_ops(dev);
 	BUG_ON(!ops);
 	if (ops->get_sgtable)
 		return ops->get_sgtable(dev, sgt, cpu_addr, dma_addr, size,
@@ -464,7 +477,7 @@ static inline void *dma_alloc_attrs(struct device *dev, size_t size,
 				       dma_addr_t *dma_handle, gfp_t flag,
 				       unsigned long attrs)
 {
-	struct dma_map_ops *ops = get_dma_ops(dev);
+	const struct dma_map_ops *ops = get_dma_ops(dev);
 	void *cpu_addr;
 
 	BUG_ON(!ops);
@@ -486,7 +499,7 @@ static inline void dma_free_attrs(struct device *dev, size_t size,
 				     void *cpu_addr, dma_addr_t dma_handle,
 				     unsigned long attrs)
 {
-	struct dma_map_ops *ops = get_dma_ops(dev);
+	const struct dma_map_ops *ops = get_dma_ops(dev);
 
 	BUG_ON(!ops);
 	WARN_ON(irqs_disabled());
@@ -544,7 +557,7 @@ static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
 #ifndef HAVE_ARCH_DMA_SUPPORTED
 static inline int dma_supported(struct device *dev, u64 mask)
 {
-	struct dma_map_ops *ops = get_dma_ops(dev);
+	const struct dma_map_ops *ops = get_dma_ops(dev);
 
 	if (!ops)
 		return 0;
@@ -557,7 +570,7 @@ static inline int dma_supported(struct device *dev, u64 mask)
 #ifndef HAVE_ARCH_DMA_SET_MASK
 static inline int dma_set_mask(struct device *dev, u64 mask)
 {
-	struct dma_map_ops *ops = get_dma_ops(dev);
+	const struct dma_map_ops *ops = get_dma_ops(dev);
 
 	if (ops->set_dma_mask)
 		return ops->set_dma_mask(dev, mask);
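For software transports, the same series replaces per-driver no-op DMA code
with the shared dma_virt_ops declared above. Below is a minimal sketch of
how a driver such as rxe or rdmavt can wire it up through the new
set_dma_ops() helper; example_attach_virt_dma is an illustrative name, not
a function from the series:

#include <linux/dma-mapping.h>

/* Install dma_virt_ops on a device so that every dma_map_*() call on it
 * resolves through dev->dma_ops (checked first by get_dma_ops() above)
 * rather than the architecture's default ops. dma_virt_ops simply hands
 * back kernel virtual addresses, which is all a software RDMA device
 * that accesses memory through the CPU needs.
 */
static void example_attach_virt_dma(struct device *dev)
{
	set_dma_ops(dev, &dma_virt_ops);
}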