path: root/include/linux/dma-mapping.h
author    Christoph Hellwig <hch@lst.de>  2018-12-06 16:39:32 -0500
committer Christoph Hellwig <hch@lst.de>  2018-12-13 15:06:18 -0500
commit    356da6d0cde3323236977fce54c1f9612a742036 (patch)
tree      87ad8176833266fbaa038780d67aebcc490d0d64  /include/linux/dma-mapping.h
parent    190d4e5916a2d70a11009022b968fca948fb5dc7 (diff)
dma-mapping: bypass indirect calls for dma-direct
Avoid expensive indirect calls in the fast path DMA mapping operations by
directly calling the dma_direct_* ops if we are using the directly mapped
DMA operations.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Acked-by: Jesper Dangaard Brouer <brouer@redhat.com>
Tested-by: Jesper Dangaard Brouer <brouer@redhat.com>
Tested-by: Tony Luck <tony.luck@intel.com>
Diffstat (limited to 'include/linux/dma-mapping.h')
-rw-r--r--  include/linux/dma-mapping.h | 111
1 file changed, 97 insertions(+), 14 deletions(-)
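
The change applies one pattern throughout the fast-path helpers: a NULL dma_map_ops pointer now means the device uses the direct mapping, so the helpers call the dma_direct_* implementations outright instead of going through a function pointer. A minimal illustrative sketch of that dispatch (example_map_page is a made-up name for illustration; the real helpers appear in the diff below):

static inline dma_addr_t example_map_page(struct device *dev, struct page *page,
                unsigned long offset, size_t size, enum dma_data_direction dir,
                unsigned long attrs)
{
        const struct dma_map_ops *ops = get_dma_ops(dev);

        if (dma_is_direct(ops))
                /* No ops attached: the device is directly mapped, so call the
                 * dma-direct implementation without an indirect call. */
                return dma_direct_map_page(dev, page, offset, size, dir, attrs);

        /* An IOMMU or other dma_map_ops implementation is attached. */
        return ops->map_page(dev, page, offset, size, dir, attrs);
}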
diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h
index 269ee27fc3d9..f422aec0f53c 100644
--- a/include/linux/dma-mapping.h
+++ b/include/linux/dma-mapping.h
@@ -134,7 +134,6 @@ struct dma_map_ops {
 
 #define DMA_MAPPING_ERROR		(~(dma_addr_t)0)
 
-extern const struct dma_map_ops dma_direct_ops;
 extern const struct dma_map_ops dma_virt_ops;
 extern const struct dma_map_ops dma_dummy_ops;
 
@@ -222,6 +221,69 @@ static inline const struct dma_map_ops *get_dma_ops(struct device *dev)
 }
 #endif
 
+static inline bool dma_is_direct(const struct dma_map_ops *ops)
+{
+	return likely(!ops);
+}
+
+/*
+ * All the dma_direct_* declarations are here just for the indirect call bypass,
+ * and must not be used directly by drivers!
+ */
+dma_addr_t dma_direct_map_page(struct device *dev, struct page *page,
+		unsigned long offset, size_t size, enum dma_data_direction dir,
+		unsigned long attrs);
+int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl, int nents,
+		enum dma_data_direction dir, unsigned long attrs);
+
+#if defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_DEVICE) || \
+    defined(CONFIG_SWIOTLB)
+void dma_direct_sync_single_for_device(struct device *dev,
+		dma_addr_t addr, size_t size, enum dma_data_direction dir);
+void dma_direct_sync_sg_for_device(struct device *dev,
+		struct scatterlist *sgl, int nents, enum dma_data_direction dir);
+#else
+static inline void dma_direct_sync_single_for_device(struct device *dev,
+		dma_addr_t addr, size_t size, enum dma_data_direction dir)
+{
+}
+static inline void dma_direct_sync_sg_for_device(struct device *dev,
+		struct scatterlist *sgl, int nents, enum dma_data_direction dir)
+{
+}
+#endif
+
+#if defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU) || \
+    defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU_ALL) || \
+    defined(CONFIG_SWIOTLB)
+void dma_direct_unmap_page(struct device *dev, dma_addr_t addr,
+		size_t size, enum dma_data_direction dir, unsigned long attrs);
+void dma_direct_unmap_sg(struct device *dev, struct scatterlist *sgl,
+		int nents, enum dma_data_direction dir, unsigned long attrs);
+void dma_direct_sync_single_for_cpu(struct device *dev,
+		dma_addr_t addr, size_t size, enum dma_data_direction dir);
+void dma_direct_sync_sg_for_cpu(struct device *dev,
+		struct scatterlist *sgl, int nents, enum dma_data_direction dir);
+#else
+static inline void dma_direct_unmap_page(struct device *dev, dma_addr_t addr,
+		size_t size, enum dma_data_direction dir, unsigned long attrs)
+{
+}
+static inline void dma_direct_unmap_sg(struct device *dev,
+		struct scatterlist *sgl, int nents, enum dma_data_direction dir,
+		unsigned long attrs)
+{
+}
+static inline void dma_direct_sync_single_for_cpu(struct device *dev,
+		dma_addr_t addr, size_t size, enum dma_data_direction dir)
+{
+}
+static inline void dma_direct_sync_sg_for_cpu(struct device *dev,
+		struct scatterlist *sgl, int nents, enum dma_data_direction dir)
+{
+}
+#endif
+
 static inline dma_addr_t dma_map_single_attrs(struct device *dev, void *ptr,
 					      size_t size,
 					      enum dma_data_direction dir,
@@ -232,9 +294,12 @@ static inline dma_addr_t dma_map_single_attrs(struct device *dev, void *ptr,
 
 	BUG_ON(!valid_dma_direction(dir));
 	debug_dma_map_single(dev, ptr, size);
-	addr = ops->map_page(dev, virt_to_page(ptr),
-			     offset_in_page(ptr), size,
-			     dir, attrs);
+	if (dma_is_direct(ops))
+		addr = dma_direct_map_page(dev, virt_to_page(ptr),
+				offset_in_page(ptr), size, dir, attrs);
+	else
+		addr = ops->map_page(dev, virt_to_page(ptr),
+				offset_in_page(ptr), size, dir, attrs);
 	debug_dma_map_page(dev, virt_to_page(ptr),
 			   offset_in_page(ptr), size,
 			   dir, addr, true);
@@ -249,7 +314,9 @@ static inline void dma_unmap_single_attrs(struct device *dev, dma_addr_t addr,
 	const struct dma_map_ops *ops = get_dma_ops(dev);
 
 	BUG_ON(!valid_dma_direction(dir));
-	if (ops->unmap_page)
+	if (dma_is_direct(ops))
+		dma_direct_unmap_page(dev, addr, size, dir, attrs);
+	else if (ops->unmap_page)
 		ops->unmap_page(dev, addr, size, dir, attrs);
 	debug_dma_unmap_page(dev, addr, size, dir, true);
 }
@@ -272,7 +339,10 @@ static inline int dma_map_sg_attrs(struct device *dev, struct scatterlist *sg,
 	int ents;
 
 	BUG_ON(!valid_dma_direction(dir));
-	ents = ops->map_sg(dev, sg, nents, dir, attrs);
+	if (dma_is_direct(ops))
+		ents = dma_direct_map_sg(dev, sg, nents, dir, attrs);
+	else
+		ents = ops->map_sg(dev, sg, nents, dir, attrs);
 	BUG_ON(ents < 0);
 	debug_dma_map_sg(dev, sg, nents, ents, dir);
 
@@ -287,7 +357,9 @@ static inline void dma_unmap_sg_attrs(struct device *dev, struct scatterlist *sg
 
 	BUG_ON(!valid_dma_direction(dir));
 	debug_dma_unmap_sg(dev, sg, nents, dir);
-	if (ops->unmap_sg)
+	if (dma_is_direct(ops))
+		dma_direct_unmap_sg(dev, sg, nents, dir, attrs);
+	else if (ops->unmap_sg)
 		ops->unmap_sg(dev, sg, nents, dir, attrs);
 }
 
@@ -301,7 +373,10 @@ static inline dma_addr_t dma_map_page_attrs(struct device *dev,
 	dma_addr_t addr;
 
 	BUG_ON(!valid_dma_direction(dir));
-	addr = ops->map_page(dev, page, offset, size, dir, attrs);
+	if (dma_is_direct(ops))
+		addr = dma_direct_map_page(dev, page, offset, size, dir, attrs);
+	else
+		addr = ops->map_page(dev, page, offset, size, dir, attrs);
 	debug_dma_map_page(dev, page, offset, size, dir, addr, false);
 
 	return addr;
@@ -322,7 +397,7 @@ static inline dma_addr_t dma_map_resource(struct device *dev,
 	BUG_ON(pfn_valid(PHYS_PFN(phys_addr)));
 
 	addr = phys_addr;
-	if (ops->map_resource)
+	if (ops && ops->map_resource)
 		addr = ops->map_resource(dev, phys_addr, size, dir, attrs);
 
 	debug_dma_map_resource(dev, phys_addr, size, dir, addr);
@@ -337,7 +412,7 @@ static inline void dma_unmap_resource(struct device *dev, dma_addr_t addr,
 	const struct dma_map_ops *ops = get_dma_ops(dev);
 
 	BUG_ON(!valid_dma_direction(dir));
-	if (ops->unmap_resource)
+	if (ops && ops->unmap_resource)
 		ops->unmap_resource(dev, addr, size, dir, attrs);
 	debug_dma_unmap_resource(dev, addr, size, dir);
 }
@@ -349,7 +424,9 @@ static inline void dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr,
 	const struct dma_map_ops *ops = get_dma_ops(dev);
 
 	BUG_ON(!valid_dma_direction(dir));
-	if (ops->sync_single_for_cpu)
+	if (dma_is_direct(ops))
+		dma_direct_sync_single_for_cpu(dev, addr, size, dir);
+	else if (ops->sync_single_for_cpu)
 		ops->sync_single_for_cpu(dev, addr, size, dir);
 	debug_dma_sync_single_for_cpu(dev, addr, size, dir);
 }
@@ -368,7 +445,9 @@ static inline void dma_sync_single_for_device(struct device *dev,
 	const struct dma_map_ops *ops = get_dma_ops(dev);
 
 	BUG_ON(!valid_dma_direction(dir));
-	if (ops->sync_single_for_device)
+	if (dma_is_direct(ops))
+		dma_direct_sync_single_for_device(dev, addr, size, dir);
+	else if (ops->sync_single_for_device)
 		ops->sync_single_for_device(dev, addr, size, dir);
 	debug_dma_sync_single_for_device(dev, addr, size, dir);
 }
@@ -387,7 +466,9 @@ dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
 	const struct dma_map_ops *ops = get_dma_ops(dev);
 
 	BUG_ON(!valid_dma_direction(dir));
-	if (ops->sync_sg_for_cpu)
+	if (dma_is_direct(ops))
+		dma_direct_sync_sg_for_cpu(dev, sg, nelems, dir);
+	else if (ops->sync_sg_for_cpu)
 		ops->sync_sg_for_cpu(dev, sg, nelems, dir);
 	debug_dma_sync_sg_for_cpu(dev, sg, nelems, dir);
 }
@@ -399,7 +480,9 @@ dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
 	const struct dma_map_ops *ops = get_dma_ops(dev);
 
 	BUG_ON(!valid_dma_direction(dir));
-	if (ops->sync_sg_for_device)
+	if (dma_is_direct(ops))
+		dma_direct_sync_sg_for_device(dev, sg, nelems, dir);
+	else if (ops->sync_sg_for_device)
 		ops->sync_sg_for_device(dev, sg, nelems, dir);
 	debug_dma_sync_sg_for_device(dev, sg, nelems, dir);
 
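
Driver call sites do not change: a device without a dma_map_ops instance simply takes the new direct path inside dma_map_single() and friends. A hedged usage sketch (example_start_tx, buf and len are hypothetical placeholders, not part of this patch):

#include <linux/dma-mapping.h>

/* Map a buffer for a device write; on a dma-direct device this now reaches
 * dma_direct_map_page() without an indirect call. */
static int example_start_tx(struct device *dev, void *buf, size_t len)
{
        dma_addr_t dma = dma_map_single(dev, buf, len, DMA_TO_DEVICE);

        if (dma_mapping_error(dev, dma))
                return -ENOMEM;

        /* ... program the hardware with "dma", wait for completion ... */

        dma_unmap_single(dev, dma, len, DMA_TO_DEVICE);
        return 0;
}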