author		Christoph Hellwig <hch@lst.de>	2018-12-06 16:37:00 -0500
committer	Christoph Hellwig <hch@lst.de>	2018-12-13 15:06:17 -0500
commit		190d4e5916a2d70a11009022b968fca948fb5dc7
tree		e0525720a7592d59a5875495d345f005af0d42b3	/drivers/pci/controller/vmd.c
parent		55897af63091ebc2c3f239c6a6666f748113ac50
vmd: use the proper dma_* APIs instead of direct methods calls
With the bypass support for the direct mapping we might not always have
methods to call, so use the proper APIs instead.  The only downside is
that we will create two dma-debug entries for each mapping if
CONFIG_DMA_DEBUG is enabled.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Acked-by: Jesper Dangaard Brouer <brouer@redhat.com>
Tested-by: Jesper Dangaard Brouer <brouer@redhat.com>
Tested-by: Tony Luck <tony.luck@intel.com>
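For context on the pattern applied throughout the diff: the dma_* wrapper
APIs check for a missing dma_map_ops table and fall back to the direct
mapping internally, which a raw ops->method() dereference cannot do.  A
minimal sketch of the shape of the change follows; it is not VMD code
itself, old_map_page/new_map_page are hypothetical stand-ins, and it
assumes the 4.20-era <linux/dma-mapping.h> interfaces:

#include <linux/dma-mapping.h>

/* Old pattern (hypothetical): dereferences the ops table directly, so
 * it would oops once get_dma_ops() can return NULL under the
 * dma-direct bypass. */
static dma_addr_t old_map_page(struct device *dev, struct page *page,
			       unsigned long offset, size_t size,
			       enum dma_data_direction dir,
			       unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev); /* may be NULL */

	return ops->map_page(dev, page, offset, size, dir, attrs);
}

/* New pattern (hypothetical): dma_map_page_attrs() handles the
 * NULL-ops case by calling into dma-direct itself. */
static dma_addr_t new_map_page(struct device *dev, struct page *page,
			       unsigned long offset, size_t size,
			       enum dma_data_direction dir,
			       unsigned long attrs)
{
	return dma_map_page_attrs(dev, page, offset, size, dir, attrs);
}

The doubled dma-debug accounting mentioned above presumably follows from
the fact that the vmd_* stubs below are themselves installed as a
dma_map_ops table: after this change a single mapping passes through the
dma_* wrappers twice, once on the client device and once on the VMD root
device returned by to_vmd_dev().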
Diffstat (limited to 'drivers/pci/controller/vmd.c')
-rw-r--r--	drivers/pci/controller/vmd.c | 42 +++++++++++++++++-------------------------
1 file changed, 17 insertions(+), 25 deletions(-)
diff --git a/drivers/pci/controller/vmd.c b/drivers/pci/controller/vmd.c
index 98ce79eac128..3890812cdf87 100644
--- a/drivers/pci/controller/vmd.c
+++ b/drivers/pci/controller/vmd.c
@@ -307,39 +307,32 @@ static struct device *to_vmd_dev(struct device *dev)
 	return &vmd->dev->dev;
 }
 
-static const struct dma_map_ops *vmd_dma_ops(struct device *dev)
-{
-	return get_dma_ops(to_vmd_dev(dev));
-}
-
 static void *vmd_alloc(struct device *dev, size_t size, dma_addr_t *addr,
 		       gfp_t flag, unsigned long attrs)
 {
-	return vmd_dma_ops(dev)->alloc(to_vmd_dev(dev), size, addr, flag,
-				       attrs);
+	return dma_alloc_attrs(to_vmd_dev(dev), size, addr, flag, attrs);
 }
 
 static void vmd_free(struct device *dev, size_t size, void *vaddr,
 		     dma_addr_t addr, unsigned long attrs)
 {
-	return vmd_dma_ops(dev)->free(to_vmd_dev(dev), size, vaddr, addr,
-				      attrs);
+	return dma_free_attrs(to_vmd_dev(dev), size, vaddr, addr, attrs);
 }
 
 static int vmd_mmap(struct device *dev, struct vm_area_struct *vma,
 		    void *cpu_addr, dma_addr_t addr, size_t size,
 		    unsigned long attrs)
 {
-	return vmd_dma_ops(dev)->mmap(to_vmd_dev(dev), vma, cpu_addr, addr,
-				      size, attrs);
+	return dma_mmap_attrs(to_vmd_dev(dev), vma, cpu_addr, addr, size,
+			attrs);
 }
 
 static int vmd_get_sgtable(struct device *dev, struct sg_table *sgt,
 			   void *cpu_addr, dma_addr_t addr, size_t size,
 			   unsigned long attrs)
 {
-	return vmd_dma_ops(dev)->get_sgtable(to_vmd_dev(dev), sgt, cpu_addr,
-					     addr, size, attrs);
+	return dma_get_sgtable_attrs(to_vmd_dev(dev), sgt, cpu_addr, addr, size,
+			attrs);
 }
 
 static dma_addr_t vmd_map_page(struct device *dev, struct page *page,
@@ -347,61 +340,60 @@ static dma_addr_t vmd_map_page(struct device *dev, struct page *page,
 			       enum dma_data_direction dir,
 			       unsigned long attrs)
 {
-	return vmd_dma_ops(dev)->map_page(to_vmd_dev(dev), page, offset, size,
-					  dir, attrs);
+	return dma_map_page_attrs(to_vmd_dev(dev), page, offset, size, dir,
+			attrs);
 }
 
 static void vmd_unmap_page(struct device *dev, dma_addr_t addr, size_t size,
 			   enum dma_data_direction dir, unsigned long attrs)
 {
-	vmd_dma_ops(dev)->unmap_page(to_vmd_dev(dev), addr, size, dir, attrs);
+	dma_unmap_page_attrs(to_vmd_dev(dev), addr, size, dir, attrs);
 }
 
 static int vmd_map_sg(struct device *dev, struct scatterlist *sg, int nents,
 		      enum dma_data_direction dir, unsigned long attrs)
 {
-	return vmd_dma_ops(dev)->map_sg(to_vmd_dev(dev), sg, nents, dir, attrs);
+	return dma_map_sg_attrs(to_vmd_dev(dev), sg, nents, dir, attrs);
 }
 
 static void vmd_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
 			 enum dma_data_direction dir, unsigned long attrs)
 {
-	vmd_dma_ops(dev)->unmap_sg(to_vmd_dev(dev), sg, nents, dir, attrs);
+	dma_unmap_sg_attrs(to_vmd_dev(dev), sg, nents, dir, attrs);
 }
 
 static void vmd_sync_single_for_cpu(struct device *dev, dma_addr_t addr,
 				    size_t size, enum dma_data_direction dir)
 {
-	vmd_dma_ops(dev)->sync_single_for_cpu(to_vmd_dev(dev), addr, size, dir);
+	dma_sync_single_for_cpu(to_vmd_dev(dev), addr, size, dir);
 }
 
 static void vmd_sync_single_for_device(struct device *dev, dma_addr_t addr,
 				       size_t size, enum dma_data_direction dir)
 {
-	vmd_dma_ops(dev)->sync_single_for_device(to_vmd_dev(dev), addr, size,
-						 dir);
+	dma_sync_single_for_device(to_vmd_dev(dev), addr, size, dir);
 }
 
 static void vmd_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
 				int nents, enum dma_data_direction dir)
 {
-	vmd_dma_ops(dev)->sync_sg_for_cpu(to_vmd_dev(dev), sg, nents, dir);
+	dma_sync_sg_for_cpu(to_vmd_dev(dev), sg, nents, dir);
 }
 
 static void vmd_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
 				   int nents, enum dma_data_direction dir)
 {
-	vmd_dma_ops(dev)->sync_sg_for_device(to_vmd_dev(dev), sg, nents, dir);
+	dma_sync_sg_for_device(to_vmd_dev(dev), sg, nents, dir);
 }
 
 static int vmd_dma_supported(struct device *dev, u64 mask)
 {
-	return vmd_dma_ops(dev)->dma_supported(to_vmd_dev(dev), mask);
+	return dma_supported(to_vmd_dev(dev), mask);
 }
 
 static u64 vmd_get_required_mask(struct device *dev)
 {
-	return vmd_dma_ops(dev)->get_required_mask(to_vmd_dev(dev));
+	return dma_get_required_mask(to_vmd_dev(dev));
 }
 
 static void vmd_teardown_dma_ops(struct vmd_dev *vmd)