author    Linus Torvalds <torvalds@linux-foundation.org>  2019-09-19 16:27:23 -0400
committer Linus Torvalds <torvalds@linux-foundation.org>  2019-09-19 16:27:23 -0400
commit    671df189537883f36cf9c7d4f9495bfac0f86627
tree      22e5f598ed1f5d9b2218d85d4426140f804d61e6  /drivers/xen
parent    c9fe5630dae1df2328d82042602e2c4d1add8d57
parent    c7d9eccb3c1e802c5cbb2a764eb0eb9807d9f12e
Merge tag 'dma-mapping-5.4' of git://git.infradead.org/users/hch/dma-mapping
Pull dma-mapping updates from Christoph Hellwig:
- add dma-mapping and block layer helpers to take care of IOMMU merging
for mmc plus subsequent fixups (Yoshihiro Shimoda)
- rework handling of the pgprot bits for remapping (me)
- take care of the dma direct infrastructure for swiotlb-xen (me; see the
  sketch after this list)
- improve the dma noncoherent remapping infrastructure (me)
- better defaults for ->mmap, ->get_sgtable and ->get_required_mask
(me)
- clean up mmapping of coherent DMA allocations (me)
- various misc cleanups (Andy Shevchenko, me)
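For the swiotlb-xen and noncoherent-remapping items above, the net effect in the diff below is that the old xen_dma_map_page()/xen_dma_unmap_page() wrappers are replaced by direct, conditional calls into the Xen cache-maintenance helpers. A minimal sketch of that pattern follows; the example_xen_sync() wrapper is hypothetical and the header attribution is an assumption, while dev_is_dma_coherent(), DMA_ATTR_SKIP_CPU_SYNC and the xen_dma_sync_for_{cpu,device}() helpers are the names used in the diff:

/*
 * Sketch only: cache maintenance is skipped for coherent devices and when
 * the caller passed DMA_ATTR_SKIP_CPU_SYNC; otherwise the buffer is made
 * visible to the device (map path) or to the CPU (unmap path).
 */
#include <linux/dma-mapping.h>
#include <linux/dma-noncoherent.h>      /* dev_is_dma_coherent() */
#include <xen/swiotlb-xen.h>            /* assumed to declare xen_dma_sync_for_*() */

static void example_xen_sync(struct device *dev, dma_addr_t dev_addr,
                             phys_addr_t paddr, size_t size,
                             enum dma_data_direction dir,
                             unsigned long attrs, bool for_device)
{
        if (dev_is_dma_coherent(dev) || (attrs & DMA_ATTR_SKIP_CPU_SYNC))
                return;

        if (for_device)
                xen_dma_sync_for_device(dev, dev_addr, paddr, size, dir);
        else
                xen_dma_sync_for_cpu(dev, dev_addr, paddr, size, dir);
}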
* tag 'dma-mapping-5.4' of git://git.infradead.org/users/hch/dma-mapping: (41 commits)
mmc: renesas_sdhi_internal_dmac: Add MMC_CAP2_MERGE_CAPABLE
mmc: queue: Fix bigger segments usage
arm64: use asm-generic/dma-mapping.h
swiotlb-xen: merge xen_unmap_single into xen_swiotlb_unmap_page
swiotlb-xen: simplify cache maintainance
swiotlb-xen: use the same foreign page check everywhere
swiotlb-xen: remove xen_swiotlb_dma_mmap and xen_swiotlb_dma_get_sgtable
xen: remove the exports for xen_{create,destroy}_contiguous_region
xen/arm: remove xen_dma_ops
xen/arm: simplify dma_cache_maint
xen/arm: use dev_is_dma_coherent
xen/arm: consolidate page-coherent.h
xen/arm: use dma-noncoherent.h calls for xen-swiotlb cache maintainance
arm: remove wrappers for the generic dma remap helpers
dma-mapping: introduce a dma_common_find_pages helper
dma-mapping: always use VM_DMA_COHERENT for generic DMA remap
vmalloc: lift the arm flag for coherent mappings to common code
dma-mapping: provide a better default ->get_required_mask
dma-mapping: remove the dma_declare_coherent_memory export
remoteproc: don't allow modular build
...
Diffstat (limited to 'drivers/xen')
 -rw-r--r--  drivers/xen/swiotlb-xen.c | 84
 1 file changed, 16 insertions(+), 68 deletions(-)
diff --git a/drivers/xen/swiotlb-xen.c b/drivers/xen/swiotlb-xen.c
index adcabd9473eb..58c9365fa217 100644
--- a/drivers/xen/swiotlb-xen.c
+++ b/drivers/xen/swiotlb-xen.c
@@ -28,6 +28,7 @@
 
 #include <linux/memblock.h>
 #include <linux/dma-direct.h>
+#include <linux/dma-noncoherent.h>
 #include <linux/export.h>
 #include <xen/swiotlb-xen.h>
 #include <xen/page.h>
@@ -391,6 +392,7 @@ static dma_addr_t xen_swiotlb_map_page(struct device *dev, struct page *page,
         if (map == (phys_addr_t)DMA_MAPPING_ERROR)
                 return DMA_MAPPING_ERROR;
 
+        phys = map;
         dev_addr = xen_phys_to_bus(map);
 
         /*
@@ -402,14 +404,9 @@
                 return DMA_MAPPING_ERROR;
         }
 
-        page = pfn_to_page(map >> PAGE_SHIFT);
-        offset = map & ~PAGE_MASK;
 done:
-        /*
-         * we are not interested in the dma_addr returned by xen_dma_map_page,
-         * only in the potential cache flushes executed by the function.
-         */
-        xen_dma_map_page(dev, page, dev_addr, offset, size, dir, attrs);
+        if (!dev_is_dma_coherent(dev) && !(attrs & DMA_ATTR_SKIP_CPU_SYNC))
+                xen_dma_sync_for_device(dev, dev_addr, phys, size, dir);
         return dev_addr;
 }
 
@@ -421,35 +418,29 @@ done:
  * After this call, reads by the cpu to the buffer are guaranteed to see
  * whatever the device wrote there.
  */
-static void xen_unmap_single(struct device *hwdev, dma_addr_t dev_addr,
-                             size_t size, enum dma_data_direction dir,
-                             unsigned long attrs)
+static void xen_swiotlb_unmap_page(struct device *hwdev, dma_addr_t dev_addr,
+                size_t size, enum dma_data_direction dir, unsigned long attrs)
 {
         phys_addr_t paddr = xen_bus_to_phys(dev_addr);
 
         BUG_ON(dir == DMA_NONE);
 
-        xen_dma_unmap_page(hwdev, dev_addr, size, dir, attrs);
+        if (!dev_is_dma_coherent(hwdev) && !(attrs & DMA_ATTR_SKIP_CPU_SYNC))
+                xen_dma_sync_for_cpu(hwdev, dev_addr, paddr, size, dir);
 
         /* NOTE: We use dev_addr here, not paddr! */
         if (is_xen_swiotlb_buffer(dev_addr))
                 swiotlb_tbl_unmap_single(hwdev, paddr, size, size, dir, attrs);
 }
 
-static void xen_swiotlb_unmap_page(struct device *hwdev, dma_addr_t dev_addr,
-                                   size_t size, enum dma_data_direction dir,
-                                   unsigned long attrs)
-{
-        xen_unmap_single(hwdev, dev_addr, size, dir, attrs);
-}
-
 static void
 xen_swiotlb_sync_single_for_cpu(struct device *dev, dma_addr_t dma_addr,
                 size_t size, enum dma_data_direction dir)
 {
         phys_addr_t paddr = xen_bus_to_phys(dma_addr);
 
-        xen_dma_sync_single_for_cpu(dev, dma_addr, size, dir);
+        if (!dev_is_dma_coherent(dev))
+                xen_dma_sync_for_cpu(dev, dma_addr, paddr, size, dir);
 
         if (is_xen_swiotlb_buffer(dma_addr))
                 swiotlb_tbl_sync_single(dev, paddr, size, dir, SYNC_FOR_CPU);
@@ -464,7 +455,8 @@ xen_swiotlb_sync_single_for_device(struct device *dev, dma_addr_t dma_addr,
         if (is_xen_swiotlb_buffer(dma_addr))
                 swiotlb_tbl_sync_single(dev, paddr, size, dir, SYNC_FOR_DEVICE);
 
-        xen_dma_sync_single_for_device(dev, dma_addr, size, dir);
+        if (!dev_is_dma_coherent(dev))
+                xen_dma_sync_for_device(dev, dma_addr, paddr, size, dir);
 }
 
 /*
@@ -481,7 +473,8 @@ xen_swiotlb_unmap_sg(struct device *hwdev, struct scatterlist *sgl, int nelems,
         BUG_ON(dir == DMA_NONE);
 
         for_each_sg(sgl, sg, nelems, i)
-                xen_unmap_single(hwdev, sg->dma_address, sg_dma_len(sg), dir, attrs);
+                xen_swiotlb_unmap_page(hwdev, sg->dma_address, sg_dma_len(sg),
+                                dir, attrs);
 
 }
 
@@ -547,51 +540,6 @@ xen_swiotlb_dma_supported(struct device *hwdev, u64 mask)
         return xen_virt_to_bus(xen_io_tlb_end - 1) <= mask;
 }
 
-/*
- * Create userspace mapping for the DMA-coherent memory.
- * This function should be called with the pages from the current domain only,
- * passing pages mapped from other domains would lead to memory corruption.
- */
-static int
-xen_swiotlb_dma_mmap(struct device *dev, struct vm_area_struct *vma,
-                     void *cpu_addr, dma_addr_t dma_addr, size_t size,
-                     unsigned long attrs)
-{
-#ifdef CONFIG_ARM
-        if (xen_get_dma_ops(dev)->mmap)
-                return xen_get_dma_ops(dev)->mmap(dev, vma, cpu_addr,
-                                                    dma_addr, size, attrs);
-#endif
-        return dma_common_mmap(dev, vma, cpu_addr, dma_addr, size, attrs);
-}
-
-/*
- * This function should be called with the pages from the current domain only,
- * passing pages mapped from other domains would lead to memory corruption.
- */
-static int
-xen_swiotlb_get_sgtable(struct device *dev, struct sg_table *sgt,
-                        void *cpu_addr, dma_addr_t handle, size_t size,
-                        unsigned long attrs)
-{
-#ifdef CONFIG_ARM
-        if (xen_get_dma_ops(dev)->get_sgtable) {
-#if 0
-                /*
-                 * This check verifies that the page belongs to the current domain and
-                 * is not one mapped from another domain.
-                 * This check is for debug only, and should not go to production build
-                 */
-                unsigned long bfn = PHYS_PFN(dma_to_phys(dev, handle));
-                BUG_ON (!page_is_ram(bfn));
-#endif
-                return xen_get_dma_ops(dev)->get_sgtable(dev, sgt, cpu_addr,
-                                                        handle, size, attrs);
-        }
-#endif
-        return dma_common_get_sgtable(dev, sgt, cpu_addr, handle, size, attrs);
-}
-
 const struct dma_map_ops xen_swiotlb_dma_ops = {
         .alloc = xen_swiotlb_alloc_coherent,
         .free = xen_swiotlb_free_coherent,
@@ -604,6 +552,6 @@ const struct dma_map_ops xen_swiotlb_dma_ops = {
         .map_page = xen_swiotlb_map_page,
         .unmap_page = xen_swiotlb_unmap_page,
         .dma_supported = xen_swiotlb_dma_supported,
-        .mmap = xen_swiotlb_dma_mmap,
-        .get_sgtable = xen_swiotlb_get_sgtable,
+        .mmap = dma_common_mmap,
+        .get_sgtable = dma_common_get_sgtable,
 };
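With the ARM-specific ->mmap and ->get_sgtable overrides removed in the last two hunks, xen-swiotlb points at the generic dma_common_mmap()/dma_common_get_sgtable() helpers, so a driver's dma_mmap_coherent() call simply ends up in the common code. A hedged sketch of such a caller; my_dev, my_cpu_addr, my_handle and my_mmap are illustrative placeholders, not part of this patch:

/*
 * Illustrative only: maps a buffer obtained from dma_alloc_coherent()
 * into user space.  After this merge, .mmap in xen_swiotlb_dma_ops is
 * dma_common_mmap, so the call below resolves to the generic helper.
 */
#include <linux/dma-mapping.h>
#include <linux/fs.h>
#include <linux/mm.h>

static struct device *my_dev;           /* hypothetical device */
static void *my_cpu_addr;               /* from dma_alloc_coherent() */
static dma_addr_t my_handle;            /* matching DMA handle */

static int my_mmap(struct file *file, struct vm_area_struct *vma)
{
        size_t size = vma->vm_end - vma->vm_start;

        /* dispatches through the device's dma_map_ops ->mmap method */
        return dma_mmap_coherent(my_dev, vma, my_cpu_addr, my_handle, size);
}

As the removed comments note, these paths must only ever be given pages from the current domain; the generic helpers keep that assumption.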