author		Robin Murphy <robin.murphy@arm.com>	2018-09-12 11:24:13 -0400
committer	Joerg Roedel <jroedel@suse.de>	2018-09-25 04:23:16 -0400
commit		43c5bf11a610ceeae68b26c24e0c76852d0d5cfc (patch)
tree		961a7e537c255524ecd83733f8c0364bc8b8a3f4
parent		6af588fed39178c8e118fcf9cb6664e58a1fbe88 (diff)
iommu/dma: Use fast DMA domain lookup
Most parts of iommu-dma already assume they are operating on a default
domain set up by iommu_dma_init_domain(), and can be converted straight
over to avoid the refcounting bottleneck. MSI page mappings may be in
an unmanaged domain with an explicit MSI-only cookie, so retain the
non-specific lookup, but that's OK since they're far from a contended
fast path either way.
Signed-off-by: Robin Murphy <robin.murphy@arm.com>
Tested-by: Will Deacon <will.deacon@arm.com>
Signed-off-by: Joerg Roedel <jroedel@suse.de>
-rw-r--r--	drivers/iommu/dma-iommu.c	23
1 file changed, 12 insertions(+), 11 deletions(-)
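
For context, the two lookups differ roughly as sketched below. This is a paraphrase of drivers/iommu/iommu.c around this series, not part of this diff; the fast helper comes from the parent commit 6af588fed391 ("iommu: Add fast hook for getting DMA domains"), and the exact code may differ in detail:

	/* Generic lookup: takes and drops a reference on the device's
	 * iommu_group, so every call costs a pair of atomic refcount
	 * operations - the bottleneck when this sits on a hot DMA path. */
	struct iommu_domain *iommu_get_domain_for_dev(struct device *dev)
	{
		struct iommu_domain *domain;
		struct iommu_group *group;

		group = iommu_group_get(dev);
		if (!group)
			return NULL;

		domain = group->domain;
		iommu_group_put(group);

		return domain;
	}

	/* Fast lookup: callers that know they operate on the default DMA
	 * domain set up by iommu_dma_init_domain() can dereference it
	 * directly, with no refcounting at all. */
	struct iommu_domain *iommu_get_dma_domain(struct device *dev)
	{
		return dev->iommu_group->default_domain;
	}

With that in place, the conversion itself is mechanical: every map/unmap entry point switches to the fast lookup, and __iommu_dma_map() now takes the domain from its caller so the MSI path can keep passing its own.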
diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c
index 511ff9a1d6d9..320f9ea82f3f 100644
--- a/drivers/iommu/dma-iommu.c
+++ b/drivers/iommu/dma-iommu.c
@@ -491,7 +491,7 @@ static struct page **__iommu_dma_alloc_pages(unsigned int count,
 void iommu_dma_free(struct device *dev, struct page **pages, size_t size,
 		dma_addr_t *handle)
 {
-	__iommu_dma_unmap(iommu_get_domain_for_dev(dev), *handle, size);
+	__iommu_dma_unmap(iommu_get_dma_domain(dev), *handle, size);
 	__iommu_dma_free_pages(pages, PAGE_ALIGN(size) >> PAGE_SHIFT);
 	*handle = IOMMU_MAPPING_ERROR;
 }
@@ -518,7 +518,7 @@ struct page **iommu_dma_alloc(struct device *dev, size_t size, gfp_t gfp,
 		unsigned long attrs, int prot, dma_addr_t *handle,
 		void (*flush_page)(struct device *, const void *, phys_addr_t))
 {
-	struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
+	struct iommu_domain *domain = iommu_get_dma_domain(dev);
 	struct iommu_dma_cookie *cookie = domain->iova_cookie;
 	struct iova_domain *iovad = &cookie->iovad;
 	struct page **pages;
@@ -606,9 +606,8 @@ int iommu_dma_mmap(struct page **pages, size_t size, struct vm_area_struct *vma)
 }
 
 static dma_addr_t __iommu_dma_map(struct device *dev, phys_addr_t phys,
-		size_t size, int prot)
+		size_t size, int prot, struct iommu_domain *domain)
 {
-	struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
 	struct iommu_dma_cookie *cookie = domain->iova_cookie;
 	size_t iova_off = 0;
 	dma_addr_t iova;
@@ -632,13 +631,14 @@ static dma_addr_t __iommu_dma_map(struct device *dev, phys_addr_t phys,
 dma_addr_t iommu_dma_map_page(struct device *dev, struct page *page,
 		unsigned long offset, size_t size, int prot)
 {
-	return __iommu_dma_map(dev, page_to_phys(page) + offset, size, prot);
+	return __iommu_dma_map(dev, page_to_phys(page) + offset, size, prot,
+			iommu_get_dma_domain(dev));
 }
 
 void iommu_dma_unmap_page(struct device *dev, dma_addr_t handle, size_t size,
 		enum dma_data_direction dir, unsigned long attrs)
 {
-	__iommu_dma_unmap(iommu_get_domain_for_dev(dev), handle, size);
+	__iommu_dma_unmap(iommu_get_dma_domain(dev), handle, size);
 }
 
 /*
@@ -726,7 +726,7 @@ static void __invalidate_sg(struct scatterlist *sg, int nents)
 int iommu_dma_map_sg(struct device *dev, struct scatterlist *sg,
 		int nents, int prot)
 {
-	struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
+	struct iommu_domain *domain = iommu_get_dma_domain(dev);
 	struct iommu_dma_cookie *cookie = domain->iova_cookie;
 	struct iova_domain *iovad = &cookie->iovad;
 	struct scatterlist *s, *prev = NULL;
@@ -811,20 +811,21 @@ void iommu_dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
 		sg = tmp;
 	}
 	end = sg_dma_address(sg) + sg_dma_len(sg);
-	__iommu_dma_unmap(iommu_get_domain_for_dev(dev), start, end - start);
+	__iommu_dma_unmap(iommu_get_dma_domain(dev), start, end - start);
 }
 
 dma_addr_t iommu_dma_map_resource(struct device *dev, phys_addr_t phys,
 		size_t size, enum dma_data_direction dir, unsigned long attrs)
 {
 	return __iommu_dma_map(dev, phys, size,
-			dma_info_to_prot(dir, false, attrs) | IOMMU_MMIO);
+			dma_info_to_prot(dir, false, attrs) | IOMMU_MMIO,
+			iommu_get_dma_domain(dev));
 }
 
 void iommu_dma_unmap_resource(struct device *dev, dma_addr_t handle,
 		size_t size, enum dma_data_direction dir, unsigned long attrs)
 {
-	__iommu_dma_unmap(iommu_get_domain_for_dev(dev), handle, size);
+	__iommu_dma_unmap(iommu_get_dma_domain(dev), handle, size);
 }
 
 int iommu_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
@@ -850,7 +851,7 @@ static struct iommu_dma_msi_page *iommu_dma_get_msi_page(struct device *dev,
 	if (!msi_page)
 		return NULL;
 
-	iova = __iommu_dma_map(dev, msi_addr, size, prot);
+	iova = __iommu_dma_map(dev, msi_addr, size, prot, domain);
 	if (iommu_dma_mapping_error(dev, iova))
 		goto out_free_page;
 
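The last hunk is the one place the fast lookup is deliberately not used: iommu_dma_get_msi_page() maps into whatever domain its caller resolved, since an MSI doorbell may live in an unmanaged domain carrying an MSI-only cookie rather than the default DMA domain. As a rough sketch of that caller as it looked in this era (paraphrased for context, not part of this diff):

	void iommu_dma_map_msi_msg(int irq, struct msi_msg *msg)
	{
		struct device *dev = msi_desc_to_dev(irq_get_msi_desc(irq));
		/* Keep the generic, refcounted lookup here: the domain may
		 * not be the default DMA domain, and MSI setup is nowhere
		 * near a contended fast path anyway. */
		struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
		...
	}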