 drivers/iommu/iommu.c | 63 +++++++++++++++++++++++++++++++++++----------------------------
 1 file changed, 35 insertions(+), 28 deletions(-)
diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c
index d8f98b14e2fe..4b0b56b0501d 100644
--- a/drivers/iommu/iommu.c
+++ b/drivers/iommu/iommu.c
@@ -754,6 +754,38 @@ int iommu_domain_has_cap(struct iommu_domain *domain,
 }
 EXPORT_SYMBOL_GPL(iommu_domain_has_cap);
 
+static size_t iommu_pgsize(struct iommu_domain *domain,
+                           unsigned long addr_merge, size_t size)
+{
+        unsigned int pgsize_idx;
+        size_t pgsize;
+
+        /* Max page size that still fits into 'size' */
+        pgsize_idx = __fls(size);
+
+        /* need to consider alignment requirements ? */
+        if (likely(addr_merge)) {
+                /* Max page size allowed by address */
+                unsigned int align_pgsize_idx = __ffs(addr_merge);
+                pgsize_idx = min(pgsize_idx, align_pgsize_idx);
+        }
+
+        /* build a mask of acceptable page sizes */
+        pgsize = (1UL << (pgsize_idx + 1)) - 1;
+
+        /* throw away page sizes not supported by the hardware */
+        pgsize &= domain->ops->pgsize_bitmap;
+
+        /* make sure we're still sane */
+        BUG_ON(!pgsize);
+
+        /* pick the biggest page */
+        pgsize_idx = __fls(pgsize);
+        pgsize = 1UL << pgsize_idx;
+
+        return pgsize;
+}
+
 int iommu_map(struct iommu_domain *domain, unsigned long iova,
               phys_addr_t paddr, size_t size, int prot)
 {
@@ -785,32 +817,7 @@ int iommu_map(struct iommu_domain *domain, unsigned long iova,
                  (unsigned long)paddr, (unsigned long)size);
 
         while (size) {
-                unsigned long pgsize, addr_merge = iova | paddr;
-                unsigned int pgsize_idx;
-
-                /* Max page size that still fits into 'size' */
-                pgsize_idx = __fls(size);
-
-                /* need to consider alignment requirements ? */
-                if (likely(addr_merge)) {
-                        /* Max page size allowed by both iova and paddr */
-                        unsigned int align_pgsize_idx = __ffs(addr_merge);
-
-                        pgsize_idx = min(pgsize_idx, align_pgsize_idx);
-                }
-
-                /* build a mask of acceptable page sizes */
-                pgsize = (1UL << (pgsize_idx + 1)) - 1;
-
-                /* throw away page sizes not supported by the hardware */
-                pgsize &= domain->ops->pgsize_bitmap;
-
-                /* make sure we're still sane */
-                BUG_ON(!pgsize);
-
-                /* pick the biggest page */
-                pgsize_idx = __fls(pgsize);
-                pgsize = 1UL << pgsize_idx;
+                size_t pgsize = iommu_pgsize(domain, iova | paddr, size);
 
                 pr_debug("mapping: iova 0x%lx pa 0x%lx pgsize %lu\n", iova,
                          (unsigned long)paddr, pgsize);
@@ -863,9 +870,9 @@ size_t iommu_unmap(struct iommu_domain *domain, unsigned long iova, size_t size)
          * or we hit an area that isn't mapped.
          */
         while (unmapped < size) {
-                size_t left = size - unmapped;
+                size_t pgsize = iommu_pgsize(domain, iova, size - unmapped);
 
-                unmapped_page = domain->ops->unmap(domain, iova, left);
+                unmapped_page = domain->ops->unmap(domain, iova, pgsize);
                 if (!unmapped_page)
                         break;
 
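
For reference, the page-size selection that iommu_pgsize() performs can be exercised outside the kernel with ordinary compiler builtins. The sketch below is an illustration only, not kernel code: __fls()/__ffs()/min()/BUG_ON() are approximated with __builtin_clzl()/__builtin_ctzl() and assert(), a 64-bit unsigned long is assumed, and the example pgsize_bitmap (4K | 2M | 1G) is an assumed value rather than one taken from any particular IOMMU driver.

/*
 * Userspace illustration of the iommu_pgsize() selection logic.
 * Assumes 64-bit unsigned long; __fls(x) ~ 63 - __builtin_clzl(x),
 * __ffs(x) ~ __builtin_ctzl(x).  The pgsize_bitmap used in main()
 * is a made-up example (4K | 2M | 1G).
 */
#include <stdio.h>
#include <stddef.h>
#include <assert.h>

static size_t pick_pgsize(unsigned long pgsize_bitmap,
                          unsigned long addr_merge, size_t size)
{
        unsigned int pgsize_idx;
        unsigned long pgsize;

        /* Max page size that still fits into 'size' */
        pgsize_idx = 63 - __builtin_clzl(size);

        /* Max page size allowed by the iova/paddr alignment */
        if (addr_merge) {
                unsigned int align_pgsize_idx = __builtin_ctzl(addr_merge);
                if (align_pgsize_idx < pgsize_idx)
                        pgsize_idx = align_pgsize_idx;
        }

        /* Mask of acceptable sizes, restricted to what the hardware supports */
        pgsize = ((1UL << (pgsize_idx + 1)) - 1) & pgsize_bitmap;
        assert(pgsize);         /* stands in for BUG_ON(!pgsize) */

        /* Pick the biggest remaining page size */
        return 1UL << (63 - __builtin_clzl(pgsize));
}

int main(void)
{
        unsigned long bitmap = (1UL << 12) | (1UL << 21) | (1UL << 30);

        /* 2M-aligned iova/paddr, 4M request -> selects 2M pages */
        printf("%#zx\n", pick_pgsize(bitmap, 0x200000UL | 0x400000UL, 0x400000));
        /* only 4K alignment available -> falls back to 4K */
        printf("%#zx\n", pick_pgsize(bitmap, 0x1000UL, 0x400000));
        return 0;
}

With the assumed bitmap, the first call prints 0x200000 and the second 0x1000, matching the behaviour the helper gives iommu_map() and the new per-page-size iommu_unmap() loop.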