author      David Woodhouse <David.Woodhouse@intel.com>  2014-03-19 12:07:49 -0400
committer   David Woodhouse <David.Woodhouse@intel.com>  2014-03-19 13:21:32 -0400
commit      5cf0a76fa2179d246fc0375d733bdccffd59382b (patch)
tree        252606f6807877d15a936dd94325742cf8021a8d /drivers/iommu/intel-iommu.c
parent      75f05569d0e51f6332a291c82abbeb7c8262e32d (diff)
iommu/vt-d: Clean up size handling for intel_iommu_unmap()
We have this horrid API where iommu_unmap() can unmap more than it's asked
to, if the IOVA in question happens to be mapped with a large page.
Instead of propagating this nonsense to the point where we end up returning
the page order from dma_pte_clear_range(), let's just do it once and adjust
the 'size' parameter accordingly.
Augment pfn_to_dma_pte() to return the level at which the PTE was found,
which will also be useful later if we end up changing the API for
iommu_iova_to_phys() to behave the same way as is being discussed upstream.
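The resulting calling convention, as a short sketch distilled from the hunks below (a kernel-internal fragment, not standalone code): target_level is now in/out.

    int level = 0;  /* 0 == no demand; filled in with the level found */
    struct dma_pte *pte;

    /* Pass 0 to stop wherever the walk ends (leaf or superpage) and
     * learn the level; pass a nonzero level to get a PTE at exactly
     * that level, allocating intermediate tables on the way down. */
    pte = pfn_to_dma_pte(domain, iova >> VTD_PAGE_SHIFT, &level);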
Signed-off-by: David Woodhouse <David.Woodhouse@intel.com>
Diffstat (limited to 'drivers/iommu/intel-iommu.c')
-rw-r--r--    drivers/iommu/intel-iommu.c    37
1 file changed, 25 insertions(+), 12 deletions(-)
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
index 484d669d2720..6472bf15bef2 100644
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -784,7 +784,7 @@ out:
 }
 
 static struct dma_pte *pfn_to_dma_pte(struct dmar_domain *domain,
-                                      unsigned long pfn, int target_level)
+                                      unsigned long pfn, int *target_level)
 {
         int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;
         struct dma_pte *parent, *pte = NULL;
@@ -799,14 +799,14 @@ static struct dma_pte *pfn_to_dma_pte(struct dmar_domain *domain,
 
         parent = domain->pgd;
 
-        while (level > 0) {
+        while (1) {
                 void *tmp_page;
 
                 offset = pfn_level_offset(pfn, level);
                 pte = &parent[offset];
-                if (!target_level && (dma_pte_superpage(pte) || !dma_pte_present(pte)))
+                if (!*target_level && (dma_pte_superpage(pte) || !dma_pte_present(pte)))
                         break;
-                if (level == target_level)
+                if (level == *target_level)
                         break;
 
                 if (!dma_pte_present(pte)) {
@@ -827,10 +827,16 @@ static struct dma_pte *pfn_to_dma_pte(struct dmar_domain *domain,
                                 domain_flush_cache(domain, pte, sizeof(*pte));
                         }
                 }
+                if (level == 1)
+                        break;
+
                 parent = phys_to_virt(dma_pte_addr(pte));
                 level--;
         }
 
+        if (!*target_level)
+                *target_level = level;
+
         return pte;
 }
 
@@ -868,7 +874,7 @@ static struct dma_pte *dma_pfn_level_pte(struct dmar_domain *domain,
 }
 
 /* clear last level pte, a tlb flush should be followed */
-static int dma_pte_clear_range(struct dmar_domain *domain,
+static void dma_pte_clear_range(struct dmar_domain *domain,
                                unsigned long start_pfn,
                                unsigned long last_pfn)
 {
@@ -898,8 +904,6 @@ static int dma_pte_clear_range(struct dmar_domain *domain,
                                    (void *)pte - (void *)first_pte);
 
         } while (start_pfn && start_pfn <= last_pfn);
-
-        return min_t(int, (large_page - 1) * 9, MAX_AGAW_PFN_WIDTH);
 }
 
 static void dma_pte_free_level(struct dmar_domain *domain, int level,
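For reference, the return value deleted in this hunk encoded the order of the largest page cleared, which intel_iommu_unmap() then converted back into bytes with PAGE_SIZE << order; a worked example of the removed arithmetic, assuming a 2MiB superpage and 4KiB PAGE_SIZE:

    int large_page = 2;                   /* level of the superpage PTE cleared */
    int order = (large_page - 1) * 9;     /* = 9, per the removed min_t() */
    size_t bytes = (size_t)4096 << order; /* PAGE_SIZE << order = 2MiB */

The new code derives the same quantity once, up front, from the level that pfn_to_dma_pte() now reports.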
@@ -1832,7 +1836,7 @@ static int __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
                 if (!pte) {
                         largepage_lvl = hardware_largepage_caps(domain, iov_pfn, phys_pfn, sg_res);
 
-                        first_pte = pte = pfn_to_dma_pte(domain, iov_pfn, largepage_lvl);
+                        first_pte = pte = pfn_to_dma_pte(domain, iov_pfn, &largepage_lvl);
                         if (!pte)
                                 return -ENOMEM;
                         /* It is large page*/
@@ -4099,15 +4103,23 @@ static size_t intel_iommu_unmap(struct iommu_domain *domain,
                              unsigned long iova, size_t size)
 {
         struct dmar_domain *dmar_domain = domain->priv;
-        int order;
+        int level = 0;
+
+        /* Cope with horrid API which requires us to unmap more than the
+           size argument if it happens to be a large-page mapping. */
+        if (!pfn_to_dma_pte(dmar_domain, iova >> VTD_PAGE_SHIFT, &level))
+                BUG();
+
+        if (size < VTD_PAGE_SIZE << level_to_offset_bits(level))
+                size = VTD_PAGE_SIZE << level_to_offset_bits(level);
 
-        order = dma_pte_clear_range(dmar_domain, iova >> VTD_PAGE_SHIFT,
+        dma_pte_clear_range(dmar_domain, iova >> VTD_PAGE_SHIFT,
                             (iova + size - 1) >> VTD_PAGE_SHIFT);
 
         if (dmar_domain->max_addr == iova + size)
                 dmar_domain->max_addr = iova;
 
-        return PAGE_SIZE << order;
+        return size;
 }
 
 static phys_addr_t intel_iommu_iova_to_phys(struct iommu_domain *domain,
@@ -4115,9 +4127,10 @@ static phys_addr_t intel_iommu_iova_to_phys(struct iommu_domain *domain,
 {
         struct dmar_domain *dmar_domain = domain->priv;
         struct dma_pte *pte;
+        int level = 0;
         u64 phys = 0;
 
-        pte = pfn_to_dma_pte(dmar_domain, iova >> VTD_PAGE_SHIFT, 0);
+        pte = pfn_to_dma_pte(dmar_domain, iova >> VTD_PAGE_SHIFT, &level);
         if (pte)
                 phys = dma_pte_addr(pte);
 
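One closing note on this last hunk: intel_iommu_iova_to_phys() still returns dma_pte_addr(pte), i.e. the base of the page the PTE maps. With the level now available, the API change discussed upstream (see the commit message) could fold the in-superpage offset back in; a hypothetical sketch of that follow-up, not part of this patch:

    pte = pfn_to_dma_pte(dmar_domain, iova >> VTD_PAGE_SHIFT, &level);
    if (pte) {
            /* mask selecting the bits covered by a page at this level */
            u64 pgmask = ~((1ULL << (VTD_PAGE_SHIFT +
                                     level_to_offset_bits(level))) - 1);
            phys = (dma_pte_addr(pte) & pgmask) | (iova & ~pgmask);
    }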