diff options
-rw-r--r-- | drivers/iommu/intel-iommu.c | 11 |
1 file changed, 8 insertions(+), 3 deletions(-)
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c index 5565753d460a..237ef52c8c76 100644 --- a/drivers/iommu/intel-iommu.c +++ b/drivers/iommu/intel-iommu.c | |||
@@ -819,13 +819,14 @@ static struct dma_pte *dma_pfn_level_pte(struct dmar_domain *domain, | |||
819 | } | 819 | } |
820 | 820 | ||
821 | /* clear last level pte, a tlb flush should be followed */ | 821 | /* clear last level pte, a tlb flush should be followed */ |
822 | static void dma_pte_clear_range(struct dmar_domain *domain, | 822 | static int dma_pte_clear_range(struct dmar_domain *domain, |
823 | unsigned long start_pfn, | 823 | unsigned long start_pfn, |
824 | unsigned long last_pfn) | 824 | unsigned long last_pfn) |
825 | { | 825 | { |
826 | int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT; | 826 | int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT; |
827 | unsigned int large_page = 1; | 827 | unsigned int large_page = 1; |
828 | struct dma_pte *first_pte, *pte; | 828 | struct dma_pte *first_pte, *pte; |
829 | int order; | ||
829 | 830 | ||
830 | BUG_ON(addr_width < BITS_PER_LONG && start_pfn >> addr_width); | 831 | BUG_ON(addr_width < BITS_PER_LONG && start_pfn >> addr_width); |
831 | BUG_ON(addr_width < BITS_PER_LONG && last_pfn >> addr_width); | 832 | BUG_ON(addr_width < BITS_PER_LONG && last_pfn >> addr_width); |
@@ -849,6 +850,9 @@ static void dma_pte_clear_range(struct dmar_domain *domain, | |||
849 | (void *)pte - (void *)first_pte); | 850 | (void *)pte - (void *)first_pte); |
850 | 851 | ||
851 | } while (start_pfn && start_pfn <= last_pfn); | 852 | } while (start_pfn && start_pfn <= last_pfn); |
853 | |||
854 | order = (large_page - 1) * 9; | ||
855 | return order; | ||
852 | } | 856 | } |
853 | 857 | ||
854 | /* free page table pages. last level pte should already be cleared */ | 858 | /* free page table pages. last level pte should already be cleared */ |
@@ -3869,14 +3873,15 @@ static int intel_iommu_unmap(struct iommu_domain *domain, | |||
3869 | { | 3873 | { |
3870 | struct dmar_domain *dmar_domain = domain->priv; | 3874 | struct dmar_domain *dmar_domain = domain->priv; |
3871 | size_t size = PAGE_SIZE << gfp_order; | 3875 | size_t size = PAGE_SIZE << gfp_order; |
3876 | int order; | ||
3872 | 3877 | ||
3873 | dma_pte_clear_range(dmar_domain, iova >> VTD_PAGE_SHIFT, | 3878 | order = dma_pte_clear_range(dmar_domain, iova >> VTD_PAGE_SHIFT, |
3874 | (iova + size - 1) >> VTD_PAGE_SHIFT); | 3879 | (iova + size - 1) >> VTD_PAGE_SHIFT); |
3875 | 3880 | ||
3876 | if (dmar_domain->max_addr == iova + size) | 3881 | if (dmar_domain->max_addr == iova + size) |
3877 | dmar_domain->max_addr = iova; | 3882 | dmar_domain->max_addr = iova; |
3878 | 3883 | ||
3879 | return gfp_order; | 3884 | return order; |
3880 | } | 3885 | } |
3881 | 3886 | ||
3882 | static phys_addr_t intel_iommu_iova_to_phys(struct iommu_domain *domain, | 3887 | static phys_addr_t intel_iommu_iova_to_phys(struct iommu_domain *domain, |