author     Allen Kay <allen.m.kay@intel.com>                2011-10-14 15:31:54 -0400
committer  David Woodhouse <David.Woodhouse@intel.com>      2011-10-19 07:06:18 -0400
commit     292827cb164ad00cc7689a21283b1261c0b6daed (patch)
tree       9f2627fbecab0c5e885093152910b4a191dd3f68 /drivers/iommu
parent     3fb39615007d0645ad7f3a509d7120a1987d95b2 (diff)
intel-iommu: fix return value of iommu_unmap() API
The iommu_unmap() API expects IOMMU drivers to return the actual page order of the address being unmapped. The previous code just returned the page order passed in by the caller. This patch fixes that.

Signed-off-by: Allen Kay <allen.m.kay@intel.com>
Signed-off-by: David Woodhouse <David.Woodhouse@intel.com>
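For context: under the 3.1-era order-based unmap API (an assumption here; at the time the signature was int iommu_unmap(struct iommu_domain *, unsigned long iova, int gfp_order)), callers advance through an IOVA range by the order the driver reports it actually unmapped, which is why echoing the caller's gfp_order back is wrong when a superpage was cleared. A minimal sketch of such a caller loop; the names unmap_iova_range, start_pfn and end_pfn are illustrative, not from this patch:

#include <linux/iommu.h>
#include <linux/mm.h>

/*
 * Sketch: tear down an IOVA range, letting the order reported by the
 * driver set the stride.  If a 2MiB superpage was unmapped, the driver
 * returns order 9 and the loop skips the whole 2MiB region instead of
 * issuing 511 further unmaps inside an already-cleared large page.
 */
static void unmap_iova_range(struct iommu_domain *domain,
			     unsigned long start_pfn,
			     unsigned long end_pfn)
{
	while (start_pfn < end_pfn) {
		int order = iommu_unmap(domain, start_pfn << PAGE_SHIFT, 0);

		/* advance by what was actually unmapped, not what was asked */
		start_pfn += 1UL << order;
	}
}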
Diffstat (limited to 'drivers/iommu')
-rw-r--r--  drivers/iommu/intel-iommu.c | 11
1 file changed, 8 insertions(+), 3 deletions(-)
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
index 5565753d460a..237ef52c8c76 100644
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -819,13 +819,14 @@ static struct dma_pte *dma_pfn_level_pte(struct dmar_domain *domain,
 }
 
 /* clear last level pte, a tlb flush should be followed */
-static void dma_pte_clear_range(struct dmar_domain *domain,
+static int dma_pte_clear_range(struct dmar_domain *domain,
 				unsigned long start_pfn,
 				unsigned long last_pfn)
 {
 	int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;
 	unsigned int large_page = 1;
 	struct dma_pte *first_pte, *pte;
+	int order;
 
 	BUG_ON(addr_width < BITS_PER_LONG && start_pfn >> addr_width);
 	BUG_ON(addr_width < BITS_PER_LONG && last_pfn >> addr_width);
@@ -849,6 +850,9 @@ static void dma_pte_clear_range(struct dmar_domain *domain,
 			   (void *)pte - (void *)first_pte);
 
 	} while (start_pfn && start_pfn <= last_pfn);
+
+	order = (large_page - 1) * 9;
+	return order;
 }
 
 /* free page table pages. last level pte should already be cleared */
@@ -3869,14 +3873,15 @@ static int intel_iommu_unmap(struct iommu_domain *domain,
 {
 	struct dmar_domain *dmar_domain = domain->priv;
 	size_t size = PAGE_SIZE << gfp_order;
+	int order;
 
-	dma_pte_clear_range(dmar_domain, iova >> VTD_PAGE_SHIFT,
-			    (iova + size - 1) >> VTD_PAGE_SHIFT);
+	order = dma_pte_clear_range(dmar_domain, iova >> VTD_PAGE_SHIFT,
+				    (iova + size - 1) >> VTD_PAGE_SHIFT);
 
 	if (dmar_domain->max_addr == iova + size)
 		dmar_domain->max_addr = iova;
 
-	return gfp_order;
+	return order;
 }
 
 static phys_addr_t intel_iommu_iova_to_phys(struct iommu_domain *domain,
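The order computation added to dma_pte_clear_range() follows from the VT-d page-table geometry: each level holds 512 eight-byte entries and so resolves 9 address bits, meaning a PTE found at level large_page spans (large_page - 1) * 9 orders of 4KiB pages. A restatement of that arithmetic; the helper name is hypothetical and does not appear in the patch:

/*
 * Hypothetical helper mirroring "order = (large_page - 1) * 9" above.
 * VT-d resolves 9 address bits per table level, so:
 *   large_page == 1  ->  order 0   (4KiB base page)
 *   large_page == 2  ->  order 9   (2MiB superpage)
 *   large_page == 3  ->  order 18  (1GiB superpage)
 */
static inline int vtd_large_page_to_order(unsigned int large_page)
{
	return (large_page - 1) * 9;
}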