author	Allen Kay <allen.m.kay@intel.com>	2011-10-14 15:32:46 -0400
committer	David Woodhouse <David.Woodhouse@intel.com>	2011-10-19 07:06:22 -0400
commit	4399c8bf2b9093696fa8160d79712e7346989c46 (patch)
tree	66363d75fa8e346b84ba6fcdca33f78858456328 /drivers/iommu/intel-iommu.c
parent	8140a95d228efbcd64d84150e794761a32463947 (diff)
intel-iommu: fix superpage support in pfn_to_dma_pte()
If target_level == 0, the current code breaks out of the while loop only
when the SUPERPAGE bit is set. We should also break out when the PTE is
not present. If we don't do this, KVM calls to iommu_iova_to_phys() will
cause pfn_to_dma_pte() to create mappings for 4KiB pages.
Signed-off-by: Allen Kay <allen.m.kay@intel.com>
Signed-off-by: David Woodhouse <David.Woodhouse@intel.com>
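
For context, the sketch below (ordinary user-space C; the toy_pte type, the one-entry-per-level "tables" and the lookup_pte() helper are invented here for illustration and are not the kernel's data structures) shows how the corrected loop condition behaves for a lookup-only caller such as iommu_iova_to_phys(), which passes target_level == 0: the walk now stops either at a superpage entry or at the first non-present entry, so querying an unmapped address no longer forces a descent that, in the kernel, would allocate 4KiB-level page tables.

/*
 * Stand-alone sketch of the fixed walk condition.  The toy_pte structure,
 * the single-entry "tables" and lookup_pte() are illustrative stand-ins,
 * not the kernel's dma_pte / pfn_to_dma_pte() implementation.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define PTE_PRESENT_MASK  0x3ULL	/* read/write bits, cf. dma_pte_present() */
#define PTE_SUPERPAGE_BIT (1ULL << 7)	/* large-page bit, cf. dma_pte_superpage() */

struct toy_pte {
	uint64_t val;
	struct toy_pte *next;	/* next-level entry covering the same address */
};

static bool pte_present(const struct toy_pte *pte)
{
	return (pte->val & PTE_PRESENT_MASK) != 0;
}

static bool pte_superpage(const struct toy_pte *pte)
{
	return (pte->val & PTE_SUPERPAGE_BIT) != 0;
}

/*
 * Lookup-only walk (target_level == 0): stop at a superpage entry *or* at the
 * first non-present entry instead of descending further.  In the kernel the
 * equivalent early break is what keeps iommu_iova_to_phys() from allocating
 * 4KiB-level page tables for addresses that were never mapped.
 */
static const struct toy_pte *lookup_pte(const struct toy_pte *entry, int level,
					int target_level)
{
	const struct toy_pte *pte = NULL;

	while (level > 0) {
		pte = entry;
		if (!target_level && (pte_superpage(pte) || !pte_present(pte)))
			break;
		if (level == target_level)
			break;
		entry = pte->next;	/* descend one level */
		level--;
	}
	return pte;			/* last entry examined */
}

int main(void)
{
	/* 4KiB mapping: present level-2 entry pointing at a present level-1 leaf. */
	struct toy_pte leaf_4k  = { .val = 0x1000 | 0x3, .next = NULL };
	struct toy_pte l2_entry = { .val = 0x3, .next = &leaf_4k };

	/* 2MiB superpage: present level-2 entry with the large-page bit set. */
	struct toy_pte super_2m = { .val = 0x200000 | 0x3 | (1ULL << 7), .next = NULL };

	/* Unmapped address: non-present top-level entry. */
	struct toy_pte empty    = { .val = 0, .next = NULL };

	printf("4KiB lookup reaches the leaf:      %s\n",
	       lookup_pte(&l2_entry, 2, 0) == &leaf_4k ? "yes" : "no");
	printf("superpage lookup stops at level 2: %s\n",
	       lookup_pte(&super_2m, 2, 0) == &super_2m ? "yes" : "no");
	/* The case this commit adds: stop instead of descending/allocating. */
	printf("unmapped lookup stops immediately: %s\n",
	       lookup_pte(&empty, 2, 0) == &empty ? "yes" : "no");
	return 0;
}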
Diffstat (limited to 'drivers/iommu/intel-iommu.c')
-rw-r--r--	drivers/iommu/intel-iommu.c	17
1 file changed, 8 insertions(+), 9 deletions(-)
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
index e5883602cb35..a88f3cbb100b 100644
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -306,6 +306,11 @@ static inline bool dma_pte_present(struct dma_pte *pte)
 	return (pte->val & 3) != 0;
 }
 
+static inline bool dma_pte_superpage(struct dma_pte *pte)
+{
+	return (pte->val & (1 << 7));
+}
+
 static inline int first_pte_in_page(struct dma_pte *pte)
 {
 	return !((unsigned long)pte & ~VTD_PAGE_MASK);
@@ -734,29 +739,23 @@ out:
 }
 
 static struct dma_pte *pfn_to_dma_pte(struct dmar_domain *domain,
-				      unsigned long pfn, int large_level)
+				      unsigned long pfn, int target_level)
 {
 	int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;
 	struct dma_pte *parent, *pte = NULL;
 	int level = agaw_to_level(domain->agaw);
-	int offset, target_level;
+	int offset;
 
 	BUG_ON(!domain->pgd);
 	BUG_ON(addr_width < BITS_PER_LONG && pfn >> addr_width);
 	parent = domain->pgd;
 
-	/* Search pte */
-	if (!large_level)
-		target_level = 1;
-	else
-		target_level = large_level;
-
 	while (level > 0) {
 		void *tmp_page;
 
 		offset = pfn_level_offset(pfn, level);
 		pte = &parent[offset];
-		if (!large_level && (pte->val & DMA_PTE_LARGE_PAGE))
+		if (!target_level && (dma_pte_superpage(pte) || !dma_pte_present(pte)))
 			break;
 		if (level == target_level)
 			break;