author    David Woodhouse <David.Woodhouse@intel.com>  2009-09-19 10:36:28 -0400
committer David Woodhouse <David.Woodhouse@intel.com>  2009-09-19 10:36:28 -0400
commit    59c36286b74ae6a8adebf6e133a83d7f2e3e6704 (patch)
tree      ad6d608b560dc540330bdcc89e8702ac85993174 /drivers/pci/intel-iommu.c
parent    2ebe31513fcbe7a781f27002f065b50ae195022f (diff)
intel-iommu: Fix integer overflow in dma_pte_{clear_range,free_pagetable}()
If end_pfn is equal to (unsigned long)-1, then the loop will never end. Seen on a 32-bit kernel, but it could have happened on 64-bit too once we get hardware that supports 64-bit guest addresses. Change both functions to a 'do {} while' loop with the test at the end, and check for the PFN having wrapped round to zero.

Reported-by: Benjamin LaHaise <ben.lahaise@neterion.com>
Tested-by: Benjamin LaHaise <ben.lahaise@neterion.com>
Signed-off-by: David Woodhouse <David.Woodhouse@intel.com>
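For context, here is a minimal, hypothetical user-space sketch (not taken from the driver; the helper name is made up) of the wrap-around the commit message describes: no unsigned long can ever exceed ULONG_MAX, so the old "while (start_pfn <= last_pfn)" test can never become false when last_pfn is (unsigned long)-1, and the counter simply wraps to zero. The do/while form tests at the end and also stops once the PFN has wrapped.

#include <limits.h>
#include <stdio.h>

/* Hypothetical stand-in for "advance past the PTEs just handled". */
static unsigned long advance_pfn(unsigned long pfn)
{
	return pfn + 1;
}

int main(void)
{
	unsigned long start_pfn = ULONG_MAX - 2;
	unsigned long last_pfn  = ULONG_MAX;	/* i.e. end_pfn == (unsigned long)-1 */

	/*
	 * Buggy pattern (omitted so this program terminates):
	 *	while (start_pfn <= last_pfn) { ... start_pfn = advance_pfn(start_pfn); }
	 * The condition is always true, so the loop never ends; start_pfn
	 * just wraps from ULONG_MAX back to 0.
	 *
	 * Fixed pattern, as in the patch: test at the end, and also stop
	 * once the PFN has wrapped round to zero.
	 */
	do {
		printf("handling pfn %lu\n", start_pfn);
		start_pfn = advance_pfn(start_pfn);
	} while (start_pfn && start_pfn <= last_pfn);

	return 0;
}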
Diffstat (limited to 'drivers/pci/intel-iommu.c')
-rw-r--r--  drivers/pci/intel-iommu.c | 11
1 file changed, 7 insertions(+), 4 deletions(-)
diff --git a/drivers/pci/intel-iommu.c b/drivers/pci/intel-iommu.c
index c9272a1fb691..5493c79dde47 100644
--- a/drivers/pci/intel-iommu.c
+++ b/drivers/pci/intel-iommu.c
@@ -785,9 +785,10 @@ static void dma_pte_clear_range(struct dmar_domain *domain,
 
 	BUG_ON(addr_width < BITS_PER_LONG && start_pfn >> addr_width);
 	BUG_ON(addr_width < BITS_PER_LONG && last_pfn >> addr_width);
+	BUG_ON(start_pfn > last_pfn);
 
 	/* we don't need lock here; nobody else touches the iova range */
-	while (start_pfn <= last_pfn) {
+	do {
 		first_pte = pte = dma_pfn_level_pte(domain, start_pfn, 1);
 		if (!pte) {
 			start_pfn = align_to_level(start_pfn + 1, 2);
@@ -801,7 +802,8 @@ static void dma_pte_clear_range(struct dmar_domain *domain,
 
 		domain_flush_cache(domain, first_pte,
 				   (void *)pte - (void *)first_pte);
-	}
+
+	} while (start_pfn && start_pfn <= last_pfn);
 }
 
 /* free page table pages. last level pte should already be cleared */
@@ -817,6 +819,7 @@ static void dma_pte_free_pagetable(struct dmar_domain *domain,
 
 	BUG_ON(addr_width < BITS_PER_LONG && start_pfn >> addr_width);
 	BUG_ON(addr_width < BITS_PER_LONG && last_pfn >> addr_width);
+	BUG_ON(start_pfn > last_pfn);
 
 	/* We don't need lock here; nobody else touches the iova range */
 	level = 2;
@@ -827,7 +830,7 @@ static void dma_pte_free_pagetable(struct dmar_domain *domain,
 		if (tmp + level_size(level) - 1 > last_pfn)
 			return;
 
-		while (tmp + level_size(level) - 1 <= last_pfn) {
+		do {
 			first_pte = pte = dma_pfn_level_pte(domain, tmp, level);
 			if (!pte) {
 				tmp = align_to_level(tmp + 1, level + 1);
@@ -846,7 +849,7 @@ static void dma_pte_free_pagetable(struct dmar_domain *domain,
 			domain_flush_cache(domain, first_pte,
 					   (void *)pte - (void *)first_pte);
 
-		}
+		} while (tmp && tmp + level_size(level) - 1 <= last_pfn);
 		level++;
 	}
 	/* free pgd */