author		David Woodhouse <David.Woodhouse@intel.com>	2009-07-02 06:21:16 -0400
committer	David Woodhouse <David.Woodhouse@intel.com>	2009-07-02 06:27:13 -0400
commit		75e6bf9638992dfc0fec9c3ca10444c8e0d6a638 (patch)
tree		bdbabd91d77edb3d623292ea97b17d88d053816e /drivers/pci
parent		7766a3fb905f0b078b05f5d6a6be8df4c64b9f51 (diff)
intel-iommu: Introduce first_pte_in_page() to simplify PTE-setting loops
On Wed, 2009-07-01 at 16:59 -0700, Linus Torvalds wrote:
> I also _really_ hate how you do
>
>	(unsigned long)pte >> VTD_PAGE_SHIFT ==
>	(unsigned long)first_pte >> VTD_PAGE_SHIFT

Kill this, in favour of just looking to see if the incremented pte
pointer has 'wrapped' onto the next page. Which means we have to check
it _after_ incrementing it, not before.

Signed-off-by: David Woodhouse <David.Woodhouse@intel.com>
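An editor's illustration, not part of the patch: the old and new tests agree,
assuming the usual 4KiB VT-d page and 8-byte PTEs. PAGE_SHIFT and PAGE_MASK
below stand in for VTD_PAGE_SHIFT and VTD_PAGE_MASK, and the page-table
address is fabricated purely for the demonstration; nothing is dereferenced.

	#include <assert.h>
	#include <stdint.h>

	#define PAGE_SHIFT	12
	#define PAGE_MASK	(~((1UL << PAGE_SHIFT) - 1))

	struct dma_pte { uint64_t val; };	/* 8-byte entry, as in VT-d */

	static int first_pte_in_page(struct dma_pte *pte)
	{
		return !((unsigned long)pte & ~PAGE_MASK);
	}

	int main(void)
	{
		/* A page table is page-aligned; address fabricated, never read. */
		struct dma_pte *first_pte = (struct dma_pte *)0x1000;
		struct dma_pte *pte = first_pte;
		int i;

		/* While still inside first_pte's page, the old shifted
		 * comparison holds... */
		for (i = 0; i < 512; i++) {
			assert((unsigned long)pte >> PAGE_SHIFT ==
			       (unsigned long)first_pte >> PAGE_SHIFT);
			/* ...and the first entry of the table is itself
			 * page-aligned, which is why the test must run
			 * after pte++, not before. */
			assert(i == 0 ? first_pte_in_page(pte)
				      : !first_pte_in_page(pte));
			pte++;
		}
		/* Once pte has wrapped onto the next page, only the
		 * post-increment test fires. */
		assert(first_pte_in_page(pte));
		assert((unsigned long)pte >> PAGE_SHIFT !=
		       (unsigned long)first_pte >> PAGE_SHIFT);
		return 0;
	}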
Diffstat (limited to 'drivers/pci')
-rw-r--r--	drivers/pci/intel-iommu.c	24
1 file changed, 13 insertions(+), 11 deletions(-)
diff --git a/drivers/pci/intel-iommu.c b/drivers/pci/intel-iommu.c
index 2c1b2babfdc5..dcf0295a9b60 100644
--- a/drivers/pci/intel-iommu.c
+++ b/drivers/pci/intel-iommu.c
@@ -240,6 +240,11 @@ static inline bool dma_pte_present(struct dma_pte *pte)
 	return (pte->val & 3) != 0;
 }
 
+static inline int first_pte_in_page(struct dma_pte *pte)
+{
+	return !((unsigned long)pte & ~VTD_PAGE_MASK);
+}
+
 /*
  * This domain is a statically identity mapping domain.
  *	1. This domain creats a static 1:1 mapping to all usable memory.
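Editor's note on the helper, assuming the usual 4KiB page size
(VTD_PAGE_SHIFT == 12): (unsigned long)pte & ~VTD_PAGE_MASK keeps only the
offset-within-page bits, so the negation returns 1 exactly when pte sits on a
page boundary. A page of 8-byte PTEs is itself page-aligned, so only entry 0
of a table qualifies:

	pte == 0x...42000  ->  pte & ~VTD_PAGE_MASK == 0x000  ->  returns 1
	pte == 0x...42008  ->  pte & ~VTD_PAGE_MASK == 0x008  ->  returns 0
	pte == 0x...42ff8  ->  pte & ~VTD_PAGE_MASK == 0xff8  ->  returns 0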
@@ -780,13 +785,12 @@ static void dma_pte_clear_range(struct dmar_domain *domain,
 			start_pfn = align_to_level(start_pfn + 1, 2);
 			continue;
 		}
-		while (start_pfn <= last_pfn &&
-		       (unsigned long)pte >> VTD_PAGE_SHIFT ==
-		       (unsigned long)first_pte >> VTD_PAGE_SHIFT) {
+		do {
 			dma_clear_pte(pte);
 			start_pfn++;
 			pte++;
-		}
+		} while (start_pfn <= last_pfn && !first_pte_in_page(pte));
+
 		domain_flush_cache(domain, first_pte,
 				   (void *)pte - (void *)first_pte);
 	}
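Why do/while rather than a pre-tested while: pte is page-aligned whenever the
cleared range begins at a table's first entry, so testing
!first_pte_in_page(pte) before the first iteration would skip the body
entirely. An editor's sketch under the same 4KiB-page assumption, with plain
counters standing in for the real start_pfn/last_pfn bookkeeping:

	#include <assert.h>
	#include <stdint.h>

	#define PAGE_MASK	(~0xfffUL)	/* assumed 4KiB pages */

	struct dma_pte { uint64_t val; };

	static int first_pte_in_page(struct dma_pte *pte)
	{
		return !((unsigned long)pte & ~PAGE_MASK);
	}

	int main(void)
	{
		/* Range starts at a table's first entry: pte is page-aligned. */
		struct dma_pte *pte = (struct dma_pte *)0x1000;
		int remaining = 512, cleared = 0;

		/* Post-test, as in the patch: the wrap check only runs after
		 * pte++ has stepped past the entry just processed. */
		do {
			cleared++;	/* stands in for dma_clear_pte() */
			remaining--;
			pte++;
		} while (remaining && !first_pte_in_page(pte));

		/* All 512 entries are handled; a pre-tested while() would
		 * have exited immediately on the aligned starting pte. */
		assert(cleared == 512);
		return 0;
	}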
@@ -821,14 +825,14 @@ static void dma_pte_free_pagetable(struct dmar_domain *domain,
 			tmp = align_to_level(tmp + 1, level + 1);
 			continue;
 		}
-		while (tmp + level_size(level) - 1 <= last_pfn &&
-		       (unsigned long)pte >> VTD_PAGE_SHIFT ==
-		       (unsigned long)first_pte >> VTD_PAGE_SHIFT) {
+		do {
 			free_pgtable_page(phys_to_virt(dma_pte_addr(pte)));
 			dma_clear_pte(pte);
 			pte++;
 			tmp += level_size(level);
-		}
+		} while (!first_pte_in_page(pte) &&
+			 tmp + level_size(level) - 1 <= last_pfn);
+
 		domain_flush_cache(domain, first_pte,
 				   (void *)pte - (void *)first_pte);
 
@@ -1694,9 +1698,7 @@ static int __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
 			WARN_ON(1);
 		}
 		pte++;
-		if (!nr_pages ||
-		    (unsigned long)pte >> VTD_PAGE_SHIFT !=
-		    (unsigned long)first_pte >> VTD_PAGE_SHIFT) {
+		if (!nr_pages || first_pte_in_page(pte)) {
 			domain_flush_cache(domain, first_pte,
 					   (void *)pte - (void *)first_pte);
 			pte = NULL;
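The same post-increment test also drives flush batching in the mapping path
above: PTEs are written until the pointer wraps onto a new page of the table,
or until the mapping is complete, and one domain_flush_cache() then covers
everything written since first_pte. An editor's sketch of that pattern;
flush_span() is a made-up stand-in for domain_flush_cache(), and the table
address is fabricated and never dereferenced:

	#include <stdint.h>
	#include <stdio.h>

	#define PAGE_MASK	(~0xfffUL)	/* assumed 4KiB pages */

	struct dma_pte { uint64_t val; };

	static int first_pte_in_page(struct dma_pte *pte)
	{
		return !((unsigned long)pte & ~PAGE_MASK);
	}

	/* Made-up stand-in for domain_flush_cache(domain, first, size). */
	static void flush_span(struct dma_pte *first, struct dma_pte *end)
	{
		printf("flush %lu bytes\n",
		       (unsigned long)((char *)end - (char *)first));
	}

	int main(void)
	{
		struct dma_pte *first_pte = NULL, *pte = NULL;
		unsigned long nr_pages = 600;	/* spans two pages of PTEs */

		while (nr_pages) {
			if (!pte)	/* (re)enter the walk on a fresh table page */
				pte = first_pte = (struct dma_pte *)0x1000;
			/* ...pte->val would be written here... */
			pte++;
			nr_pages--;
			if (!nr_pages || first_pte_in_page(pte)) {
				flush_span(first_pte, pte);	/* one flush per page-span */
				pte = NULL;
			}
		}
		return 0;	/* prints: flush 4096 bytes, then flush 704 bytes */
	}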