author     David Woodhouse <David.Woodhouse@intel.com>   2009-06-27 17:41:00 -0400
committer  David Woodhouse <David.Woodhouse@intel.com>   2009-06-29 08:30:35 -0400
commit     6660c63a79a639b86e3a709e25a8c4fc3ab24770 (patch)
tree       8e9b0f30973bb33f588f9d80dee91bc798ff4127
parent     595badf5d65d50300319e6178e6df005ea501f70 (diff)
intel-iommu: Make dma_pte_free_pagetable() use pfns
Signed-off-by: David Woodhouse <David.Woodhouse@intel.com>
-rw-r--r--  drivers/pci/intel-iommu.c | 40
1 file changed, 22 insertions(+), 18 deletions(-)
diff --git a/drivers/pci/intel-iommu.c b/drivers/pci/intel-iommu.c
index ff8b7ce4a013..1526864a9d6f 100644
--- a/drivers/pci/intel-iommu.c
+++ b/drivers/pci/intel-iommu.c
@@ -669,27 +669,27 @@ static inline int width_to_agaw(int width)
 
 static inline unsigned int level_to_offset_bits(int level)
 {
-        return (12 + (level - 1) * LEVEL_STRIDE);
+        return (level - 1) * LEVEL_STRIDE;
 }
 
 static inline int pfn_level_offset(unsigned long pfn, int level)
 {
-        return (pfn >> (level_to_offset_bits(level) - 12)) & LEVEL_MASK;
+        return (pfn >> level_to_offset_bits(level)) & LEVEL_MASK;
 }
 
-static inline u64 level_mask(int level)
+static inline unsigned long level_mask(int level)
 {
-        return ((u64)-1 << level_to_offset_bits(level));
+        return -1UL << level_to_offset_bits(level);
 }
 
-static inline u64 level_size(int level)
+static inline unsigned long level_size(int level)
 {
-        return ((u64)1 << level_to_offset_bits(level));
+        return 1UL << level_to_offset_bits(level);
 }
 
-static inline u64 align_to_level(u64 addr, int level)
+static inline unsigned long align_to_level(unsigned long pfn, int level)
 {
-        return ((addr + level_size(level) - 1) & level_mask(level));
+        return (pfn + level_size(level) - 1) & level_mask(level);
 }
 
 static struct dma_pte * addr_to_dma_pte(struct dmar_domain *domain, u64 addr)
@@ -798,25 +798,29 @@ static void dma_pte_clear_range(struct dmar_domain *domain,
 static void dma_pte_free_pagetable(struct dmar_domain *domain,
                                    u64 start, u64 end)
 {
-        int addr_width = agaw_to_width(domain->agaw);
+        int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;
+        unsigned long start_pfn = start >> VTD_PAGE_SHIFT;
+        unsigned long last_pfn = (end-1) >> VTD_PAGE_SHIFT;
         struct dma_pte *pte;
         int total = agaw_to_level(domain->agaw);
         int level;
-        u64 tmp;
+        unsigned long tmp;
 
-        BUG_ON(start >> addr_width);
-        BUG_ON(end >> addr_width);
+        BUG_ON(addr_width < BITS_PER_LONG && start_pfn >> addr_width);
+        BUG_ON(addr_width < BITS_PER_LONG && last_pfn >> addr_width);
 
         /* we don't need lock here, nobody else touches the iova range */
         level = 2;
         while (level <= total) {
-                tmp = align_to_level(start, level);
-                if (tmp >= end || (tmp + level_size(level) > end))
+                tmp = align_to_level(start_pfn, level);
+
+                /* Only clear this pte/pmd if we're asked to clear its
+                   _whole_ range */
+                if (tmp + level_size(level) - 1 > last_pfn)
                         return;
 
-                while (tmp < end) {
-                        pte = dma_pfn_level_pte(domain, tmp >> VTD_PAGE_SHIFT,
-                                                level);
+                while (tmp <= last_pfn) {
+                        pte = dma_pfn_level_pte(domain, tmp, level);
                         if (pte) {
                                 free_pgtable_page(
                                         phys_to_virt(dma_pte_addr(pte)));
@@ -828,7 +832,7 @@ static void dma_pte_free_pagetable(struct dmar_domain *domain,
                 level++;
         }
         /* free pgd */
-        if (start == 0 && end >= ((((u64)1) << addr_width) - 1)) {
+        if (start == 0 && last_pfn == DOMAIN_MAX_PFN(domain->gaw)) {
                 free_pgtable_page(domain->pgd);
                 domain->pgd = NULL;
         }
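The hunk at line 669 shows the core of the conversion: every level helper now works on a page frame number, so the hard-coded page shift of 12 drops out of level_to_offset_bits() and pfn_level_offset(), and the u64 quantities become unsigned long. Below is a minimal, standalone user-space sketch (not kernel code) of those pfn-based helpers, assuming the constants intel-iommu.c defines: LEVEL_STRIDE of 9 (512 entries per table), LEVEL_MASK of the low 9 bits, and 4KiB VT-d pages (VTD_PAGE_SHIFT of 12). The main() routine and its sample DMA address are purely illustrative.

/* pfn_levels.c: user-space sketch of the pfn-based level arithmetic. */
#include <stdio.h>

#define VTD_PAGE_SHIFT  12                              /* 4KiB VT-d page */
#define LEVEL_STRIDE    9                               /* 512 entries per table */
#define LEVEL_MASK      ((1UL << LEVEL_STRIDE) - 1)

/* With pfns, level 1 starts at bit 0 of the pfn rather than bit 12 of
   the byte address, so the "12 +" / "- 12" pair disappears. */
static unsigned int level_to_offset_bits(int level)
{
        return (level - 1) * LEVEL_STRIDE;
}

static int pfn_level_offset(unsigned long pfn, int level)
{
        return (pfn >> level_to_offset_bits(level)) & LEVEL_MASK;
}

static unsigned long level_size(int level)              /* pages covered by one entry */
{
        return 1UL << level_to_offset_bits(level);
}

static unsigned long level_mask(int level)
{
        return -1UL << level_to_offset_bits(level);
}

static unsigned long align_to_level(unsigned long pfn, int level)
{
        return (pfn + level_size(level) - 1) & level_mask(level);
}

int main(void)
{
        unsigned long addr = 0x12345678000UL;           /* arbitrary sample DMA address */
        unsigned long pfn  = addr >> VTD_PAGE_SHIFT;

        for (int level = 1; level <= 4; level++)
                printf("level %d: index %3d, covers %9lu pages, aligned start pfn 0x%lx\n",
                       level, pfn_level_offset(pfn, level),
                       level_size(level), align_to_level(pfn, level));
        return 0;
}

Keeping everything in pfns means a page index always fits in an unsigned long, which mirrors the u64 to unsigned long conversions in the first hunk and the start_pfn/last_pfn locals introduced in dma_pte_free_pagetable().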