author    David Woodhouse <David.Woodhouse@intel.com>    2009-06-28 05:37:25 -0400
committer David Woodhouse <David.Woodhouse@intel.com>    2009-06-29 08:32:26 -0400
commit    b026fd28ea23af24a3eea6e5be3f3d0193a8e87d (patch)
tree      557ccb29c1213f39951651fef68d7e7910d9893b
parent    163cc52ccd2cc5c5ae4e1c886f6fde8547feed2a (diff)
intel-iommu: Change addr_to_dma_pte() to pfn_to_dma_pte()
Signed-off-by: David Woodhouse <David.Woodhouse@intel.com>
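[Note: the page-table walker now takes an IOMMU page frame number instead of a byte address, so callers shift the DMA address right by VTD_PAGE_SHIFT before the walk, as the hunks below show. A minimal caller-side sketch, assuming only the VTD_PAGE_SHIFT constant from this driver; dma_addr_to_pfn() is a hypothetical helper used here for illustration, not part of the patch:

    /* Sketch only: converts a byte address to the pfn expected by
     * pfn_to_dma_pte().  dma_addr_to_pfn() is a hypothetical name. */
    static inline unsigned long dma_addr_to_pfn(u64 addr)
    {
            return addr >> VTD_PAGE_SHIFT;  /* drop the in-page offset */
    }

    /* e.g. the iova_to_phys path would then read:
     *   pte = pfn_to_dma_pte(dmar_domain, dma_addr_to_pfn(iova));
     */
]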
-rw-r--r--  drivers/pci/intel-iommu.c | 13 +++++++------
1 file changed, 7 insertions(+), 6 deletions(-)
diff --git a/drivers/pci/intel-iommu.c b/drivers/pci/intel-iommu.c
index 21dc77311863..dfbabd151a9c 100644
--- a/drivers/pci/intel-iommu.c
+++ b/drivers/pci/intel-iommu.c
@@ -692,23 +692,24 @@ static inline unsigned long align_to_level(unsigned long pfn, int level)
 	return (pfn + level_size(level) - 1) & level_mask(level);
 }
 
-static struct dma_pte * addr_to_dma_pte(struct dmar_domain *domain, u64 addr)
+static struct dma_pte *pfn_to_dma_pte(struct dmar_domain *domain,
+				      unsigned long pfn)
 {
-	int addr_width = agaw_to_width(domain->agaw);
+	int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;
 	struct dma_pte *parent, *pte = NULL;
 	int level = agaw_to_level(domain->agaw);
 	int offset;
 	unsigned long flags;
 
 	BUG_ON(!domain->pgd);
-	BUG_ON(addr >> addr_width);
+	BUG_ON(addr_width < BITS_PER_LONG && pfn >> addr_width);
 	parent = domain->pgd;
 
 	spin_lock_irqsave(&domain->mapping_lock, flags);
 	while (level > 0) {
 		void *tmp_page;
 
-		offset = pfn_level_offset(addr >> VTD_PAGE_SHIFT, level);
+		offset = pfn_level_offset(pfn, level);
 		pte = &parent[offset];
 		if (level == 1)
 			break;
@@ -1660,7 +1661,7 @@ domain_page_mapping(struct dmar_domain *domain, dma_addr_t iova,
 	end_pfn = (VTD_PAGE_ALIGN(((u64)hpa) + size)) >> VTD_PAGE_SHIFT;
 	index = 0;
 	while (start_pfn < end_pfn) {
-		pte = addr_to_dma_pte(domain, iova + VTD_PAGE_SIZE * index);
+		pte = pfn_to_dma_pte(domain, (iova >> VTD_PAGE_SHIFT) + index);
 		if (!pte)
 			return -ENOMEM;
 		/* We don't need lock here, nobody else
@@ -3533,7 +3534,7 @@ static phys_addr_t intel_iommu_iova_to_phys(struct iommu_domain *domain,
 	struct dma_pte *pte;
 	u64 phys = 0;
 
-	pte = addr_to_dma_pte(dmar_domain, iova);
+	pte = pfn_to_dma_pte(dmar_domain, iova >> VTD_PAGE_SHIFT);
 	if (pte)
 		phys = dma_pte_addr(pte);
 