author    David Woodhouse <David.Woodhouse@intel.com>  2009-06-28 15:38:49 -0400
committer David Woodhouse <David.Woodhouse@intel.com>  2009-06-29 08:39:45 -0400
commit    6f6a00e40aa3fdd3b29c30e3ef1fc9690506bc03 (patch)
tree      ba1f28669888141c99c38cfb8d6b2b6034874e25 /drivers/pci
parent    310a5ab93cb4ce29367238f682affd9ac352f4d0 (diff)
intel-iommu: Performance improvement for domain_pfn_mapping()
As with dma_pte_clear_range(), don't keep flushing a single PTE at a time. Also micro-optimise the setting of PTE values: fill in pte->val directly rather than using the helper functions to do all the masking.

Signed-off-by: David Woodhouse <David.Woodhouse@intel.com>
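For context, the pattern being introduced here is independent of VT-d: rather than flushing the CPU cache once per PTE write, accumulate a run of consecutive writes and flush the whole run when it ends or when the write pointer crosses a page boundary. Below is a minimal userspace sketch of that pattern; the 4 KiB page size, the pte_t layout, and the flush_range() stub are illustrative assumptions, not the kernel's definitions.

    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_SHIFT 12                   /* assumed 4 KiB pages */

    typedef struct { uint64_t val; } pte_t; /* assumed 8-byte entries */

    static unsigned long flush_calls;       /* counts flushes to show the batching */

    /* Stand-in for domain_flush_cache(): flush one contiguous byte range. */
    static void flush_range(void *start, size_t len)
    {
            (void)start; (void)len;         /* a real driver would clflush here */
            flush_calls++;
    }

    /* Fill nr consecutive PTEs, flushing once per contiguous same-page run
     * instead of once per entry; mirrors the loop structure of the patch. */
    static void map_run(pte_t *pte, uint64_t phys_pfn, unsigned long nr,
                        uint64_t prot)
    {
            pte_t *first_pte = pte;

            while (nr--) {
                    pte->val = (phys_pfn++ << PAGE_SHIFT) | prot;
                    pte++;
                    /* Flush when the run ends or when pte has crossed into
                     * a new page relative to first_pte. */
                    if (!nr || ((uintptr_t)pte >> PAGE_SHIFT) !=
                               ((uintptr_t)first_pte >> PAGE_SHIFT)) {
                            flush_range(first_pte,
                                        (char *)pte - (char *)first_pte);
                            first_pte = pte;
                    }
            }
    }

    int main(void)
    {
            static pte_t table[1024];       /* 8 KiB of PTEs: crosses page bounds */

            map_run(table, 0x1000, 1024, 0x3 /* read|write */);
            printf("1024 PTEs written, %lu flush call(s)\n", flush_calls);
            return 0;
    }

With 8-byte entries a 4 KiB page holds 512 PTEs, so the 1024-entry run above is flushed in two or three calls (depending on the table's alignment) rather than 1024.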
Diffstat (limited to 'drivers/pci')
 drivers/pci/intel-iommu.c | 26 +++++++++++++++++---------
 1 file changed, 17 insertions(+), 9 deletions(-)
diff --git a/drivers/pci/intel-iommu.c b/drivers/pci/intel-iommu.c
index ba7e37f7111a..f8074236bcce 100644
--- a/drivers/pci/intel-iommu.c
+++ b/drivers/pci/intel-iommu.c
@@ -1639,7 +1639,7 @@ static int domain_pfn_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
 			    unsigned long phys_pfn, unsigned long nr_pages,
 			    int prot)
 {
-	struct dma_pte *pte;
+	struct dma_pte *first_pte = NULL, *pte = NULL;
 	int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;
 
 	BUG_ON(addr_width < BITS_PER_LONG && (iov_pfn + nr_pages - 1) >> addr_width);
@@ -1647,19 +1647,27 @@ static int domain_pfn_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
 	if ((prot & (DMA_PTE_READ|DMA_PTE_WRITE)) == 0)
 		return -EINVAL;
 
+	prot &= DMA_PTE_READ | DMA_PTE_WRITE | DMA_PTE_SNP;
+
 	while (nr_pages--) {
-		pte = pfn_to_dma_pte(domain, iov_pfn);
-		if (!pte)
-			return -ENOMEM;
+		if (!pte) {
+			first_pte = pte = pfn_to_dma_pte(domain, iov_pfn);
+			if (!pte)
+				return -ENOMEM;
+		}
 		/* We don't need lock here, nobody else
 		 * touches the iova range
 		 */
 		BUG_ON(dma_pte_addr(pte));
-		dma_set_pte_pfn(pte, phys_pfn);
-		dma_set_pte_prot(pte, prot);
-		if (prot & DMA_PTE_SNP)
-			dma_set_pte_snp(pte);
-		domain_flush_cache(domain, pte, sizeof(*pte));
+		pte->val = (phys_pfn << VTD_PAGE_SHIFT) | prot;
+		pte++;
+		if (!nr_pages ||
+		    (unsigned long)pte >> VTD_PAGE_SHIFT !=
+		    (unsigned long)first_pte >> VTD_PAGE_SHIFT) {
+			domain_flush_cache(domain, first_pte,
+					   (void *)pte - (void *)first_pte);
+			pte = NULL;
+		}
 		iov_pfn++;
 		phys_pfn++;
 	}
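One detail worth spelling out: the flush is cut at VTD_PAGE_SHIFT boundaries because VT-d page-table pages are page-sized, so the PTEs for consecutive IOVA pfns are contiguous in memory only within one table page. When pte crosses that boundary, the accumulated run is flushed and pte is reset to NULL, forcing the next iteration to re-walk via pfn_to_dma_pte() into the next table page; each domain_flush_cache() call therefore always covers a single contiguous range, collapsing up to 512 per-PTE flushes into one.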