path: root/drivers/pci/intel-iommu.c
author	David Woodhouse <David.Woodhouse@intel.com>	2009-06-29 06:17:38 -0400
committer	David Woodhouse <David.Woodhouse@intel.com>	2009-06-29 22:51:30 -0400
commit	e1605495c716ef4eebdb7606bcd1b593f28e2837 (patch)
tree	de1144b9697f94dda08b3a9434fa97e0dbaab93f /drivers/pci/intel-iommu.c
parent	875764de6f0ddb23d270c29357d5a339232a0488 (diff)
intel-iommu: Introduce domain_sg_mapping() to speed up intel_map_sg()
Instead of calling domain_pfn_mapping() repeatedly with single or small numbers of pages, just pass the sglist in. It can optimise the number of cache flushes like domain_pfn_mapping() does, and gives a huge speedup for large scatterlists.

Signed-off-by: David Woodhouse <David.Woodhouse@intel.com>
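For illustration only, here is a small self-contained userspace sketch of the batching idea the commit message describes: walk the scatterlist once, fill page-table entries for consecutive IOVA pages, and flush the CPU cache once per page-table page rather than once per scatterlist element. Every name in it (sg_entry, sg_map, flush_cache, pte_table) is a made-up stand-in, not kernel API, and the flat PTE array stands in for the real page-table walk.

/*
 * Userspace model of the batching done by domain_sg_mapping().
 * Simplified stand-ins only; the point is that the scatterlist is
 * walked once and the cache is flushed per page-table page, not
 * per scatterlist element.
 */
#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT	12
#define PAGE_SIZE	(1UL << PAGE_SHIFT)
#define PTES_PER_PAGE	(PAGE_SIZE / sizeof(uint64_t))

struct sg_entry {			/* stand-in for struct scatterlist */
	uint64_t phys;			/* physical address of the segment */
	unsigned int offset, length;
};

static _Alignas(4096) uint64_t pte_table[4 * PTES_PER_PAGE];
static unsigned int flushes;

static void flush_cache(void *start, size_t bytes)
{
	(void)start; (void)bytes;
	flushes++;			/* real code would flush the range */
}

/* Map nr_pages of IOVA space, starting at iov_pfn, from the sg list. */
static void sg_map(unsigned long iov_pfn, struct sg_entry *sg,
		   unsigned long nr_pages)
{
	uint64_t *first_pte = NULL, *pte = NULL;
	unsigned long sg_res = 0;
	uint64_t pteval = 0;

	while (nr_pages--) {
		if (!sg_res) {		/* starting a new scatterlist element */
			sg_res = (sg->offset + sg->length + PAGE_SIZE - 1)
					>> PAGE_SHIFT;
			pteval = sg->phys & ~(PAGE_SIZE - 1);
		}
		if (!pte)
			first_pte = pte = &pte_table[iov_pfn];
		*pte++ = pteval;
		iov_pfn++;
		pteval += PAGE_SIZE;
		/* Flush once per filled page-table page, or at the end. */
		if (!nr_pages ||
		    ((uintptr_t)pte >> PAGE_SHIFT) !=
		    ((uintptr_t)first_pte >> PAGE_SHIFT)) {
			flush_cache(first_pte, (char *)pte - (char *)first_pte);
			pte = NULL;
		}
		if (!--sg_res)
			sg++;		/* element fully consumed, move on */
	}
}

int main(void)
{
	struct sg_entry sgl[] = {	/* three segments: 3 + 5 + 2 pages */
		{ 0x10000, 0, 3 * 4096 },
		{ 0x40000, 0, 5 * 4096 },
		{ 0x80000, 0, 2 * 4096 },
	};

	sg_map(0, sgl, 10);
	printf("cache flushes: %u\n", flushes);	/* 1, not one per element */
	return 0;
}

In the patch below the same bookkeeping is done with sg_res and sg_next(), pfn_to_dma_pte() replaces the flat array, and domain_flush_cache() does the real flush.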
Diffstat (limited to 'drivers/pci/intel-iommu.c')
-rw-r--r--	drivers/pci/intel-iommu.c	83
1 file changed, 62 insertions(+), 21 deletions(-)
diff --git a/drivers/pci/intel-iommu.c b/drivers/pci/intel-iommu.c
index 11a23201445a..28bd5f2d78fc 100644
--- a/drivers/pci/intel-iommu.c
+++ b/drivers/pci/intel-iommu.c
@@ -1635,6 +1635,56 @@ static int domain_context_mapped(struct pci_dev *pdev)
 					     tmp->devfn);
 }
 
+static int domain_sg_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
+			     struct scatterlist *sg, unsigned long nr_pages,
+			     int prot)
+{
+	struct dma_pte *first_pte = NULL, *pte = NULL;
+	uint64_t pteval;
+	int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;
+	unsigned long sg_res = 0;
+
+	BUG_ON(addr_width < BITS_PER_LONG && (iov_pfn + nr_pages - 1) >> addr_width);
+
+	if ((prot & (DMA_PTE_READ|DMA_PTE_WRITE)) == 0)
+		return -EINVAL;
+
+	prot &= DMA_PTE_READ | DMA_PTE_WRITE | DMA_PTE_SNP;
+
+	while (nr_pages--) {
+		if (!sg_res) {
+			sg_res = (sg->offset + sg->length + VTD_PAGE_SIZE - 1) >> VTD_PAGE_SHIFT;
+			sg->dma_address = ((dma_addr_t)iov_pfn << VTD_PAGE_SHIFT) + sg->offset;
+			sg->dma_length = sg->length;
+			pteval = page_to_phys(sg_page(sg)) | prot;
+		}
+		if (!pte) {
+			first_pte = pte = pfn_to_dma_pte(domain, iov_pfn);
+			if (!pte)
+				return -ENOMEM;
+		}
+		/* We don't need lock here, nobody else
+		 * touches the iova range
+		 */
+		BUG_ON(dma_pte_addr(pte));
+		pte->val = pteval;
+		pte++;
+		if (!nr_pages ||
+		    (unsigned long)pte >> VTD_PAGE_SHIFT !=
+		    (unsigned long)first_pte >> VTD_PAGE_SHIFT) {
+			domain_flush_cache(domain, first_pte,
+					   (void *)pte - (void *)first_pte);
+			pte = NULL;
+		}
+		iov_pfn++;
+		pteval += VTD_PAGE_SIZE;
+		sg_res--;
+		if (!sg_res)
+			sg = sg_next(sg);
+	}
+	return 0;
+}
+
 static int domain_pfn_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
 			      unsigned long phys_pfn, unsigned long nr_pages,
 			      int prot)
@@ -2758,27 +2808,18 @@ static int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int ne
 		prot |= DMA_PTE_WRITE;
 
 	start_vpfn = mm_to_dma_pfn(iova->pfn_lo);
-	offset_pfn = 0;
-	for_each_sg(sglist, sg, nelems, i) {
-		int nr_pages = aligned_nrpages(sg->offset, sg->length);
-		ret = domain_pfn_mapping(domain, start_vpfn + offset_pfn,
-					 page_to_dma_pfn(sg_page(sg)),
-					 nr_pages, prot);
-		if (ret) {
-			/* clear the page */
-			dma_pte_clear_range(domain, start_vpfn,
-					    start_vpfn + offset_pfn);
-			/* free page tables */
-			dma_pte_free_pagetable(domain, start_vpfn,
-					       start_vpfn + offset_pfn);
-			/* free iova */
-			__free_iova(&domain->iovad, iova);
-			return 0;
-		}
-		sg->dma_address = ((dma_addr_t)(start_vpfn + offset_pfn)
-				   << VTD_PAGE_SHIFT) + sg->offset;
-		sg->dma_length = sg->length;
-		offset_pfn += nr_pages;
-	}
+
+	ret = domain_sg_mapping(domain, start_vpfn, sglist, mm_to_dma_pfn(size), prot);
+	if (unlikely(ret)) {
+		/* clear the page */
+		dma_pte_clear_range(domain, start_vpfn,
+				    start_vpfn + size - 1);
+		/* free page tables */
+		dma_pte_free_pagetable(domain, start_vpfn,
+				       start_vpfn + size - 1);
+		/* free iova */
+		__free_iova(&domain->iovad, iova);
+		return 0;
+	}
 
 	/* it's a non-present to present mapping. Only flush if caching mode */