Diffstat (limited to 'drivers')
 drivers/pci/intel-iommu.c | 38 ++++++++++++++++++++++++--------------
 1 file changed, 24 insertions(+), 14 deletions(-)
diff --git a/drivers/pci/intel-iommu.c b/drivers/pci/intel-iommu.c
index f08d7865fe00..7540ef91d5f7 100644
--- a/drivers/pci/intel-iommu.c
+++ b/drivers/pci/intel-iommu.c
@@ -1643,40 +1643,48 @@ static int domain_context_mapped(struct pci_dev *pdev)
 					  tmp->devfn);
 }
 
-static int
-domain_page_mapping(struct dmar_domain *domain, dma_addr_t iova,
-		    u64 hpa, size_t size, int prot)
+static int domain_pfn_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
+			      unsigned long phys_pfn, unsigned long nr_pages,
+			      int prot)
 {
-	unsigned long start_pfn = hpa >> VTD_PAGE_SHIFT;
-	unsigned long last_pfn = (hpa + size - 1) >> VTD_PAGE_SHIFT;
 	struct dma_pte *pte;
-	int index = 0;
 	int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;
 
-	BUG_ON(addr_width < BITS_PER_LONG && last_pfn >> addr_width);
+	BUG_ON(addr_width < BITS_PER_LONG && (iov_pfn + nr_pages - 1) >> addr_width);
 
 	if ((prot & (DMA_PTE_READ|DMA_PTE_WRITE)) == 0)
 		return -EINVAL;
 
-	while (start_pfn <= last_pfn) {
-		pte = pfn_to_dma_pte(domain, (iova >> VTD_PAGE_SHIFT) + index);
+	while (nr_pages--) {
+		pte = pfn_to_dma_pte(domain, iov_pfn);
 		if (!pte)
 			return -ENOMEM;
 		/* We don't need lock here, nobody else
 		 * touches the iova range
 		 */
 		BUG_ON(dma_pte_addr(pte));
-		dma_set_pte_pfn(pte, start_pfn);
+		dma_set_pte_pfn(pte, phys_pfn);
 		dma_set_pte_prot(pte, prot);
 		if (prot & DMA_PTE_SNP)
 			dma_set_pte_snp(pte);
 		domain_flush_cache(domain, pte, sizeof(*pte));
-		start_pfn++;
-		index++;
+		iov_pfn++;
+		phys_pfn++;
 	}
 	return 0;
 }
 
+static int domain_page_mapping(struct dmar_domain *domain, dma_addr_t iova,
+			       u64 hpa, size_t size, int prot)
+{
+	unsigned long first_pfn = hpa >> VTD_PAGE_SHIFT;
+	unsigned long last_pfn = (hpa + size - 1) >> VTD_PAGE_SHIFT;
+
+	return domain_pfn_mapping(domain, iova >> VTD_PAGE_SHIFT, first_pfn,
+				  last_pfn - first_pfn + 1, prot);
+
+}
+
 static void iommu_detach_dev(struct intel_iommu *iommu, u8 bus, u8 devfn)
 {
 	if (!iommu)
@@ -1893,8 +1901,10 @@ static int iommu_domain_identity_map(struct dmar_domain *domain,
 	dma_pte_clear_range(domain, base >> VTD_PAGE_SHIFT,
 			    (base + size - 1) >> VTD_PAGE_SHIFT);
 
-	return domain_page_mapping(domain, base, base, size,
-				   DMA_PTE_READ|DMA_PTE_WRITE);
+	return domain_pfn_mapping(domain, base >> VTD_PAGE_SHIFT,
+				  base >> VTD_PAGE_SHIFT,
+				  size >> VTD_PAGE_SHIFT,
+				  DMA_PTE_READ|DMA_PTE_WRITE);
 }
 
 static int iommu_prepare_identity_map(struct pci_dev *pdev,