author    Peter Xu <peterx@redhat.com>    2018-05-03 22:34:53 -0400
committer Joerg Roedel <jroedel@suse.de>  2018-05-15 10:31:41 -0400
commit    87684fd997a6f12486bb0a638c7dba3130afb376 (patch)
tree      7901692aa08cba54a1c44d4c7328fb39785c7565
parent    eed91a0b85e55bcda8849d191604537b9933594f (diff)
iommu/vt-d: Fix iotlb psi missing for mappings
When caching mode is enabled for the IOMMU, we should send explicit IOTLB PSIs even for newly created mappings. However, these events are missing for all intel_iommu_map() callers, e.g., iommu_map(). One direct user is the vfio-pci driver.

To make sure the PSIs are always sent when necessary, this patch first introduces a domain_mapping() helper for page mappings, then fixes the problem by generalizing the explicit map IOTLB PSI logic into that new helper. With that, iommu_domain_identity_map() uses the simplified version to avoid sending the notifications, while all remaining callers always send them.

For the VM case, the PSIs are sent to all the backend IOMMUs of the domain.

This patch allows nested device assignment to work with QEMU (assign the device to an L1 guest first, then assign it again to an L2 guest).

Signed-off-by: Peter Xu <peterx@redhat.com>
Signed-off-by: Joerg Roedel <jroedel@suse.de>
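For reference, the per-IOMMU notification that domain_mapping() performs is the caching-mode PSI flush that the parent commit (eed91a0b85e5) factored into __mapping_notify_one(). The body below is only a sketch reconstructed from the description above (the helper itself is not part of this diff); it shows why caching mode forces an explicit IOTLB PSI even for a non-present-to-present mapping:

static void __mapping_notify_one(struct intel_iommu *iommu, struct dmar_domain *domain,
                                 unsigned long pfn, unsigned int pages)
{
        /*
         * Sketch, not taken from this diff: with caching mode set the
         * IOMMU (typically an emulated one, e.g. QEMU's vIOMMU) may cache
         * non-present entries, so a newly created mapping needs an explicit
         * page-selective IOTLB invalidation (PSI); otherwise flushing the
         * write buffer is sufficient.
         */
        if (cap_caching_mode(iommu->cap))
                iommu_flush_iotlb_psi(iommu, domain, pfn, pages, 0, 1);
        else
                iommu_flush_write_buffer(iommu);
}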
-rw-r--r--    drivers/iommu/intel-iommu.c    43
1 file changed, 34 insertions(+), 9 deletions(-)
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
index 13190a54aba2..601d3789211f 100644
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -2352,18 +2352,47 @@ static int __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
 	return 0;
 }
 
+static int domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
+			  struct scatterlist *sg, unsigned long phys_pfn,
+			  unsigned long nr_pages, int prot)
+{
+	int ret;
+	struct intel_iommu *iommu;
+
+	/* Do the real mapping first */
+	ret = __domain_mapping(domain, iov_pfn, sg, phys_pfn, nr_pages, prot);
+	if (ret)
+		return ret;
+
+	/* Notify about the new mapping */
+	if (domain_type_is_vm(domain)) {
+		/* VM typed domains can have more than one IOMMUs */
+		int iommu_id;
+		for_each_domain_iommu(iommu_id, domain) {
+			iommu = g_iommus[iommu_id];
+			__mapping_notify_one(iommu, domain, iov_pfn, nr_pages);
+		}
+	} else {
+		/* General domains only have one IOMMU */
+		iommu = domain_get_iommu(domain);
+		__mapping_notify_one(iommu, domain, iov_pfn, nr_pages);
+	}
+
+	return 0;
+}
+
 static inline int domain_sg_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
 				    struct scatterlist *sg, unsigned long nr_pages,
 				    int prot)
 {
-	return __domain_mapping(domain, iov_pfn, sg, 0, nr_pages, prot);
+	return domain_mapping(domain, iov_pfn, sg, 0, nr_pages, prot);
 }
 
 static inline int domain_pfn_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
 				     unsigned long phys_pfn, unsigned long nr_pages,
 				     int prot)
 {
-	return __domain_mapping(domain, iov_pfn, NULL, phys_pfn, nr_pages, prot);
+	return domain_mapping(domain, iov_pfn, NULL, phys_pfn, nr_pages, prot);
 }
 
 static void domain_context_clear_one(struct intel_iommu *iommu, u8 bus, u8 devfn)
@@ -2668,9 +2697,9 @@ static int iommu_domain_identity_map(struct dmar_domain *domain,
 	 */
 	dma_pte_clear_range(domain, first_vpfn, last_vpfn);
 
-	return domain_pfn_mapping(domain, first_vpfn, first_vpfn,
-				  last_vpfn - first_vpfn + 1,
-				  DMA_PTE_READ|DMA_PTE_WRITE);
+	return __domain_mapping(domain, first_vpfn, NULL,
+				first_vpfn, last_vpfn - first_vpfn + 1,
+				DMA_PTE_READ|DMA_PTE_WRITE);
 }
 
 static int domain_prepare_identity_map(struct device *dev,
@@ -3637,8 +3666,6 @@ static dma_addr_t __intel_map_single(struct device *dev, phys_addr_t paddr,
 	if (ret)
 		goto error;
 
-	__mapping_notify_one(iommu, domain, mm_to_dma_pfn(iova_pfn), size);
-
 	start_paddr = (phys_addr_t)iova_pfn << PAGE_SHIFT;
 	start_paddr += paddr & ~PAGE_MASK;
 	return start_paddr;
@@ -3825,8 +3852,6 @@ static int intel_map_sg(struct device *dev, struct scatterlist *sglist, int nele
 		return 0;
 	}
 
-	__mapping_notify_one(iommu, domain, start_vpfn, size);
-
 	return nelems;
 }
 
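With the hunks above applied, every map path reaches the notification inside domain_mapping(), which is why the two explicit __mapping_notify_one() calls in __intel_map_single() and intel_map_sg() can be dropped. A rough sketch of the resulting flow, inferred from this diff and the commit message:

/*
 * intel_iommu_map()  (e.g. vfio-pci via iommu_map())
 *   -> domain_pfn_mapping() / domain_sg_mapping()
 *        -> domain_mapping()               new helper above
 *             -> __domain_mapping()        writes the page-table entries
 *             -> __mapping_notify_one()    IOTLB PSI; for VM-typed domains,
 *                                          once per backend IOMMU
 *
 * iommu_domain_identity_map() now calls __domain_mapping() directly and
 * therefore skips the notification on purpose.
 */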