aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/iommu
diff options
context:
space:
mode:
authorJoerg Roedel <jroedel@suse.de>2015-07-21 09:20:32 -0400
committerJoerg Roedel <jroedel@suse.de>2015-08-12 10:23:34 -0400
commita1ddcbe9301023928f877b675a40914427928f2a (patch)
treeb3de5d4583dbdbbd21674bdeb708209c3084a9b1 /drivers/iommu
parentde24e55395698e29f2a0582ae1899fa0001f829a (diff)
iommu/vt-d: Pass dmar_domain directly into iommu_flush_iotlb_psi
This function can figure out the domain-id to use itself from the iommu_did array. This is more reliable across different domain types and brings us one step further toward removing the domain->id field. Signed-off-by: Joerg Roedel <jroedel@suse.de>
Diffstat (limited to 'drivers/iommu')
-rw-r--r--drivers/iommu/intel-iommu.c42
1 file changed, 24 insertions, 18 deletions
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
index 5d4261ff67ad..380b4e2f76cd 100644
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -1471,11 +1471,14 @@ static void iommu_flush_dev_iotlb(struct dmar_domain *domain,
1471 spin_unlock_irqrestore(&device_domain_lock, flags); 1471 spin_unlock_irqrestore(&device_domain_lock, flags);
1472} 1472}
1473 1473
1474static void iommu_flush_iotlb_psi(struct intel_iommu *iommu, u16 did, 1474static void iommu_flush_iotlb_psi(struct intel_iommu *iommu,
1475 unsigned long pfn, unsigned int pages, int ih, int map) 1475 struct dmar_domain *domain,
1476 unsigned long pfn, unsigned int pages,
1477 int ih, int map)
1476{ 1478{
1477 unsigned int mask = ilog2(__roundup_pow_of_two(pages)); 1479 unsigned int mask = ilog2(__roundup_pow_of_two(pages));
1478 uint64_t addr = (uint64_t)pfn << VTD_PAGE_SHIFT; 1480 uint64_t addr = (uint64_t)pfn << VTD_PAGE_SHIFT;
1481 u16 did = domain->iommu_did[iommu->seq_id];
1479 1482
1480 BUG_ON(pages == 0); 1483 BUG_ON(pages == 0);
1481 1484
@@ -3422,7 +3425,9 @@ static dma_addr_t __intel_map_single(struct device *dev, phys_addr_t paddr,
3422 3425
3423 /* it's a non-present to present mapping. Only flush if caching mode */ 3426 /* it's a non-present to present mapping. Only flush if caching mode */
3424 if (cap_caching_mode(iommu->cap)) 3427 if (cap_caching_mode(iommu->cap))
3425 iommu_flush_iotlb_psi(iommu, domain->id, mm_to_dma_pfn(iova->pfn_lo), size, 0, 1); 3428 iommu_flush_iotlb_psi(iommu, domain,
3429 mm_to_dma_pfn(iova->pfn_lo),
3430 size, 0, 1);
3426 else 3431 else
3427 iommu_flush_write_buffer(iommu); 3432 iommu_flush_write_buffer(iommu);
3428 3433
@@ -3473,7 +3478,7 @@ static void flush_unmaps(void)
3473 3478
3474 /* On real hardware multiple invalidations are expensive */ 3479 /* On real hardware multiple invalidations are expensive */
3475 if (cap_caching_mode(iommu->cap)) 3480 if (cap_caching_mode(iommu->cap))
3476 iommu_flush_iotlb_psi(iommu, domain->id, 3481 iommu_flush_iotlb_psi(iommu, domain,
3477 iova->pfn_lo, iova_size(iova), 3482 iova->pfn_lo, iova_size(iova),
3478 !deferred_flush[i].freelist[j], 0); 3483 !deferred_flush[i].freelist[j], 0);
3479 else { 3484 else {
@@ -3557,7 +3562,7 @@ static void intel_unmap(struct device *dev, dma_addr_t dev_addr)
3557 freelist = domain_unmap(domain, start_pfn, last_pfn); 3562 freelist = domain_unmap(domain, start_pfn, last_pfn);
3558 3563
3559 if (intel_iommu_strict) { 3564 if (intel_iommu_strict) {
3560 iommu_flush_iotlb_psi(iommu, domain->id, start_pfn, 3565 iommu_flush_iotlb_psi(iommu, domain, start_pfn,
3561 last_pfn - start_pfn + 1, !freelist, 0); 3566 last_pfn - start_pfn + 1, !freelist, 0);
3562 /* free iova */ 3567 /* free iova */
3563 __free_iova(&domain->iovad, iova); 3568 __free_iova(&domain->iovad, iova);
@@ -3715,7 +3720,7 @@ static int intel_map_sg(struct device *dev, struct scatterlist *sglist, int nele
3715 3720
3716 /* it's a non-present to present mapping. Only flush if caching mode */ 3721 /* it's a non-present to present mapping. Only flush if caching mode */
3717 if (cap_caching_mode(iommu->cap)) 3722 if (cap_caching_mode(iommu->cap))
3718 iommu_flush_iotlb_psi(iommu, domain->id, start_vpfn, size, 0, 1); 3723 iommu_flush_iotlb_psi(iommu, domain, start_vpfn, size, 0, 1);
3719 else 3724 else
3720 iommu_flush_write_buffer(iommu); 3725 iommu_flush_write_buffer(iommu);
3721 3726
@@ -4421,7 +4426,7 @@ static int intel_iommu_memory_notifier(struct notifier_block *nb,
4421 4426
4422 rcu_read_lock(); 4427 rcu_read_lock();
4423 for_each_active_iommu(iommu, drhd) 4428 for_each_active_iommu(iommu, drhd)
4424 iommu_flush_iotlb_psi(iommu, si_domain->id, 4429 iommu_flush_iotlb_psi(iommu, si_domain,
4425 iova->pfn_lo, iova_size(iova), 4430 iova->pfn_lo, iova_size(iova),
4426 !freelist, 0); 4431 !freelist, 0);
4427 rcu_read_unlock(); 4432 rcu_read_unlock();
@@ -4872,17 +4877,18 @@ static size_t intel_iommu_unmap(struct iommu_domain *domain,
4872 npages = last_pfn - start_pfn + 1; 4877 npages = last_pfn - start_pfn + 1;
4873 4878
4874 for_each_set_bit(iommu_id, dmar_domain->iommu_bmp, g_num_of_iommus) { 4879 for_each_set_bit(iommu_id, dmar_domain->iommu_bmp, g_num_of_iommus) {
4875 iommu = g_iommus[iommu_id]; 4880 iommu = g_iommus[iommu_id];
4876 4881
4877 /* 4882 /*
4878 * find bit position of dmar_domain 4883 * find bit position of dmar_domain
4879 */ 4884 */
4880 ndomains = cap_ndoms(iommu->cap); 4885 ndomains = cap_ndoms(iommu->cap);
4881 for_each_set_bit(num, iommu->domain_ids, ndomains) { 4886 for_each_set_bit(num, iommu->domain_ids, ndomains) {
4882 if (get_iommu_domain(iommu, num) == dmar_domain) 4887 if (get_iommu_domain(iommu, num) == dmar_domain)
4883 iommu_flush_iotlb_psi(iommu, num, start_pfn, 4888 iommu_flush_iotlb_psi(iommu, dmar_domain,
4884 npages, !freelist, 0); 4889 start_pfn, npages,
4885 } 4890 !freelist, 0);
4891 }
4886 4892
4887 } 4893 }
4888 4894