author    Nadav Amit <nadav.amit@gmail.com>  2010-04-08 16:00:41 -0400
committer David Woodhouse <David.Woodhouse@intel.com>  2010-04-09 10:54:41 -0400
commit    78d5f0f500e6ba8f6cfd0673475ff4d941d705a2 (patch)
tree      d992607aba2b178a251d52d4e2ac6f9c62709c10 /drivers/pci/intel-iommu.c
parent    82653633b6161cdecc011d15bc9df1c7489bd9a2 (diff)
intel-iommu: Avoid global flushes with caching mode.
While a global invalidation may be efficient on real hardware, emulating one is very expensive: every shadow entry must be examined. This patch changes the behaviour when caching mode is enabled (which is the case when IOMMU emulation takes place): page-specific invalidation is used instead.

Signed-off-by: Nadav Amit <nadav.amit@gmail.com>
Signed-off-by: David Woodhouse <David.Woodhouse@intel.com>
Diffstat (limited to 'drivers/pci/intel-iommu.c')
-rw-r--r--  drivers/pci/intel-iommu.c | 19 ++++++++++++++-----
1 file changed, 14 insertions(+), 5 deletions(-)
diff --git a/drivers/pci/intel-iommu.c b/drivers/pci/intel-iommu.c
index 1880ee06d701..9ce79b1bae83 100644
--- a/drivers/pci/intel-iommu.c
+++ b/drivers/pci/intel-iommu.c
@@ -2647,15 +2647,24 @@ static void flush_unmaps(void)
 		if (!deferred_flush[i].next)
 			continue;
 
-		iommu->flush.flush_iotlb(iommu, 0, 0, 0,
+		/* In caching mode, global flushes turn emulation expensive */
+		if (!cap_caching_mode(iommu->cap))
+			iommu->flush.flush_iotlb(iommu, 0, 0, 0,
 					 DMA_TLB_GLOBAL_FLUSH);
 		for (j = 0; j < deferred_flush[i].next; j++) {
 			unsigned long mask;
 			struct iova *iova = deferred_flush[i].iova[j];
-
-			mask = ilog2(mm_to_dma_pfn(iova->pfn_hi - iova->pfn_lo + 1));
-			iommu_flush_dev_iotlb(deferred_flush[i].domain[j],
-					(uint64_t)iova->pfn_lo << PAGE_SHIFT, mask);
+			struct dmar_domain *domain = deferred_flush[i].domain[j];
+
+			/* On real hardware multiple invalidations are expensive */
+			if (cap_caching_mode(iommu->cap))
+				iommu_flush_iotlb_psi(iommu, domain->id,
+					iova->pfn_lo, iova->pfn_hi - iova->pfn_lo + 1, 0);
+			else {
+				mask = ilog2(mm_to_dma_pfn(iova->pfn_hi - iova->pfn_lo + 1));
+				iommu_flush_dev_iotlb(deferred_flush[i].domain[j],
+						(uint64_t)iova->pfn_lo << PAGE_SHIFT, mask);
+			}
 			__free_iova(&deferred_flush[i].domain[j]->iovad, iova);
 		}
 		deferred_flush[i].next = 0;
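
For readers who want to see the tradeoff outside the kernel, below is a minimal user-space sketch of the selection logic the patch introduces. It is an illustration, not kernel code: flush_iotlb_global(), flush_iotlb_psi() and flush_range() are made-up stubs, and only the capability-bit test mirrors the kernel's cap_caching_mode() macro (Caching Mode is bit 7 of the VT-d capability register).

/*
 * User-space sketch of the patch's flush selection. The stubs only
 * print what the real invalidation primitives would do.
 */
#include <stdio.h>
#include <stdint.h>

/* Caching Mode is bit 7 of the VT-d capability register; this mirrors
 * the kernel's cap_caching_mode() macro. */
#define CAP_CACHING_MODE(cap)   (((cap) >> 7) & 1)

/* Stub standing in for a global IOTLB invalidation. */
static void flush_iotlb_global(void)
{
        printf("global IOTLB flush: one command, but an emulator must "
               "examine every shadow entry\n");
}

/* Stub standing in for a page-selective (PSI) invalidation. */
static void flush_iotlb_psi(unsigned long pfn, unsigned long pages)
{
        printf("page-selective flush: pfn %lu, %lu page(s); cheap to emulate\n",
               pfn, pages);
}

/* Pick the invalidation that is cheap for the IOMMU actually present. */
static void flush_range(uint64_t cap, unsigned long pfn_lo, unsigned long pfn_hi)
{
        if (CAP_CACHING_MODE(cap))
                /* Caching mode set: emulated IOMMU, flush only touched pages. */
                flush_iotlb_psi(pfn_lo, pfn_hi - pfn_lo + 1);
        else
                /* Real hardware: one global flush beats many selective ones. */
                flush_iotlb_global();
}

int main(void)
{
        flush_range(0, 42, 45);                /* caching mode clear: real hardware */
        flush_range(UINT64_C(1) << 7, 42, 45); /* caching mode set: emulated IOMMU  */
        return 0;
}

The asymmetry driving the patch is visible here: a global flush is a single command on real hardware but forces an emulator to examine every shadow entry, while a page-selective flush tells the emulator exactly which mappings changed, at the cost of issuing one invalidation per deferred range.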