author    Nadav Amit <nadav.amit@gmail.com>	2010-04-01 06:24:40 -0400
committer David Woodhouse <David.Woodhouse@intel.com>	2010-04-09 10:39:21 -0400
commit    82653633b6161cdecc011d15bc9df1c7489bd9a2 (patch)
tree      788049fff6372963e8685fda8badc4d82246464a /drivers/pci/intel-iommu.c
parent    8bdd77dd4ef99292f3d705c4c389c12f55641133 (diff)
intel-iommu: Use correct domain ID when caching mode is enabled
In caching mode, mappings of pages (changes from non-present to present) require invalidation. Currently, this IOTLB flush is performed with a domain ID of zero, which does not comply with the VT-d specification and causes big problems for emulating software. This patch uses the correct domain ID in IOTLB flushes.

Device IOTLB invalidation is performed only on present-to-non-present changes. This decision is now based on an explicit parameter instead of a zero domain ID.

Signed-off-by: Nadav Amit <nadav.amit@gmail.com>
Signed-off-by: David Woodhouse <David.Woodhouse@intel.com>
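As a minimal illustration of the rule this patch establishes, here is a self-contained user-space C sketch. This is not the kernel code itself: struct fake_iommu, its caching_mode field, and should_flush_dev_iotlb() are illustrative stand-ins for the driver's iommu state, cap_caching_mode(iommu->cap), and the device-IOTLB decision inside iommu_flush_iotlb_psi().

#include <stdbool.h>
#include <stdio.h>

/* Illustrative stand-in for the driver's per-IOMMU state. */
struct fake_iommu {
	bool caching_mode;	/* models cap_caching_mode(iommu->cap) */
};

/*
 * Models the post-patch rule: the device IOTLB is flushed unless this
 * is a caching-mode flush for a non-present -> present change (map).
 * Mirrors the new test: !cap_caching_mode(iommu->cap) || !map
 */
static bool should_flush_dev_iotlb(const struct fake_iommu *iommu, bool map)
{
	return !iommu->caching_mode || !map;
}

int main(void)
{
	struct fake_iommu cm = { .caching_mode = true };
	struct fake_iommu hw = { .caching_mode = false };

	/* caching mode + new mapping: device IOTLB flush is skipped */
	printf("caching mode, map:   %d\n", should_flush_dev_iotlb(&cm, true));
	/* caching mode + unmap: device IOTLB must be flushed */
	printf("caching mode, unmap: %d\n", should_flush_dev_iotlb(&cm, false));
	/* without caching mode (real hardware): always flushed */
	printf("hardware, map:       %d\n", should_flush_dev_iotlb(&hw, true));
	return 0;
}

Only the first case skips the device IOTLB flush; the IOTLB flush proper always carries the caller's real domain ID rather than zero.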
Diffstat (limited to 'drivers/pci/intel-iommu.c')
-rw-r--r--	drivers/pci/intel-iommu.c	18
1 file changed, 9 insertions(+), 9 deletions(-)
diff --git a/drivers/pci/intel-iommu.c b/drivers/pci/intel-iommu.c
index 341da41cde8b..1880ee06d701 100644
--- a/drivers/pci/intel-iommu.c
+++ b/drivers/pci/intel-iommu.c
@@ -1064,7 +1064,7 @@ static void iommu_flush_dev_iotlb(struct dmar_domain *domain,
 }
 
 static void iommu_flush_iotlb_psi(struct intel_iommu *iommu, u16 did,
-				  unsigned long pfn, unsigned int pages)
+				  unsigned long pfn, unsigned int pages, int map)
 {
 	unsigned int mask = ilog2(__roundup_pow_of_two(pages));
 	uint64_t addr = (uint64_t)pfn << VTD_PAGE_SHIFT;
@@ -1085,10 +1085,10 @@ static void iommu_flush_iotlb_psi(struct intel_iommu *iommu, u16 did,
 					DMA_TLB_PSI_FLUSH);
 
 	/*
-	 * In caching mode, domain ID 0 is reserved for non-present to present
-	 * mapping flush. Device IOTLB doesn't need to be flushed in this case.
+	 * In caching mode, changes of pages from non-present to present require
+	 * flush. However, device IOTLB doesn't need to be flushed in this case.
 	 */
-	if (!cap_caching_mode(iommu->cap) || did)
+	if (!cap_caching_mode(iommu->cap) || !map)
 		iommu_flush_dev_iotlb(iommu->domains[did], addr, mask);
 }
 
@@ -1544,7 +1544,7 @@ static int domain_context_mapping_one(struct dmar_domain *domain, int segment,
 					   (((u16)bus) << 8) | devfn,
 					   DMA_CCMD_MASK_NOBIT,
 					   DMA_CCMD_DEVICE_INVL);
-		iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_DSI_FLUSH);
+		iommu->flush.flush_iotlb(iommu, domain->id, 0, 0, DMA_TLB_DSI_FLUSH);
 	} else {
 		iommu_flush_write_buffer(iommu);
 	}
@@ -2607,7 +2607,7 @@ static dma_addr_t __intel_map_single(struct device *hwdev, phys_addr_t paddr,
 
 	/* it's a non-present to present mapping. Only flush if caching mode */
 	if (cap_caching_mode(iommu->cap))
-		iommu_flush_iotlb_psi(iommu, 0, mm_to_dma_pfn(iova->pfn_lo), size);
+		iommu_flush_iotlb_psi(iommu, domain->id, mm_to_dma_pfn(iova->pfn_lo), size, 1);
 	else
 		iommu_flush_write_buffer(iommu);
 
@@ -2736,7 +2736,7 @@ static void intel_unmap_page(struct device *dev, dma_addr_t dev_addr,
 
 	if (intel_iommu_strict) {
 		iommu_flush_iotlb_psi(iommu, domain->id, start_pfn,
-				      last_pfn - start_pfn + 1);
+				      last_pfn - start_pfn + 1, 0);
 		/* free iova */
 		__free_iova(&domain->iovad, iova);
 	} else {
@@ -2826,7 +2826,7 @@ static void intel_unmap_sg(struct device *hwdev, struct scatterlist *sglist,
 
 	if (intel_iommu_strict) {
 		iommu_flush_iotlb_psi(iommu, domain->id, start_pfn,
-				      last_pfn - start_pfn + 1);
+				      last_pfn - start_pfn + 1, 0);
 		/* free iova */
 		__free_iova(&domain->iovad, iova);
 	} else {
@@ -2913,7 +2913,7 @@ static int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int ne
 
 	/* it's a non-present to present mapping. Only flush if caching mode */
 	if (cap_caching_mode(iommu->cap))
-		iommu_flush_iotlb_psi(iommu, 0, start_vpfn, size);
+		iommu_flush_iotlb_psi(iommu, domain->id, start_vpfn, size, 1);
 	else
 		iommu_flush_write_buffer(iommu);
 