author		David Woodhouse <David.Woodhouse@intel.com>	2009-05-10 12:16:06 -0400
committer	David Woodhouse <David.Woodhouse@intel.com>	2009-05-10 14:49:52 -0400
commit		4c25a2c1b90bf785fc2e2f0f0c74a80b3e070d39 (patch)
tree		2784fbbf4d6782db300b92870d2bf6111ef26627 /drivers
parent		fa3b6dcd5298db2e7b63c17795c9e5570d3df8d9 (diff)
intel-iommu: Clean up handling of "caching mode" vs. context flushing.
It really doesn't make a lot of sense to have some of the logic to handle
caching vs. non-caching mode duplicated in qi_flush_context() and
__iommu_flush_context(), while the return value indicates whether the
caller should take other action which depends on the same thing.
Especially since qi_flush_context() thought it was returning something
entirely different anyway.

This patch makes qi_flush_context() and __iommu_flush_context() both
return void, removes the 'non_present_entry_flush' argument and makes
the only call site which _set_ that argument to 1 do the right thing.

Signed-off-by: David Woodhouse <David.Woodhouse@intel.com>
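The heart of the change is the one call site that passed non_present_entry_flush = 1, in domain_context_mapping_one(). A condensed before/after sketch of that hunk (sid here is shorthand for the (((u16)bus) << 8) | devfn source-id expression used in the real code):

	/* Before: the callee checked caching mode itself and returned 1 when
	 * the caller still had to flush the write buffer on its own. */
	if (iommu->flush.flush_context(iommu, id, sid, DMA_CCMD_MASK_NOBIT,
				       DMA_CCMD_DEVICE_INVL, 1))
		iommu_flush_write_buffer(iommu);
	else
		iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_DSI_FLUSH, 0);

	/* After: the caller tests the capability bit directly. Caching-mode
	 * hardware caches non-present entries under domain 0, so that is the
	 * domain to flush; otherwise a write-buffer flush suffices. */
	if (cap_caching_mode(iommu->cap)) {
		iommu->flush.flush_context(iommu, 0, sid, DMA_CCMD_MASK_NOBIT,
					   DMA_CCMD_DEVICE_INVL);
		iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_DSI_FLUSH, 0);
	} else {
		iommu_flush_write_buffer(iommu);
	}

With the capability check made at the call site, both flush routines can simply perform the invalidation they were asked for, which is what the rest of the diff does.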
Diffstat (limited to 'drivers')
-rw-r--r--	drivers/pci/dmar.c		13
-rw-r--r--	drivers/pci/intel-iommu.c	52
2 files changed, 24 insertions, 41 deletions
diff --git a/drivers/pci/dmar.c b/drivers/pci/dmar.c
index d3d86b749eee..10a071ba3232 100644
--- a/drivers/pci/dmar.c
+++ b/drivers/pci/dmar.c
@@ -723,23 +723,16 @@ void qi_global_iec(struct intel_iommu *iommu)
 	qi_submit_sync(&desc, iommu);
 }
 
-int qi_flush_context(struct intel_iommu *iommu, u16 did, u16 sid, u8 fm,
-		     u64 type, int non_present_entry_flush)
+void qi_flush_context(struct intel_iommu *iommu, u16 did, u16 sid, u8 fm,
+		      u64 type)
 {
 	struct qi_desc desc;
 
-	if (non_present_entry_flush) {
-		if (!cap_caching_mode(iommu->cap))
-			return 1;
-		else
-			did = 0;
-	}
-
 	desc.low = QI_CC_FM(fm) | QI_CC_SID(sid) | QI_CC_DID(did)
 			| QI_CC_GRAN(type) | QI_CC_TYPE;
 	desc.high = 0;
 
-	return qi_submit_sync(&desc, iommu);
+	qi_submit_sync(&desc, iommu);
 }
 
 int qi_flush_iotlb(struct intel_iommu *iommu, u16 did, u64 addr,
diff --git a/drivers/pci/intel-iommu.c b/drivers/pci/intel-iommu.c
index d6f4ee50924c..9f5d9151edc9 100644
--- a/drivers/pci/intel-iommu.c
+++ b/drivers/pci/intel-iommu.c
@@ -857,26 +857,13 @@ static void iommu_flush_write_buffer(struct intel_iommu *iommu)
 }
 
 /* return value determine if we need a write buffer flush */
-static int __iommu_flush_context(struct intel_iommu *iommu,
-	u16 did, u16 source_id, u8 function_mask, u64 type,
-	int non_present_entry_flush)
+static void __iommu_flush_context(struct intel_iommu *iommu,
+				  u16 did, u16 source_id, u8 function_mask,
+				  u64 type)
 {
 	u64 val = 0;
 	unsigned long flag;
 
-	/*
-	 * In the non-present entry flush case, if hardware doesn't cache
-	 * non-present entry we do nothing and if hardware cache non-present
-	 * entry, we flush entries of domain 0 (the domain id is used to cache
-	 * any non-present entries)
-	 */
-	if (non_present_entry_flush) {
-		if (!cap_caching_mode(iommu->cap))
-			return 1;
-		else
-			did = 0;
-	}
-
 	switch (type) {
 	case DMA_CCMD_GLOBAL_INVL:
 		val = DMA_CCMD_GLOBAL_INVL;
@@ -901,9 +888,6 @@ static int __iommu_flush_context(struct intel_iommu *iommu,
 		dmar_readq, (!(val & DMA_CCMD_ICC)), val);
 
 	spin_unlock_irqrestore(&iommu->register_lock, flag);
-
-	/* flush context entry will implicitly flush write buffer */
-	return 0;
 }
 
 /* return value determine if we need a write buffer flush */
@@ -1428,14 +1412,21 @@ static int domain_context_mapping_one(struct dmar_domain *domain, int segment,
 	context_set_present(context);
 	domain_flush_cache(domain, context, sizeof(*context));
 
-	/* it's a non-present to present mapping */
-	if (iommu->flush.flush_context(iommu, id,
-		(((u16)bus) << 8) | devfn, DMA_CCMD_MASK_NOBIT,
-		DMA_CCMD_DEVICE_INVL, 1))
-		iommu_flush_write_buffer(iommu);
-	else
+	/*
+	 * It's a non-present to present mapping. If hardware doesn't cache
+	 * non-present entry we only need to flush the write-buffer. If it
+	 * _does_ cache non-present entries, then it does so in the special
+	 * domain #0, which we have to flush:
+	 */
+	if (cap_caching_mode(iommu->cap)) {
+		iommu->flush.flush_context(iommu, 0,
+					   (((u16)bus) << 8) | devfn,
+					   DMA_CCMD_MASK_NOBIT,
+					   DMA_CCMD_DEVICE_INVL);
 		iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_DSI_FLUSH, 0);
-
+	} else {
+		iommu_flush_write_buffer(iommu);
+	}
 	spin_unlock_irqrestore(&iommu->lock, flags);
 
 	spin_lock_irqsave(&domain->iommu_lock, flags);
@@ -1566,7 +1557,7 @@ static void iommu_detach_dev(struct intel_iommu *iommu, u8 bus, u8 devfn)
 
 	clear_context_table(iommu, bus, devfn);
 	iommu->flush.flush_context(iommu, 0, 0, 0,
-				   DMA_CCMD_GLOBAL_INVL, 0);
+				   DMA_CCMD_GLOBAL_INVL);
 	iommu->flush.flush_iotlb(iommu, 0, 0, 0,
 				 DMA_TLB_GLOBAL_FLUSH, 0);
 }
@@ -2104,8 +2095,7 @@ static int __init init_dmars(void)
 
 		iommu_set_root_entry(iommu);
 
-		iommu->flush.flush_context(iommu, 0, 0, 0, DMA_CCMD_GLOBAL_INVL,
-					   0);
+		iommu->flush.flush_context(iommu, 0, 0, 0, DMA_CCMD_GLOBAL_INVL);
 		iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH,
 					 0);
 		iommu_disable_protect_mem_regions(iommu);
@@ -2721,7 +2711,7 @@ static int init_iommu_hw(void)
 		iommu_set_root_entry(iommu);
 
 		iommu->flush.flush_context(iommu, 0, 0, 0,
-					   DMA_CCMD_GLOBAL_INVL, 0);
+					   DMA_CCMD_GLOBAL_INVL);
 		iommu->flush.flush_iotlb(iommu, 0, 0, 0,
 					 DMA_TLB_GLOBAL_FLUSH, 0);
 		iommu_disable_protect_mem_regions(iommu);
@@ -2738,7 +2728,7 @@ static void iommu_flush_all(void)
 
 	for_each_active_iommu(iommu, drhd) {
 		iommu->flush.flush_context(iommu, 0, 0, 0,
-					   DMA_CCMD_GLOBAL_INVL, 0);
+					   DMA_CCMD_GLOBAL_INVL);
 		iommu->flush.flush_iotlb(iommu, 0, 0, 0,
 					 DMA_TLB_GLOBAL_FLUSH, 0);
 	}