 drivers/pci/dmar.c          | 13 +++----------
 drivers/pci/intel-iommu.c   | 52 +++++++++++++++++++++-------------------------------------
 include/linux/intel-iommu.h |  8 ++++----
 3 files changed, 28 insertions(+), 45 deletions(-)
diff --git a/drivers/pci/dmar.c b/drivers/pci/dmar.c
index d3d86b749eee..10a071ba3232 100644
--- a/drivers/pci/dmar.c
+++ b/drivers/pci/dmar.c
@@ -723,23 +723,16 @@ void qi_global_iec(struct intel_iommu *iommu)
 	qi_submit_sync(&desc, iommu);
 }
 
-int qi_flush_context(struct intel_iommu *iommu, u16 did, u16 sid, u8 fm,
-		     u64 type, int non_present_entry_flush)
+void qi_flush_context(struct intel_iommu *iommu, u16 did, u16 sid, u8 fm,
+		      u64 type)
 {
 	struct qi_desc desc;
 
-	if (non_present_entry_flush) {
-		if (!cap_caching_mode(iommu->cap))
-			return 1;
-		else
-			did = 0;
-	}
-
 	desc.low = QI_CC_FM(fm) | QI_CC_SID(sid) | QI_CC_DID(did)
 			| QI_CC_GRAN(type) | QI_CC_TYPE;
 	desc.high = 0;
 
-	return qi_submit_sync(&desc, iommu);
+	qi_submit_sync(&desc, iommu);
 }
 
 int qi_flush_iotlb(struct intel_iommu *iommu, u16 did, u64 addr,
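The change above inverts the old calling convention: qi_flush_context() used to run the Caching Mode test itself and returned 1 to tell the caller "nothing was invalidated, flush the write buffer instead". With that test hoisted to the call sites the function has nothing left to report, so it becomes void. Below is a minimal stand-alone sketch of the before/after contract; the fake_ types and the CAP_CM bit position are assumptions for illustration, not the kernel's definitions.

#include <stdio.h>
#include <stdint.h>

#define CAP_CM (1ULL << 7)	/* Caching Mode capability bit: assumed position */

struct fake_iommu { uint64_t cap; };	/* stand-in for struct intel_iommu */

static int cap_caching_mode(uint64_t cap) { return !!(cap & CAP_CM); }

/* Old contract: return 1 means "nothing flushed, caller flushes write buffer". */
static int old_qi_flush_context(struct fake_iommu *iommu, uint16_t did,
				int non_present_entry_flush)
{
	if (non_present_entry_flush) {
		if (!cap_caching_mode(iommu->cap))
			return 1;	/* hardware caches no non-present entries */
		did = 0;		/* cached non-present entries live in domain 0 */
	}
	printf("invalidate context cache, did=%u\n", (unsigned)did);
	return 0;
}

/* New contract: always submit the invalidation; nothing left to report. */
static void new_qi_flush_context(struct fake_iommu *iommu, uint16_t did)
{
	(void)iommu;
	printf("invalidate context cache, did=%u\n", (unsigned)did);
}

int main(void)
{
	struct fake_iommu iommu = { .cap = 0 };	/* hardware without Caching Mode */

	if (old_qi_flush_context(&iommu, 5, 1))	/* old: the callee decided */
		puts("flush write buffer instead");

	if (cap_caching_mode(iommu.cap))	/* new: the caller decides */
		new_qi_flush_context(&iommu, 0);
	else
		puts("flush write buffer instead");
	return 0;
}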
diff --git a/drivers/pci/intel-iommu.c b/drivers/pci/intel-iommu.c
index d6f4ee50924c..9f5d9151edc9 100644
--- a/drivers/pci/intel-iommu.c
+++ b/drivers/pci/intel-iommu.c
@@ -857,26 +857,13 @@ static void iommu_flush_write_buffer(struct intel_iommu *iommu)
 }
 
 /* return value determine if we need a write buffer flush */
-static int __iommu_flush_context(struct intel_iommu *iommu,
-	u16 did, u16 source_id, u8 function_mask, u64 type,
-	int non_present_entry_flush)
+static void __iommu_flush_context(struct intel_iommu *iommu,
+				  u16 did, u16 source_id, u8 function_mask,
+				  u64 type)
 {
 	u64 val = 0;
 	unsigned long flag;
 
-	/*
-	 * In the non-present entry flush case, if hardware doesn't cache
-	 * non-present entry we do nothing and if hardware cache non-present
-	 * entry, we flush entries of domain 0 (the domain id is used to cache
-	 * any non-present entries)
-	 */
-	if (non_present_entry_flush) {
-		if (!cap_caching_mode(iommu->cap))
-			return 1;
-		else
-			did = 0;
-	}
-
 	switch (type) {
 	case DMA_CCMD_GLOBAL_INVL:
 		val = DMA_CCMD_GLOBAL_INVL;
@@ -901,9 +888,6 @@ static int __iommu_flush_context(struct intel_iommu *iommu,
 		dmar_readq, (!(val & DMA_CCMD_ICC)), val);
 
 	spin_unlock_irqrestore(&iommu->register_lock, flag);
-
-	/* flush context entry will implicitly flush write buffer */
-	return 0;
 }
 
 /* return value determine if we need a write buffer flush */
@@ -1428,14 +1412,21 @@ static int domain_context_mapping_one(struct dmar_domain *domain, int segment,
 	context_set_present(context);
 	domain_flush_cache(domain, context, sizeof(*context));
 
-	/* it's a non-present to present mapping */
-	if (iommu->flush.flush_context(iommu, id,
-		(((u16)bus) << 8) | devfn, DMA_CCMD_MASK_NOBIT,
-		DMA_CCMD_DEVICE_INVL, 1))
-		iommu_flush_write_buffer(iommu);
-	else
+	/*
+	 * It's a non-present to present mapping. If hardware doesn't cache
+	 * non-present entries we only need to flush the write-buffer. If it
+	 * _does_ cache non-present entries, then it does so in the special
+	 * domain #0, which we have to flush:
+	 */
+	if (cap_caching_mode(iommu->cap)) {
+		iommu->flush.flush_context(iommu, 0,
+					   (((u16)bus) << 8) | devfn,
+					   DMA_CCMD_MASK_NOBIT,
+					   DMA_CCMD_DEVICE_INVL);
 		iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_DSI_FLUSH, 0);
-
+	} else {
+		iommu_flush_write_buffer(iommu);
+	}
 	spin_unlock_irqrestore(&iommu->lock, flags);
 
 	spin_lock_irqsave(&domain->iommu_lock, flags);
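The rewritten branch above is the heart of the patch. On a non-present to present transition, hardware with the Caching Mode (CM) capability may hold the stale not-present entry in the special domain 0, so it needs a device-selective context-cache flush plus a domain-selective IOTLB flush against that domain; hardware without CM caches nothing here, and a write-buffer flush suffices. A runnable stand-alone sketch of that decision follows; the fake_ names and the CAP_CM bit position are illustrative assumptions, and only the branch structure mirrors the patch.

#include <stdio.h>
#include <stdint.h>

#define CAP_CM (1ULL << 7)	/* Caching Mode capability bit: assumed position */

struct fake_iommu { uint64_t cap; };

static int cap_caching_mode(uint64_t cap) { return !!(cap & CAP_CM); }

static void after_nonpresent_to_present(struct fake_iommu *iommu, uint16_t sid)
{
	if (cap_caching_mode(iommu->cap)) {
		/*
		 * The hardware may have cached the stale not-present entry
		 * under domain ID 0, so issue real invalidations against it.
		 */
		printf("flush context cache: did=0 sid=%#x (device-selective)\n",
		       (unsigned)sid);
		printf("flush IOTLB: did=0 (domain-selective)\n");
	} else {
		/* Nothing was cached; flushing the write buffer suffices. */
		printf("flush write buffer\n");
	}
}

int main(void)
{
	struct fake_iommu cm = { .cap = CAP_CM }, plain = { .cap = 0 };
	uint16_t sid = (8 << 8) | 0x10;	/* source-id: bus 8, devfn 0x10 */

	after_nonpresent_to_present(&cm, sid);
	after_nonpresent_to_present(&plain, sid);
	return 0;
}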
@@ -1566,7 +1557,7 @@ static void iommu_detach_dev(struct intel_iommu *iommu, u8 bus, u8 devfn)
 
 	clear_context_table(iommu, bus, devfn);
 	iommu->flush.flush_context(iommu, 0, 0, 0,
-				   DMA_CCMD_GLOBAL_INVL, 0);
+				   DMA_CCMD_GLOBAL_INVL);
 	iommu->flush.flush_iotlb(iommu, 0, 0, 0,
 				 DMA_TLB_GLOBAL_FLUSH, 0);
 }
@@ -2104,8 +2095,7 @@ static int __init init_dmars(void)
 
 		iommu_set_root_entry(iommu);
 
-		iommu->flush.flush_context(iommu, 0, 0, 0, DMA_CCMD_GLOBAL_INVL,
-					   0);
+		iommu->flush.flush_context(iommu, 0, 0, 0, DMA_CCMD_GLOBAL_INVL);
 		iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH,
 					 0);
 		iommu_disable_protect_mem_regions(iommu);
@@ -2721,7 +2711,7 @@ static int init_iommu_hw(void)
 		iommu_set_root_entry(iommu);
 
 		iommu->flush.flush_context(iommu, 0, 0, 0,
-					   DMA_CCMD_GLOBAL_INVL, 0);
+					   DMA_CCMD_GLOBAL_INVL);
 		iommu->flush.flush_iotlb(iommu, 0, 0, 0,
 					 DMA_TLB_GLOBAL_FLUSH, 0);
 		iommu_disable_protect_mem_regions(iommu);
@@ -2738,7 +2728,7 @@ static void iommu_flush_all(void)
 
 	for_each_active_iommu(iommu, drhd) {
 		iommu->flush.flush_context(iommu, 0, 0, 0,
-					   DMA_CCMD_GLOBAL_INVL, 0);
+					   DMA_CCMD_GLOBAL_INVL);
 		iommu->flush.flush_iotlb(iommu, 0, 0, 0,
 					 DMA_TLB_GLOBAL_FLUSH, 0);
 	}
diff --git a/include/linux/intel-iommu.h b/include/linux/intel-iommu.h
index 7246971a7feb..f2b94dafbf38 100644
--- a/include/linux/intel-iommu.h
+++ b/include/linux/intel-iommu.h
@@ -281,8 +281,8 @@ struct ir_table {
 #endif
 
 struct iommu_flush {
-	int (*flush_context)(struct intel_iommu *iommu, u16 did, u16 sid, u8 fm,
-		u64 type, int non_present_entry_flush);
+	void (*flush_context)(struct intel_iommu *iommu, u16 did, u16 sid,
+			      u8 fm, u64 type);
 	int (*flush_iotlb)(struct intel_iommu *iommu, u16 did, u64 addr,
 		unsigned int size_order, u64 type, int non_present_entry_flush);
 };
@@ -339,8 +339,8 @@ extern void dmar_disable_qi(struct intel_iommu *iommu);
 extern int dmar_reenable_qi(struct intel_iommu *iommu);
 extern void qi_global_iec(struct intel_iommu *iommu);
 
-extern int qi_flush_context(struct intel_iommu *iommu, u16 did, u16 sid,
-			    u8 fm, u64 type, int non_present_entry_flush);
+extern void qi_flush_context(struct intel_iommu *iommu, u16 did, u16 sid,
+			     u8 fm, u64 type);
 extern int qi_flush_iotlb(struct intel_iommu *iommu, u16 did, u64 addr,
 			  unsigned int size_order, u64 type,
 			  int non_present_entry_flush);
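With these prototype changes, both implementations behind the flush_context pointer, the register-based __iommu_flush_context() and the queued-invalidation qi_flush_context(), share one void-returning signature, and callers can no longer key off a "did we actually flush?" return value. Note that flush_iotlb keeps its non_present_entry_flush argument for now; this patch only converts the context-cache side. A hedged sketch of the narrowed function-pointer contract follows, with stand-in types; every fake_ name and FAKE_GLOBAL_INVL are inventions for illustration.

#include <stdint.h>
#include <stdio.h>

#define FAKE_GLOBAL_INVL 1	/* stand-in for DMA_CCMD_GLOBAL_INVL */

struct fake_iommu;		/* stand-in for struct intel_iommu */

struct fake_iommu_flush {
	void (*flush_context)(struct fake_iommu *iommu, uint16_t did,
			      uint16_t sid, uint8_t fm, uint64_t type);
};

/* models __iommu_flush_context(): MMIO write, wait for the ICC bit to clear */
static void register_based_flush(struct fake_iommu *iommu, uint16_t did,
				 uint16_t sid, uint8_t fm, uint64_t type)
{
	(void)iommu; (void)did; (void)sid; (void)fm; (void)type;
	puts("write context command register, wait for completion");
}

/* models qi_flush_context(): build a descriptor and qi_submit_sync() it */
static void queued_flush(struct fake_iommu *iommu, uint16_t did,
			 uint16_t sid, uint8_t fm, uint64_t type)
{
	(void)iommu; (void)did; (void)sid; (void)fm; (void)type;
	puts("submit invalidation descriptor to the queue");
}

int main(void)
{
	struct fake_iommu_flush flush = { .flush_context = register_based_flush };

	/* global invalidation: did/sid/fm are all zero, as at the call sites above */
	flush.flush_context(NULL, 0, 0, 0, FAKE_GLOBAL_INVL);

	flush.flush_context = queued_flush;	/* hardware with queued invalidation */
	flush.flush_context(NULL, 0, 0, 0, FAKE_GLOBAL_INVL);
	return 0;
}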