 arch/ia64/include/asm/kvm_host.h |  2 +-
 arch/x86/include/asm/kvm_host.h  |  2 +-
 arch/x86/kvm/vmx.c               |  2 +-
 arch/x86/kvm/x86.c               |  2 +-
 include/linux/kvm_host.h         |  3 ---
 virt/kvm/iommu.c                 | 16 ++++++++--------
 6 files changed, 12 insertions(+), 15 deletions(-)
diff --git a/arch/ia64/include/asm/kvm_host.h b/arch/ia64/include/asm/kvm_host.h
index 95a3ff93777c..db95f570705f 100644
--- a/arch/ia64/include/asm/kvm_host.h
+++ b/arch/ia64/include/asm/kvm_host.h
@@ -476,7 +476,7 @@ struct kvm_arch {
 
 	struct list_head assigned_dev_head;
 	struct iommu_domain *iommu_domain;
-	int iommu_flags;
+	bool iommu_noncoherent;
 
 	unsigned long irq_sources_bitmap;
 	unsigned long irq_states[KVM_IOAPIC_NUM_PINS];
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 5cbf3166257c..91b35e4005d3 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -564,7 +564,7 @@ struct kvm_arch {
 
 	struct list_head assigned_dev_head;
 	struct iommu_domain *iommu_domain;
-	int iommu_flags;
+	bool iommu_noncoherent;
 	struct kvm_pic *vpic;
 	struct kvm_ioapic *vioapic;
 	struct kvm_pit *vpit;
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 6850b0f1d52b..727a5e980c43 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -7446,7 +7446,7 @@ static u64 vmx_get_mt_mask(struct kvm_vcpu *vcpu, gfn_t gfn, bool is_mmio)
 	if (is_mmio)
 		ret = MTRR_TYPE_UNCACHABLE << VMX_EPT_MT_EPTE_SHIFT;
 	else if (vcpu->kvm->arch.iommu_domain &&
-		 !(vcpu->kvm->arch.iommu_flags & KVM_IOMMU_CACHE_COHERENCY))
+		 vcpu->kvm->arch.iommu_noncoherent)
 		ret = kvm_get_guest_memory_type(vcpu, gfn) <<
 		      VMX_EPT_MT_EPTE_SHIFT;
 	else
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 15f9540a2b1f..92ad83e5b132 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -2719,7 +2719,7 @@ static void wbinvd_ipi(void *garbage)
 static bool need_emulate_wbinvd(struct kvm_vcpu *vcpu)
 {
 	return vcpu->kvm->arch.iommu_domain &&
-		!(vcpu->kvm->arch.iommu_flags & KVM_IOMMU_CACHE_COHERENCY);
+		vcpu->kvm->arch.iommu_noncoherent;
 }
 
 void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index 7beddbd38ac7..ed64880e4915 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -746,9 +746,6 @@ void kvm_unregister_irq_ack_notifier(struct kvm *kvm,
 int kvm_request_irq_source_id(struct kvm *kvm);
 void kvm_free_irq_source_id(struct kvm *kvm, int irq_source_id);
 
-/* For vcpu->arch.iommu_flags */
-#define KVM_IOMMU_CACHE_COHERENCY	0x1
-
 #ifdef CONFIG_KVM_DEVICE_ASSIGNMENT
 int kvm_iommu_map_pages(struct kvm *kvm, struct kvm_memory_slot *slot);
 void kvm_iommu_unmap_pages(struct kvm *kvm, struct kvm_memory_slot *slot);
diff --git a/virt/kvm/iommu.c b/virt/kvm/iommu.c
index a3b14109049b..d32d156a423a 100644
--- a/virt/kvm/iommu.c
+++ b/virt/kvm/iommu.c
@@ -79,7 +79,7 @@ int kvm_iommu_map_pages(struct kvm *kvm, struct kvm_memory_slot *slot)
 	flags = IOMMU_READ;
 	if (!(slot->flags & KVM_MEM_READONLY))
 		flags |= IOMMU_WRITE;
-	if (kvm->arch.iommu_flags & KVM_IOMMU_CACHE_COHERENCY)
+	if (!kvm->arch.iommu_noncoherent)
 		flags |= IOMMU_CACHE;
 
 
@@ -158,7 +158,8 @@ int kvm_assign_device(struct kvm *kvm,
 {
 	struct pci_dev *pdev = NULL;
 	struct iommu_domain *domain = kvm->arch.iommu_domain;
-	int r, last_flags;
+	int r;
+	bool noncoherent;
 
 	/* check if iommu exists and in use */
 	if (!domain)
@@ -174,15 +175,13 @@ int kvm_assign_device(struct kvm *kvm,
 		return r;
 	}
 
-	last_flags = kvm->arch.iommu_flags;
-	if (iommu_domain_has_cap(kvm->arch.iommu_domain,
-				 IOMMU_CAP_CACHE_COHERENCY))
-		kvm->arch.iommu_flags |= KVM_IOMMU_CACHE_COHERENCY;
+	noncoherent = !iommu_domain_has_cap(kvm->arch.iommu_domain,
+					    IOMMU_CAP_CACHE_COHERENCY);
 
 	/* Check if need to update IOMMU page table for guest memory */
-	if ((last_flags ^ kvm->arch.iommu_flags) ==
-	    KVM_IOMMU_CACHE_COHERENCY) {
+	if (noncoherent != kvm->arch.iommu_noncoherent) {
 		kvm_iommu_unmap_memslots(kvm);
+		kvm->arch.iommu_noncoherent = noncoherent;
 		r = kvm_iommu_map_memslots(kvm);
 		if (r)
 			goto out_unmap;
@@ -342,6 +341,7 @@ int kvm_iommu_unmap_guest(struct kvm *kvm)
 	mutex_lock(&kvm->slots_lock);
 	kvm_iommu_unmap_memslots(kvm);
 	kvm->arch.iommu_domain = NULL;
+	kvm->arch.iommu_noncoherent = false;
 	mutex_unlock(&kvm->slots_lock);
 
 	iommu_domain_free(domain);