 arch/x86/include/asm/kvm_host.h |  2 ++
 arch/x86/kvm/vmx.c              |  3 +--
 arch/x86/kvm/x86.c              | 22 ++++++++++++++++++++--
 include/linux/kvm_host.h        | 19 +++++++++++++++++++
 virt/kvm/iommu.c                |  6 ++++++
 virt/kvm/vfio.c                 | 44 ++++++++++++++++++++++++++++++++++++++++++++
 6 files changed, 92 insertions(+), 4 deletions(-)
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 91b35e4005d3..de388c55e7ec 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -565,6 +565,8 @@ struct kvm_arch {
 	struct list_head assigned_dev_head;
 	struct iommu_domain *iommu_domain;
 	bool iommu_noncoherent;
+#define __KVM_HAVE_ARCH_NONCOHERENT_DMA
+	atomic_t noncoherent_dma_count;
 	struct kvm_pic *vpic;
 	struct kvm_ioapic *vioapic;
 	struct kvm_pit *vpit;
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 727a5e980c43..fabf7421ec18 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -7445,8 +7445,7 @@ static u64 vmx_get_mt_mask(struct kvm_vcpu *vcpu, gfn_t gfn, bool is_mmio)
 	 */
 	if (is_mmio)
 		ret = MTRR_TYPE_UNCACHABLE << VMX_EPT_MT_EPTE_SHIFT;
-	else if (vcpu->kvm->arch.iommu_domain &&
-		 vcpu->kvm->arch.iommu_noncoherent)
+	else if (kvm_arch_has_noncoherent_dma(vcpu->kvm))
 		ret = kvm_get_guest_memory_type(vcpu, gfn) <<
 		      VMX_EPT_MT_EPTE_SHIFT;
 	else
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 92ad83e5b132..ec35d09937da 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -2718,8 +2718,7 @@ static void wbinvd_ipi(void *garbage)
 
 static bool need_emulate_wbinvd(struct kvm_vcpu *vcpu)
 {
-	return vcpu->kvm->arch.iommu_domain &&
-		vcpu->kvm->arch.iommu_noncoherent;
+	return kvm_arch_has_noncoherent_dma(vcpu->kvm);
 }
 
 void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
@@ -6998,6 +6997,7 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
 	INIT_LIST_HEAD(&kvm->arch.active_mmu_pages);
 	INIT_LIST_HEAD(&kvm->arch.zapped_obsolete_pages);
 	INIT_LIST_HEAD(&kvm->arch.assigned_dev_head);
+	atomic_set(&kvm->arch.noncoherent_dma_count, 0);
 
 	/* Reserve bit 0 of irq_sources_bitmap for userspace irq source */
 	set_bit(KVM_USERSPACE_IRQ_SOURCE_ID, &kvm->arch.irq_sources_bitmap);
@@ -7437,6 +7437,24 @@ bool kvm_arch_can_inject_async_page_present(struct kvm_vcpu *vcpu)
 		kvm_x86_ops->interrupt_allowed(vcpu);
 }
 
+void kvm_arch_register_noncoherent_dma(struct kvm *kvm)
+{
+	atomic_inc(&kvm->arch.noncoherent_dma_count);
+}
+EXPORT_SYMBOL_GPL(kvm_arch_register_noncoherent_dma);
+
+void kvm_arch_unregister_noncoherent_dma(struct kvm *kvm)
+{
+	atomic_dec(&kvm->arch.noncoherent_dma_count);
+}
+EXPORT_SYMBOL_GPL(kvm_arch_unregister_noncoherent_dma);
+
+bool kvm_arch_has_noncoherent_dma(struct kvm *kvm)
+{
+	return atomic_read(&kvm->arch.noncoherent_dma_count);
+}
+EXPORT_SYMBOL_GPL(kvm_arch_has_noncoherent_dma);
+
 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_exit);
 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_inj_virq);
 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_page_fault);
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index ed64880e4915..92aae88756db 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -670,6 +670,25 @@ static inline void kvm_arch_free_vm(struct kvm *kvm)
 }
 #endif
 
+#ifdef __KVM_HAVE_ARCH_NONCOHERENT_DMA
+void kvm_arch_register_noncoherent_dma(struct kvm *kvm);
+void kvm_arch_unregister_noncoherent_dma(struct kvm *kvm);
+bool kvm_arch_has_noncoherent_dma(struct kvm *kvm);
+#else
+static inline void kvm_arch_register_noncoherent_dma(struct kvm *kvm)
+{
+}
+
+static inline void kvm_arch_unregister_noncoherent_dma(struct kvm *kvm)
+{
+}
+
+static inline bool kvm_arch_has_noncoherent_dma(struct kvm *kvm)
+{
+	return false;
+}
+#endif
+
 static inline wait_queue_head_t *kvm_arch_vcpu_wq(struct kvm_vcpu *vcpu)
 {
 #ifdef __KVM_HAVE_ARCH_WQP
diff --git a/virt/kvm/iommu.c b/virt/kvm/iommu.c
index d32d156a423a..c7d9ce122529 100644
--- a/virt/kvm/iommu.c
+++ b/virt/kvm/iommu.c
@@ -140,6 +140,9 @@ static int kvm_iommu_map_memslots(struct kvm *kvm)
 	struct kvm_memslots *slots;
 	struct kvm_memory_slot *memslot;
 
+	if (kvm->arch.iommu_noncoherent)
+		kvm_arch_register_noncoherent_dma(kvm);
+
 	idx = srcu_read_lock(&kvm->srcu);
 	slots = kvm_memslots(kvm);
 
@@ -327,6 +330,9 @@ static int kvm_iommu_unmap_memslots(struct kvm *kvm)
 
 	srcu_read_unlock(&kvm->srcu, idx);
 
+	if (kvm->arch.iommu_noncoherent)
+		kvm_arch_unregister_noncoherent_dma(kvm);
+
 	return 0;
 }
 
diff --git a/virt/kvm/vfio.c b/virt/kvm/vfio.c
index 597c258245ea..ca4260e35037 100644
--- a/virt/kvm/vfio.c
+++ b/virt/kvm/vfio.c
@@ -27,6 +27,7 @@ struct kvm_vfio_group {
 struct kvm_vfio {
 	struct list_head group_list;
 	struct mutex lock;
+	bool noncoherent;
 };
 
 static struct vfio_group *kvm_vfio_group_get_external_user(struct file *filep)
@@ -58,6 +59,43 @@ static void kvm_vfio_group_put_external_user(struct vfio_group *vfio_group)
 	symbol_put(vfio_group_put_external_user);
 }
 
+/*
+ * Groups can use the same or different IOMMU domains.  If the same then
+ * adding a new group may change the coherency of groups we've previously
+ * been told about.  We don't want to care about any of that so we retest
+ * each group and bail as soon as we find one that's noncoherent.  This
+ * means we only ever [un]register_noncoherent_dma once for the whole device.
+ */
+static void kvm_vfio_update_coherency(struct kvm_device *dev)
+{
+	struct kvm_vfio *kv = dev->private;
+	bool noncoherent = false;
+	struct kvm_vfio_group *kvg;
+
+	mutex_lock(&kv->lock);
+
+	list_for_each_entry(kvg, &kv->group_list, node) {
+		/*
+		 * TODO: We need an interface to check the coherency of
+		 * the IOMMU domain this group is using.  For now, assume
+		 * it's always noncoherent.
+		 */
+		noncoherent = true;
+		break;
+	}
+
+	if (noncoherent != kv->noncoherent) {
+		kv->noncoherent = noncoherent;
+
+		if (kv->noncoherent)
+			kvm_arch_register_noncoherent_dma(dev->kvm);
+		else
+			kvm_arch_unregister_noncoherent_dma(dev->kvm);
+	}
+
+	mutex_unlock(&kv->lock);
+}
+
 static int kvm_vfio_set_group(struct kvm_device *dev, long attr, u64 arg)
 {
 	struct kvm_vfio *kv = dev->private;
@@ -105,6 +143,8 @@ static int kvm_vfio_set_group(struct kvm_device *dev, long attr, u64 arg)
 
 		mutex_unlock(&kv->lock);
 
+		kvm_vfio_update_coherency(dev);
+
 		return 0;
 
 	case KVM_DEV_VFIO_GROUP_DEL:
@@ -140,6 +180,8 @@ static int kvm_vfio_set_group(struct kvm_device *dev, long attr, u64 arg)
 
 		kvm_vfio_group_put_external_user(vfio_group);
 
+		kvm_vfio_update_coherency(dev);
+
 		return ret;
 	}
 
@@ -185,6 +227,8 @@ static void kvm_vfio_destroy(struct kvm_device *dev)
 		kfree(kvg);
 	}
 
+	kvm_vfio_update_coherency(dev);
+
 	kfree(kv);
 	kfree(dev);	/* alloc by kvm_ioctl_create_device, free by .destroy */
 }
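
Taken together, the new arch helpers behave as a reference count rather than a flag: both the legacy assigned-device path in virt/kvm/iommu.c and the KVM-VFIO device in virt/kvm/vfio.c can register noncoherent DMA independently, and kvm_arch_has_noncoherent_dma() only reports coherent DMA again once every registration has been dropped. Below is a minimal, self-contained sketch of that counting behaviour; it uses a plain int in place of the kernel's atomic_t and hypothetical stub struct kvm/kvm_arch definitions, purely for illustration and not taken from the patch.

#include <stdbool.h>
#include <stdio.h>

/* Stand-ins for the kernel structures touched by the patch. */
struct kvm_arch { int noncoherent_dma_count; };	/* atomic_t in the kernel */
struct kvm { struct kvm_arch arch; };

static void kvm_arch_register_noncoherent_dma(struct kvm *kvm)
{
	kvm->arch.noncoherent_dma_count++;		/* atomic_inc() in the kernel */
}

static void kvm_arch_unregister_noncoherent_dma(struct kvm *kvm)
{
	kvm->arch.noncoherent_dma_count--;		/* atomic_dec() in the kernel */
}

static bool kvm_arch_has_noncoherent_dma(struct kvm *kvm)
{
	return kvm->arch.noncoherent_dma_count != 0;	/* atomic_read() in the kernel */
}

int main(void)
{
	struct kvm vm = { .arch = { .noncoherent_dma_count = 0 } };

	kvm_arch_register_noncoherent_dma(&vm);		/* e.g. the iommu.c mapping path */
	kvm_arch_register_noncoherent_dma(&vm);		/* e.g. the KVM-VFIO device */
	kvm_arch_unregister_noncoherent_dma(&vm);

	/* One registration is still outstanding, so DMA is still noncoherent. */
	printf("%d\n", kvm_arch_has_noncoherent_dma(&vm));	/* prints 1 */

	kvm_arch_unregister_noncoherent_dma(&vm);
	printf("%d\n", kvm_arch_has_noncoherent_dma(&vm));	/* prints 0 */

	return 0;
}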