about summary refs log tree commit diff stats
path: root/virt/kvm
diff options
context:
space:
mode:
authorLinus Torvalds <torvalds@linux-foundation.org>2016-11-04 16:08:05 -0400
committerLinus Torvalds <torvalds@linux-foundation.org>2016-11-04 16:08:05 -0400
commit66cecb67894b35c6af17eb4e6b6aaec6c8957c2e (patch)
treec1986d590632735047174c5587d0b22b45b47a1e /virt/kvm
parent34c510b2eecd2fb8414998f54ce12c94e16d78a0 (diff)
parentd9092f52d7e61dd1557f2db2400ddb430e85937e (diff)
Merge tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm
Pull KVM updates from Paolo Bonzini:
 "One NULL pointer dereference, and two fixes for regressions introduced
  during the merge window. The rest are fixes for MIPS, s390 and nested
  VMX"

* tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm:
  kvm: x86: Check memopp before dereference (CVE-2016-8630)
  kvm: nVMX: VMCLEAR an active shadow VMCS after last use
  KVM: x86: drop TSC offsetting kvm_x86_ops to fix KVM_GET/SET_CLOCK
  KVM: x86: fix wbinvd_dirty_mask use-after-free
  kvm/x86: Show WRMSR data is in hex
  kvm: nVMX: Fix kernel panics induced by illegal INVEPT/INVVPID types
  KVM: document lock orders
  KVM: fix OOPS on flush_work
  KVM: s390: Fix STHYI buffer alignment for diag224
  KVM: MIPS: Precalculate MMIO load resume PC
  KVM: MIPS: Make ERET handle ERL before EXL
  KVM: MIPS: Fix lazy user ASID regenerate for SMP
Diffstat (limited to 'virt/kvm')
-rw-r--r--virt/kvm/eventfd.c22
-rw-r--r--virt/kvm/kvm_main.c6
2 files changed, 25 insertions, 3 deletions
diff --git a/virt/kvm/eventfd.c b/virt/kvm/eventfd.c
index f397e9b20370..a29786dd9522 100644
--- a/virt/kvm/eventfd.c
+++ b/virt/kvm/eventfd.c
@@ -42,6 +42,7 @@
 
 #ifdef CONFIG_HAVE_KVM_IRQFD
 
+static struct workqueue_struct *irqfd_cleanup_wq;
 
 static void
 irqfd_inject(struct work_struct *work)
@@ -167,7 +168,7 @@ irqfd_deactivate(struct kvm_kernel_irqfd *irqfd)
 
 	list_del_init(&irqfd->list);
 
-	schedule_work(&irqfd->shutdown);
+	queue_work(irqfd_cleanup_wq, &irqfd->shutdown);
 }
 
 int __attribute__((weak)) kvm_arch_set_irq_inatomic(
@@ -554,7 +555,7 @@ kvm_irqfd_deassign(struct kvm *kvm, struct kvm_irqfd *args)
 	 * so that we guarantee there will not be any more interrupts on this
 	 * gsi once this deassign function returns.
 	 */
-	flush_work(&irqfd->shutdown);
+	flush_workqueue(irqfd_cleanup_wq);
 
 	return 0;
 }
@@ -591,7 +592,7 @@ kvm_irqfd_release(struct kvm *kvm)
 	 * Block until we know all outstanding shutdown jobs have completed
 	 * since we do not take a kvm* reference.
 	 */
-	flush_work(&irqfd->shutdown);
+	flush_workqueue(irqfd_cleanup_wq);
 
 }
 
@@ -621,8 +622,23 @@ void kvm_irq_routing_update(struct kvm *kvm)
 	spin_unlock_irq(&kvm->irqfds.lock);
 }
 
+/*
+ * create a host-wide workqueue for issuing deferred shutdown requests
+ * aggregated from all vm* instances. We need our own isolated
+ * queue to ease flushing work items when a VM exits.
+ */
+int kvm_irqfd_init(void)
+{
+	irqfd_cleanup_wq = alloc_workqueue("kvm-irqfd-cleanup", 0, 0);
+	if (!irqfd_cleanup_wq)
+		return -ENOMEM;
+
+	return 0;
+}
+
 void kvm_irqfd_exit(void)
 {
+	destroy_workqueue(irqfd_cleanup_wq);
 }
 #endif
 
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 2907b7b78654..5c360347a1e9 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -3844,7 +3844,12 @@ int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
 	 * kvm_arch_init makes sure there's at most one caller
 	 * for architectures that support multiple implementations,
 	 * like intel and amd on x86.
+	 * kvm_arch_init must be called before kvm_irqfd_init to avoid creating
+	 * conflicts in case kvm is already setup for another implementation.
 	 */
+	r = kvm_irqfd_init();
+	if (r)
+		goto out_irqfd;
 
 	if (!zalloc_cpumask_var(&cpus_hardware_enabled, GFP_KERNEL)) {
 		r = -ENOMEM;
@@ -3926,6 +3931,7 @@ out_free_0a:
 	free_cpumask_var(cpus_hardware_enabled);
 out_free_0:
 	kvm_irqfd_exit();
+out_irqfd:
 	kvm_arch_exit();
 out_fail:
 	return r;