author	Lan Tianyu <tianyu.lan@intel.com>	2017-12-21 21:10:36 -0500
committer	Paolo Bonzini <pbonzini@redhat.com>	2018-07-18 05:31:27 -0400
commit	b5020a8e6b54d2ece80b1e7dedb33c79a40ebd47 (patch)
tree	100e74e7b047f3076e13acc86567cf924c0f83e1
parent	94ffba484663ab3fc695ce2a34871e8c3db499f7 (diff)
KVM/Eventfd: Avoid crash when assign and deassign specific eventfd in parallel.
Syzbot reports crashes in kvm_irqfd_assign(), caused by a use-after-free when kvm_irqfd_assign() and kvm_irqfd_deassign() run in parallel for one specific eventfd. When the assign path has not finished but the irqfd has already been added to the kvm->irqfds.items list, another thread may deassign the eventfd and free the struct kvm_kernel_irqfd. The assign path then uses the struct kvm_kernel_irqfd that has been freed by the deassign path.

To avoid this, keep the irqfd under kvm->irq_srcu protection after it has been added to the kvm->irqfds.items list, and call synchronize_srcu() in irqfd_shutdown() to make sure the irqfd has been fully initialized in the assign path.

Reported-by: Dmitry Vyukov <dvyukov@google.com>
Cc: Paolo Bonzini <pbonzini@redhat.com>
Cc: Radim Krčmář <rkrcmar@redhat.com>
Cc: Dmitry Vyukov <dvyukov@google.com>
Signed-off-by: Tianyu Lan <tianyu.lan@intel.com>
Cc: stable@vger.kernel.org
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
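As a minimal illustration of the pattern the fix relies on, the sketch below shows kernel-style C in which the setup path keeps an SRCU read-side section open from the moment an object becomes visible on a shared list until its initialization is complete, while the teardown path calls synchronize_srcu() before freeing the object. This is not the upstream KVM code: my_irqfd, my_items, my_lock, my_srcu, my_assign and my_shutdown are hypothetical names; the real patch applies the same idea inside kvm_irqfd_assign() and irqfd_shutdown() using kvm->irq_srcu.

/*
 * Minimal sketch of the SRCU pattern used by the fix; not the upstream
 * KVM code, and all names below are hypothetical.
 */
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/srcu.h>

static LIST_HEAD(my_items);
static DEFINE_SPINLOCK(my_lock);
DEFINE_STATIC_SRCU(my_srcu);

struct my_irqfd {
	struct list_head list;
	bool initialized;	/* stands in for state set up late in the assign path */
};

static int my_assign(struct my_irqfd *irqfd)
{
	int idx;

	/*
	 * Stay inside the SRCU read-side section from before the object is
	 * published on the list until its initialization has finished,
	 * mirroring how the patch delays srcu_read_unlock() to the end of
	 * kvm_irqfd_assign().
	 */
	idx = srcu_read_lock(&my_srcu);

	spin_lock_irq(&my_lock);
	list_add_tail(&irqfd->list, &my_items);
	spin_unlock_irq(&my_lock);

	/* Late initialization that must not race with teardown. */
	irqfd->initialized = true;

	srcu_read_unlock(&my_srcu, idx);
	return 0;
}

static void my_shutdown(struct my_irqfd *irqfd)
{
	/*
	 * Wait for any assign path still inside its SRCU read-side section,
	 * so the object is fully initialized before it is unhooked and freed,
	 * mirroring the synchronize_srcu() added to irqfd_shutdown().
	 */
	synchronize_srcu(&my_srcu);

	spin_lock_irq(&my_lock);
	list_del(&irqfd->list);
	spin_unlock_irq(&my_lock);

	kfree(irqfd);
}

The key property is that synchronize_srcu() only returns once every SRCU read-side section that began before the call has completed, so by the time my_shutdown() frees the object, no concurrent my_assign() can still be initializing it; that is exactly the window the syzbot-reported use-after-free exploited.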
-rw-r--r--	virt/kvm/eventfd.c	6
1 file changed, 5 insertions, 1 deletion
diff --git a/virt/kvm/eventfd.c b/virt/kvm/eventfd.c
index 6e865e8b5b10..decefe944b0f 100644
--- a/virt/kvm/eventfd.c
+++ b/virt/kvm/eventfd.c
@@ -119,8 +119,12 @@ irqfd_shutdown(struct work_struct *work)
 {
 	struct kvm_kernel_irqfd *irqfd =
 		container_of(work, struct kvm_kernel_irqfd, shutdown);
+	struct kvm *kvm = irqfd->kvm;
 	u64 cnt;
 
+	/* Make sure irqfd has been initialized in assign path. */
+	synchronize_srcu(&kvm->irq_srcu);
+
 	/*
 	 * Synchronize with the wait-queue and unhook ourselves to prevent
 	 * further events.
@@ -387,7 +391,6 @@ kvm_irqfd_assign(struct kvm *kvm, struct kvm_irqfd *args)
 
 	idx = srcu_read_lock(&kvm->irq_srcu);
 	irqfd_update(kvm, irqfd);
-	srcu_read_unlock(&kvm->irq_srcu, idx);
 
 	list_add_tail(&irqfd->list, &kvm->irqfds.items);
 
@@ -421,6 +424,7 @@ kvm_irqfd_assign(struct kvm *kvm, struct kvm_irqfd *args)
 	}
 #endif
 
+	srcu_read_unlock(&kvm->irq_srcu, idx);
 	return 0;
 
 fail: