author     Cornelia Huck <cornelia.huck@de.ibm.com>   2014-03-17 14:11:35 -0400
committer  Paolo Bonzini <pbonzini@redhat.com>        2014-03-18 12:06:04 -0400
commit     684a0b719ddbbafe1c7e6646b9bc239453a1773d (patch)
tree       eb83541090766873f36f9916e720ff0a85e1eeb2 /virt/kvm
parent     93c4adc7afedf9b0ec190066d45b6d67db5270da (diff)
KVM: eventfd: Fix lock order inversion.
When registering a new irqfd, we call its ->poll method to collect any
event that might have previously been pending so that we can trigger it.
This is done under the kvm->irqfds.lock, which means the eventfd's ctx
lock is taken under it.

However, if we get a POLLHUP in irqfd_wakeup, we will be called with the
ctx lock held before getting the irqfds.lock to deactivate the irqfd,
causing lockdep to complain.

Calling the ->poll method does not really need the irqfds.lock, so let's
just move it after we've given up the irqfds.lock in kvm_irqfd_assign().

Signed-off-by: Cornelia Huck <cornelia.huck@de.ibm.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
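The problem is the classic AB-BA ordering: the assign path takes the eventfd's ctx lock (via ->poll) while holding kvm->irqfds.lock, whereas the POLLHUP path in irqfd_wakeup takes irqfds.lock while already holding the ctx lock. The sketch below is a minimal userspace illustration, not the kernel code: pthread mutexes stand in for the two kernel locks, and the lock and function names (irqfds_lock, ctx_lock, assign_path_before_fix, and so on) are invented for the example.

/*
 * Minimal userspace sketch (not kernel code) of the AB-BA ordering the
 * commit message describes.  pthread mutexes stand in for
 * kvm->irqfds.lock ("A") and the eventfd ctx lock ("B").
 */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t irqfds_lock = PTHREAD_MUTEX_INITIALIZER; /* "A" */
static pthread_mutex_t ctx_lock    = PTHREAD_MUTEX_INITIALIZER; /* "B" */

/* Registration path before the fix: A is held, then ->poll takes B. */
static void assign_path_before_fix(void)
{
	pthread_mutex_lock(&irqfds_lock);   /* A */
	pthread_mutex_lock(&ctx_lock);      /* B taken under A (via ->poll) */
	puts("collect pending events");
	pthread_mutex_unlock(&ctx_lock);
	pthread_mutex_unlock(&irqfds_lock);
}

/* POLLHUP path: B is already held, then deactivation takes A. */
static void pollhup_path(void)
{
	pthread_mutex_lock(&ctx_lock);      /* B */
	pthread_mutex_lock(&irqfds_lock);   /* A under B: reversed order */
	puts("deactivate irqfd");
	pthread_mutex_unlock(&irqfds_lock);
	pthread_mutex_unlock(&ctx_lock);
}

/* Registration path after the fix: drop A before calling ->poll (B). */
static void assign_path_after_fix(void)
{
	pthread_mutex_lock(&irqfds_lock);   /* A */
	puts("add irqfd to list");
	pthread_mutex_unlock(&irqfds_lock);

	pthread_mutex_lock(&ctx_lock);      /* B taken with A released */
	puts("collect pending events");
	pthread_mutex_unlock(&ctx_lock);
}

int main(void)
{
	assign_path_before_fix();
	pollhup_path();
	assign_path_after_fix();
	return 0;
}

Run sequentially like this the two paths never actually deadlock, but a lock-order checker (or two threads racing the before-fix paths) would flag the inversion, which is what lockdep reports in the kernel. The after-fix ordering corresponds to the diff below, which releases irqfds.lock before calling ->poll.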
Diffstat (limited to 'virt/kvm')
-rw-r--r--  virt/kvm/eventfd.c | 8 ++++----
1 file changed, 4 insertions(+), 4 deletions(-)
diff --git a/virt/kvm/eventfd.c b/virt/kvm/eventfd.c
index abe4d6043b36..29c2a04e036e 100644
--- a/virt/kvm/eventfd.c
+++ b/virt/kvm/eventfd.c
@@ -391,19 +391,19 @@ kvm_irqfd_assign(struct kvm *kvm, struct kvm_irqfd *args)
 					   lockdep_is_held(&kvm->irqfds.lock));
 	irqfd_update(kvm, irqfd, irq_rt);
 
-	events = f.file->f_op->poll(f.file, &irqfd->pt);
-
 	list_add_tail(&irqfd->list, &kvm->irqfds.items);
 
+	spin_unlock_irq(&kvm->irqfds.lock);
+
 	/*
 	 * Check if there was an event already pending on the eventfd
 	 * before we registered, and trigger it as if we didn't miss it.
 	 */
+	events = f.file->f_op->poll(f.file, &irqfd->pt);
+
 	if (events & POLLIN)
 		schedule_work(&irqfd->inject);
 
-	spin_unlock_irq(&kvm->irqfds.lock);
-
 	/*
 	 * do not drop the file until the irqfd is fully initialized, otherwise
 	 * we might race against the POLLHUP