author	Michael S. Tsirkin <mst@redhat.com>	2010-09-19 13:02:31 -0400
committer	Marcelo Tosatti <mtosatti@redhat.com>	2010-09-23 10:31:51 -0400
commit	6bbfb2653177a00f70e57e53625502d43804fed0 (patch)
tree	b0196884ead501481a3ae3a27d45b41b43ec117d
parent	c79bd89282136a4516e842fa542d6abf902ddeac (diff)
KVM: fix irqfd assign/deassign race
I think I see the following (theoretical) race: during irqfd assign, we drop the irqfds lock before we schedule inject work. Therefore, deassign running on another CPU could cause shutdown and flush to run before inject, causing a use after free in inject.

A simple fix is to schedule inject under the lock.

Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
Acked-by: Gregory Haskins <ghaskins@novell.com>
Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
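For context, a rough sketch of the interleaving this patch closes. It is paraphrased from the kvm_irqfd_assign() and kvm_irqfd_deassign() paths of this period, not quoted verbatim; the deassign side is summarized in comments.

/* CPU 0: kvm_irqfd_assign(), before this patch */
spin_lock_irq(&kvm->irqfds.lock);
list_add_tail(&irqfd->list, &kvm->irqfds.items);
spin_unlock_irq(&kvm->irqfds.lock);     /* lock dropped before inject is queued */

/*
 * CPU 1: kvm_irqfd_deassign() can now run to completion: it takes
 * irqfds.lock, unlinks this irqfd, queues the shutdown work and
 * flushes the cleanup workqueue; shutdown frees the irqfd.
 */

/* CPU 0: back in kvm_irqfd_assign() */
if (events & POLLIN)
	schedule_work(&irqfd->inject);  /* use after free: irqfd is already gone */

With the unlock moved below schedule_work(), as in the diff below, deassign cannot take irqfds.lock (and so cannot start the shutdown work) until any pending inject has been queued, and the shutdown work flushes outstanding inject work before freeing the irqfd.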
-rw-r--r--	virt/kvm/eventfd.c	3
1 file changed, 2 insertions(+), 1 deletion(-)
diff --git a/virt/kvm/eventfd.c b/virt/kvm/eventfd.c
index 66cf65b510b1..c1f1e3c62984 100644
--- a/virt/kvm/eventfd.c
+++ b/virt/kvm/eventfd.c
@@ -218,7 +218,6 @@ kvm_irqfd_assign(struct kvm *kvm, int fd, int gsi)
 	events = file->f_op->poll(file, &irqfd->pt);
 
 	list_add_tail(&irqfd->list, &kvm->irqfds.items);
-	spin_unlock_irq(&kvm->irqfds.lock);
 
 	/*
 	 * Check if there was an event already pending on the eventfd
@@ -227,6 +226,8 @@ kvm_irqfd_assign(struct kvm *kvm, int fd, int gsi)
 	if (events & POLLIN)
 		schedule_work(&irqfd->inject);
 
+	spin_unlock_irq(&kvm->irqfds.lock);
+
 	/*
 	 * do not drop the file until the irqfd is fully initialized, otherwise
 	 * we might race against the POLLHUP