 virt/kvm/eventfd.c | 15 +++++++++++++--
 1 file changed, 13 insertions(+), 2 deletions(-)
diff --git a/virt/kvm/eventfd.c b/virt/kvm/eventfd.c
index 30f70fd511c4..62e4cd947a90 100644
--- a/virt/kvm/eventfd.c
+++ b/virt/kvm/eventfd.c
@@ -166,7 +166,7 @@ irqfd_ptable_queue_proc(struct file *file, wait_queue_head_t *wqh,
 static int
 kvm_irqfd_assign(struct kvm *kvm, int fd, int gsi)
 {
-	struct _irqfd *irqfd;
+	struct _irqfd *irqfd, *tmp;
 	struct file *file = NULL;
 	struct eventfd_ctx *eventfd = NULL;
 	int ret;
@@ -203,9 +203,20 @@ kvm_irqfd_assign(struct kvm *kvm, int fd, int gsi)
 	init_waitqueue_func_entry(&irqfd->wait, irqfd_wakeup);
 	init_poll_funcptr(&irqfd->pt, irqfd_ptable_queue_proc);
 
+	spin_lock_irq(&kvm->irqfds.lock);
+
+	ret = 0;
+	list_for_each_entry(tmp, &kvm->irqfds.items, list) {
+		if (irqfd->eventfd != tmp->eventfd)
+			continue;
+		/* This fd is used for another irq already. */
+		ret = -EBUSY;
+		spin_unlock_irq(&kvm->irqfds.lock);
+		goto fail;
+	}
+
 	events = file->f_op->poll(file, &irqfd->pt);
 
-	spin_lock_irq(&kvm->irqfds.lock);
 	list_add_tail(&irqfd->list, &kvm->irqfds.items);
 	spin_unlock_irq(&kvm->irqfds.lock);
 
