path: root/virt/kvm/eventfd.c
author    Glenn Elliott <gelliott@cs.unc.edu>  2012-03-04 19:47:13 -0500
committer Glenn Elliott <gelliott@cs.unc.edu>  2012-03-04 19:47:13 -0500
commit    c71c03bda1e86c9d5198c5d83f712e695c4f2a1e (patch)
tree      ecb166cb3e2b7e2adb3b5e292245fefd23381ac8 /virt/kvm/eventfd.c
parent    ea53c912f8a86a8567697115b6a0d8152beee5c8 (diff)
parent    6a00f206debf8a5c8899055726ad127dbeeed098 (diff)
Merge branch 'mpi-master' into wip-k-fmlp

Conflicts:
        litmus/sched_cedf.c
Diffstat (limited to 'virt/kvm/eventfd.c')
-rw-r--r--  virt/kvm/eventfd.c | 96
1 file changed, 82 insertions(+), 14 deletions(-)
diff --git a/virt/kvm/eventfd.c b/virt/kvm/eventfd.c
index c1f1e3c62984..73358d256fa2 100644
--- a/virt/kvm/eventfd.c
+++ b/virt/kvm/eventfd.c
@@ -44,14 +44,19 @@
  */
 
 struct _irqfd {
-        struct kvm *kvm;
-        struct eventfd_ctx *eventfd;
-        int gsi;
-        struct list_head list;
-        poll_table pt;
+        /* Used for MSI fast-path */
+        struct kvm *kvm;
         wait_queue_t wait;
+        /* Update side is protected by irqfds.lock */
+        struct kvm_kernel_irq_routing_entry __rcu *irq_entry;
+        /* Used for level IRQ fast-path */
+        int gsi;
         struct work_struct inject;
+        /* Used for setup/shutdown */
+        struct eventfd_ctx *eventfd;
+        struct list_head list;
+        poll_table pt;
         struct work_struct shutdown;
 };
 
 static struct workqueue_struct *irqfd_cleanup_wq;
@@ -85,7 +90,7 @@ irqfd_shutdown(struct work_struct *work)
         * We know no new events will be scheduled at this point, so block
         * until all previously outstanding events have completed
         */
-        flush_work(&irqfd->inject);
+        flush_work_sync(&irqfd->inject);
 
        /*
         * It is now safe to release the object's resources
@@ -125,14 +130,22 @@ irqfd_wakeup(wait_queue_t *wait, unsigned mode, int sync, void *key)
 {
         struct _irqfd *irqfd = container_of(wait, struct _irqfd, wait);
         unsigned long flags = (unsigned long)key;
+        struct kvm_kernel_irq_routing_entry *irq;
+        struct kvm *kvm = irqfd->kvm;
 
-        if (flags & POLLIN)
+        if (flags & POLLIN) {
+                rcu_read_lock();
+                irq = rcu_dereference(irqfd->irq_entry);
                 /* An event has been signaled, inject an interrupt */
-                schedule_work(&irqfd->inject);
+                if (irq)
+                        kvm_set_msi(irq, kvm, KVM_USERSPACE_IRQ_SOURCE_ID, 1);
+                else
+                        schedule_work(&irqfd->inject);
+                rcu_read_unlock();
+        }
 
         if (flags & POLLHUP) {
                 /* The eventfd is closing, detach from KVM */
-                struct kvm *kvm = irqfd->kvm;
                 unsigned long flags;
 
                 spin_lock_irqsave(&kvm->irqfds.lock, flags);
@@ -163,9 +176,31 @@ irqfd_ptable_queue_proc(struct file *file, wait_queue_head_t *wqh,
         add_wait_queue(wqh, &irqfd->wait);
 }
 
+/* Must be called under irqfds.lock */
+static void irqfd_update(struct kvm *kvm, struct _irqfd *irqfd,
+                         struct kvm_irq_routing_table *irq_rt)
+{
+        struct kvm_kernel_irq_routing_entry *e;
+        struct hlist_node *n;
+
+        if (irqfd->gsi >= irq_rt->nr_rt_entries) {
+                rcu_assign_pointer(irqfd->irq_entry, NULL);
+                return;
+        }
+
+        hlist_for_each_entry(e, n, &irq_rt->map[irqfd->gsi], link) {
+                /* Only fast-path MSI. */
+                if (e->type == KVM_IRQ_ROUTING_MSI)
+                        rcu_assign_pointer(irqfd->irq_entry, e);
+                else
+                        rcu_assign_pointer(irqfd->irq_entry, NULL);
+        }
+}
+
 static int
 kvm_irqfd_assign(struct kvm *kvm, int fd, int gsi)
 {
+        struct kvm_irq_routing_table *irq_rt;
         struct _irqfd *irqfd, *tmp;
         struct file *file = NULL;
         struct eventfd_ctx *eventfd = NULL;
@@ -215,6 +250,10 @@ kvm_irqfd_assign(struct kvm *kvm, int fd, int gsi)
                 goto fail;
         }
 
+        irq_rt = rcu_dereference_protected(kvm->irq_routing,
+                                           lockdep_is_held(&kvm->irqfds.lock));
+        irqfd_update(kvm, irqfd, irq_rt);
+
         events = file->f_op->poll(file, &irqfd->pt);
 
         list_add_tail(&irqfd->list, &kvm->irqfds.items);
@@ -271,8 +310,18 @@ kvm_irqfd_deassign(struct kvm *kvm, int fd, int gsi)
         spin_lock_irq(&kvm->irqfds.lock);
 
         list_for_each_entry_safe(irqfd, tmp, &kvm->irqfds.items, list) {
-                if (irqfd->eventfd == eventfd && irqfd->gsi == gsi)
+                if (irqfd->eventfd == eventfd && irqfd->gsi == gsi) {
+                        /*
+                         * This rcu_assign_pointer is needed for when
+                         * another thread calls kvm_irq_routing_update before
+                         * we flush workqueue below (we synchronize with
+                         * kvm_irq_routing_update using irqfds.lock).
+                         * It is paired with synchronize_rcu done by caller
+                         * of that function.
+                         */
+                        rcu_assign_pointer(irqfd->irq_entry, NULL);
                         irqfd_deactivate(irqfd);
+                }
         }
 
         spin_unlock_irq(&kvm->irqfds.lock);
@@ -322,6 +371,25 @@ kvm_irqfd_release(struct kvm *kvm)
 }
 
 /*
+ * Change irq_routing and irqfd.
+ * Caller must invoke synchronize_rcu afterwards.
+ */
+void kvm_irq_routing_update(struct kvm *kvm,
+                            struct kvm_irq_routing_table *irq_rt)
+{
+        struct _irqfd *irqfd;
+
+        spin_lock_irq(&kvm->irqfds.lock);
+
+        rcu_assign_pointer(kvm->irq_routing, irq_rt);
+
+        list_for_each_entry(irqfd, &kvm->irqfds.items, list)
+                irqfd_update(kvm, irqfd, irq_rt);
+
+        spin_unlock_irq(&kvm->irqfds.lock);
+}
+
+/*
  * create a host-wide workqueue for issuing deferred shutdown requests
  * aggregated from all vm* instances. We need our own isolated single-thread
  * queue to prevent deadlock against flushing the normal work-queue.
@@ -510,7 +578,7 @@ kvm_assign_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args)
 
         mutex_lock(&kvm->slots_lock);
 
-        /* Verify that there isnt a match already */
+        /* Verify that there isn't a match already */
         if (ioeventfd_check_collision(kvm, p)) {
                 ret = -EEXIST;
                 goto unlock_fail;
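
The heart of this change is the RCU-protected pointer irqfd->irq_entry: kvm_irq_routing_update() and kvm_irqfd_assign() publish (or clear) a cached MSI route under irqfds.lock, while irqfd_wakeup() dereferences it without taking any lock and falls back to schedule_work() when it reads NULL. The fragment below is a minimal user-space sketch of that publish/read shape, not the kernel implementation: C11 release/acquire atomics stand in for rcu_assign_pointer()/rcu_dereference(), there is no equivalent of synchronize_rcu(), and struct route, update_route() and wakeup() are invented names.

/*
 * Minimal sketch of the irq_entry publish/read pattern, assuming
 * C11 atomics as a stand-in for the kernel RCU primitives.
 */
#include <stdatomic.h>
#include <stdio.h>

struct route {
        int gsi;        /* hypothetical routing entry */
};

/* Shared cache; NULL means "no fast path, fall back to deferred work". */
static _Atomic(struct route *) irq_entry;

/* Update side (cf. irqfd_update): publish an MSI route or clear it. */
static void update_route(struct route *r, int is_msi)
{
        /* Release store pairs with the acquire load in wakeup(). */
        atomic_store_explicit(&irq_entry, is_msi ? r : NULL,
                              memory_order_release);
}

/* Read side (cf. irqfd_wakeup): lockless fast path with fallback. */
static void wakeup(void)
{
        struct route *r = atomic_load_explicit(&irq_entry,
                                               memory_order_acquire);
        if (r)
                printf("fast path: inject MSI for gsi %d\n", r->gsi);
        else
                printf("slow path: schedule injection work\n");
}

int main(void)
{
        struct route msi = { .gsi = 5 };

        wakeup();              /* nothing published yet: slow path */
        update_route(&msi, 1); /* routing update caches an MSI route */
        wakeup();              /* lockless fast path */
        update_route(&msi, 0); /* non-MSI route clears the cache */
        wakeup();              /* degrades back to the slow path */
        return 0;
}

Note how a cleared pointer degrades gracefully rather than failing: the reader never blocks, it just takes the slower deferred-injection path, which is why kvm_irqfd_deassign() can clear irq_entry before flushing the workqueue.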