author     Alex Williamson <alex.williamson@redhat.com>    2012-09-21 13:58:03 -0400
committer  Avi Kivity <avi@redhat.com>                     2012-09-23 07:50:15 -0400
commit     7a84428af7ca6a847f058c9ff244a18a2664fd1b
tree       e6c573c701736a75d618b670d66e98398b62769f /virt/kvm
parent     1e08ec4a130e2745d96df169e67c58df98a07311
KVM: Add resampling irqfds for level triggered interrupts
To emulate level triggered interrupts, add a resample option to
KVM_IRQFD. When specified, a new resamplefd is provided that notifies
the user when the irqchip has been resampled by the VM. This may, for
instance, indicate an EOI. Also in this mode, posting of an interrupt
through an irqfd only asserts the interrupt. On resampling, the
interrupt is automatically de-asserted prior to user notification.
This enables level triggered interrupts to be posted and re-enabled
from vfio with no userspace intervention.
All resampling irqfds can make use of a single irq source ID, so we
reserve a new one for this interface.
Signed-off-by: Alex Williamson <alex.williamson@redhat.com>
Signed-off-by: Avi Kivity <avi@redhat.com>
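
For illustration, here is a minimal userspace sketch of how a consumer such as vfio or a device model might register a resampling irqfd. The helper name and the surrounding setup (an open VM fd, a GSI that already has a routing entry) are assumptions for the example; KVM_IRQFD_FLAG_RESAMPLE and the resamplefd field of struct kvm_irqfd are uapi additions made by this patch outside the virt/kvm portion shown below.

#include <sys/eventfd.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

/*
 * Hypothetical helper: register a level-triggered (resampling) irqfd.
 * vm_fd is an open KVM VM file descriptor and gsi already has a routing
 * entry.  Returns the ioctl result (0 on success), or -1 on eventfd error.
 */
static int register_resample_irqfd(int vm_fd, unsigned int gsi,
                                   int *trigger_fd, int *resample_fd)
{
        struct kvm_irqfd irqfd = { 0 };

        *trigger_fd = eventfd(0, EFD_CLOEXEC);   /* signalled to assert the IRQ */
        *resample_fd = eventfd(0, EFD_CLOEXEC);  /* signalled by KVM on guest ack */
        if (*trigger_fd < 0 || *resample_fd < 0)
                return -1;

        irqfd.fd = *trigger_fd;
        irqfd.gsi = gsi;
        irqfd.flags = KVM_IRQFD_FLAG_RESAMPLE;
        irqfd.resamplefd = *resample_fd;

        return ioctl(vm_fd, KVM_IRQFD, &irqfd);
}

Signalling trigger_fd asserts the level interrupt; KVM keeps it asserted until the guest acknowledges it, at which point resample_fd is signalled.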
Diffstat (limited to 'virt/kvm')
-rw-r--r--  virt/kvm/eventfd.c    150
-rw-r--r--  virt/kvm/irq_comm.c     6
2 files changed, 152 insertions, 4 deletions
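
Continuing the sketch above, the resamplefd side would typically be handled by a small loop that waits for KVM's notification and then re-enables the interrupt source. unmask_device_irq() below is a hypothetical placeholder for whatever mechanism the consumer uses (for vfio, unmasking the physical interrupt); it is not part of this patch.

#include <stdint.h>
#include <unistd.h>

/* Placeholder for however the consumer re-enables its interrupt source. */
extern void unmask_device_irq(void);

/*
 * Hypothetical resample loop: block until KVM signals the resamplefd
 * (i.e. the guest acknowledged the level interrupt), then unmask the
 * device so it can assert again via the trigger eventfd.
 */
static void resample_loop(int resample_fd)
{
        uint64_t count;

        while (read(resample_fd, &count, sizeof(count)) == sizeof(count))
                unmask_device_irq();
}
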
diff --git a/virt/kvm/eventfd.c b/virt/kvm/eventfd.c
index 7d7e2aaffece..356965c9d107 100644
--- a/virt/kvm/eventfd.c
+++ b/virt/kvm/eventfd.c
@@ -43,6 +43,31 @@
  * --------------------------------------------------------------------
  */
 
+/*
+ * Resampling irqfds are a special variety of irqfds used to emulate
+ * level triggered interrupts. The interrupt is asserted on eventfd
+ * trigger. On acknowledgement through the irq ack notifier, the
+ * interrupt is de-asserted and userspace is notified through the
+ * resamplefd. All resamplers on the same gsi are de-asserted
+ * together, so we don't need to track the state of each individual
+ * user. We can also therefore share the same irq source ID.
+ */
+struct _irqfd_resampler {
+        struct kvm *kvm;
+        /*
+         * List of resampling struct _irqfd objects sharing this gsi.
+         * RCU list modified under kvm->irqfds.resampler_lock
+         */
+        struct list_head list;
+        struct kvm_irq_ack_notifier notifier;
+        /*
+         * Entry in list of kvm->irqfd.resampler_list. Use for sharing
+         * resamplers among irqfds on the same gsi.
+         * Accessed and modified under kvm->irqfds.resampler_lock
+         */
+        struct list_head link;
+};
+
 struct _irqfd {
         /* Used for MSI fast-path */
         struct kvm *kvm;
@@ -52,6 +77,12 @@ struct _irqfd {
         /* Used for level IRQ fast-path */
         int gsi;
         struct work_struct inject;
+        /* The resampler used by this irqfd (resampler-only) */
+        struct _irqfd_resampler *resampler;
+        /* Eventfd notified on resample (resampler-only) */
+        struct eventfd_ctx *resamplefd;
+        /* Entry in list of irqfds for a resampler (resampler-only) */
+        struct list_head resampler_link;
         /* Used for setup/shutdown */
         struct eventfd_ctx *eventfd;
         struct list_head list;
@@ -67,8 +98,58 @@ irqfd_inject(struct work_struct *work)
         struct _irqfd *irqfd = container_of(work, struct _irqfd, inject);
         struct kvm *kvm = irqfd->kvm;
 
-        kvm_set_irq(kvm, KVM_USERSPACE_IRQ_SOURCE_ID, irqfd->gsi, 1);
-        kvm_set_irq(kvm, KVM_USERSPACE_IRQ_SOURCE_ID, irqfd->gsi, 0);
+        if (!irqfd->resampler) {
+                kvm_set_irq(kvm, KVM_USERSPACE_IRQ_SOURCE_ID, irqfd->gsi, 1);
+                kvm_set_irq(kvm, KVM_USERSPACE_IRQ_SOURCE_ID, irqfd->gsi, 0);
+        } else
+                kvm_set_irq(kvm, KVM_IRQFD_RESAMPLE_IRQ_SOURCE_ID,
+                            irqfd->gsi, 1);
+}
+
+/*
+ * Since resampler irqfds share an IRQ source ID, we de-assert once
+ * then notify all of the resampler irqfds using this GSI. We can't
+ * do multiple de-asserts or we risk racing with incoming re-asserts.
+ */
+static void
+irqfd_resampler_ack(struct kvm_irq_ack_notifier *kian)
+{
+        struct _irqfd_resampler *resampler;
+        struct _irqfd *irqfd;
+
+        resampler = container_of(kian, struct _irqfd_resampler, notifier);
+
+        kvm_set_irq(resampler->kvm, KVM_IRQFD_RESAMPLE_IRQ_SOURCE_ID,
+                    resampler->notifier.gsi, 0);
+
+        rcu_read_lock();
+
+        list_for_each_entry_rcu(irqfd, &resampler->list, resampler_link)
+                eventfd_signal(irqfd->resamplefd, 1);
+
+        rcu_read_unlock();
+}
+
+static void
+irqfd_resampler_shutdown(struct _irqfd *irqfd)
+{
+        struct _irqfd_resampler *resampler = irqfd->resampler;
+        struct kvm *kvm = resampler->kvm;
+
+        mutex_lock(&kvm->irqfds.resampler_lock);
+
+        list_del_rcu(&irqfd->resampler_link);
+        synchronize_rcu();
+
+        if (list_empty(&resampler->list)) {
+                list_del(&resampler->link);
+                kvm_unregister_irq_ack_notifier(kvm, &resampler->notifier);
+                kvm_set_irq(kvm, KVM_IRQFD_RESAMPLE_IRQ_SOURCE_ID,
+                            resampler->notifier.gsi, 0);
+                kfree(resampler);
+        }
+
+        mutex_unlock(&kvm->irqfds.resampler_lock);
 }
 
 /*
@@ -92,6 +173,11 @@ irqfd_shutdown(struct work_struct *work)
          */
         flush_work_sync(&irqfd->inject);
 
+        if (irqfd->resampler) {
+                irqfd_resampler_shutdown(irqfd);
+                eventfd_ctx_put(irqfd->resamplefd);
+        }
+
         /*
          * It is now safe to release the object's resources
          */
@@ -203,7 +289,7 @@ kvm_irqfd_assign(struct kvm *kvm, struct kvm_irqfd *args)
         struct kvm_irq_routing_table *irq_rt;
         struct _irqfd *irqfd, *tmp;
         struct file *file = NULL;
-        struct eventfd_ctx *eventfd = NULL;
+        struct eventfd_ctx *eventfd = NULL, *resamplefd = NULL;
         int ret;
         unsigned int events;
 
@@ -231,6 +317,54 @@ kvm_irqfd_assign(struct kvm *kvm, struct kvm_irqfd *args)
 
         irqfd->eventfd = eventfd;
 
+        if (args->flags & KVM_IRQFD_FLAG_RESAMPLE) {
+                struct _irqfd_resampler *resampler;
+
+                resamplefd = eventfd_ctx_fdget(args->resamplefd);
+                if (IS_ERR(resamplefd)) {
+                        ret = PTR_ERR(resamplefd);
+                        goto fail;
+                }
+
+                irqfd->resamplefd = resamplefd;
+                INIT_LIST_HEAD(&irqfd->resampler_link);
+
+                mutex_lock(&kvm->irqfds.resampler_lock);
+
+                list_for_each_entry(resampler,
+                                    &kvm->irqfds.resampler_list, link) {
+                        if (resampler->notifier.gsi == irqfd->gsi) {
+                                irqfd->resampler = resampler;
+                                break;
+                        }
+                }
+
+                if (!irqfd->resampler) {
+                        resampler = kzalloc(sizeof(*resampler), GFP_KERNEL);
+                        if (!resampler) {
+                                ret = -ENOMEM;
+                                mutex_unlock(&kvm->irqfds.resampler_lock);
+                                goto fail;
+                        }
+
+                        resampler->kvm = kvm;
+                        INIT_LIST_HEAD(&resampler->list);
+                        resampler->notifier.gsi = irqfd->gsi;
+                        resampler->notifier.irq_acked = irqfd_resampler_ack;
+                        INIT_LIST_HEAD(&resampler->link);
+
+                        list_add(&resampler->link, &kvm->irqfds.resampler_list);
+                        kvm_register_irq_ack_notifier(kvm,
+                                                      &resampler->notifier);
+                        irqfd->resampler = resampler;
+                }
+
+                list_add_rcu(&irqfd->resampler_link, &irqfd->resampler->list);
+                synchronize_rcu();
+
+                mutex_unlock(&kvm->irqfds.resampler_lock);
+        }
+
         /*
          * Install our own custom wake-up handling so we are notified via
          * a callback whenever someone signals the underlying eventfd
@@ -276,6 +410,12 @@ kvm_irqfd_assign(struct kvm *kvm, struct kvm_irqfd *args)
         return 0;
 
 fail:
+        if (irqfd->resampler)
+                irqfd_resampler_shutdown(irqfd);
+
+        if (resamplefd && !IS_ERR(resamplefd))
+                eventfd_ctx_put(resamplefd);
+
         if (eventfd && !IS_ERR(eventfd))
                 eventfd_ctx_put(eventfd);
 
@@ -291,6 +431,8 @@ kvm_eventfd_init(struct kvm *kvm)
 {
         spin_lock_init(&kvm->irqfds.lock);
         INIT_LIST_HEAD(&kvm->irqfds.items);
+        INIT_LIST_HEAD(&kvm->irqfds.resampler_list);
+        mutex_init(&kvm->irqfds.resampler_lock);
         INIT_LIST_HEAD(&kvm->ioeventfds);
 }
 
@@ -340,7 +482,7 @@ kvm_irqfd_deassign(struct kvm *kvm, struct kvm_irqfd *args)
 int
 kvm_irqfd(struct kvm *kvm, struct kvm_irqfd *args)
 {
-        if (args->flags & ~KVM_IRQFD_FLAG_DEASSIGN)
+        if (args->flags & ~(KVM_IRQFD_FLAG_DEASSIGN | KVM_IRQFD_FLAG_RESAMPLE))
                 return -EINVAL;
 
         if (args->flags & KVM_IRQFD_FLAG_DEASSIGN)
diff --git a/virt/kvm/irq_comm.c b/virt/kvm/irq_comm.c
index 3ca89c451d6b..2eb58af7ee99 100644
--- a/virt/kvm/irq_comm.c
+++ b/virt/kvm/irq_comm.c
@@ -228,6 +228,9 @@ int kvm_request_irq_source_id(struct kvm *kvm)
         }
 
         ASSERT(irq_source_id != KVM_USERSPACE_IRQ_SOURCE_ID);
+#ifdef CONFIG_X86
+        ASSERT(irq_source_id != KVM_IRQFD_RESAMPLE_IRQ_SOURCE_ID);
+#endif
         set_bit(irq_source_id, bitmap);
 unlock:
         mutex_unlock(&kvm->irq_lock);
@@ -238,6 +241,9 @@ unlock:
 void kvm_free_irq_source_id(struct kvm *kvm, int irq_source_id)
 {
         ASSERT(irq_source_id != KVM_USERSPACE_IRQ_SOURCE_ID);
+#ifdef CONFIG_X86
+        ASSERT(irq_source_id != KVM_IRQFD_RESAMPLE_IRQ_SOURCE_ID);
+#endif
 
         mutex_lock(&kvm->irq_lock);
         if (irq_source_id < 0 ||
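
The KVM_IRQFD_RESAMPLE_IRQ_SOURCE_ID constant asserted above is reserved elsewhere in this patch, outside the virt/kvm-limited diffstat. As a sketch of how the reservation described in the commit message looks on x86, the new source ID sits next to KVM_USERSPACE_IRQ_SOURCE_ID (the exact location and value are stated here as an assumption, not shown in this diff):

/* arch/x86/include/asm/kvm_host.h (sketch, not part of the diff shown above) */
#define KVM_USERSPACE_IRQ_SOURCE_ID             0
#define KVM_IRQFD_RESAMPLE_IRQ_SOURCE_ID        1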