aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorGregory Haskins <ghaskins@novell.com>2009-07-07 17:08:49 -0400
committerAvi Kivity <avi@redhat.com>2009-09-10 01:33:12 -0400
commitd34e6b175e61821026893ec5298cc8e7558df43a (patch)
tree8f2934bb0df05d18372509f9ac59aecee5884997
parent090b7aff27120cdae76a346a70db394844fea598 (diff)
KVM: add ioeventfd support
ioeventfd is a mechanism to register PIO/MMIO regions to trigger an eventfd signal when written to by a guest. Host userspace can register any arbitrary IO address with a corresponding eventfd and then pass the eventfd to a specific end-point of interest for handling. Normal IO requires a blocking round-trip since the operation may cause side-effects in the emulated model or may return data to the caller. Therefore, an IO in KVM traps from the guest to the host, causes a VMX/SVM "heavy-weight" exit back to userspace, and is ultimately serviced by qemu's device model synchronously before returning control back to the vcpu. However, there is a subclass of IO which acts purely as a trigger for other IO (such as to kick off an out-of-band DMA request, etc). For these patterns, the synchronous call is particularly expensive since we really only want to simply get our notification transmitted asynchronously and return as quickly as possible. All the synchronous infrastructure to ensure proper data-dependencies are met in the normal IO case is just unnecessary overhead for signalling. This adds additional computational load on the system, as well as latency to the signalling path. Therefore, we provide a mechanism for registration of an in-kernel trigger point that allows the VCPU to only require a very brief, lightweight exit just long enough to signal an eventfd. This also means that any clients compatible with the eventfd interface (which includes userspace and kernelspace equally well) can now register to be notified. The end result should be a more flexible and higher performance notification API for the backend KVM hypervisor and peripheral components. To test this theory, we built a test-harness called "doorbell". This module has a function called "doorbell_ring()" which simply increments a counter for each time the doorbell is signaled. It supports signalling from either an eventfd, or an ioctl().
We then wired up two paths to the doorbell: One via QEMU via a registered io region and through the doorbell ioctl(). The other is direct via ioeventfd. You can download this test harness here: ftp://ftp.novell.com/dev/ghaskins/doorbell.tar.bz2 The measured results are as follows: qemu-mmio: 110000 iops, 9.09us rtt ioeventfd-mmio: 200100 iops, 5.00us rtt ioeventfd-pio: 367300 iops, 2.72us rtt I didn't measure qemu-pio, because I have to figure out how to register a PIO region with qemu's device model, and I got lazy. However, for now we can extrapolate based on the data from the NULLIO runs of +2.56us for MMIO, and -350ns for HC, we get: qemu-pio: 153139 iops, 6.53us rtt ioeventfd-hc: 412585 iops, 2.37us rtt These are just for fun, for now, until I can gather more data. Here is a graph for your convenience: http://developer.novell.com/wiki/images/7/76/Iofd-chart.png The conclusion to draw is that we save about 4us by skipping the userspace hop. -------------------- Signed-off-by: Gregory Haskins <ghaskins@novell.com> Acked-by: Michael S. Tsirkin <mst@redhat.com> Signed-off-by: Avi Kivity <avi@redhat.com>
-rw-r--r--arch/x86/kvm/x86.c1
-rw-r--r--include/linux/kvm.h24
-rw-r--r--include/linux/kvm_host.h10
-rw-r--r--virt/kvm/eventfd.c251
-rw-r--r--virt/kvm/kvm_main.c11
5 files changed, 293 insertions, 4 deletions
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 2214384ff610..42160b031fcd 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -1212,6 +1212,7 @@ int kvm_dev_ioctl_check_extension(long ext)
1212 case KVM_CAP_IRQ_INJECT_STATUS: 1212 case KVM_CAP_IRQ_INJECT_STATUS:
1213 case KVM_CAP_ASSIGN_DEV_IRQ: 1213 case KVM_CAP_ASSIGN_DEV_IRQ:
1214 case KVM_CAP_IRQFD: 1214 case KVM_CAP_IRQFD:
1215 case KVM_CAP_IOEVENTFD:
1215 case KVM_CAP_PIT2: 1216 case KVM_CAP_PIT2:
1216 case KVM_CAP_PIT_STATE2: 1217 case KVM_CAP_PIT_STATE2:
1217 r = 1; 1218 r = 1;
diff --git a/include/linux/kvm.h b/include/linux/kvm.h
index a74a1fcc28e9..230a91aa61c9 100644
--- a/include/linux/kvm.h
+++ b/include/linux/kvm.h
@@ -307,6 +307,28 @@ struct kvm_guest_debug {
307 struct kvm_guest_debug_arch arch; 307 struct kvm_guest_debug_arch arch;
308}; 308};
309 309
310enum {
311 kvm_ioeventfd_flag_nr_datamatch,
312 kvm_ioeventfd_flag_nr_pio,
313 kvm_ioeventfd_flag_nr_deassign,
314 kvm_ioeventfd_flag_nr_max,
315};
316
317#define KVM_IOEVENTFD_FLAG_DATAMATCH (1 << kvm_ioeventfd_flag_nr_datamatch)
318#define KVM_IOEVENTFD_FLAG_PIO (1 << kvm_ioeventfd_flag_nr_pio)
319#define KVM_IOEVENTFD_FLAG_DEASSIGN (1 << kvm_ioeventfd_flag_nr_deassign)
320
321#define KVM_IOEVENTFD_VALID_FLAG_MASK ((1 << kvm_ioeventfd_flag_nr_max) - 1)
322
323struct kvm_ioeventfd {
324 __u64 datamatch;
325 __u64 addr; /* legal pio/mmio address */
326 __u32 len; /* 1, 2, 4, or 8 bytes */
327 __s32 fd;
328 __u32 flags;
329 __u8 pad[36];
330};
331
310#define KVM_TRC_SHIFT 16 332#define KVM_TRC_SHIFT 16
311/* 333/*
312 * kvm trace categories 334 * kvm trace categories
@@ -412,6 +434,7 @@ struct kvm_guest_debug {
412#ifdef __KVM_HAVE_PIT_STATE2 434#ifdef __KVM_HAVE_PIT_STATE2
413#define KVM_CAP_PIT_STATE2 35 435#define KVM_CAP_PIT_STATE2 35
414#endif 436#endif
437#define KVM_CAP_IOEVENTFD 36
415 438
416#ifdef KVM_CAP_IRQ_ROUTING 439#ifdef KVM_CAP_IRQ_ROUTING
417 440
@@ -520,6 +543,7 @@ struct kvm_irqfd {
520#define KVM_IRQFD _IOW(KVMIO, 0x76, struct kvm_irqfd) 543#define KVM_IRQFD _IOW(KVMIO, 0x76, struct kvm_irqfd)
521#define KVM_CREATE_PIT2 _IOW(KVMIO, 0x77, struct kvm_pit_config) 544#define KVM_CREATE_PIT2 _IOW(KVMIO, 0x77, struct kvm_pit_config)
522#define KVM_SET_BOOT_CPU_ID _IO(KVMIO, 0x78) 545#define KVM_SET_BOOT_CPU_ID _IO(KVMIO, 0x78)
546#define KVM_IOEVENTFD _IOW(KVMIO, 0x79, struct kvm_ioeventfd)
523 547
524/* 548/*
525 * ioctls for vcpu fds 549 * ioctls for vcpu fds
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index 983b0bdeb3ff..6ec9fc56a49e 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -155,6 +155,7 @@ struct kvm {
155 spinlock_t lock; 155 spinlock_t lock;
156 struct list_head items; 156 struct list_head items;
157 } irqfds; 157 } irqfds;
158 struct list_head ioeventfds;
158#endif 159#endif
159 struct kvm_vm_stat stat; 160 struct kvm_vm_stat stat;
160 struct kvm_arch arch; 161 struct kvm_arch arch;
@@ -528,19 +529,24 @@ static inline void kvm_free_irq_routing(struct kvm *kvm) {}
528 529
529#ifdef CONFIG_HAVE_KVM_EVENTFD 530#ifdef CONFIG_HAVE_KVM_EVENTFD
530 531
531void kvm_irqfd_init(struct kvm *kvm); 532void kvm_eventfd_init(struct kvm *kvm);
532int kvm_irqfd(struct kvm *kvm, int fd, int gsi, int flags); 533int kvm_irqfd(struct kvm *kvm, int fd, int gsi, int flags);
533void kvm_irqfd_release(struct kvm *kvm); 534void kvm_irqfd_release(struct kvm *kvm);
535int kvm_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args);
534 536
535#else 537#else
536 538
537static inline void kvm_irqfd_init(struct kvm *kvm) {} 539static inline void kvm_eventfd_init(struct kvm *kvm) {}
538static inline int kvm_irqfd(struct kvm *kvm, int fd, int gsi, int flags) 540static inline int kvm_irqfd(struct kvm *kvm, int fd, int gsi, int flags)
539{ 541{
540 return -EINVAL; 542 return -EINVAL;
541} 543}
542 544
543static inline void kvm_irqfd_release(struct kvm *kvm) {} 545static inline void kvm_irqfd_release(struct kvm *kvm) {}
546static inline int kvm_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args)
547{
548 return -ENOSYS;
549}
544 550
545#endif /* CONFIG_HAVE_KVM_EVENTFD */ 551#endif /* CONFIG_HAVE_KVM_EVENTFD */
546 552
diff --git a/virt/kvm/eventfd.c b/virt/kvm/eventfd.c
index 4092b8dcd510..99017e8a92ac 100644
--- a/virt/kvm/eventfd.c
+++ b/virt/kvm/eventfd.c
@@ -21,6 +21,7 @@
21 */ 21 */
22 22
23#include <linux/kvm_host.h> 23#include <linux/kvm_host.h>
24#include <linux/kvm.h>
24#include <linux/workqueue.h> 25#include <linux/workqueue.h>
25#include <linux/syscalls.h> 26#include <linux/syscalls.h>
26#include <linux/wait.h> 27#include <linux/wait.h>
@@ -28,6 +29,9 @@
28#include <linux/file.h> 29#include <linux/file.h>
29#include <linux/list.h> 30#include <linux/list.h>
30#include <linux/eventfd.h> 31#include <linux/eventfd.h>
32#include <linux/kernel.h>
33
34#include "iodev.h"
31 35
32/* 36/*
33 * -------------------------------------------------------------------- 37 * --------------------------------------------------------------------
@@ -234,10 +238,11 @@ fail:
234} 238}
235 239
236void 240void
237kvm_irqfd_init(struct kvm *kvm) 241kvm_eventfd_init(struct kvm *kvm)
238{ 242{
239 spin_lock_init(&kvm->irqfds.lock); 243 spin_lock_init(&kvm->irqfds.lock);
240 INIT_LIST_HEAD(&kvm->irqfds.items); 244 INIT_LIST_HEAD(&kvm->irqfds.items);
245 INIT_LIST_HEAD(&kvm->ioeventfds);
241} 246}
242 247
243/* 248/*
@@ -327,3 +332,247 @@ static void __exit irqfd_module_exit(void)
327 332
328module_init(irqfd_module_init); 333module_init(irqfd_module_init);
329module_exit(irqfd_module_exit); 334module_exit(irqfd_module_exit);
335
336/*
337 * --------------------------------------------------------------------
338 * ioeventfd: translate a PIO/MMIO memory write to an eventfd signal.
339 *
340 * userspace can register a PIO/MMIO address with an eventfd for receiving
341 * notification when the memory has been touched.
342 * --------------------------------------------------------------------
343 */
344
345struct _ioeventfd {
346 struct list_head list;
347 u64 addr;
348 int length;
349 struct eventfd_ctx *eventfd;
350 u64 datamatch;
351 struct kvm_io_device dev;
352 bool wildcard;
353};
354
355static inline struct _ioeventfd *
356to_ioeventfd(struct kvm_io_device *dev)
357{
358 return container_of(dev, struct _ioeventfd, dev);
359}
360
361static void
362ioeventfd_release(struct _ioeventfd *p)
363{
364 eventfd_ctx_put(p->eventfd);
365 list_del(&p->list);
366 kfree(p);
367}
368
369static bool
370ioeventfd_in_range(struct _ioeventfd *p, gpa_t addr, int len, const void *val)
371{
372 u64 _val;
373
374 if (!(addr == p->addr && len == p->length))
375 /* address-range must be precise for a hit */
376 return false;
377
378 if (p->wildcard)
379 /* all else equal, wildcard is always a hit */
380 return true;
381
382 /* otherwise, we have to actually compare the data */
383
384 BUG_ON(!IS_ALIGNED((unsigned long)val, len));
385
386 switch (len) {
387 case 1:
388 _val = *(u8 *)val;
389 break;
390 case 2:
391 _val = *(u16 *)val;
392 break;
393 case 4:
394 _val = *(u32 *)val;
395 break;
396 case 8:
397 _val = *(u64 *)val;
398 break;
399 default:
400 return false;
401 }
402
403 return _val == p->datamatch ? true : false;
404}
405
406/* MMIO/PIO writes trigger an event if the addr/val match */
407static int
408ioeventfd_write(struct kvm_io_device *this, gpa_t addr, int len,
409 const void *val)
410{
411 struct _ioeventfd *p = to_ioeventfd(this);
412
413 if (!ioeventfd_in_range(p, addr, len, val))
414 return -EOPNOTSUPP;
415
416 eventfd_signal(p->eventfd, 1);
417 return 0;
418}
419
420/*
421 * This function is called as KVM is completely shutting down. We do not
422 * need to worry about locking just nuke anything we have as quickly as possible
423 */
424static void
425ioeventfd_destructor(struct kvm_io_device *this)
426{
427 struct _ioeventfd *p = to_ioeventfd(this);
428
429 ioeventfd_release(p);
430}
431
432static const struct kvm_io_device_ops ioeventfd_ops = {
433 .write = ioeventfd_write,
434 .destructor = ioeventfd_destructor,
435};
436
437/* assumes kvm->slots_lock held */
438static bool
439ioeventfd_check_collision(struct kvm *kvm, struct _ioeventfd *p)
440{
441 struct _ioeventfd *_p;
442
443 list_for_each_entry(_p, &kvm->ioeventfds, list)
444 if (_p->addr == p->addr && _p->length == p->length &&
445 (_p->wildcard || p->wildcard ||
446 _p->datamatch == p->datamatch))
447 return true;
448
449 return false;
450}
451
452static int
453kvm_assign_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args)
454{
455 int pio = args->flags & KVM_IOEVENTFD_FLAG_PIO;
456 struct kvm_io_bus *bus = pio ? &kvm->pio_bus : &kvm->mmio_bus;
457 struct _ioeventfd *p;
458 struct eventfd_ctx *eventfd;
459 int ret;
460
461 /* must be natural-word sized */
462 switch (args->len) {
463 case 1:
464 case 2:
465 case 4:
466 case 8:
467 break;
468 default:
469 return -EINVAL;
470 }
471
472 /* check for range overflow */
473 if (args->addr + args->len < args->addr)
474 return -EINVAL;
475
476 /* check for extra flags that we don't understand */
477 if (args->flags & ~KVM_IOEVENTFD_VALID_FLAG_MASK)
478 return -EINVAL;
479
480 eventfd = eventfd_ctx_fdget(args->fd);
481 if (IS_ERR(eventfd))
482 return PTR_ERR(eventfd);
483
484 p = kzalloc(sizeof(*p), GFP_KERNEL);
485 if (!p) {
486 ret = -ENOMEM;
487 goto fail;
488 }
489
490 INIT_LIST_HEAD(&p->list);
491 p->addr = args->addr;
492 p->length = args->len;
493 p->eventfd = eventfd;
494
495 /* The datamatch feature is optional, otherwise this is a wildcard */
496 if (args->flags & KVM_IOEVENTFD_FLAG_DATAMATCH)
497 p->datamatch = args->datamatch;
498 else
499 p->wildcard = true;
500
501 down_write(&kvm->slots_lock);
502
503 /* Verify that there isnt a match already */
504 if (ioeventfd_check_collision(kvm, p)) {
505 ret = -EEXIST;
506 goto unlock_fail;
507 }
508
509 kvm_iodevice_init(&p->dev, &ioeventfd_ops);
510
511 ret = __kvm_io_bus_register_dev(bus, &p->dev);
512 if (ret < 0)
513 goto unlock_fail;
514
515 list_add_tail(&p->list, &kvm->ioeventfds);
516
517 up_write(&kvm->slots_lock);
518
519 return 0;
520
521unlock_fail:
522 up_write(&kvm->slots_lock);
523
524fail:
525 kfree(p);
526 eventfd_ctx_put(eventfd);
527
528 return ret;
529}
530
531static int
532kvm_deassign_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args)
533{
534 int pio = args->flags & KVM_IOEVENTFD_FLAG_PIO;
535 struct kvm_io_bus *bus = pio ? &kvm->pio_bus : &kvm->mmio_bus;
536 struct _ioeventfd *p, *tmp;
537 struct eventfd_ctx *eventfd;
538 int ret = -ENOENT;
539
540 eventfd = eventfd_ctx_fdget(args->fd);
541 if (IS_ERR(eventfd))
542 return PTR_ERR(eventfd);
543
544 down_write(&kvm->slots_lock);
545
546 list_for_each_entry_safe(p, tmp, &kvm->ioeventfds, list) {
547 bool wildcard = !(args->flags & KVM_IOEVENTFD_FLAG_DATAMATCH);
548
549 if (p->eventfd != eventfd ||
550 p->addr != args->addr ||
551 p->length != args->len ||
552 p->wildcard != wildcard)
553 continue;
554
555 if (!p->wildcard && p->datamatch != args->datamatch)
556 continue;
557
558 __kvm_io_bus_unregister_dev(bus, &p->dev);
559 ioeventfd_release(p);
560 ret = 0;
561 break;
562 }
563
564 up_write(&kvm->slots_lock);
565
566 eventfd_ctx_put(eventfd);
567
568 return ret;
569}
570
571int
572kvm_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args)
573{
574 if (args->flags & KVM_IOEVENTFD_FLAG_DEASSIGN)
575 return kvm_deassign_ioeventfd(kvm, args);
576
577 return kvm_assign_ioeventfd(kvm, args);
578}
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 9c2fd025b8ae..d7b9bbba26da 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -979,7 +979,7 @@ static struct kvm *kvm_create_vm(void)
979 spin_lock_init(&kvm->mmu_lock); 979 spin_lock_init(&kvm->mmu_lock);
980 spin_lock_init(&kvm->requests_lock); 980 spin_lock_init(&kvm->requests_lock);
981 kvm_io_bus_init(&kvm->pio_bus); 981 kvm_io_bus_init(&kvm->pio_bus);
982 kvm_irqfd_init(kvm); 982 kvm_eventfd_init(kvm);
983 mutex_init(&kvm->lock); 983 mutex_init(&kvm->lock);
984 mutex_init(&kvm->irq_lock); 984 mutex_init(&kvm->irq_lock);
985 kvm_io_bus_init(&kvm->mmio_bus); 985 kvm_io_bus_init(&kvm->mmio_bus);
@@ -2271,6 +2271,15 @@ static long kvm_vm_ioctl(struct file *filp,
2271 r = kvm_irqfd(kvm, data.fd, data.gsi, data.flags); 2271 r = kvm_irqfd(kvm, data.fd, data.gsi, data.flags);
2272 break; 2272 break;
2273 } 2273 }
2274 case KVM_IOEVENTFD: {
2275 struct kvm_ioeventfd data;
2276
2277 r = -EFAULT;
2278 if (copy_from_user(&data, argp, sizeof data))
2279 goto out;
2280 r = kvm_ioeventfd(kvm, &data);
2281 break;
2282 }
2274#ifdef CONFIG_KVM_APIC_ARCHITECTURE 2283#ifdef CONFIG_KVM_APIC_ARCHITECTURE
2275 case KVM_SET_BOOT_CPU_ID: 2284 case KVM_SET_BOOT_CPU_ID:
2276 r = 0; 2285 r = 0;