author     Linus Torvalds <torvalds@linux-foundation.org>   2013-05-05 17:47:31 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>   2013-05-05 17:47:31 -0400
commit     01227a889ed56ae53aeebb9f93be9d54dd8b2de8 (patch)
tree       d5eba9359a9827e84d4112b84d48c54df5c5acde /virt
parent     9e6879460c8edb0cd3c24c09b83d06541b5af0dc (diff)
parent     db6ae6158186a17165ef990bda2895ae7594b039 (diff)
Merge tag 'kvm-3.10-1' of git://git.kernel.org/pub/scm/virt/kvm/kvm
Pull kvm updates from Gleb Natapov:
"Highlights of the updates are:
general:
- new emulated device API
- legacy device assignment is now optional
- irqfd interface is more generic and can be shared between arches
x86:
- VMCS shadow support and other nested VMX improvements
- APIC virtualization and Posted Interrupt hardware support
- Optimize mmio spte zapping
ppc:
- BookE: in-kernel MPIC emulation with irqfd support
- Book3S: in-kernel XICS emulation (incomplete)
- Book3S: HV: migration fixes
- BookE: more debug support preparation
- BookE: e6500 support
ARM:
- reworking of Hyp idmaps
s390:
- ioeventfd for virtio-ccw
And many other bug fixes, cleanups and improvements"
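The "new emulated device API" in the highlights above is the KVM_CREATE_DEVICE / KVM_SET_DEVICE_ATTR ioctl family this series introduces (kvm_main.c below gains a per-VM devices list for it). The following is a minimal sketch of the userspace side, assuming a vm_fd obtained via KVM_CREATE_VM; the device type shown is the in-kernel MPIC added by the ppc patches, and the attribute group/id values are placeholders rather than constants taken from this merge:

```c
#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

/* Create an in-kernel emulated device and set one device attribute.
 * vm_fd must be a VM file descriptor obtained via KVM_CREATE_VM. */
static int create_in_kernel_device(int vm_fd, uint64_t reg_base)
{
	struct kvm_create_device cd;
	struct kvm_device_attr attr;

	memset(&cd, 0, sizeof(cd));
	cd.type = KVM_DEV_TYPE_FSL_MPIC_20;	/* e.g. the in-kernel MPIC */
	if (ioctl(vm_fd, KVM_CREATE_DEVICE, &cd) < 0)
		return -1;
	/* cd.fd now refers to the newly created device */

	memset(&attr, 0, sizeof(attr));
	attr.group = 0;				/* placeholder: device-specific group */
	attr.attr = 0;				/* placeholder: device-specific attribute id */
	attr.addr = (uint64_t)(uintptr_t)&reg_base;	/* pointer to the value to set */
	if (ioctl(cd.fd, KVM_SET_DEVICE_ATTR, &attr) < 0)
		return -1;

	return cd.fd;
}
```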
* tag 'kvm-3.10-1' of git://git.kernel.org/pub/scm/virt/kvm/kvm: (204 commits)
kvm: Add compat_ioctl for device control API
KVM: x86: Account for failing enable_irq_window for NMI window request
KVM: PPC: Book3S: Add API for in-kernel XICS emulation
kvm/ppc/mpic: fix missing unlock in set_base_addr()
kvm/ppc: Hold srcu lock when calling kvm_io_bus_read/write
kvm/ppc/mpic: remove users
kvm/ppc/mpic: fix mmio region lists when multiple guests used
kvm/ppc/mpic: remove default routes from documentation
kvm: KVM_CAP_IOMMU only available with device assignment
ARM: KVM: iterate over all CPUs for CPU compatibility check
KVM: ARM: Fix spelling in error message
ARM: KVM: define KVM_ARM_MAX_VCPUS unconditionally
KVM: ARM: Fix API documentation for ONE_REG encoding
ARM: KVM: promote vfp_host pointer to generic host cpu context
ARM: KVM: add architecture specific hook for capabilities
ARM: KVM: perform HYP initialization for hotplugged CPUs
ARM: KVM: switch to a dual-step HYP init code
ARM: KVM: rework HYP page table freeing
ARM: KVM: enforce maximum size for identity mapped code
ARM: KVM: move to a KVM provided HYP idmap
...
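Two of the paths reworked below, irqfd (eventfd.c) and ioeventfd, are driven from userspace through the KVM_IRQFD and KVM_IOEVENTFD ioctls. A minimal sketch of that usage, again assuming a vm_fd from KVM_CREATE_VM; the GSI number and doorbell address are illustrative placeholders, not values from this diff:

```c
#include <stdint.h>
#include <string.h>
#include <sys/eventfd.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

/* Wire an eventfd to a guest interrupt line (irqfd) and register another
 * eventfd that fires when the guest writes a 4-byte MMIO doorbell (ioeventfd). */
static int wire_up_eventfds(int vm_fd, uint32_t gsi, uint64_t doorbell_gpa)
{
	int irq_efd = eventfd(0, EFD_CLOEXEC);
	int io_efd = eventfd(0, EFD_CLOEXEC);
	struct kvm_irqfd irqfd;
	struct kvm_ioeventfd ioeventfd;

	if (irq_efd < 0 || io_efd < 0)
		return -1;

	memset(&irqfd, 0, sizeof(irqfd));
	irqfd.fd = irq_efd;
	irqfd.gsi = gsi;			/* guest interrupt line to assert */
	if (ioctl(vm_fd, KVM_IRQFD, &irqfd) < 0)
		return -1;

	memset(&ioeventfd, 0, sizeof(ioeventfd));
	ioeventfd.addr = doorbell_gpa;		/* guest-physical MMIO address */
	ioeventfd.len = 4;			/* 32-bit doorbell register */
	ioeventfd.fd = io_efd;
	ioeventfd.flags = 0;			/* MMIO bus; PIO/virtio-ccw use flag bits */
	if (ioctl(vm_fd, KVM_IOEVENTFD, &ioeventfd) < 0)
		return -1;

	/* A write to irq_efd now injects GSI 'gsi' into the guest, and a guest
	 * store to doorbell_gpa signals io_efd without a userspace exit. */
	return 0;
}
```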
Diffstat (limited to 'virt')
-rw-r--r--  virt/kvm/Kconfig        |   3
-rw-r--r--  virt/kvm/assigned-dev.c |  43
-rw-r--r--  virt/kvm/eventfd.c      |  53
-rw-r--r--  virt/kvm/ioapic.c       | 163
-rw-r--r--  virt/kvm/ioapic.h       |  27
-rw-r--r--  virt/kvm/irq_comm.c     | 215
-rw-r--r--  virt/kvm/irqchip.c      | 237
-rw-r--r--  virt/kvm/kvm_main.c     | 258
8 files changed, 659 insertions, 340 deletions
diff --git a/virt/kvm/Kconfig b/virt/kvm/Kconfig
index d01b24b72c61..779262f59e25 100644
--- a/virt/kvm/Kconfig
+++ b/virt/kvm/Kconfig
@@ -6,6 +6,9 @@ config HAVE_KVM | |||
6 | config HAVE_KVM_IRQCHIP | 6 | config HAVE_KVM_IRQCHIP |
7 | bool | 7 | bool |
8 | 8 | ||
9 | config HAVE_KVM_IRQ_ROUTING | ||
10 | bool | ||
11 | |||
9 | config HAVE_KVM_EVENTFD | 12 | config HAVE_KVM_EVENTFD |
10 | bool | 13 | bool |
11 | select EVENTFD | 14 | select EVENTFD |
diff --git a/virt/kvm/assigned-dev.c b/virt/kvm/assigned-dev.c
index 3642239252b0..8db43701016f 100644
--- a/virt/kvm/assigned-dev.c
+++ b/virt/kvm/assigned-dev.c
@@ -80,11 +80,12 @@ kvm_assigned_dev_raise_guest_irq(struct kvm_assigned_dev_kernel *assigned_dev, | |||
80 | spin_lock(&assigned_dev->intx_mask_lock); | 80 | spin_lock(&assigned_dev->intx_mask_lock); |
81 | if (!(assigned_dev->flags & KVM_DEV_ASSIGN_MASK_INTX)) | 81 | if (!(assigned_dev->flags & KVM_DEV_ASSIGN_MASK_INTX)) |
82 | kvm_set_irq(assigned_dev->kvm, | 82 | kvm_set_irq(assigned_dev->kvm, |
83 | assigned_dev->irq_source_id, vector, 1); | 83 | assigned_dev->irq_source_id, vector, 1, |
84 | false); | ||
84 | spin_unlock(&assigned_dev->intx_mask_lock); | 85 | spin_unlock(&assigned_dev->intx_mask_lock); |
85 | } else | 86 | } else |
86 | kvm_set_irq(assigned_dev->kvm, assigned_dev->irq_source_id, | 87 | kvm_set_irq(assigned_dev->kvm, assigned_dev->irq_source_id, |
87 | vector, 1); | 88 | vector, 1, false); |
88 | } | 89 | } |
89 | 90 | ||
90 | static irqreturn_t kvm_assigned_dev_thread_intx(int irq, void *dev_id) | 91 | static irqreturn_t kvm_assigned_dev_thread_intx(int irq, void *dev_id) |
@@ -165,7 +166,7 @@ static void kvm_assigned_dev_ack_irq(struct kvm_irq_ack_notifier *kian) | |||
165 | container_of(kian, struct kvm_assigned_dev_kernel, | 166 | container_of(kian, struct kvm_assigned_dev_kernel, |
166 | ack_notifier); | 167 | ack_notifier); |
167 | 168 | ||
168 | kvm_set_irq(dev->kvm, dev->irq_source_id, dev->guest_irq, 0); | 169 | kvm_set_irq(dev->kvm, dev->irq_source_id, dev->guest_irq, 0, false); |
169 | 170 | ||
170 | spin_lock(&dev->intx_mask_lock); | 171 | spin_lock(&dev->intx_mask_lock); |
171 | 172 | ||
@@ -188,7 +189,7 @@ static void kvm_assigned_dev_ack_irq(struct kvm_irq_ack_notifier *kian) | |||
188 | 189 | ||
189 | if (reassert) | 190 | if (reassert) |
190 | kvm_set_irq(dev->kvm, dev->irq_source_id, | 191 | kvm_set_irq(dev->kvm, dev->irq_source_id, |
191 | dev->guest_irq, 1); | 192 | dev->guest_irq, 1, false); |
192 | } | 193 | } |
193 | 194 | ||
194 | spin_unlock(&dev->intx_mask_lock); | 195 | spin_unlock(&dev->intx_mask_lock); |
@@ -202,7 +203,7 @@ static void deassign_guest_irq(struct kvm *kvm, | |||
202 | &assigned_dev->ack_notifier); | 203 | &assigned_dev->ack_notifier); |
203 | 204 | ||
204 | kvm_set_irq(assigned_dev->kvm, assigned_dev->irq_source_id, | 205 | kvm_set_irq(assigned_dev->kvm, assigned_dev->irq_source_id, |
205 | assigned_dev->guest_irq, 0); | 206 | assigned_dev->guest_irq, 0, false); |
206 | 207 | ||
207 | if (assigned_dev->irq_source_id != -1) | 208 | if (assigned_dev->irq_source_id != -1) |
208 | kvm_free_irq_source_id(kvm, assigned_dev->irq_source_id); | 209 | kvm_free_irq_source_id(kvm, assigned_dev->irq_source_id); |
@@ -901,7 +902,7 @@ static int kvm_vm_ioctl_set_pci_irq_mask(struct kvm *kvm, | |||
901 | if (match->irq_requested_type & KVM_DEV_IRQ_GUEST_INTX) { | 902 | if (match->irq_requested_type & KVM_DEV_IRQ_GUEST_INTX) { |
902 | if (assigned_dev->flags & KVM_DEV_ASSIGN_MASK_INTX) { | 903 | if (assigned_dev->flags & KVM_DEV_ASSIGN_MASK_INTX) { |
903 | kvm_set_irq(match->kvm, match->irq_source_id, | 904 | kvm_set_irq(match->kvm, match->irq_source_id, |
904 | match->guest_irq, 0); | 905 | match->guest_irq, 0, false); |
905 | /* | 906 | /* |
906 | * Masking at hardware-level is performed on demand, | 907 | * Masking at hardware-level is performed on demand, |
907 | * i.e. when an IRQ actually arrives at the host. | 908 | * i.e. when an IRQ actually arrives at the host. |
@@ -982,36 +983,6 @@ long kvm_vm_ioctl_assigned_device(struct kvm *kvm, unsigned ioctl, | |||
982 | goto out; | 983 | goto out; |
983 | break; | 984 | break; |
984 | } | 985 | } |
985 | #ifdef KVM_CAP_IRQ_ROUTING | ||
986 | case KVM_SET_GSI_ROUTING: { | ||
987 | struct kvm_irq_routing routing; | ||
988 | struct kvm_irq_routing __user *urouting; | ||
989 | struct kvm_irq_routing_entry *entries; | ||
990 | |||
991 | r = -EFAULT; | ||
992 | if (copy_from_user(&routing, argp, sizeof(routing))) | ||
993 | goto out; | ||
994 | r = -EINVAL; | ||
995 | if (routing.nr >= KVM_MAX_IRQ_ROUTES) | ||
996 | goto out; | ||
997 | if (routing.flags) | ||
998 | goto out; | ||
999 | r = -ENOMEM; | ||
1000 | entries = vmalloc(routing.nr * sizeof(*entries)); | ||
1001 | if (!entries) | ||
1002 | goto out; | ||
1003 | r = -EFAULT; | ||
1004 | urouting = argp; | ||
1005 | if (copy_from_user(entries, urouting->entries, | ||
1006 | routing.nr * sizeof(*entries))) | ||
1007 | goto out_free_irq_routing; | ||
1008 | r = kvm_set_irq_routing(kvm, entries, routing.nr, | ||
1009 | routing.flags); | ||
1010 | out_free_irq_routing: | ||
1011 | vfree(entries); | ||
1012 | break; | ||
1013 | } | ||
1014 | #endif /* KVM_CAP_IRQ_ROUTING */ | ||
1015 | #ifdef __KVM_HAVE_MSIX | 986 | #ifdef __KVM_HAVE_MSIX |
1016 | case KVM_ASSIGN_SET_MSIX_NR: { | 987 | case KVM_ASSIGN_SET_MSIX_NR: { |
1017 | struct kvm_assigned_msix_nr entry_nr; | 988 | struct kvm_assigned_msix_nr entry_nr; |
diff --git a/virt/kvm/eventfd.c b/virt/kvm/eventfd.c
index adb17f266b28..64ee720b75c7 100644
--- a/virt/kvm/eventfd.c
+++ b/virt/kvm/eventfd.c
@@ -35,7 +35,7 @@ | |||
35 | 35 | ||
36 | #include "iodev.h" | 36 | #include "iodev.h" |
37 | 37 | ||
38 | #ifdef __KVM_HAVE_IOAPIC | 38 | #ifdef CONFIG_HAVE_KVM_IRQ_ROUTING |
39 | /* | 39 | /* |
40 | * -------------------------------------------------------------------- | 40 | * -------------------------------------------------------------------- |
41 | * irqfd: Allows an fd to be used to inject an interrupt to the guest | 41 | * irqfd: Allows an fd to be used to inject an interrupt to the guest |
@@ -100,11 +100,13 @@ irqfd_inject(struct work_struct *work) | |||
100 | struct kvm *kvm = irqfd->kvm; | 100 | struct kvm *kvm = irqfd->kvm; |
101 | 101 | ||
102 | if (!irqfd->resampler) { | 102 | if (!irqfd->resampler) { |
103 | kvm_set_irq(kvm, KVM_USERSPACE_IRQ_SOURCE_ID, irqfd->gsi, 1); | 103 | kvm_set_irq(kvm, KVM_USERSPACE_IRQ_SOURCE_ID, irqfd->gsi, 1, |
104 | kvm_set_irq(kvm, KVM_USERSPACE_IRQ_SOURCE_ID, irqfd->gsi, 0); | 104 | false); |
105 | kvm_set_irq(kvm, KVM_USERSPACE_IRQ_SOURCE_ID, irqfd->gsi, 0, | ||
106 | false); | ||
105 | } else | 107 | } else |
106 | kvm_set_irq(kvm, KVM_IRQFD_RESAMPLE_IRQ_SOURCE_ID, | 108 | kvm_set_irq(kvm, KVM_IRQFD_RESAMPLE_IRQ_SOURCE_ID, |
107 | irqfd->gsi, 1); | 109 | irqfd->gsi, 1, false); |
108 | } | 110 | } |
109 | 111 | ||
110 | /* | 112 | /* |
@@ -121,7 +123,7 @@ irqfd_resampler_ack(struct kvm_irq_ack_notifier *kian) | |||
121 | resampler = container_of(kian, struct _irqfd_resampler, notifier); | 123 | resampler = container_of(kian, struct _irqfd_resampler, notifier); |
122 | 124 | ||
123 | kvm_set_irq(resampler->kvm, KVM_IRQFD_RESAMPLE_IRQ_SOURCE_ID, | 125 | kvm_set_irq(resampler->kvm, KVM_IRQFD_RESAMPLE_IRQ_SOURCE_ID, |
124 | resampler->notifier.gsi, 0); | 126 | resampler->notifier.gsi, 0, false); |
125 | 127 | ||
126 | rcu_read_lock(); | 128 | rcu_read_lock(); |
127 | 129 | ||
@@ -146,7 +148,7 @@ irqfd_resampler_shutdown(struct _irqfd *irqfd) | |||
146 | list_del(&resampler->link); | 148 | list_del(&resampler->link); |
147 | kvm_unregister_irq_ack_notifier(kvm, &resampler->notifier); | 149 | kvm_unregister_irq_ack_notifier(kvm, &resampler->notifier); |
148 | kvm_set_irq(kvm, KVM_IRQFD_RESAMPLE_IRQ_SOURCE_ID, | 150 | kvm_set_irq(kvm, KVM_IRQFD_RESAMPLE_IRQ_SOURCE_ID, |
149 | resampler->notifier.gsi, 0); | 151 | resampler->notifier.gsi, 0, false); |
150 | kfree(resampler); | 152 | kfree(resampler); |
151 | } | 153 | } |
152 | 154 | ||
@@ -225,7 +227,8 @@ irqfd_wakeup(wait_queue_t *wait, unsigned mode, int sync, void *key) | |||
225 | irq = rcu_dereference(irqfd->irq_entry); | 227 | irq = rcu_dereference(irqfd->irq_entry); |
226 | /* An event has been signaled, inject an interrupt */ | 228 | /* An event has been signaled, inject an interrupt */ |
227 | if (irq) | 229 | if (irq) |
228 | kvm_set_msi(irq, kvm, KVM_USERSPACE_IRQ_SOURCE_ID, 1); | 230 | kvm_set_msi(irq, kvm, KVM_USERSPACE_IRQ_SOURCE_ID, 1, |
231 | false); | ||
229 | else | 232 | else |
230 | schedule_work(&irqfd->inject); | 233 | schedule_work(&irqfd->inject); |
231 | rcu_read_unlock(); | 234 | rcu_read_unlock(); |
@@ -430,7 +433,7 @@ fail: | |||
430 | void | 433 | void |
431 | kvm_eventfd_init(struct kvm *kvm) | 434 | kvm_eventfd_init(struct kvm *kvm) |
432 | { | 435 | { |
433 | #ifdef __KVM_HAVE_IOAPIC | 436 | #ifdef CONFIG_HAVE_KVM_IRQ_ROUTING |
434 | spin_lock_init(&kvm->irqfds.lock); | 437 | spin_lock_init(&kvm->irqfds.lock); |
435 | INIT_LIST_HEAD(&kvm->irqfds.items); | 438 | INIT_LIST_HEAD(&kvm->irqfds.items); |
436 | INIT_LIST_HEAD(&kvm->irqfds.resampler_list); | 439 | INIT_LIST_HEAD(&kvm->irqfds.resampler_list); |
@@ -439,7 +442,7 @@ kvm_eventfd_init(struct kvm *kvm) | |||
439 | INIT_LIST_HEAD(&kvm->ioeventfds); | 442 | INIT_LIST_HEAD(&kvm->ioeventfds); |
440 | } | 443 | } |
441 | 444 | ||
442 | #ifdef __KVM_HAVE_IOAPIC | 445 | #ifdef CONFIG_HAVE_KVM_IRQ_ROUTING |
443 | /* | 446 | /* |
444 | * shutdown any irqfd's that match fd+gsi | 447 | * shutdown any irqfd's that match fd+gsi |
445 | */ | 448 | */ |
@@ -543,7 +546,7 @@ void kvm_irq_routing_update(struct kvm *kvm, | |||
543 | * aggregated from all vm* instances. We need our own isolated single-thread | 546 | * aggregated from all vm* instances. We need our own isolated single-thread |
544 | * queue to prevent deadlock against flushing the normal work-queue. | 547 | * queue to prevent deadlock against flushing the normal work-queue. |
545 | */ | 548 | */ |
546 | static int __init irqfd_module_init(void) | 549 | int kvm_irqfd_init(void) |
547 | { | 550 | { |
548 | irqfd_cleanup_wq = create_singlethread_workqueue("kvm-irqfd-cleanup"); | 551 | irqfd_cleanup_wq = create_singlethread_workqueue("kvm-irqfd-cleanup"); |
549 | if (!irqfd_cleanup_wq) | 552 | if (!irqfd_cleanup_wq) |
@@ -552,13 +555,10 @@ static int __init irqfd_module_init(void) | |||
552 | return 0; | 555 | return 0; |
553 | } | 556 | } |
554 | 557 | ||
555 | static void __exit irqfd_module_exit(void) | 558 | void kvm_irqfd_exit(void) |
556 | { | 559 | { |
557 | destroy_workqueue(irqfd_cleanup_wq); | 560 | destroy_workqueue(irqfd_cleanup_wq); |
558 | } | 561 | } |
559 | |||
560 | module_init(irqfd_module_init); | ||
561 | module_exit(irqfd_module_exit); | ||
562 | #endif | 562 | #endif |
563 | 563 | ||
564 | /* | 564 | /* |
@@ -577,6 +577,7 @@ struct _ioeventfd { | |||
577 | struct eventfd_ctx *eventfd; | 577 | struct eventfd_ctx *eventfd; |
578 | u64 datamatch; | 578 | u64 datamatch; |
579 | struct kvm_io_device dev; | 579 | struct kvm_io_device dev; |
580 | u8 bus_idx; | ||
580 | bool wildcard; | 581 | bool wildcard; |
581 | }; | 582 | }; |
582 | 583 | ||
@@ -669,7 +670,8 @@ ioeventfd_check_collision(struct kvm *kvm, struct _ioeventfd *p) | |||
669 | struct _ioeventfd *_p; | 670 | struct _ioeventfd *_p; |
670 | 671 | ||
671 | list_for_each_entry(_p, &kvm->ioeventfds, list) | 672 | list_for_each_entry(_p, &kvm->ioeventfds, list) |
672 | if (_p->addr == p->addr && _p->length == p->length && | 673 | if (_p->bus_idx == p->bus_idx && |
674 | _p->addr == p->addr && _p->length == p->length && | ||
673 | (_p->wildcard || p->wildcard || | 675 | (_p->wildcard || p->wildcard || |
674 | _p->datamatch == p->datamatch)) | 676 | _p->datamatch == p->datamatch)) |
675 | return true; | 677 | return true; |
@@ -677,15 +679,24 @@ ioeventfd_check_collision(struct kvm *kvm, struct _ioeventfd *p) | |||
677 | return false; | 679 | return false; |
678 | } | 680 | } |
679 | 681 | ||
682 | static enum kvm_bus ioeventfd_bus_from_flags(__u32 flags) | ||
683 | { | ||
684 | if (flags & KVM_IOEVENTFD_FLAG_PIO) | ||
685 | return KVM_PIO_BUS; | ||
686 | if (flags & KVM_IOEVENTFD_FLAG_VIRTIO_CCW_NOTIFY) | ||
687 | return KVM_VIRTIO_CCW_NOTIFY_BUS; | ||
688 | return KVM_MMIO_BUS; | ||
689 | } | ||
690 | |||
680 | static int | 691 | static int |
681 | kvm_assign_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args) | 692 | kvm_assign_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args) |
682 | { | 693 | { |
683 | int pio = args->flags & KVM_IOEVENTFD_FLAG_PIO; | 694 | enum kvm_bus bus_idx; |
684 | enum kvm_bus bus_idx = pio ? KVM_PIO_BUS : KVM_MMIO_BUS; | ||
685 | struct _ioeventfd *p; | 695 | struct _ioeventfd *p; |
686 | struct eventfd_ctx *eventfd; | 696 | struct eventfd_ctx *eventfd; |
687 | int ret; | 697 | int ret; |
688 | 698 | ||
699 | bus_idx = ioeventfd_bus_from_flags(args->flags); | ||
689 | /* must be natural-word sized */ | 700 | /* must be natural-word sized */ |
690 | switch (args->len) { | 701 | switch (args->len) { |
691 | case 1: | 702 | case 1: |
@@ -717,6 +728,7 @@ kvm_assign_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args) | |||
717 | 728 | ||
718 | INIT_LIST_HEAD(&p->list); | 729 | INIT_LIST_HEAD(&p->list); |
719 | p->addr = args->addr; | 730 | p->addr = args->addr; |
731 | p->bus_idx = bus_idx; | ||
720 | p->length = args->len; | 732 | p->length = args->len; |
721 | p->eventfd = eventfd; | 733 | p->eventfd = eventfd; |
722 | 734 | ||
@@ -760,12 +772,12 @@ fail: | |||
760 | static int | 772 | static int |
761 | kvm_deassign_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args) | 773 | kvm_deassign_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args) |
762 | { | 774 | { |
763 | int pio = args->flags & KVM_IOEVENTFD_FLAG_PIO; | 775 | enum kvm_bus bus_idx; |
764 | enum kvm_bus bus_idx = pio ? KVM_PIO_BUS : KVM_MMIO_BUS; | ||
765 | struct _ioeventfd *p, *tmp; | 776 | struct _ioeventfd *p, *tmp; |
766 | struct eventfd_ctx *eventfd; | 777 | struct eventfd_ctx *eventfd; |
767 | int ret = -ENOENT; | 778 | int ret = -ENOENT; |
768 | 779 | ||
780 | bus_idx = ioeventfd_bus_from_flags(args->flags); | ||
769 | eventfd = eventfd_ctx_fdget(args->fd); | 781 | eventfd = eventfd_ctx_fdget(args->fd); |
770 | if (IS_ERR(eventfd)) | 782 | if (IS_ERR(eventfd)) |
771 | return PTR_ERR(eventfd); | 783 | return PTR_ERR(eventfd); |
@@ -775,7 +787,8 @@ kvm_deassign_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args) | |||
775 | list_for_each_entry_safe(p, tmp, &kvm->ioeventfds, list) { | 787 | list_for_each_entry_safe(p, tmp, &kvm->ioeventfds, list) { |
776 | bool wildcard = !(args->flags & KVM_IOEVENTFD_FLAG_DATAMATCH); | 788 | bool wildcard = !(args->flags & KVM_IOEVENTFD_FLAG_DATAMATCH); |
777 | 789 | ||
778 | if (p->eventfd != eventfd || | 790 | if (p->bus_idx != bus_idx || |
791 | p->eventfd != eventfd || | ||
779 | p->addr != args->addr || | 792 | p->addr != args->addr || |
780 | p->length != args->len || | 793 | p->length != args->len || |
781 | p->wildcard != wildcard) | 794 | p->wildcard != wildcard) |
diff --git a/virt/kvm/ioapic.c b/virt/kvm/ioapic.c
index 5ba005c00e2f..2d682977ce82 100644
--- a/virt/kvm/ioapic.c
+++ b/virt/kvm/ioapic.c
@@ -50,7 +50,8 @@ | |||
50 | #else | 50 | #else |
51 | #define ioapic_debug(fmt, arg...) | 51 | #define ioapic_debug(fmt, arg...) |
52 | #endif | 52 | #endif |
53 | static int ioapic_deliver(struct kvm_ioapic *vioapic, int irq); | 53 | static int ioapic_deliver(struct kvm_ioapic *vioapic, int irq, |
54 | bool line_status); | ||
54 | 55 | ||
55 | static unsigned long ioapic_read_indirect(struct kvm_ioapic *ioapic, | 56 | static unsigned long ioapic_read_indirect(struct kvm_ioapic *ioapic, |
56 | unsigned long addr, | 57 | unsigned long addr, |
@@ -90,7 +91,80 @@ static unsigned long ioapic_read_indirect(struct kvm_ioapic *ioapic, | |||
90 | return result; | 91 | return result; |
91 | } | 92 | } |
92 | 93 | ||
93 | static int ioapic_service(struct kvm_ioapic *ioapic, unsigned int idx) | 94 | static void rtc_irq_eoi_tracking_reset(struct kvm_ioapic *ioapic) |
95 | { | ||
96 | ioapic->rtc_status.pending_eoi = 0; | ||
97 | bitmap_zero(ioapic->rtc_status.dest_map, KVM_MAX_VCPUS); | ||
98 | } | ||
99 | |||
100 | static void __rtc_irq_eoi_tracking_restore_one(struct kvm_vcpu *vcpu) | ||
101 | { | ||
102 | bool new_val, old_val; | ||
103 | struct kvm_ioapic *ioapic = vcpu->kvm->arch.vioapic; | ||
104 | union kvm_ioapic_redirect_entry *e; | ||
105 | |||
106 | e = &ioapic->redirtbl[RTC_GSI]; | ||
107 | if (!kvm_apic_match_dest(vcpu, NULL, 0, e->fields.dest_id, | ||
108 | e->fields.dest_mode)) | ||
109 | return; | ||
110 | |||
111 | new_val = kvm_apic_pending_eoi(vcpu, e->fields.vector); | ||
112 | old_val = test_bit(vcpu->vcpu_id, ioapic->rtc_status.dest_map); | ||
113 | |||
114 | if (new_val == old_val) | ||
115 | return; | ||
116 | |||
117 | if (new_val) { | ||
118 | __set_bit(vcpu->vcpu_id, ioapic->rtc_status.dest_map); | ||
119 | ioapic->rtc_status.pending_eoi++; | ||
120 | } else { | ||
121 | __clear_bit(vcpu->vcpu_id, ioapic->rtc_status.dest_map); | ||
122 | ioapic->rtc_status.pending_eoi--; | ||
123 | } | ||
124 | |||
125 | WARN_ON(ioapic->rtc_status.pending_eoi < 0); | ||
126 | } | ||
127 | |||
128 | void kvm_rtc_eoi_tracking_restore_one(struct kvm_vcpu *vcpu) | ||
129 | { | ||
130 | struct kvm_ioapic *ioapic = vcpu->kvm->arch.vioapic; | ||
131 | |||
132 | spin_lock(&ioapic->lock); | ||
133 | __rtc_irq_eoi_tracking_restore_one(vcpu); | ||
134 | spin_unlock(&ioapic->lock); | ||
135 | } | ||
136 | |||
137 | static void kvm_rtc_eoi_tracking_restore_all(struct kvm_ioapic *ioapic) | ||
138 | { | ||
139 | struct kvm_vcpu *vcpu; | ||
140 | int i; | ||
141 | |||
142 | if (RTC_GSI >= IOAPIC_NUM_PINS) | ||
143 | return; | ||
144 | |||
145 | rtc_irq_eoi_tracking_reset(ioapic); | ||
146 | kvm_for_each_vcpu(i, vcpu, ioapic->kvm) | ||
147 | __rtc_irq_eoi_tracking_restore_one(vcpu); | ||
148 | } | ||
149 | |||
150 | static void rtc_irq_eoi(struct kvm_ioapic *ioapic, struct kvm_vcpu *vcpu) | ||
151 | { | ||
152 | if (test_and_clear_bit(vcpu->vcpu_id, ioapic->rtc_status.dest_map)) | ||
153 | --ioapic->rtc_status.pending_eoi; | ||
154 | |||
155 | WARN_ON(ioapic->rtc_status.pending_eoi < 0); | ||
156 | } | ||
157 | |||
158 | static bool rtc_irq_check_coalesced(struct kvm_ioapic *ioapic) | ||
159 | { | ||
160 | if (ioapic->rtc_status.pending_eoi > 0) | ||
161 | return true; /* coalesced */ | ||
162 | |||
163 | return false; | ||
164 | } | ||
165 | |||
166 | static int ioapic_service(struct kvm_ioapic *ioapic, unsigned int idx, | ||
167 | bool line_status) | ||
94 | { | 168 | { |
95 | union kvm_ioapic_redirect_entry *pent; | 169 | union kvm_ioapic_redirect_entry *pent; |
96 | int injected = -1; | 170 | int injected = -1; |
@@ -98,7 +172,7 @@ static int ioapic_service(struct kvm_ioapic *ioapic, unsigned int idx) | |||
98 | pent = &ioapic->redirtbl[idx]; | 172 | pent = &ioapic->redirtbl[idx]; |
99 | 173 | ||
100 | if (!pent->fields.mask) { | 174 | if (!pent->fields.mask) { |
101 | injected = ioapic_deliver(ioapic, idx); | 175 | injected = ioapic_deliver(ioapic, idx, line_status); |
102 | if (injected && pent->fields.trig_mode == IOAPIC_LEVEL_TRIG) | 176 | if (injected && pent->fields.trig_mode == IOAPIC_LEVEL_TRIG) |
103 | pent->fields.remote_irr = 1; | 177 | pent->fields.remote_irr = 1; |
104 | } | 178 | } |
@@ -119,41 +193,48 @@ static void update_handled_vectors(struct kvm_ioapic *ioapic) | |||
119 | smp_wmb(); | 193 | smp_wmb(); |
120 | } | 194 | } |
121 | 195 | ||
122 | void kvm_ioapic_calculate_eoi_exitmap(struct kvm_vcpu *vcpu, | 196 | void kvm_ioapic_scan_entry(struct kvm_vcpu *vcpu, u64 *eoi_exit_bitmap, |
123 | u64 *eoi_exit_bitmap) | 197 | u32 *tmr) |
124 | { | 198 | { |
125 | struct kvm_ioapic *ioapic = vcpu->kvm->arch.vioapic; | 199 | struct kvm_ioapic *ioapic = vcpu->kvm->arch.vioapic; |
126 | union kvm_ioapic_redirect_entry *e; | 200 | union kvm_ioapic_redirect_entry *e; |
127 | struct kvm_lapic_irq irqe; | ||
128 | int index; | 201 | int index; |
129 | 202 | ||
130 | spin_lock(&ioapic->lock); | 203 | spin_lock(&ioapic->lock); |
131 | /* traverse ioapic entry to set eoi exit bitmap*/ | ||
132 | for (index = 0; index < IOAPIC_NUM_PINS; index++) { | 204 | for (index = 0; index < IOAPIC_NUM_PINS; index++) { |
133 | e = &ioapic->redirtbl[index]; | 205 | e = &ioapic->redirtbl[index]; |
134 | if (!e->fields.mask && | 206 | if (!e->fields.mask && |
135 | (e->fields.trig_mode == IOAPIC_LEVEL_TRIG || | 207 | (e->fields.trig_mode == IOAPIC_LEVEL_TRIG || |
136 | kvm_irq_has_notifier(ioapic->kvm, KVM_IRQCHIP_IOAPIC, | 208 | kvm_irq_has_notifier(ioapic->kvm, KVM_IRQCHIP_IOAPIC, |
137 | index))) { | 209 | index) || index == RTC_GSI)) { |
138 | irqe.dest_id = e->fields.dest_id; | 210 | if (kvm_apic_match_dest(vcpu, NULL, 0, |
139 | irqe.vector = e->fields.vector; | 211 | e->fields.dest_id, e->fields.dest_mode)) { |
140 | irqe.dest_mode = e->fields.dest_mode; | 212 | __set_bit(e->fields.vector, |
141 | irqe.delivery_mode = e->fields.delivery_mode << 8; | 213 | (unsigned long *)eoi_exit_bitmap); |
142 | kvm_calculate_eoi_exitmap(vcpu, &irqe, eoi_exit_bitmap); | 214 | if (e->fields.trig_mode == IOAPIC_LEVEL_TRIG) |
215 | __set_bit(e->fields.vector, | ||
216 | (unsigned long *)tmr); | ||
217 | } | ||
143 | } | 218 | } |
144 | } | 219 | } |
145 | spin_unlock(&ioapic->lock); | 220 | spin_unlock(&ioapic->lock); |
146 | } | 221 | } |
147 | EXPORT_SYMBOL_GPL(kvm_ioapic_calculate_eoi_exitmap); | ||
148 | 222 | ||
149 | void kvm_ioapic_make_eoibitmap_request(struct kvm *kvm) | 223 | #ifdef CONFIG_X86 |
224 | void kvm_vcpu_request_scan_ioapic(struct kvm *kvm) | ||
150 | { | 225 | { |
151 | struct kvm_ioapic *ioapic = kvm->arch.vioapic; | 226 | struct kvm_ioapic *ioapic = kvm->arch.vioapic; |
152 | 227 | ||
153 | if (!kvm_apic_vid_enabled(kvm) || !ioapic) | 228 | if (!ioapic) |
154 | return; | 229 | return; |
155 | kvm_make_update_eoibitmap_request(kvm); | 230 | kvm_make_scan_ioapic_request(kvm); |
156 | } | 231 | } |
232 | #else | ||
233 | void kvm_vcpu_request_scan_ioapic(struct kvm *kvm) | ||
234 | { | ||
235 | return; | ||
236 | } | ||
237 | #endif | ||
157 | 238 | ||
158 | static void ioapic_write_indirect(struct kvm_ioapic *ioapic, u32 val) | 239 | static void ioapic_write_indirect(struct kvm_ioapic *ioapic, u32 val) |
159 | { | 240 | { |
@@ -195,16 +276,17 @@ static void ioapic_write_indirect(struct kvm_ioapic *ioapic, u32 val) | |||
195 | kvm_fire_mask_notifiers(ioapic->kvm, KVM_IRQCHIP_IOAPIC, index, mask_after); | 276 | kvm_fire_mask_notifiers(ioapic->kvm, KVM_IRQCHIP_IOAPIC, index, mask_after); |
196 | if (e->fields.trig_mode == IOAPIC_LEVEL_TRIG | 277 | if (e->fields.trig_mode == IOAPIC_LEVEL_TRIG |
197 | && ioapic->irr & (1 << index)) | 278 | && ioapic->irr & (1 << index)) |
198 | ioapic_service(ioapic, index); | 279 | ioapic_service(ioapic, index, false); |
199 | kvm_ioapic_make_eoibitmap_request(ioapic->kvm); | 280 | kvm_vcpu_request_scan_ioapic(ioapic->kvm); |
200 | break; | 281 | break; |
201 | } | 282 | } |
202 | } | 283 | } |
203 | 284 | ||
204 | static int ioapic_deliver(struct kvm_ioapic *ioapic, int irq) | 285 | static int ioapic_deliver(struct kvm_ioapic *ioapic, int irq, bool line_status) |
205 | { | 286 | { |
206 | union kvm_ioapic_redirect_entry *entry = &ioapic->redirtbl[irq]; | 287 | union kvm_ioapic_redirect_entry *entry = &ioapic->redirtbl[irq]; |
207 | struct kvm_lapic_irq irqe; | 288 | struct kvm_lapic_irq irqe; |
289 | int ret; | ||
208 | 290 | ||
209 | ioapic_debug("dest=%x dest_mode=%x delivery_mode=%x " | 291 | ioapic_debug("dest=%x dest_mode=%x delivery_mode=%x " |
210 | "vector=%x trig_mode=%x\n", | 292 | "vector=%x trig_mode=%x\n", |
@@ -220,11 +302,19 @@ static int ioapic_deliver(struct kvm_ioapic *ioapic, int irq) | |||
220 | irqe.level = 1; | 302 | irqe.level = 1; |
221 | irqe.shorthand = 0; | 303 | irqe.shorthand = 0; |
222 | 304 | ||
223 | return kvm_irq_delivery_to_apic(ioapic->kvm, NULL, &irqe); | 305 | if (irq == RTC_GSI && line_status) { |
306 | BUG_ON(ioapic->rtc_status.pending_eoi != 0); | ||
307 | ret = kvm_irq_delivery_to_apic(ioapic->kvm, NULL, &irqe, | ||
308 | ioapic->rtc_status.dest_map); | ||
309 | ioapic->rtc_status.pending_eoi = ret; | ||
310 | } else | ||
311 | ret = kvm_irq_delivery_to_apic(ioapic->kvm, NULL, &irqe, NULL); | ||
312 | |||
313 | return ret; | ||
224 | } | 314 | } |
225 | 315 | ||
226 | int kvm_ioapic_set_irq(struct kvm_ioapic *ioapic, int irq, int irq_source_id, | 316 | int kvm_ioapic_set_irq(struct kvm_ioapic *ioapic, int irq, int irq_source_id, |
227 | int level) | 317 | int level, bool line_status) |
228 | { | 318 | { |
229 | u32 old_irr; | 319 | u32 old_irr; |
230 | u32 mask = 1 << irq; | 320 | u32 mask = 1 << irq; |
@@ -244,13 +334,20 @@ int kvm_ioapic_set_irq(struct kvm_ioapic *ioapic, int irq, int irq_source_id, | |||
244 | ret = 1; | 334 | ret = 1; |
245 | } else { | 335 | } else { |
246 | int edge = (entry.fields.trig_mode == IOAPIC_EDGE_TRIG); | 336 | int edge = (entry.fields.trig_mode == IOAPIC_EDGE_TRIG); |
337 | |||
338 | if (irq == RTC_GSI && line_status && | ||
339 | rtc_irq_check_coalesced(ioapic)) { | ||
340 | ret = 0; /* coalesced */ | ||
341 | goto out; | ||
342 | } | ||
247 | ioapic->irr |= mask; | 343 | ioapic->irr |= mask; |
248 | if ((edge && old_irr != ioapic->irr) || | 344 | if ((edge && old_irr != ioapic->irr) || |
249 | (!edge && !entry.fields.remote_irr)) | 345 | (!edge && !entry.fields.remote_irr)) |
250 | ret = ioapic_service(ioapic, irq); | 346 | ret = ioapic_service(ioapic, irq, line_status); |
251 | else | 347 | else |
252 | ret = 0; /* report coalesced interrupt */ | 348 | ret = 0; /* report coalesced interrupt */ |
253 | } | 349 | } |
350 | out: | ||
254 | trace_kvm_ioapic_set_irq(entry.bits, irq, ret == 0); | 351 | trace_kvm_ioapic_set_irq(entry.bits, irq, ret == 0); |
255 | spin_unlock(&ioapic->lock); | 352 | spin_unlock(&ioapic->lock); |
256 | 353 | ||
@@ -267,8 +364,8 @@ void kvm_ioapic_clear_all(struct kvm_ioapic *ioapic, int irq_source_id) | |||
267 | spin_unlock(&ioapic->lock); | 364 | spin_unlock(&ioapic->lock); |
268 | } | 365 | } |
269 | 366 | ||
270 | static void __kvm_ioapic_update_eoi(struct kvm_ioapic *ioapic, int vector, | 367 | static void __kvm_ioapic_update_eoi(struct kvm_vcpu *vcpu, |
271 | int trigger_mode) | 368 | struct kvm_ioapic *ioapic, int vector, int trigger_mode) |
272 | { | 369 | { |
273 | int i; | 370 | int i; |
274 | 371 | ||
@@ -278,6 +375,8 @@ static void __kvm_ioapic_update_eoi(struct kvm_ioapic *ioapic, int vector, | |||
278 | if (ent->fields.vector != vector) | 375 | if (ent->fields.vector != vector) |
279 | continue; | 376 | continue; |
280 | 377 | ||
378 | if (i == RTC_GSI) | ||
379 | rtc_irq_eoi(ioapic, vcpu); | ||
281 | /* | 380 | /* |
282 | * We are dropping lock while calling ack notifiers because ack | 381 | * We are dropping lock while calling ack notifiers because ack |
283 | * notifier callbacks for assigned devices call into IOAPIC | 382 | * notifier callbacks for assigned devices call into IOAPIC |
@@ -296,7 +395,7 @@ static void __kvm_ioapic_update_eoi(struct kvm_ioapic *ioapic, int vector, | |||
296 | ASSERT(ent->fields.trig_mode == IOAPIC_LEVEL_TRIG); | 395 | ASSERT(ent->fields.trig_mode == IOAPIC_LEVEL_TRIG); |
297 | ent->fields.remote_irr = 0; | 396 | ent->fields.remote_irr = 0; |
298 | if (!ent->fields.mask && (ioapic->irr & (1 << i))) | 397 | if (!ent->fields.mask && (ioapic->irr & (1 << i))) |
299 | ioapic_service(ioapic, i); | 398 | ioapic_service(ioapic, i, false); |
300 | } | 399 | } |
301 | } | 400 | } |
302 | 401 | ||
@@ -307,12 +406,12 @@ bool kvm_ioapic_handles_vector(struct kvm *kvm, int vector) | |||
307 | return test_bit(vector, ioapic->handled_vectors); | 406 | return test_bit(vector, ioapic->handled_vectors); |
308 | } | 407 | } |
309 | 408 | ||
310 | void kvm_ioapic_update_eoi(struct kvm *kvm, int vector, int trigger_mode) | 409 | void kvm_ioapic_update_eoi(struct kvm_vcpu *vcpu, int vector, int trigger_mode) |
311 | { | 410 | { |
312 | struct kvm_ioapic *ioapic = kvm->arch.vioapic; | 411 | struct kvm_ioapic *ioapic = vcpu->kvm->arch.vioapic; |
313 | 412 | ||
314 | spin_lock(&ioapic->lock); | 413 | spin_lock(&ioapic->lock); |
315 | __kvm_ioapic_update_eoi(ioapic, vector, trigger_mode); | 414 | __kvm_ioapic_update_eoi(vcpu, ioapic, vector, trigger_mode); |
316 | spin_unlock(&ioapic->lock); | 415 | spin_unlock(&ioapic->lock); |
317 | } | 416 | } |
318 | 417 | ||
@@ -410,7 +509,7 @@ static int ioapic_mmio_write(struct kvm_io_device *this, gpa_t addr, int len, | |||
410 | break; | 509 | break; |
411 | #ifdef CONFIG_IA64 | 510 | #ifdef CONFIG_IA64 |
412 | case IOAPIC_REG_EOI: | 511 | case IOAPIC_REG_EOI: |
413 | __kvm_ioapic_update_eoi(ioapic, data, IOAPIC_LEVEL_TRIG); | 512 | __kvm_ioapic_update_eoi(NULL, ioapic, data, IOAPIC_LEVEL_TRIG); |
414 | break; | 513 | break; |
415 | #endif | 514 | #endif |
416 | 515 | ||
@@ -431,6 +530,7 @@ void kvm_ioapic_reset(struct kvm_ioapic *ioapic) | |||
431 | ioapic->ioregsel = 0; | 530 | ioapic->ioregsel = 0; |
432 | ioapic->irr = 0; | 531 | ioapic->irr = 0; |
433 | ioapic->id = 0; | 532 | ioapic->id = 0; |
533 | rtc_irq_eoi_tracking_reset(ioapic); | ||
434 | update_handled_vectors(ioapic); | 534 | update_handled_vectors(ioapic); |
435 | } | 535 | } |
436 | 536 | ||
@@ -496,7 +596,8 @@ int kvm_set_ioapic(struct kvm *kvm, struct kvm_ioapic_state *state) | |||
496 | spin_lock(&ioapic->lock); | 596 | spin_lock(&ioapic->lock); |
497 | memcpy(ioapic, state, sizeof(struct kvm_ioapic_state)); | 597 | memcpy(ioapic, state, sizeof(struct kvm_ioapic_state)); |
498 | update_handled_vectors(ioapic); | 598 | update_handled_vectors(ioapic); |
499 | kvm_ioapic_make_eoibitmap_request(kvm); | 599 | kvm_vcpu_request_scan_ioapic(kvm); |
600 | kvm_rtc_eoi_tracking_restore_all(ioapic); | ||
500 | spin_unlock(&ioapic->lock); | 601 | spin_unlock(&ioapic->lock); |
501 | return 0; | 602 | return 0; |
502 | } | 603 | } |
diff --git a/virt/kvm/ioapic.h b/virt/kvm/ioapic.h
index 0400a466c50c..615d8c995c3c 100644
--- a/virt/kvm/ioapic.h
+++ b/virt/kvm/ioapic.h
@@ -34,6 +34,17 @@ struct kvm_vcpu; | |||
34 | #define IOAPIC_INIT 0x5 | 34 | #define IOAPIC_INIT 0x5 |
35 | #define IOAPIC_EXTINT 0x7 | 35 | #define IOAPIC_EXTINT 0x7 |
36 | 36 | ||
37 | #ifdef CONFIG_X86 | ||
38 | #define RTC_GSI 8 | ||
39 | #else | ||
40 | #define RTC_GSI -1U | ||
41 | #endif | ||
42 | |||
43 | struct rtc_status { | ||
44 | int pending_eoi; | ||
45 | DECLARE_BITMAP(dest_map, KVM_MAX_VCPUS); | ||
46 | }; | ||
47 | |||
37 | struct kvm_ioapic { | 48 | struct kvm_ioapic { |
38 | u64 base_address; | 49 | u64 base_address; |
39 | u32 ioregsel; | 50 | u32 ioregsel; |
@@ -47,6 +58,7 @@ struct kvm_ioapic { | |||
47 | void (*ack_notifier)(void *opaque, int irq); | 58 | void (*ack_notifier)(void *opaque, int irq); |
48 | spinlock_t lock; | 59 | spinlock_t lock; |
49 | DECLARE_BITMAP(handled_vectors, 256); | 60 | DECLARE_BITMAP(handled_vectors, 256); |
61 | struct rtc_status rtc_status; | ||
50 | }; | 62 | }; |
51 | 63 | ||
52 | #ifdef DEBUG | 64 | #ifdef DEBUG |
@@ -67,24 +79,25 @@ static inline struct kvm_ioapic *ioapic_irqchip(struct kvm *kvm) | |||
67 | return kvm->arch.vioapic; | 79 | return kvm->arch.vioapic; |
68 | } | 80 | } |
69 | 81 | ||
82 | void kvm_rtc_eoi_tracking_restore_one(struct kvm_vcpu *vcpu); | ||
70 | int kvm_apic_match_dest(struct kvm_vcpu *vcpu, struct kvm_lapic *source, | 83 | int kvm_apic_match_dest(struct kvm_vcpu *vcpu, struct kvm_lapic *source, |
71 | int short_hand, int dest, int dest_mode); | 84 | int short_hand, int dest, int dest_mode); |
72 | int kvm_apic_compare_prio(struct kvm_vcpu *vcpu1, struct kvm_vcpu *vcpu2); | 85 | int kvm_apic_compare_prio(struct kvm_vcpu *vcpu1, struct kvm_vcpu *vcpu2); |
73 | void kvm_ioapic_update_eoi(struct kvm *kvm, int vector, int trigger_mode); | 86 | void kvm_ioapic_update_eoi(struct kvm_vcpu *vcpu, int vector, |
87 | int trigger_mode); | ||
74 | bool kvm_ioapic_handles_vector(struct kvm *kvm, int vector); | 88 | bool kvm_ioapic_handles_vector(struct kvm *kvm, int vector); |
75 | int kvm_ioapic_init(struct kvm *kvm); | 89 | int kvm_ioapic_init(struct kvm *kvm); |
76 | void kvm_ioapic_destroy(struct kvm *kvm); | 90 | void kvm_ioapic_destroy(struct kvm *kvm); |
77 | int kvm_ioapic_set_irq(struct kvm_ioapic *ioapic, int irq, int irq_source_id, | 91 | int kvm_ioapic_set_irq(struct kvm_ioapic *ioapic, int irq, int irq_source_id, |
78 | int level); | 92 | int level, bool line_status); |
79 | void kvm_ioapic_clear_all(struct kvm_ioapic *ioapic, int irq_source_id); | 93 | void kvm_ioapic_clear_all(struct kvm_ioapic *ioapic, int irq_source_id); |
80 | void kvm_ioapic_reset(struct kvm_ioapic *ioapic); | 94 | void kvm_ioapic_reset(struct kvm_ioapic *ioapic); |
81 | int kvm_irq_delivery_to_apic(struct kvm *kvm, struct kvm_lapic *src, | 95 | int kvm_irq_delivery_to_apic(struct kvm *kvm, struct kvm_lapic *src, |
82 | struct kvm_lapic_irq *irq); | 96 | struct kvm_lapic_irq *irq, unsigned long *dest_map); |
83 | int kvm_get_ioapic(struct kvm *kvm, struct kvm_ioapic_state *state); | 97 | int kvm_get_ioapic(struct kvm *kvm, struct kvm_ioapic_state *state); |
84 | int kvm_set_ioapic(struct kvm *kvm, struct kvm_ioapic_state *state); | 98 | int kvm_set_ioapic(struct kvm *kvm, struct kvm_ioapic_state *state); |
85 | void kvm_ioapic_make_eoibitmap_request(struct kvm *kvm); | 99 | void kvm_vcpu_request_scan_ioapic(struct kvm *kvm); |
86 | void kvm_ioapic_calculate_eoi_exitmap(struct kvm_vcpu *vcpu, | 100 | void kvm_ioapic_scan_entry(struct kvm_vcpu *vcpu, u64 *eoi_exit_bitmap, |
87 | u64 *eoi_exit_bitmap); | 101 | u32 *tmr); |
88 | |||
89 | 102 | ||
90 | #endif | 103 | #endif |
diff --git a/virt/kvm/irq_comm.c b/virt/kvm/irq_comm.c
index e9073cf4d040..e2e6b4473a96 100644
--- a/virt/kvm/irq_comm.c
+++ b/virt/kvm/irq_comm.c
@@ -35,7 +35,8 @@ | |||
35 | #include "ioapic.h" | 35 | #include "ioapic.h" |
36 | 36 | ||
37 | static int kvm_set_pic_irq(struct kvm_kernel_irq_routing_entry *e, | 37 | static int kvm_set_pic_irq(struct kvm_kernel_irq_routing_entry *e, |
38 | struct kvm *kvm, int irq_source_id, int level) | 38 | struct kvm *kvm, int irq_source_id, int level, |
39 | bool line_status) | ||
39 | { | 40 | { |
40 | #ifdef CONFIG_X86 | 41 | #ifdef CONFIG_X86 |
41 | struct kvm_pic *pic = pic_irqchip(kvm); | 42 | struct kvm_pic *pic = pic_irqchip(kvm); |
@@ -46,10 +47,12 @@ static int kvm_set_pic_irq(struct kvm_kernel_irq_routing_entry *e, | |||
46 | } | 47 | } |
47 | 48 | ||
48 | static int kvm_set_ioapic_irq(struct kvm_kernel_irq_routing_entry *e, | 49 | static int kvm_set_ioapic_irq(struct kvm_kernel_irq_routing_entry *e, |
49 | struct kvm *kvm, int irq_source_id, int level) | 50 | struct kvm *kvm, int irq_source_id, int level, |
51 | bool line_status) | ||
50 | { | 52 | { |
51 | struct kvm_ioapic *ioapic = kvm->arch.vioapic; | 53 | struct kvm_ioapic *ioapic = kvm->arch.vioapic; |
52 | return kvm_ioapic_set_irq(ioapic, e->irqchip.pin, irq_source_id, level); | 54 | return kvm_ioapic_set_irq(ioapic, e->irqchip.pin, irq_source_id, level, |
55 | line_status); | ||
53 | } | 56 | } |
54 | 57 | ||
55 | inline static bool kvm_is_dm_lowest_prio(struct kvm_lapic_irq *irq) | 58 | inline static bool kvm_is_dm_lowest_prio(struct kvm_lapic_irq *irq) |
@@ -63,7 +66,7 @@ inline static bool kvm_is_dm_lowest_prio(struct kvm_lapic_irq *irq) | |||
63 | } | 66 | } |
64 | 67 | ||
65 | int kvm_irq_delivery_to_apic(struct kvm *kvm, struct kvm_lapic *src, | 68 | int kvm_irq_delivery_to_apic(struct kvm *kvm, struct kvm_lapic *src, |
66 | struct kvm_lapic_irq *irq) | 69 | struct kvm_lapic_irq *irq, unsigned long *dest_map) |
67 | { | 70 | { |
68 | int i, r = -1; | 71 | int i, r = -1; |
69 | struct kvm_vcpu *vcpu, *lowest = NULL; | 72 | struct kvm_vcpu *vcpu, *lowest = NULL; |
@@ -74,7 +77,7 @@ int kvm_irq_delivery_to_apic(struct kvm *kvm, struct kvm_lapic *src, | |||
74 | irq->delivery_mode = APIC_DM_FIXED; | 77 | irq->delivery_mode = APIC_DM_FIXED; |
75 | } | 78 | } |
76 | 79 | ||
77 | if (kvm_irq_delivery_to_apic_fast(kvm, src, irq, &r)) | 80 | if (kvm_irq_delivery_to_apic_fast(kvm, src, irq, &r, dest_map)) |
78 | return r; | 81 | return r; |
79 | 82 | ||
80 | kvm_for_each_vcpu(i, vcpu, kvm) { | 83 | kvm_for_each_vcpu(i, vcpu, kvm) { |
@@ -88,7 +91,7 @@ int kvm_irq_delivery_to_apic(struct kvm *kvm, struct kvm_lapic *src, | |||
88 | if (!kvm_is_dm_lowest_prio(irq)) { | 91 | if (!kvm_is_dm_lowest_prio(irq)) { |
89 | if (r < 0) | 92 | if (r < 0) |
90 | r = 0; | 93 | r = 0; |
91 | r += kvm_apic_set_irq(vcpu, irq); | 94 | r += kvm_apic_set_irq(vcpu, irq, dest_map); |
92 | } else if (kvm_lapic_enabled(vcpu)) { | 95 | } else if (kvm_lapic_enabled(vcpu)) { |
93 | if (!lowest) | 96 | if (!lowest) |
94 | lowest = vcpu; | 97 | lowest = vcpu; |
@@ -98,7 +101,7 @@ int kvm_irq_delivery_to_apic(struct kvm *kvm, struct kvm_lapic *src, | |||
98 | } | 101 | } |
99 | 102 | ||
100 | if (lowest) | 103 | if (lowest) |
101 | r = kvm_apic_set_irq(lowest, irq); | 104 | r = kvm_apic_set_irq(lowest, irq, dest_map); |
102 | 105 | ||
103 | return r; | 106 | return r; |
104 | } | 107 | } |
@@ -121,7 +124,7 @@ static inline void kvm_set_msi_irq(struct kvm_kernel_irq_routing_entry *e, | |||
121 | } | 124 | } |
122 | 125 | ||
123 | int kvm_set_msi(struct kvm_kernel_irq_routing_entry *e, | 126 | int kvm_set_msi(struct kvm_kernel_irq_routing_entry *e, |
124 | struct kvm *kvm, int irq_source_id, int level) | 127 | struct kvm *kvm, int irq_source_id, int level, bool line_status) |
125 | { | 128 | { |
126 | struct kvm_lapic_irq irq; | 129 | struct kvm_lapic_irq irq; |
127 | 130 | ||
@@ -130,7 +133,7 @@ int kvm_set_msi(struct kvm_kernel_irq_routing_entry *e, | |||
130 | 133 | ||
131 | kvm_set_msi_irq(e, &irq); | 134 | kvm_set_msi_irq(e, &irq); |
132 | 135 | ||
133 | return kvm_irq_delivery_to_apic(kvm, NULL, &irq); | 136 | return kvm_irq_delivery_to_apic(kvm, NULL, &irq, NULL); |
134 | } | 137 | } |
135 | 138 | ||
136 | 139 | ||
@@ -142,63 +145,12 @@ static int kvm_set_msi_inatomic(struct kvm_kernel_irq_routing_entry *e, | |||
142 | 145 | ||
143 | kvm_set_msi_irq(e, &irq); | 146 | kvm_set_msi_irq(e, &irq); |
144 | 147 | ||
145 | if (kvm_irq_delivery_to_apic_fast(kvm, NULL, &irq, &r)) | 148 | if (kvm_irq_delivery_to_apic_fast(kvm, NULL, &irq, &r, NULL)) |
146 | return r; | 149 | return r; |
147 | else | 150 | else |
148 | return -EWOULDBLOCK; | 151 | return -EWOULDBLOCK; |
149 | } | 152 | } |
150 | 153 | ||
151 | int kvm_send_userspace_msi(struct kvm *kvm, struct kvm_msi *msi) | ||
152 | { | ||
153 | struct kvm_kernel_irq_routing_entry route; | ||
154 | |||
155 | if (!irqchip_in_kernel(kvm) || msi->flags != 0) | ||
156 | return -EINVAL; | ||
157 | |||
158 | route.msi.address_lo = msi->address_lo; | ||
159 | route.msi.address_hi = msi->address_hi; | ||
160 | route.msi.data = msi->data; | ||
161 | |||
162 | return kvm_set_msi(&route, kvm, KVM_USERSPACE_IRQ_SOURCE_ID, 1); | ||
163 | } | ||
164 | |||
165 | /* | ||
166 | * Return value: | ||
167 | * < 0 Interrupt was ignored (masked or not delivered for other reasons) | ||
168 | * = 0 Interrupt was coalesced (previous irq is still pending) | ||
169 | * > 0 Number of CPUs interrupt was delivered to | ||
170 | */ | ||
171 | int kvm_set_irq(struct kvm *kvm, int irq_source_id, u32 irq, int level) | ||
172 | { | ||
173 | struct kvm_kernel_irq_routing_entry *e, irq_set[KVM_NR_IRQCHIPS]; | ||
174 | int ret = -1, i = 0; | ||
175 | struct kvm_irq_routing_table *irq_rt; | ||
176 | |||
177 | trace_kvm_set_irq(irq, level, irq_source_id); | ||
178 | |||
179 | /* Not possible to detect if the guest uses the PIC or the | ||
180 | * IOAPIC. So set the bit in both. The guest will ignore | ||
181 | * writes to the unused one. | ||
182 | */ | ||
183 | rcu_read_lock(); | ||
184 | irq_rt = rcu_dereference(kvm->irq_routing); | ||
185 | if (irq < irq_rt->nr_rt_entries) | ||
186 | hlist_for_each_entry(e, &irq_rt->map[irq], link) | ||
187 | irq_set[i++] = *e; | ||
188 | rcu_read_unlock(); | ||
189 | |||
190 | while(i--) { | ||
191 | int r; | ||
192 | r = irq_set[i].set(&irq_set[i], kvm, irq_source_id, level); | ||
193 | if (r < 0) | ||
194 | continue; | ||
195 | |||
196 | ret = r + ((ret < 0) ? 0 : ret); | ||
197 | } | ||
198 | |||
199 | return ret; | ||
200 | } | ||
201 | |||
202 | /* | 154 | /* |
203 | * Deliver an IRQ in an atomic context if we can, or return a failure, | 155 | * Deliver an IRQ in an atomic context if we can, or return a failure, |
204 | * user can retry in a process context. | 156 | * user can retry in a process context. |
@@ -236,63 +188,6 @@ int kvm_set_irq_inatomic(struct kvm *kvm, int irq_source_id, u32 irq, int level) | |||
236 | return ret; | 188 | return ret; |
237 | } | 189 | } |
238 | 190 | ||
239 | bool kvm_irq_has_notifier(struct kvm *kvm, unsigned irqchip, unsigned pin) | ||
240 | { | ||
241 | struct kvm_irq_ack_notifier *kian; | ||
242 | int gsi; | ||
243 | |||
244 | rcu_read_lock(); | ||
245 | gsi = rcu_dereference(kvm->irq_routing)->chip[irqchip][pin]; | ||
246 | if (gsi != -1) | ||
247 | hlist_for_each_entry_rcu(kian, &kvm->irq_ack_notifier_list, | ||
248 | link) | ||
249 | if (kian->gsi == gsi) { | ||
250 | rcu_read_unlock(); | ||
251 | return true; | ||
252 | } | ||
253 | |||
254 | rcu_read_unlock(); | ||
255 | |||
256 | return false; | ||
257 | } | ||
258 | EXPORT_SYMBOL_GPL(kvm_irq_has_notifier); | ||
259 | |||
260 | void kvm_notify_acked_irq(struct kvm *kvm, unsigned irqchip, unsigned pin) | ||
261 | { | ||
262 | struct kvm_irq_ack_notifier *kian; | ||
263 | int gsi; | ||
264 | |||
265 | trace_kvm_ack_irq(irqchip, pin); | ||
266 | |||
267 | rcu_read_lock(); | ||
268 | gsi = rcu_dereference(kvm->irq_routing)->chip[irqchip][pin]; | ||
269 | if (gsi != -1) | ||
270 | hlist_for_each_entry_rcu(kian, &kvm->irq_ack_notifier_list, | ||
271 | link) | ||
272 | if (kian->gsi == gsi) | ||
273 | kian->irq_acked(kian); | ||
274 | rcu_read_unlock(); | ||
275 | } | ||
276 | |||
277 | void kvm_register_irq_ack_notifier(struct kvm *kvm, | ||
278 | struct kvm_irq_ack_notifier *kian) | ||
279 | { | ||
280 | mutex_lock(&kvm->irq_lock); | ||
281 | hlist_add_head_rcu(&kian->link, &kvm->irq_ack_notifier_list); | ||
282 | mutex_unlock(&kvm->irq_lock); | ||
283 | kvm_ioapic_make_eoibitmap_request(kvm); | ||
284 | } | ||
285 | |||
286 | void kvm_unregister_irq_ack_notifier(struct kvm *kvm, | ||
287 | struct kvm_irq_ack_notifier *kian) | ||
288 | { | ||
289 | mutex_lock(&kvm->irq_lock); | ||
290 | hlist_del_init_rcu(&kian->link); | ||
291 | mutex_unlock(&kvm->irq_lock); | ||
292 | synchronize_rcu(); | ||
293 | kvm_ioapic_make_eoibitmap_request(kvm); | ||
294 | } | ||
295 | |||
296 | int kvm_request_irq_source_id(struct kvm *kvm) | 191 | int kvm_request_irq_source_id(struct kvm *kvm) |
297 | { | 192 | { |
298 | unsigned long *bitmap = &kvm->arch.irq_sources_bitmap; | 193 | unsigned long *bitmap = &kvm->arch.irq_sources_bitmap; |
@@ -376,34 +271,14 @@ void kvm_fire_mask_notifiers(struct kvm *kvm, unsigned irqchip, unsigned pin, | |||
376 | rcu_read_unlock(); | 271 | rcu_read_unlock(); |
377 | } | 272 | } |
378 | 273 | ||
379 | void kvm_free_irq_routing(struct kvm *kvm) | 274 | int kvm_set_routing_entry(struct kvm_irq_routing_table *rt, |
380 | { | 275 | struct kvm_kernel_irq_routing_entry *e, |
381 | /* Called only during vm destruction. Nobody can use the pointer | 276 | const struct kvm_irq_routing_entry *ue) |
382 | at this stage */ | ||
383 | kfree(kvm->irq_routing); | ||
384 | } | ||
385 | |||
386 | static int setup_routing_entry(struct kvm_irq_routing_table *rt, | ||
387 | struct kvm_kernel_irq_routing_entry *e, | ||
388 | const struct kvm_irq_routing_entry *ue) | ||
389 | { | 277 | { |
390 | int r = -EINVAL; | 278 | int r = -EINVAL; |
391 | int delta; | 279 | int delta; |
392 | unsigned max_pin; | 280 | unsigned max_pin; |
393 | struct kvm_kernel_irq_routing_entry *ei; | ||
394 | 281 | ||
395 | /* | ||
396 | * Do not allow GSI to be mapped to the same irqchip more than once. | ||
397 | * Allow only one to one mapping between GSI and MSI. | ||
398 | */ | ||
399 | hlist_for_each_entry(ei, &rt->map[ue->gsi], link) | ||
400 | if (ei->type == KVM_IRQ_ROUTING_MSI || | ||
401 | ue->type == KVM_IRQ_ROUTING_MSI || | ||
402 | ue->u.irqchip.irqchip == ei->irqchip.irqchip) | ||
403 | return r; | ||
404 | |||
405 | e->gsi = ue->gsi; | ||
406 | e->type = ue->type; | ||
407 | switch (ue->type) { | 282 | switch (ue->type) { |
408 | case KVM_IRQ_ROUTING_IRQCHIP: | 283 | case KVM_IRQ_ROUTING_IRQCHIP: |
409 | delta = 0; | 284 | delta = 0; |
@@ -440,69 +315,11 @@ static int setup_routing_entry(struct kvm_irq_routing_table *rt, | |||
440 | goto out; | 315 | goto out; |
441 | } | 316 | } |
442 | 317 | ||
443 | hlist_add_head(&e->link, &rt->map[e->gsi]); | ||
444 | r = 0; | 318 | r = 0; |
445 | out: | 319 | out: |
446 | return r; | 320 | return r; |
447 | } | 321 | } |
448 | 322 | ||
449 | |||
450 | int kvm_set_irq_routing(struct kvm *kvm, | ||
451 | const struct kvm_irq_routing_entry *ue, | ||
452 | unsigned nr, | ||
453 | unsigned flags) | ||
454 | { | ||
455 | struct kvm_irq_routing_table *new, *old; | ||
456 | u32 i, j, nr_rt_entries = 0; | ||
457 | int r; | ||
458 | |||
459 | for (i = 0; i < nr; ++i) { | ||
460 | if (ue[i].gsi >= KVM_MAX_IRQ_ROUTES) | ||
461 | return -EINVAL; | ||
462 | nr_rt_entries = max(nr_rt_entries, ue[i].gsi); | ||
463 | } | ||
464 | |||
465 | nr_rt_entries += 1; | ||
466 | |||
467 | new = kzalloc(sizeof(*new) + (nr_rt_entries * sizeof(struct hlist_head)) | ||
468 | + (nr * sizeof(struct kvm_kernel_irq_routing_entry)), | ||
469 | GFP_KERNEL); | ||
470 | |||
471 | if (!new) | ||
472 | return -ENOMEM; | ||
473 | |||
474 | new->rt_entries = (void *)&new->map[nr_rt_entries]; | ||
475 | |||
476 | new->nr_rt_entries = nr_rt_entries; | ||
477 | for (i = 0; i < 3; i++) | ||
478 | for (j = 0; j < KVM_IOAPIC_NUM_PINS; j++) | ||
479 | new->chip[i][j] = -1; | ||
480 | |||
481 | for (i = 0; i < nr; ++i) { | ||
482 | r = -EINVAL; | ||
483 | if (ue->flags) | ||
484 | goto out; | ||
485 | r = setup_routing_entry(new, &new->rt_entries[i], ue); | ||
486 | if (r) | ||
487 | goto out; | ||
488 | ++ue; | ||
489 | } | ||
490 | |||
491 | mutex_lock(&kvm->irq_lock); | ||
492 | old = kvm->irq_routing; | ||
493 | kvm_irq_routing_update(kvm, new); | ||
494 | mutex_unlock(&kvm->irq_lock); | ||
495 | |||
496 | synchronize_rcu(); | ||
497 | |||
498 | new = old; | ||
499 | r = 0; | ||
500 | |||
501 | out: | ||
502 | kfree(new); | ||
503 | return r; | ||
504 | } | ||
505 | |||
506 | #define IOAPIC_ROUTING_ENTRY(irq) \ | 323 | #define IOAPIC_ROUTING_ENTRY(irq) \ |
507 | { .gsi = irq, .type = KVM_IRQ_ROUTING_IRQCHIP, \ | 324 | { .gsi = irq, .type = KVM_IRQ_ROUTING_IRQCHIP, \ |
508 | .u.irqchip.irqchip = KVM_IRQCHIP_IOAPIC, .u.irqchip.pin = (irq) } | 325 | .u.irqchip.irqchip = KVM_IRQCHIP_IOAPIC, .u.irqchip.pin = (irq) } |
diff --git a/virt/kvm/irqchip.c b/virt/kvm/irqchip.c
new file mode 100644
index 000000000000..20dc9e4a8f6c
--- /dev/null
+++ b/virt/kvm/irqchip.c
@@ -0,0 +1,237 @@ | |||
1 | /* | ||
2 | * irqchip.c: Common API for in kernel interrupt controllers | ||
3 | * Copyright (c) 2007, Intel Corporation. | ||
4 | * Copyright 2010 Red Hat, Inc. and/or its affiliates. | ||
5 | * Copyright (c) 2013, Alexander Graf <agraf@suse.de> | ||
6 | * | ||
7 | * This program is free software; you can redistribute it and/or modify it | ||
8 | * under the terms and conditions of the GNU General Public License, | ||
9 | * version 2, as published by the Free Software Foundation. | ||
10 | * | ||
11 | * This program is distributed in the hope it will be useful, but WITHOUT | ||
12 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
13 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
14 | * more details. | ||
15 | * | ||
16 | * You should have received a copy of the GNU General Public License along with | ||
17 | * this program; if not, write to the Free Software Foundation, Inc., 59 Temple | ||
18 | * Place - Suite 330, Boston, MA 02111-1307 USA. | ||
19 | * | ||
20 | * This file is derived from virt/kvm/irq_comm.c. | ||
21 | * | ||
22 | * Authors: | ||
23 | * Yaozu (Eddie) Dong <Eddie.dong@intel.com> | ||
24 | * Alexander Graf <agraf@suse.de> | ||
25 | */ | ||
26 | |||
27 | #include <linux/kvm_host.h> | ||
28 | #include <linux/slab.h> | ||
29 | #include <linux/export.h> | ||
30 | #include <trace/events/kvm.h> | ||
31 | #include "irq.h" | ||
32 | |||
33 | bool kvm_irq_has_notifier(struct kvm *kvm, unsigned irqchip, unsigned pin) | ||
34 | { | ||
35 | struct kvm_irq_ack_notifier *kian; | ||
36 | int gsi; | ||
37 | |||
38 | rcu_read_lock(); | ||
39 | gsi = rcu_dereference(kvm->irq_routing)->chip[irqchip][pin]; | ||
40 | if (gsi != -1) | ||
41 | hlist_for_each_entry_rcu(kian, &kvm->irq_ack_notifier_list, | ||
42 | link) | ||
43 | if (kian->gsi == gsi) { | ||
44 | rcu_read_unlock(); | ||
45 | return true; | ||
46 | } | ||
47 | |||
48 | rcu_read_unlock(); | ||
49 | |||
50 | return false; | ||
51 | } | ||
52 | EXPORT_SYMBOL_GPL(kvm_irq_has_notifier); | ||
53 | |||
54 | void kvm_notify_acked_irq(struct kvm *kvm, unsigned irqchip, unsigned pin) | ||
55 | { | ||
56 | struct kvm_irq_ack_notifier *kian; | ||
57 | int gsi; | ||
58 | |||
59 | trace_kvm_ack_irq(irqchip, pin); | ||
60 | |||
61 | rcu_read_lock(); | ||
62 | gsi = rcu_dereference(kvm->irq_routing)->chip[irqchip][pin]; | ||
63 | if (gsi != -1) | ||
64 | hlist_for_each_entry_rcu(kian, &kvm->irq_ack_notifier_list, | ||
65 | link) | ||
66 | if (kian->gsi == gsi) | ||
67 | kian->irq_acked(kian); | ||
68 | rcu_read_unlock(); | ||
69 | } | ||
70 | |||
71 | void kvm_register_irq_ack_notifier(struct kvm *kvm, | ||
72 | struct kvm_irq_ack_notifier *kian) | ||
73 | { | ||
74 | mutex_lock(&kvm->irq_lock); | ||
75 | hlist_add_head_rcu(&kian->link, &kvm->irq_ack_notifier_list); | ||
76 | mutex_unlock(&kvm->irq_lock); | ||
77 | #ifdef __KVM_HAVE_IOAPIC | ||
78 | kvm_vcpu_request_scan_ioapic(kvm); | ||
79 | #endif | ||
80 | } | ||
81 | |||
82 | void kvm_unregister_irq_ack_notifier(struct kvm *kvm, | ||
83 | struct kvm_irq_ack_notifier *kian) | ||
84 | { | ||
85 | mutex_lock(&kvm->irq_lock); | ||
86 | hlist_del_init_rcu(&kian->link); | ||
87 | mutex_unlock(&kvm->irq_lock); | ||
88 | synchronize_rcu(); | ||
89 | #ifdef __KVM_HAVE_IOAPIC | ||
90 | kvm_vcpu_request_scan_ioapic(kvm); | ||
91 | #endif | ||
92 | } | ||
93 | |||
94 | int kvm_send_userspace_msi(struct kvm *kvm, struct kvm_msi *msi) | ||
95 | { | ||
96 | struct kvm_kernel_irq_routing_entry route; | ||
97 | |||
98 | if (!irqchip_in_kernel(kvm) || msi->flags != 0) | ||
99 | return -EINVAL; | ||
100 | |||
101 | route.msi.address_lo = msi->address_lo; | ||
102 | route.msi.address_hi = msi->address_hi; | ||
103 | route.msi.data = msi->data; | ||
104 | |||
105 | return kvm_set_msi(&route, kvm, KVM_USERSPACE_IRQ_SOURCE_ID, 1, false); | ||
106 | } | ||
107 | |||
108 | /* | ||
109 | * Return value: | ||
110 | * < 0 Interrupt was ignored (masked or not delivered for other reasons) | ||
111 | * = 0 Interrupt was coalesced (previous irq is still pending) | ||
112 | * > 0 Number of CPUs interrupt was delivered to | ||
113 | */ | ||
114 | int kvm_set_irq(struct kvm *kvm, int irq_source_id, u32 irq, int level, | ||
115 | bool line_status) | ||
116 | { | ||
117 | struct kvm_kernel_irq_routing_entry *e, irq_set[KVM_NR_IRQCHIPS]; | ||
118 | int ret = -1, i = 0; | ||
119 | struct kvm_irq_routing_table *irq_rt; | ||
120 | |||
121 | trace_kvm_set_irq(irq, level, irq_source_id); | ||
122 | |||
123 | /* Not possible to detect if the guest uses the PIC or the | ||
124 | * IOAPIC. So set the bit in both. The guest will ignore | ||
125 | * writes to the unused one. | ||
126 | */ | ||
127 | rcu_read_lock(); | ||
128 | irq_rt = rcu_dereference(kvm->irq_routing); | ||
129 | if (irq < irq_rt->nr_rt_entries) | ||
130 | hlist_for_each_entry(e, &irq_rt->map[irq], link) | ||
131 | irq_set[i++] = *e; | ||
132 | rcu_read_unlock(); | ||
133 | |||
134 | while(i--) { | ||
135 | int r; | ||
136 | r = irq_set[i].set(&irq_set[i], kvm, irq_source_id, level, | ||
137 | line_status); | ||
138 | if (r < 0) | ||
139 | continue; | ||
140 | |||
141 | ret = r + ((ret < 0) ? 0 : ret); | ||
142 | } | ||
143 | |||
144 | return ret; | ||
145 | } | ||
146 | |||
147 | void kvm_free_irq_routing(struct kvm *kvm) | ||
148 | { | ||
149 | /* Called only during vm destruction. Nobody can use the pointer | ||
150 | at this stage */ | ||
151 | kfree(kvm->irq_routing); | ||
152 | } | ||
153 | |||
154 | static int setup_routing_entry(struct kvm_irq_routing_table *rt, | ||
155 | struct kvm_kernel_irq_routing_entry *e, | ||
156 | const struct kvm_irq_routing_entry *ue) | ||
157 | { | ||
158 | int r = -EINVAL; | ||
159 | struct kvm_kernel_irq_routing_entry *ei; | ||
160 | |||
161 | /* | ||
162 | * Do not allow GSI to be mapped to the same irqchip more than once. | ||
163 | * Allow only one to one mapping between GSI and MSI. | ||
164 | */ | ||
165 | hlist_for_each_entry(ei, &rt->map[ue->gsi], link) | ||
166 | if (ei->type == KVM_IRQ_ROUTING_MSI || | ||
167 | ue->type == KVM_IRQ_ROUTING_MSI || | ||
168 | ue->u.irqchip.irqchip == ei->irqchip.irqchip) | ||
169 | return r; | ||
170 | |||
171 | e->gsi = ue->gsi; | ||
172 | e->type = ue->type; | ||
173 | r = kvm_set_routing_entry(rt, e, ue); | ||
174 | if (r) | ||
175 | goto out; | ||
176 | |||
177 | hlist_add_head(&e->link, &rt->map[e->gsi]); | ||
178 | r = 0; | ||
179 | out: | ||
180 | return r; | ||
181 | } | ||
182 | |||
183 | int kvm_set_irq_routing(struct kvm *kvm, | ||
184 | const struct kvm_irq_routing_entry *ue, | ||
185 | unsigned nr, | ||
186 | unsigned flags) | ||
187 | { | ||
188 | struct kvm_irq_routing_table *new, *old; | ||
189 | u32 i, j, nr_rt_entries = 0; | ||
190 | int r; | ||
191 | |||
192 | for (i = 0; i < nr; ++i) { | ||
193 | if (ue[i].gsi >= KVM_MAX_IRQ_ROUTES) | ||
194 | return -EINVAL; | ||
195 | nr_rt_entries = max(nr_rt_entries, ue[i].gsi); | ||
196 | } | ||
197 | |||
198 | nr_rt_entries += 1; | ||
199 | |||
200 | new = kzalloc(sizeof(*new) + (nr_rt_entries * sizeof(struct hlist_head)) | ||
201 | + (nr * sizeof(struct kvm_kernel_irq_routing_entry)), | ||
202 | GFP_KERNEL); | ||
203 | |||
204 | if (!new) | ||
205 | return -ENOMEM; | ||
206 | |||
207 | new->rt_entries = (void *)&new->map[nr_rt_entries]; | ||
208 | |||
209 | new->nr_rt_entries = nr_rt_entries; | ||
210 | for (i = 0; i < KVM_NR_IRQCHIPS; i++) | ||
211 | for (j = 0; j < KVM_IRQCHIP_NUM_PINS; j++) | ||
212 | new->chip[i][j] = -1; | ||
213 | |||
214 | for (i = 0; i < nr; ++i) { | ||
215 | r = -EINVAL; | ||
216 | if (ue->flags) | ||
217 | goto out; | ||
218 | r = setup_routing_entry(new, &new->rt_entries[i], ue); | ||
219 | if (r) | ||
220 | goto out; | ||
221 | ++ue; | ||
222 | } | ||
223 | |||
224 | mutex_lock(&kvm->irq_lock); | ||
225 | old = kvm->irq_routing; | ||
226 | kvm_irq_routing_update(kvm, new); | ||
227 | mutex_unlock(&kvm->irq_lock); | ||
228 | |||
229 | synchronize_rcu(); | ||
230 | |||
231 | new = old; | ||
232 | r = 0; | ||
233 | |||
234 | out: | ||
235 | kfree(new); | ||
236 | return r; | ||
237 | } | ||
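The tail of kvm_set_irq_routing() is the usual RCU publish-and-retire pattern: the new table is built off to the side, published under irq_lock by kvm_irq_routing_update(), and only freed once synchronize_rcu() guarantees that no reader inside kvm_set_irq()'s rcu_read_lock() section can still see it. Reusing the variable ("new = old") lets the shared out: label free whichever table is now unreferenced. A minimal sketch of the pattern, with illustrative names (routing_table stands in for kvm->irq_routing, update_lock for kvm->irq_lock):

#include <linux/mutex.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

static struct kvm_irq_routing_table __rcu *routing_table;
static DEFINE_MUTEX(update_lock);

/* Hedged sketch of the publish/retire sequence used above. */
static void publish_table(struct kvm_irq_routing_table *new)
{
	struct kvm_irq_routing_table *old;

	mutex_lock(&update_lock);		/* serialize concurrent updaters */
	old = rcu_dereference_protected(routing_table,
					lockdep_is_held(&update_lock));
	rcu_assign_pointer(routing_table, new);	/* readers now observe 'new' */
	mutex_unlock(&update_lock);

	synchronize_rcu();			/* wait out readers still on 'old' */
	kfree(old);				/* no reader can reach it any more */
}
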
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c index f18013f09e68..45f09362ee7b 100644 --- a/virt/kvm/kvm_main.c +++ b/virt/kvm/kvm_main.c | |||
@@ -217,9 +217,9 @@ void kvm_make_mclock_inprogress_request(struct kvm *kvm) | |||
217 | make_all_cpus_request(kvm, KVM_REQ_MCLOCK_INPROGRESS); | 217 | make_all_cpus_request(kvm, KVM_REQ_MCLOCK_INPROGRESS); |
218 | } | 218 | } |
219 | 219 | ||
220 | void kvm_make_update_eoibitmap_request(struct kvm *kvm) | 220 | void kvm_make_scan_ioapic_request(struct kvm *kvm) |
221 | { | 221 | { |
222 | make_all_cpus_request(kvm, KVM_REQ_EOIBITMAP); | 222 | make_all_cpus_request(kvm, KVM_REQ_SCAN_IOAPIC); |
223 | } | 223 | } |
224 | 224 | ||
225 | int kvm_vcpu_init(struct kvm_vcpu *vcpu, struct kvm *kvm, unsigned id) | 225 | int kvm_vcpu_init(struct kvm_vcpu *vcpu, struct kvm *kvm, unsigned id) |
@@ -244,6 +244,7 @@ int kvm_vcpu_init(struct kvm_vcpu *vcpu, struct kvm *kvm, unsigned id) | |||
244 | 244 | ||
245 | kvm_vcpu_set_in_spin_loop(vcpu, false); | 245 | kvm_vcpu_set_in_spin_loop(vcpu, false); |
246 | kvm_vcpu_set_dy_eligible(vcpu, false); | 246 | kvm_vcpu_set_dy_eligible(vcpu, false); |
247 | vcpu->preempted = false; | ||
247 | 248 | ||
248 | r = kvm_arch_vcpu_init(vcpu); | 249 | r = kvm_arch_vcpu_init(vcpu); |
249 | if (r < 0) | 250 | if (r < 0) |
@@ -503,6 +504,7 @@ static struct kvm *kvm_create_vm(unsigned long type) | |||
503 | mutex_init(&kvm->irq_lock); | 504 | mutex_init(&kvm->irq_lock); |
504 | mutex_init(&kvm->slots_lock); | 505 | mutex_init(&kvm->slots_lock); |
505 | atomic_set(&kvm->users_count, 1); | 506 | atomic_set(&kvm->users_count, 1); |
507 | INIT_LIST_HEAD(&kvm->devices); | ||
506 | 508 | ||
507 | r = kvm_init_mmu_notifier(kvm); | 509 | r = kvm_init_mmu_notifier(kvm); |
508 | if (r) | 510 | if (r) |
@@ -580,6 +582,19 @@ void kvm_free_physmem(struct kvm *kvm) | |||
580 | kfree(kvm->memslots); | 582 | kfree(kvm->memslots); |
581 | } | 583 | } |
582 | 584 | ||
585 | static void kvm_destroy_devices(struct kvm *kvm) | ||
586 | { | ||
587 | struct list_head *node, *tmp; | ||
588 | |||
589 | list_for_each_safe(node, tmp, &kvm->devices) { | ||
590 | struct kvm_device *dev = | ||
591 | list_entry(node, struct kvm_device, vm_node); | ||
592 | |||
593 | list_del(node); | ||
594 | dev->ops->destroy(dev); | ||
595 | } | ||
596 | } | ||
597 | |||
583 | static void kvm_destroy_vm(struct kvm *kvm) | 598 | static void kvm_destroy_vm(struct kvm *kvm) |
584 | { | 599 | { |
585 | int i; | 600 | int i; |
@@ -599,6 +614,7 @@ static void kvm_destroy_vm(struct kvm *kvm) | |||
599 | kvm_arch_flush_shadow_all(kvm); | 614 | kvm_arch_flush_shadow_all(kvm); |
600 | #endif | 615 | #endif |
601 | kvm_arch_destroy_vm(kvm); | 616 | kvm_arch_destroy_vm(kvm); |
617 | kvm_destroy_devices(kvm); | ||
602 | kvm_free_physmem(kvm); | 618 | kvm_free_physmem(kvm); |
603 | cleanup_srcu_struct(&kvm->srcu); | 619 | cleanup_srcu_struct(&kvm->srcu); |
604 | kvm_arch_free_vm(kvm); | 620 | kvm_arch_free_vm(kvm); |
@@ -719,24 +735,6 @@ static struct kvm_memslots *install_new_memslots(struct kvm *kvm, | |||
719 | } | 735 | } |
720 | 736 | ||
721 | /* | 737 | /* |
722 | * KVM_SET_USER_MEMORY_REGION ioctl allows the following operations: | ||
723 | * - create a new memory slot | ||
724 | * - delete an existing memory slot | ||
725 | * - modify an existing memory slot | ||
726 | * -- move it in the guest physical memory space | ||
727 | * -- just change its flags | ||
728 | * | ||
729 | * Since flags can be changed by some of these operations, the following | ||
730 | * differentiation is the best we can do for __kvm_set_memory_region(): | ||
731 | */ | ||
732 | enum kvm_mr_change { | ||
733 | KVM_MR_CREATE, | ||
734 | KVM_MR_DELETE, | ||
735 | KVM_MR_MOVE, | ||
736 | KVM_MR_FLAGS_ONLY, | ||
737 | }; | ||
738 | |||
739 | /* | ||
740 | * Allocate some memory and give it an address in the guest physical address | 738 | * Allocate some memory and give it an address in the guest physical address |
741 | * space. | 739 | * space. |
742 | * | 740 | * |
@@ -745,8 +743,7 @@ enum kvm_mr_change { | |||
745 | * Must be called holding mmap_sem for write. | 743 | * Must be called holding mmap_sem for write. |
746 | */ | 744 | */ |
747 | int __kvm_set_memory_region(struct kvm *kvm, | 745 | int __kvm_set_memory_region(struct kvm *kvm, |
748 | struct kvm_userspace_memory_region *mem, | 746 | struct kvm_userspace_memory_region *mem) |
749 | bool user_alloc) | ||
750 | { | 747 | { |
751 | int r; | 748 | int r; |
752 | gfn_t base_gfn; | 749 | gfn_t base_gfn; |
@@ -767,7 +764,7 @@ int __kvm_set_memory_region(struct kvm *kvm, | |||
767 | if (mem->guest_phys_addr & (PAGE_SIZE - 1)) | 764 | if (mem->guest_phys_addr & (PAGE_SIZE - 1)) |
768 | goto out; | 765 | goto out; |
769 | /* We can read the guest memory with __xxx_user() later on. */ | 766 | /* We can read the guest memory with __xxx_user() later on. */ |
770 | if (user_alloc && | 767 | if ((mem->slot < KVM_USER_MEM_SLOTS) && |
771 | ((mem->userspace_addr & (PAGE_SIZE - 1)) || | 768 | ((mem->userspace_addr & (PAGE_SIZE - 1)) || |
772 | !access_ok(VERIFY_WRITE, | 769 | !access_ok(VERIFY_WRITE, |
773 | (void __user *)(unsigned long)mem->userspace_addr, | 770 | (void __user *)(unsigned long)mem->userspace_addr, |
@@ -875,7 +872,7 @@ int __kvm_set_memory_region(struct kvm *kvm, | |||
875 | slots = old_memslots; | 872 | slots = old_memslots; |
876 | } | 873 | } |
877 | 874 | ||
878 | r = kvm_arch_prepare_memory_region(kvm, &new, old, mem, user_alloc); | 875 | r = kvm_arch_prepare_memory_region(kvm, &new, mem, change); |
879 | if (r) | 876 | if (r) |
880 | goto out_slots; | 877 | goto out_slots; |
881 | 878 | ||
@@ -915,7 +912,7 @@ int __kvm_set_memory_region(struct kvm *kvm, | |||
915 | 912 | ||
916 | old_memslots = install_new_memslots(kvm, slots, &new); | 913 | old_memslots = install_new_memslots(kvm, slots, &new); |
917 | 914 | ||
918 | kvm_arch_commit_memory_region(kvm, mem, old, user_alloc); | 915 | kvm_arch_commit_memory_region(kvm, mem, &old, change); |
919 | 916 | ||
920 | kvm_free_physmem_slot(&old, &new); | 917 | kvm_free_physmem_slot(&old, &new); |
921 | kfree(old_memslots); | 918 | kfree(old_memslots); |
@@ -932,26 +929,23 @@ out: | |||
932 | EXPORT_SYMBOL_GPL(__kvm_set_memory_region); | 929 | EXPORT_SYMBOL_GPL(__kvm_set_memory_region); |
933 | 930 | ||
934 | int kvm_set_memory_region(struct kvm *kvm, | 931 | int kvm_set_memory_region(struct kvm *kvm, |
935 | struct kvm_userspace_memory_region *mem, | 932 | struct kvm_userspace_memory_region *mem) |
936 | bool user_alloc) | ||
937 | { | 933 | { |
938 | int r; | 934 | int r; |
939 | 935 | ||
940 | mutex_lock(&kvm->slots_lock); | 936 | mutex_lock(&kvm->slots_lock); |
941 | r = __kvm_set_memory_region(kvm, mem, user_alloc); | 937 | r = __kvm_set_memory_region(kvm, mem); |
942 | mutex_unlock(&kvm->slots_lock); | 938 | mutex_unlock(&kvm->slots_lock); |
943 | return r; | 939 | return r; |
944 | } | 940 | } |
945 | EXPORT_SYMBOL_GPL(kvm_set_memory_region); | 941 | EXPORT_SYMBOL_GPL(kvm_set_memory_region); |
946 | 942 | ||
947 | int kvm_vm_ioctl_set_memory_region(struct kvm *kvm, | 943 | int kvm_vm_ioctl_set_memory_region(struct kvm *kvm, |
948 | struct | 944 | struct kvm_userspace_memory_region *mem) |
949 | kvm_userspace_memory_region *mem, | ||
950 | bool user_alloc) | ||
951 | { | 945 | { |
952 | if (mem->slot >= KVM_USER_MEM_SLOTS) | 946 | if (mem->slot >= KVM_USER_MEM_SLOTS) |
953 | return -EINVAL; | 947 | return -EINVAL; |
954 | return kvm_set_memory_region(kvm, mem, user_alloc); | 948 | return kvm_set_memory_region(kvm, mem); |
955 | } | 949 | } |
956 | 950 | ||
957 | int kvm_get_dirty_log(struct kvm *kvm, | 951 | int kvm_get_dirty_log(struct kvm *kvm, |
@@ -1099,7 +1093,7 @@ static int kvm_read_hva_atomic(void *data, void __user *hva, int len) | |||
1099 | return __copy_from_user_inatomic(data, hva, len); | 1093 | return __copy_from_user_inatomic(data, hva, len); |
1100 | } | 1094 | } |
1101 | 1095 | ||
1102 | int get_user_page_nowait(struct task_struct *tsk, struct mm_struct *mm, | 1096 | static int get_user_page_nowait(struct task_struct *tsk, struct mm_struct *mm, |
1103 | unsigned long start, int write, struct page **page) | 1097 | unsigned long start, int write, struct page **page) |
1104 | { | 1098 | { |
1105 | int flags = FOLL_TOUCH | FOLL_NOWAIT | FOLL_HWPOISON | FOLL_GET; | 1099 | int flags = FOLL_TOUCH | FOLL_NOWAIT | FOLL_HWPOISON | FOLL_GET; |
@@ -1719,6 +1713,7 @@ void kvm_vcpu_kick(struct kvm_vcpu *vcpu) | |||
1719 | smp_send_reschedule(cpu); | 1713 | smp_send_reschedule(cpu); |
1720 | put_cpu(); | 1714 | put_cpu(); |
1721 | } | 1715 | } |
1716 | EXPORT_SYMBOL_GPL(kvm_vcpu_kick); | ||
1722 | #endif /* !CONFIG_S390 */ | 1717 | #endif /* !CONFIG_S390 */ |
1723 | 1718 | ||
1724 | void kvm_resched(struct kvm_vcpu *vcpu) | 1719 | void kvm_resched(struct kvm_vcpu *vcpu) |
@@ -1816,6 +1811,8 @@ void kvm_vcpu_on_spin(struct kvm_vcpu *me) | |||
1816 | continue; | 1811 | continue; |
1817 | } else if (pass && i > last_boosted_vcpu) | 1812 | } else if (pass && i > last_boosted_vcpu) |
1818 | break; | 1813 | break; |
1814 | if (!ACCESS_ONCE(vcpu->preempted)) | ||
1815 | continue; | ||
1819 | if (vcpu == me) | 1816 | if (vcpu == me) |
1820 | continue; | 1817 | continue; |
1821 | if (waitqueue_active(&vcpu->wq)) | 1818 | if (waitqueue_active(&vcpu->wq)) |
@@ -2204,6 +2201,119 @@ out: | |||
2204 | } | 2201 | } |
2205 | #endif | 2202 | #endif |
2206 | 2203 | ||
2204 | static int kvm_device_ioctl_attr(struct kvm_device *dev, | ||
2205 | int (*accessor)(struct kvm_device *dev, | ||
2206 | struct kvm_device_attr *attr), | ||
2207 | unsigned long arg) | ||
2208 | { | ||
2209 | struct kvm_device_attr attr; | ||
2210 | |||
2211 | if (!accessor) | ||
2212 | return -EPERM; | ||
2213 | |||
2214 | if (copy_from_user(&attr, (void __user *)arg, sizeof(attr))) | ||
2215 | return -EFAULT; | ||
2216 | |||
2217 | return accessor(dev, &attr); | ||
2218 | } | ||
2219 | |||
2220 | static long kvm_device_ioctl(struct file *filp, unsigned int ioctl, | ||
2221 | unsigned long arg) | ||
2222 | { | ||
2223 | struct kvm_device *dev = filp->private_data; | ||
2224 | |||
2225 | switch (ioctl) { | ||
2226 | case KVM_SET_DEVICE_ATTR: | ||
2227 | return kvm_device_ioctl_attr(dev, dev->ops->set_attr, arg); | ||
2228 | case KVM_GET_DEVICE_ATTR: | ||
2229 | return kvm_device_ioctl_attr(dev, dev->ops->get_attr, arg); | ||
2230 | case KVM_HAS_DEVICE_ATTR: | ||
2231 | return kvm_device_ioctl_attr(dev, dev->ops->has_attr, arg); | ||
2232 | default: | ||
2233 | if (dev->ops->ioctl) | ||
2234 | return dev->ops->ioctl(dev, ioctl, arg); | ||
2235 | |||
2236 | return -ENOTTY; | ||
2237 | } | ||
2238 | } | ||
2239 | |||
2240 | static int kvm_device_release(struct inode *inode, struct file *filp) | ||
2241 | { | ||
2242 | struct kvm_device *dev = filp->private_data; | ||
2243 | struct kvm *kvm = dev->kvm; | ||
2244 | |||
2245 | kvm_put_kvm(kvm); | ||
2246 | return 0; | ||
2247 | } | ||
2248 | |||
2249 | static const struct file_operations kvm_device_fops = { | ||
2250 | .unlocked_ioctl = kvm_device_ioctl, | ||
2251 | #ifdef CONFIG_COMPAT | ||
2252 | .compat_ioctl = kvm_device_ioctl, | ||
2253 | #endif | ||
2254 | .release = kvm_device_release, | ||
2255 | }; | ||
2256 | |||
2257 | struct kvm_device *kvm_device_from_filp(struct file *filp) | ||
2258 | { | ||
2259 | if (filp->f_op != &kvm_device_fops) | ||
2260 | return NULL; | ||
2261 | |||
2262 | return filp->private_data; | ||
2263 | } | ||
2264 | |||
2265 | static int kvm_ioctl_create_device(struct kvm *kvm, | ||
2266 | struct kvm_create_device *cd) | ||
2267 | { | ||
2268 | struct kvm_device_ops *ops = NULL; | ||
2269 | struct kvm_device *dev; | ||
2270 | bool test = cd->flags & KVM_CREATE_DEVICE_TEST; | ||
2271 | int ret; | ||
2272 | |||
2273 | switch (cd->type) { | ||
2274 | #ifdef CONFIG_KVM_MPIC | ||
2275 | case KVM_DEV_TYPE_FSL_MPIC_20: | ||
2276 | case KVM_DEV_TYPE_FSL_MPIC_42: | ||
2277 | ops = &kvm_mpic_ops; | ||
2278 | break; | ||
2279 | #endif | ||
2280 | #ifdef CONFIG_KVM_XICS | ||
2281 | case KVM_DEV_TYPE_XICS: | ||
2282 | ops = &kvm_xics_ops; | ||
2283 | break; | ||
2284 | #endif | ||
2285 | default: | ||
2286 | return -ENODEV; | ||
2287 | } | ||
2288 | |||
2289 | if (test) | ||
2290 | return 0; | ||
2291 | |||
2292 | dev = kzalloc(sizeof(*dev), GFP_KERNEL); | ||
2293 | if (!dev) | ||
2294 | return -ENOMEM; | ||
2295 | |||
2296 | dev->ops = ops; | ||
2297 | dev->kvm = kvm; | ||
2298 | |||
2299 | ret = ops->create(dev, cd->type); | ||
2300 | if (ret < 0) { | ||
2301 | kfree(dev); | ||
2302 | return ret; | ||
2303 | } | ||
2304 | |||
2305 | ret = anon_inode_getfd(ops->name, &kvm_device_fops, dev, O_RDWR); | ||
2306 | if (ret < 0) { | ||
2307 | ops->destroy(dev); | ||
2308 | return ret; | ||
2309 | } | ||
2310 | |||
2311 | list_add(&dev->vm_node, &kvm->devices); | ||
2312 | kvm_get_kvm(kvm); | ||
2313 | cd->fd = ret; | ||
2314 | return 0; | ||
2315 | } | ||
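kvm_ioctl_create_device() is the entry point of the new device control API: KVM_CREATE_DEVICE on the vm fd instantiates an in-kernel device (or, with KVM_CREATE_DEVICE_TEST, only probes whether the type is supported) and returns an anonymous fd whose ioctls are dispatched through kvm_device_fops above. A hedged userspace sketch; create_and_configure() and the group/attribute numbers are placeholders, not real MPIC constants, while the ioctls and structures are the real uapi ones.

#include <sys/ioctl.h>
#include <linux/kvm.h>

/* Hedged sketch: create an in-kernel device, then set one attribute on it. */
static int create_and_configure(int vm_fd)
{
	struct kvm_create_device cd = {
		.type  = KVM_DEV_TYPE_FSL_MPIC_20,
		.flags = 0,			/* or KVM_CREATE_DEVICE_TEST to probe only */
	};
	struct kvm_device_attr attr;
	__u64 value = 0;

	if (ioctl(vm_fd, KVM_CREATE_DEVICE, &cd) < 0)
		return -1;			/* ENODEV: type not built into this kernel */

	attr.flags = 0;
	attr.group = 0;				/* placeholder group */
	attr.attr  = 0;				/* placeholder attribute id */
	attr.addr  = (__u64)(unsigned long)&value;	/* payload stays in userspace */

	/* Routed by kvm_device_ioctl() to ops->set_attr(). */
	return ioctl(cd.fd, KVM_SET_DEVICE_ATTR, &attr);
}

Closing the returned fd only drops the VM reference taken at creation (kvm_device_release() above); the device itself is torn down by kvm_destroy_devices() when the VM is destroyed.
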
2316 | |||
2207 | static long kvm_vm_ioctl(struct file *filp, | 2317 | static long kvm_vm_ioctl(struct file *filp, |
2208 | unsigned int ioctl, unsigned long arg) | 2318 | unsigned int ioctl, unsigned long arg) |
2209 | { | 2319 | { |
@@ -2225,7 +2335,7 @@ static long kvm_vm_ioctl(struct file *filp, | |||
2225 | sizeof kvm_userspace_mem)) | 2335 | sizeof kvm_userspace_mem)) |
2226 | goto out; | 2336 | goto out; |
2227 | 2337 | ||
2228 | r = kvm_vm_ioctl_set_memory_region(kvm, &kvm_userspace_mem, true); | 2338 | r = kvm_vm_ioctl_set_memory_region(kvm, &kvm_userspace_mem); |
2229 | break; | 2339 | break; |
2230 | } | 2340 | } |
2231 | case KVM_GET_DIRTY_LOG: { | 2341 | case KVM_GET_DIRTY_LOG: { |
@@ -2304,7 +2414,8 @@ static long kvm_vm_ioctl(struct file *filp, | |||
2304 | if (copy_from_user(&irq_event, argp, sizeof irq_event)) | 2414 | if (copy_from_user(&irq_event, argp, sizeof irq_event)) |
2305 | goto out; | 2415 | goto out; |
2306 | 2416 | ||
2307 | r = kvm_vm_ioctl_irq_line(kvm, &irq_event); | 2417 | r = kvm_vm_ioctl_irq_line(kvm, &irq_event, |
2418 | ioctl == KVM_IRQ_LINE_STATUS); | ||
2308 | if (r) | 2419 | if (r) |
2309 | goto out; | 2420 | goto out; |
2310 | 2421 | ||
@@ -2318,6 +2429,54 @@ static long kvm_vm_ioctl(struct file *filp, | |||
2318 | break; | 2429 | break; |
2319 | } | 2430 | } |
2320 | #endif | 2431 | #endif |
2432 | #ifdef CONFIG_HAVE_KVM_IRQ_ROUTING | ||
2433 | case KVM_SET_GSI_ROUTING: { | ||
2434 | struct kvm_irq_routing routing; | ||
2435 | struct kvm_irq_routing __user *urouting; | ||
2436 | struct kvm_irq_routing_entry *entries; | ||
2437 | |||
2438 | r = -EFAULT; | ||
2439 | if (copy_from_user(&routing, argp, sizeof(routing))) | ||
2440 | goto out; | ||
2441 | r = -EINVAL; | ||
2442 | if (routing.nr >= KVM_MAX_IRQ_ROUTES) | ||
2443 | goto out; | ||
2444 | if (routing.flags) | ||
2445 | goto out; | ||
2446 | r = -ENOMEM; | ||
2447 | entries = vmalloc(routing.nr * sizeof(*entries)); | ||
2448 | if (!entries) | ||
2449 | goto out; | ||
2450 | r = -EFAULT; | ||
2451 | urouting = argp; | ||
2452 | if (copy_from_user(entries, urouting->entries, | ||
2453 | routing.nr * sizeof(*entries))) | ||
2454 | goto out_free_irq_routing; | ||
2455 | r = kvm_set_irq_routing(kvm, entries, routing.nr, | ||
2456 | routing.flags); | ||
2457 | out_free_irq_routing: | ||
2458 | vfree(entries); | ||
2459 | break; | ||
2460 | } | ||
2461 | #endif /* CONFIG_HAVE_KVM_IRQ_ROUTING */ | ||
2462 | case KVM_CREATE_DEVICE: { | ||
2463 | struct kvm_create_device cd; | ||
2464 | |||
2465 | r = -EFAULT; | ||
2466 | if (copy_from_user(&cd, argp, sizeof(cd))) | ||
2467 | goto out; | ||
2468 | |||
2469 | r = kvm_ioctl_create_device(kvm, &cd); | ||
2470 | if (r) | ||
2471 | goto out; | ||
2472 | |||
2473 | r = -EFAULT; | ||
2474 | if (copy_to_user(argp, &cd, sizeof(cd))) | ||
2475 | goto out; | ||
2476 | |||
2477 | r = 0; | ||
2478 | break; | ||
2479 | } | ||
2321 | default: | 2480 | default: |
2322 | r = kvm_arch_vm_ioctl(filp, ioctl, arg); | 2481 | r = kvm_arch_vm_ioctl(filp, ioctl, arg); |
2323 | if (r == -ENOTTY) | 2482 | if (r == -ENOTTY) |
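The KVM_SET_GSI_ROUTING handler above copies a variable-length kvm_irq_routing table from userspace and feeds it to kvm_set_irq_routing(), where setup_routing_entry() rejects a GSI mapped twice to the same irqchip and enforces one-to-one GSI/MSI pairing. A hedged userspace sketch that routes a single GSI to an x86 IOAPIC pin; route_gsi() and the GSI/pin numbers are illustrative, and note that the ioctl replaces the whole table, so a real VMM describes every GSI it uses in one call.

#include <stdlib.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

/* Hedged sketch: a one-entry routing table mapping GSI 24 to IOAPIC pin 9. */
static int route_gsi(int vm_fd)
{
	size_t sz = sizeof(struct kvm_irq_routing)
		  + sizeof(struct kvm_irq_routing_entry);
	struct kvm_irq_routing *r = calloc(1, sz);
	struct kvm_irq_routing_entry *e;
	int ret;

	if (!r)
		return -1;

	e = &r->entries[0];
	r->nr   = 1;				/* r->flags must remain zero */
	e->gsi  = 24;
	e->type = KVM_IRQ_ROUTING_IRQCHIP;
	e->u.irqchip.irqchip = KVM_IRQCHIP_IOAPIC;
	e->u.irqchip.pin     = 9;

	ret = ioctl(vm_fd, KVM_SET_GSI_ROUTING, r);
	free(r);
	return ret;
}
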
@@ -2447,8 +2606,11 @@ static long kvm_dev_ioctl_check_extension_generic(long arg) | |||
2447 | #ifdef CONFIG_HAVE_KVM_MSI | 2606 | #ifdef CONFIG_HAVE_KVM_MSI |
2448 | case KVM_CAP_SIGNAL_MSI: | 2607 | case KVM_CAP_SIGNAL_MSI: |
2449 | #endif | 2608 | #endif |
2609 | #ifdef CONFIG_HAVE_KVM_IRQ_ROUTING | ||
2610 | case KVM_CAP_IRQFD_RESAMPLE: | ||
2611 | #endif | ||
2450 | return 1; | 2612 | return 1; |
2451 | #ifdef KVM_CAP_IRQ_ROUTING | 2613 | #ifdef CONFIG_HAVE_KVM_IRQ_ROUTING |
2452 | case KVM_CAP_IRQ_ROUTING: | 2614 | case KVM_CAP_IRQ_ROUTING: |
2453 | return KVM_MAX_IRQ_ROUTES; | 2615 | return KVM_MAX_IRQ_ROUTES; |
2454 | #endif | 2616 | #endif |
@@ -2618,14 +2780,6 @@ static int kvm_cpu_hotplug(struct notifier_block *notifier, unsigned long val, | |||
2618 | return NOTIFY_OK; | 2780 | return NOTIFY_OK; |
2619 | } | 2781 | } |
2620 | 2782 | ||
2621 | |||
2622 | asmlinkage void kvm_spurious_fault(void) | ||
2623 | { | ||
2624 | /* Fault while not rebooting. We want the trace. */ | ||
2625 | BUG(); | ||
2626 | } | ||
2627 | EXPORT_SYMBOL_GPL(kvm_spurious_fault); | ||
2628 | |||
2629 | static int kvm_reboot(struct notifier_block *notifier, unsigned long val, | 2783 | static int kvm_reboot(struct notifier_block *notifier, unsigned long val, |
2630 | void *v) | 2784 | void *v) |
2631 | { | 2785 | { |
@@ -2658,7 +2812,7 @@ static void kvm_io_bus_destroy(struct kvm_io_bus *bus) | |||
2658 | kfree(bus); | 2812 | kfree(bus); |
2659 | } | 2813 | } |
2660 | 2814 | ||
2661 | int kvm_io_bus_sort_cmp(const void *p1, const void *p2) | 2815 | static int kvm_io_bus_sort_cmp(const void *p1, const void *p2) |
2662 | { | 2816 | { |
2663 | const struct kvm_io_range *r1 = p1; | 2817 | const struct kvm_io_range *r1 = p1; |
2664 | const struct kvm_io_range *r2 = p2; | 2818 | const struct kvm_io_range *r2 = p2; |
@@ -2670,7 +2824,7 @@ int kvm_io_bus_sort_cmp(const void *p1, const void *p2) | |||
2670 | return 0; | 2824 | return 0; |
2671 | } | 2825 | } |
2672 | 2826 | ||
2673 | int kvm_io_bus_insert_dev(struct kvm_io_bus *bus, struct kvm_io_device *dev, | 2827 | static int kvm_io_bus_insert_dev(struct kvm_io_bus *bus, struct kvm_io_device *dev, |
2674 | gpa_t addr, int len) | 2828 | gpa_t addr, int len) |
2675 | { | 2829 | { |
2676 | bus->range[bus->dev_count++] = (struct kvm_io_range) { | 2830 | bus->range[bus->dev_count++] = (struct kvm_io_range) { |
@@ -2685,7 +2839,7 @@ int kvm_io_bus_insert_dev(struct kvm_io_bus *bus, struct kvm_io_device *dev, | |||
2685 | return 0; | 2839 | return 0; |
2686 | } | 2840 | } |
2687 | 2841 | ||
2688 | int kvm_io_bus_get_first_dev(struct kvm_io_bus *bus, | 2842 | static int kvm_io_bus_get_first_dev(struct kvm_io_bus *bus, |
2689 | gpa_t addr, int len) | 2843 | gpa_t addr, int len) |
2690 | { | 2844 | { |
2691 | struct kvm_io_range *range, key; | 2845 | struct kvm_io_range *range, key; |
@@ -2929,6 +3083,8 @@ struct kvm_vcpu *preempt_notifier_to_vcpu(struct preempt_notifier *pn) | |||
2929 | static void kvm_sched_in(struct preempt_notifier *pn, int cpu) | 3083 | static void kvm_sched_in(struct preempt_notifier *pn, int cpu) |
2930 | { | 3084 | { |
2931 | struct kvm_vcpu *vcpu = preempt_notifier_to_vcpu(pn); | 3085 | struct kvm_vcpu *vcpu = preempt_notifier_to_vcpu(pn); |
3086 | if (vcpu->preempted) | ||
3087 | vcpu->preempted = false; | ||
2932 | 3088 | ||
2933 | kvm_arch_vcpu_load(vcpu, cpu); | 3089 | kvm_arch_vcpu_load(vcpu, cpu); |
2934 | } | 3090 | } |
@@ -2938,6 +3094,8 @@ static void kvm_sched_out(struct preempt_notifier *pn, | |||
2938 | { | 3094 | { |
2939 | struct kvm_vcpu *vcpu = preempt_notifier_to_vcpu(pn); | 3095 | struct kvm_vcpu *vcpu = preempt_notifier_to_vcpu(pn); |
2940 | 3096 | ||
3097 | if (current->state == TASK_RUNNING) | ||
3098 | vcpu->preempted = true; | ||
2941 | kvm_arch_vcpu_put(vcpu); | 3099 | kvm_arch_vcpu_put(vcpu); |
2942 | } | 3100 | } |
2943 | 3101 | ||
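The new vcpu->preempted flag is maintained by the preempt notifiers just above: kvm_sched_out() sets it when the task is switched out while still TASK_RUNNING, kvm_sched_in() clears it, and kvm_vcpu_on_spin() (earlier in this diff) skips any vcpu that was never preempted, since such a vcpu is likely halted rather than holding a lock. A condensed sketch of that candidate filter; worth_boosting() is an illustrative name, the three checks come straight from the kvm_vcpu_on_spin() hunk.

/* Condensed sketch of the directed-yield boost filter. */
static bool worth_boosting(struct kvm_vcpu *vcpu, struct kvm_vcpu *me)
{
	if (!ACCESS_ONCE(vcpu->preempted))
		return false;		/* never descheduled: unlikely lock holder */
	if (vcpu == me)
		return false;		/* never yield to ourselves */
	if (waitqueue_active(&vcpu->wq))
		return false;		/* halted in the kernel, not spinning */
	return true;
}
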
@@ -2947,6 +3105,9 @@ int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align, | |||
2947 | int r; | 3105 | int r; |
2948 | int cpu; | 3106 | int cpu; |
2949 | 3107 | ||
3108 | r = kvm_irqfd_init(); | ||
3109 | if (r) | ||
3110 | goto out_irqfd; | ||
2950 | r = kvm_arch_init(opaque); | 3111 | r = kvm_arch_init(opaque); |
2951 | if (r) | 3112 | if (r) |
2952 | goto out_fail; | 3113 | goto out_fail; |
@@ -3027,6 +3188,8 @@ out_free_0a: | |||
3027 | out_free_0: | 3188 | out_free_0: |
3028 | kvm_arch_exit(); | 3189 | kvm_arch_exit(); |
3029 | out_fail: | 3190 | out_fail: |
3191 | kvm_irqfd_exit(); | ||
3192 | out_irqfd: | ||
3030 | return r; | 3193 | return r; |
3031 | } | 3194 | } |
3032 | EXPORT_SYMBOL_GPL(kvm_init); | 3195 | EXPORT_SYMBOL_GPL(kvm_init); |
@@ -3043,6 +3206,7 @@ void kvm_exit(void) | |||
3043 | on_each_cpu(hardware_disable_nolock, NULL, 1); | 3206 | on_each_cpu(hardware_disable_nolock, NULL, 1); |
3044 | kvm_arch_hardware_unsetup(); | 3207 | kvm_arch_hardware_unsetup(); |
3045 | kvm_arch_exit(); | 3208 | kvm_arch_exit(); |
3209 | kvm_irqfd_exit(); | ||
3046 | free_cpumask_var(cpus_hardware_enabled); | 3210 | free_cpumask_var(cpus_hardware_enabled); |
3047 | } | 3211 | } |
3048 | EXPORT_SYMBOL_GPL(kvm_exit); | 3212 | EXPORT_SYMBOL_GPL(kvm_exit); |