diff options
author | Linus Torvalds <torvalds@linux-foundation.org> | 2013-05-05 17:47:31 -0400 |
---|---|---|
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2013-05-05 17:47:31 -0400 |
commit | 01227a889ed56ae53aeebb9f93be9d54dd8b2de8 (patch) | |
tree | d5eba9359a9827e84d4112b84d48c54df5c5acde /virt/kvm/kvm_main.c | |
parent | 9e6879460c8edb0cd3c24c09b83d06541b5af0dc (diff) | |
parent | db6ae6158186a17165ef990bda2895ae7594b039 (diff) |
Merge tag 'kvm-3.10-1' of git://git.kernel.org/pub/scm/virt/kvm/kvm
Pull kvm updates from Gleb Natapov:
"Highlights of the updates are:
general:
- new emulated device API
- legacy device assignment is now optional
- irqfd interface is more generic and can be shared between arches
x86:
- VMCS shadow support and other nested VMX improvements
- APIC virtualization and Posted Interrupt hardware support
- Optimize mmio spte zapping
ppc:
- BookE: in-kernel MPIC emulation with irqfd support
- Book3S: in-kernel XICS emulation (incomplete)
- Book3S: HV: migration fixes
- BookE: more debug support preparation
- BookE: e6500 support
ARM:
- reworking of Hyp idmaps
s390:
- ioeventfd for virtio-ccw
And many other bug fixes, cleanups and improvements"
* tag 'kvm-3.10-1' of git://git.kernel.org/pub/scm/virt/kvm/kvm: (204 commits)
kvm: Add compat_ioctl for device control API
KVM: x86: Account for failing enable_irq_window for NMI window request
KVM: PPC: Book3S: Add API for in-kernel XICS emulation
kvm/ppc/mpic: fix missing unlock in set_base_addr()
kvm/ppc: Hold srcu lock when calling kvm_io_bus_read/write
kvm/ppc/mpic: remove users
kvm/ppc/mpic: fix mmio region lists when multiple guests used
kvm/ppc/mpic: remove default routes from documentation
kvm: KVM_CAP_IOMMU only available with device assignment
ARM: KVM: iterate over all CPUs for CPU compatibility check
KVM: ARM: Fix spelling in error message
ARM: KVM: define KVM_ARM_MAX_VCPUS unconditionally
KVM: ARM: Fix API documentation for ONE_REG encoding
ARM: KVM: promote vfp_host pointer to generic host cpu context
ARM: KVM: add architecture specific hook for capabilities
ARM: KVM: perform HYP initialization for hotplugged CPUs
ARM: KVM: switch to a dual-step HYP init code
ARM: KVM: rework HYP page table freeing
ARM: KVM: enforce maximum size for identity mapped code
ARM: KVM: move to a KVM provided HYP idmap
...
Diffstat (limited to 'virt/kvm/kvm_main.c')
-rw-r--r-- | virt/kvm/kvm_main.c | 258 |
1 files changed, 211 insertions, 47 deletions
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c index f18013f09e68..45f09362ee7b 100644 --- a/virt/kvm/kvm_main.c +++ b/virt/kvm/kvm_main.c | |||
@@ -217,9 +217,9 @@ void kvm_make_mclock_inprogress_request(struct kvm *kvm) | |||
217 | make_all_cpus_request(kvm, KVM_REQ_MCLOCK_INPROGRESS); | 217 | make_all_cpus_request(kvm, KVM_REQ_MCLOCK_INPROGRESS); |
218 | } | 218 | } |
219 | 219 | ||
220 | void kvm_make_update_eoibitmap_request(struct kvm *kvm) | 220 | void kvm_make_scan_ioapic_request(struct kvm *kvm) |
221 | { | 221 | { |
222 | make_all_cpus_request(kvm, KVM_REQ_EOIBITMAP); | 222 | make_all_cpus_request(kvm, KVM_REQ_SCAN_IOAPIC); |
223 | } | 223 | } |
224 | 224 | ||
225 | int kvm_vcpu_init(struct kvm_vcpu *vcpu, struct kvm *kvm, unsigned id) | 225 | int kvm_vcpu_init(struct kvm_vcpu *vcpu, struct kvm *kvm, unsigned id) |
@@ -244,6 +244,7 @@ int kvm_vcpu_init(struct kvm_vcpu *vcpu, struct kvm *kvm, unsigned id) | |||
244 | 244 | ||
245 | kvm_vcpu_set_in_spin_loop(vcpu, false); | 245 | kvm_vcpu_set_in_spin_loop(vcpu, false); |
246 | kvm_vcpu_set_dy_eligible(vcpu, false); | 246 | kvm_vcpu_set_dy_eligible(vcpu, false); |
247 | vcpu->preempted = false; | ||
247 | 248 | ||
248 | r = kvm_arch_vcpu_init(vcpu); | 249 | r = kvm_arch_vcpu_init(vcpu); |
249 | if (r < 0) | 250 | if (r < 0) |
@@ -503,6 +504,7 @@ static struct kvm *kvm_create_vm(unsigned long type) | |||
503 | mutex_init(&kvm->irq_lock); | 504 | mutex_init(&kvm->irq_lock); |
504 | mutex_init(&kvm->slots_lock); | 505 | mutex_init(&kvm->slots_lock); |
505 | atomic_set(&kvm->users_count, 1); | 506 | atomic_set(&kvm->users_count, 1); |
507 | INIT_LIST_HEAD(&kvm->devices); | ||
506 | 508 | ||
507 | r = kvm_init_mmu_notifier(kvm); | 509 | r = kvm_init_mmu_notifier(kvm); |
508 | if (r) | 510 | if (r) |
@@ -580,6 +582,19 @@ void kvm_free_physmem(struct kvm *kvm) | |||
580 | kfree(kvm->memslots); | 582 | kfree(kvm->memslots); |
581 | } | 583 | } |
582 | 584 | ||
585 | static void kvm_destroy_devices(struct kvm *kvm) | ||
586 | { | ||
587 | struct list_head *node, *tmp; | ||
588 | |||
589 | list_for_each_safe(node, tmp, &kvm->devices) { | ||
590 | struct kvm_device *dev = | ||
591 | list_entry(node, struct kvm_device, vm_node); | ||
592 | |||
593 | list_del(node); | ||
594 | dev->ops->destroy(dev); | ||
595 | } | ||
596 | } | ||
597 | |||
583 | static void kvm_destroy_vm(struct kvm *kvm) | 598 | static void kvm_destroy_vm(struct kvm *kvm) |
584 | { | 599 | { |
585 | int i; | 600 | int i; |
@@ -599,6 +614,7 @@ static void kvm_destroy_vm(struct kvm *kvm) | |||
599 | kvm_arch_flush_shadow_all(kvm); | 614 | kvm_arch_flush_shadow_all(kvm); |
600 | #endif | 615 | #endif |
601 | kvm_arch_destroy_vm(kvm); | 616 | kvm_arch_destroy_vm(kvm); |
617 | kvm_destroy_devices(kvm); | ||
602 | kvm_free_physmem(kvm); | 618 | kvm_free_physmem(kvm); |
603 | cleanup_srcu_struct(&kvm->srcu); | 619 | cleanup_srcu_struct(&kvm->srcu); |
604 | kvm_arch_free_vm(kvm); | 620 | kvm_arch_free_vm(kvm); |
@@ -719,24 +735,6 @@ static struct kvm_memslots *install_new_memslots(struct kvm *kvm, | |||
719 | } | 735 | } |
720 | 736 | ||
721 | /* | 737 | /* |
722 | * KVM_SET_USER_MEMORY_REGION ioctl allows the following operations: | ||
723 | * - create a new memory slot | ||
724 | * - delete an existing memory slot | ||
725 | * - modify an existing memory slot | ||
726 | * -- move it in the guest physical memory space | ||
727 | * -- just change its flags | ||
728 | * | ||
729 | * Since flags can be changed by some of these operations, the following | ||
730 | * differentiation is the best we can do for __kvm_set_memory_region(): | ||
731 | */ | ||
732 | enum kvm_mr_change { | ||
733 | KVM_MR_CREATE, | ||
734 | KVM_MR_DELETE, | ||
735 | KVM_MR_MOVE, | ||
736 | KVM_MR_FLAGS_ONLY, | ||
737 | }; | ||
738 | |||
739 | /* | ||
740 | * Allocate some memory and give it an address in the guest physical address | 738 | * Allocate some memory and give it an address in the guest physical address |
741 | * space. | 739 | * space. |
742 | * | 740 | * |
@@ -745,8 +743,7 @@ enum kvm_mr_change { | |||
745 | * Must be called holding mmap_sem for write. | 743 | * Must be called holding mmap_sem for write. |
746 | */ | 744 | */ |
747 | int __kvm_set_memory_region(struct kvm *kvm, | 745 | int __kvm_set_memory_region(struct kvm *kvm, |
748 | struct kvm_userspace_memory_region *mem, | 746 | struct kvm_userspace_memory_region *mem) |
749 | bool user_alloc) | ||
750 | { | 747 | { |
751 | int r; | 748 | int r; |
752 | gfn_t base_gfn; | 749 | gfn_t base_gfn; |
@@ -767,7 +764,7 @@ int __kvm_set_memory_region(struct kvm *kvm, | |||
767 | if (mem->guest_phys_addr & (PAGE_SIZE - 1)) | 764 | if (mem->guest_phys_addr & (PAGE_SIZE - 1)) |
768 | goto out; | 765 | goto out; |
769 | /* We can read the guest memory with __xxx_user() later on. */ | 766 | /* We can read the guest memory with __xxx_user() later on. */ |
770 | if (user_alloc && | 767 | if ((mem->slot < KVM_USER_MEM_SLOTS) && |
771 | ((mem->userspace_addr & (PAGE_SIZE - 1)) || | 768 | ((mem->userspace_addr & (PAGE_SIZE - 1)) || |
772 | !access_ok(VERIFY_WRITE, | 769 | !access_ok(VERIFY_WRITE, |
773 | (void __user *)(unsigned long)mem->userspace_addr, | 770 | (void __user *)(unsigned long)mem->userspace_addr, |
@@ -875,7 +872,7 @@ int __kvm_set_memory_region(struct kvm *kvm, | |||
875 | slots = old_memslots; | 872 | slots = old_memslots; |
876 | } | 873 | } |
877 | 874 | ||
878 | r = kvm_arch_prepare_memory_region(kvm, &new, old, mem, user_alloc); | 875 | r = kvm_arch_prepare_memory_region(kvm, &new, mem, change); |
879 | if (r) | 876 | if (r) |
880 | goto out_slots; | 877 | goto out_slots; |
881 | 878 | ||
@@ -915,7 +912,7 @@ int __kvm_set_memory_region(struct kvm *kvm, | |||
915 | 912 | ||
916 | old_memslots = install_new_memslots(kvm, slots, &new); | 913 | old_memslots = install_new_memslots(kvm, slots, &new); |
917 | 914 | ||
918 | kvm_arch_commit_memory_region(kvm, mem, old, user_alloc); | 915 | kvm_arch_commit_memory_region(kvm, mem, &old, change); |
919 | 916 | ||
920 | kvm_free_physmem_slot(&old, &new); | 917 | kvm_free_physmem_slot(&old, &new); |
921 | kfree(old_memslots); | 918 | kfree(old_memslots); |
@@ -932,26 +929,23 @@ out: | |||
932 | EXPORT_SYMBOL_GPL(__kvm_set_memory_region); | 929 | EXPORT_SYMBOL_GPL(__kvm_set_memory_region); |
933 | 930 | ||
934 | int kvm_set_memory_region(struct kvm *kvm, | 931 | int kvm_set_memory_region(struct kvm *kvm, |
935 | struct kvm_userspace_memory_region *mem, | 932 | struct kvm_userspace_memory_region *mem) |
936 | bool user_alloc) | ||
937 | { | 933 | { |
938 | int r; | 934 | int r; |
939 | 935 | ||
940 | mutex_lock(&kvm->slots_lock); | 936 | mutex_lock(&kvm->slots_lock); |
941 | r = __kvm_set_memory_region(kvm, mem, user_alloc); | 937 | r = __kvm_set_memory_region(kvm, mem); |
942 | mutex_unlock(&kvm->slots_lock); | 938 | mutex_unlock(&kvm->slots_lock); |
943 | return r; | 939 | return r; |
944 | } | 940 | } |
945 | EXPORT_SYMBOL_GPL(kvm_set_memory_region); | 941 | EXPORT_SYMBOL_GPL(kvm_set_memory_region); |
946 | 942 | ||
947 | int kvm_vm_ioctl_set_memory_region(struct kvm *kvm, | 943 | int kvm_vm_ioctl_set_memory_region(struct kvm *kvm, |
948 | struct | 944 | struct kvm_userspace_memory_region *mem) |
949 | kvm_userspace_memory_region *mem, | ||
950 | bool user_alloc) | ||
951 | { | 945 | { |
952 | if (mem->slot >= KVM_USER_MEM_SLOTS) | 946 | if (mem->slot >= KVM_USER_MEM_SLOTS) |
953 | return -EINVAL; | 947 | return -EINVAL; |
954 | return kvm_set_memory_region(kvm, mem, user_alloc); | 948 | return kvm_set_memory_region(kvm, mem); |
955 | } | 949 | } |
956 | 950 | ||
957 | int kvm_get_dirty_log(struct kvm *kvm, | 951 | int kvm_get_dirty_log(struct kvm *kvm, |
@@ -1099,7 +1093,7 @@ static int kvm_read_hva_atomic(void *data, void __user *hva, int len) | |||
1099 | return __copy_from_user_inatomic(data, hva, len); | 1093 | return __copy_from_user_inatomic(data, hva, len); |
1100 | } | 1094 | } |
1101 | 1095 | ||
1102 | int get_user_page_nowait(struct task_struct *tsk, struct mm_struct *mm, | 1096 | static int get_user_page_nowait(struct task_struct *tsk, struct mm_struct *mm, |
1103 | unsigned long start, int write, struct page **page) | 1097 | unsigned long start, int write, struct page **page) |
1104 | { | 1098 | { |
1105 | int flags = FOLL_TOUCH | FOLL_NOWAIT | FOLL_HWPOISON | FOLL_GET; | 1099 | int flags = FOLL_TOUCH | FOLL_NOWAIT | FOLL_HWPOISON | FOLL_GET; |
@@ -1719,6 +1713,7 @@ void kvm_vcpu_kick(struct kvm_vcpu *vcpu) | |||
1719 | smp_send_reschedule(cpu); | 1713 | smp_send_reschedule(cpu); |
1720 | put_cpu(); | 1714 | put_cpu(); |
1721 | } | 1715 | } |
1716 | EXPORT_SYMBOL_GPL(kvm_vcpu_kick); | ||
1722 | #endif /* !CONFIG_S390 */ | 1717 | #endif /* !CONFIG_S390 */ |
1723 | 1718 | ||
1724 | void kvm_resched(struct kvm_vcpu *vcpu) | 1719 | void kvm_resched(struct kvm_vcpu *vcpu) |
@@ -1816,6 +1811,8 @@ void kvm_vcpu_on_spin(struct kvm_vcpu *me) | |||
1816 | continue; | 1811 | continue; |
1817 | } else if (pass && i > last_boosted_vcpu) | 1812 | } else if (pass && i > last_boosted_vcpu) |
1818 | break; | 1813 | break; |
1814 | if (!ACCESS_ONCE(vcpu->preempted)) | ||
1815 | continue; | ||
1819 | if (vcpu == me) | 1816 | if (vcpu == me) |
1820 | continue; | 1817 | continue; |
1821 | if (waitqueue_active(&vcpu->wq)) | 1818 | if (waitqueue_active(&vcpu->wq)) |
@@ -2204,6 +2201,119 @@ out: | |||
2204 | } | 2201 | } |
2205 | #endif | 2202 | #endif |
2206 | 2203 | ||
2204 | static int kvm_device_ioctl_attr(struct kvm_device *dev, | ||
2205 | int (*accessor)(struct kvm_device *dev, | ||
2206 | struct kvm_device_attr *attr), | ||
2207 | unsigned long arg) | ||
2208 | { | ||
2209 | struct kvm_device_attr attr; | ||
2210 | |||
2211 | if (!accessor) | ||
2212 | return -EPERM; | ||
2213 | |||
2214 | if (copy_from_user(&attr, (void __user *)arg, sizeof(attr))) | ||
2215 | return -EFAULT; | ||
2216 | |||
2217 | return accessor(dev, &attr); | ||
2218 | } | ||
2219 | |||
2220 | static long kvm_device_ioctl(struct file *filp, unsigned int ioctl, | ||
2221 | unsigned long arg) | ||
2222 | { | ||
2223 | struct kvm_device *dev = filp->private_data; | ||
2224 | |||
2225 | switch (ioctl) { | ||
2226 | case KVM_SET_DEVICE_ATTR: | ||
2227 | return kvm_device_ioctl_attr(dev, dev->ops->set_attr, arg); | ||
2228 | case KVM_GET_DEVICE_ATTR: | ||
2229 | return kvm_device_ioctl_attr(dev, dev->ops->get_attr, arg); | ||
2230 | case KVM_HAS_DEVICE_ATTR: | ||
2231 | return kvm_device_ioctl_attr(dev, dev->ops->has_attr, arg); | ||
2232 | default: | ||
2233 | if (dev->ops->ioctl) | ||
2234 | return dev->ops->ioctl(dev, ioctl, arg); | ||
2235 | |||
2236 | return -ENOTTY; | ||
2237 | } | ||
2238 | } | ||
2239 | |||
2240 | static int kvm_device_release(struct inode *inode, struct file *filp) | ||
2241 | { | ||
2242 | struct kvm_device *dev = filp->private_data; | ||
2243 | struct kvm *kvm = dev->kvm; | ||
2244 | |||
2245 | kvm_put_kvm(kvm); | ||
2246 | return 0; | ||
2247 | } | ||
2248 | |||
2249 | static const struct file_operations kvm_device_fops = { | ||
2250 | .unlocked_ioctl = kvm_device_ioctl, | ||
2251 | #ifdef CONFIG_COMPAT | ||
2252 | .compat_ioctl = kvm_device_ioctl, | ||
2253 | #endif | ||
2254 | .release = kvm_device_release, | ||
2255 | }; | ||
2256 | |||
2257 | struct kvm_device *kvm_device_from_filp(struct file *filp) | ||
2258 | { | ||
2259 | if (filp->f_op != &kvm_device_fops) | ||
2260 | return NULL; | ||
2261 | |||
2262 | return filp->private_data; | ||
2263 | } | ||
2264 | |||
2265 | static int kvm_ioctl_create_device(struct kvm *kvm, | ||
2266 | struct kvm_create_device *cd) | ||
2267 | { | ||
2268 | struct kvm_device_ops *ops = NULL; | ||
2269 | struct kvm_device *dev; | ||
2270 | bool test = cd->flags & KVM_CREATE_DEVICE_TEST; | ||
2271 | int ret; | ||
2272 | |||
2273 | switch (cd->type) { | ||
2274 | #ifdef CONFIG_KVM_MPIC | ||
2275 | case KVM_DEV_TYPE_FSL_MPIC_20: | ||
2276 | case KVM_DEV_TYPE_FSL_MPIC_42: | ||
2277 | ops = &kvm_mpic_ops; | ||
2278 | break; | ||
2279 | #endif | ||
2280 | #ifdef CONFIG_KVM_XICS | ||
2281 | case KVM_DEV_TYPE_XICS: | ||
2282 | ops = &kvm_xics_ops; | ||
2283 | break; | ||
2284 | #endif | ||
2285 | default: | ||
2286 | return -ENODEV; | ||
2287 | } | ||
2288 | |||
2289 | if (test) | ||
2290 | return 0; | ||
2291 | |||
2292 | dev = kzalloc(sizeof(*dev), GFP_KERNEL); | ||
2293 | if (!dev) | ||
2294 | return -ENOMEM; | ||
2295 | |||
2296 | dev->ops = ops; | ||
2297 | dev->kvm = kvm; | ||
2298 | |||
2299 | ret = ops->create(dev, cd->type); | ||
2300 | if (ret < 0) { | ||
2301 | kfree(dev); | ||
2302 | return ret; | ||
2303 | } | ||
2304 | |||
2305 | ret = anon_inode_getfd(ops->name, &kvm_device_fops, dev, O_RDWR); | ||
2306 | if (ret < 0) { | ||
2307 | ops->destroy(dev); | ||
2308 | return ret; | ||
2309 | } | ||
2310 | |||
2311 | list_add(&dev->vm_node, &kvm->devices); | ||
2312 | kvm_get_kvm(kvm); | ||
2313 | cd->fd = ret; | ||
2314 | return 0; | ||
2315 | } | ||
2316 | |||
2207 | static long kvm_vm_ioctl(struct file *filp, | 2317 | static long kvm_vm_ioctl(struct file *filp, |
2208 | unsigned int ioctl, unsigned long arg) | 2318 | unsigned int ioctl, unsigned long arg) |
2209 | { | 2319 | { |
@@ -2225,7 +2335,7 @@ static long kvm_vm_ioctl(struct file *filp, | |||
2225 | sizeof kvm_userspace_mem)) | 2335 | sizeof kvm_userspace_mem)) |
2226 | goto out; | 2336 | goto out; |
2227 | 2337 | ||
2228 | r = kvm_vm_ioctl_set_memory_region(kvm, &kvm_userspace_mem, true); | 2338 | r = kvm_vm_ioctl_set_memory_region(kvm, &kvm_userspace_mem); |
2229 | break; | 2339 | break; |
2230 | } | 2340 | } |
2231 | case KVM_GET_DIRTY_LOG: { | 2341 | case KVM_GET_DIRTY_LOG: { |
@@ -2304,7 +2414,8 @@ static long kvm_vm_ioctl(struct file *filp, | |||
2304 | if (copy_from_user(&irq_event, argp, sizeof irq_event)) | 2414 | if (copy_from_user(&irq_event, argp, sizeof irq_event)) |
2305 | goto out; | 2415 | goto out; |
2306 | 2416 | ||
2307 | r = kvm_vm_ioctl_irq_line(kvm, &irq_event); | 2417 | r = kvm_vm_ioctl_irq_line(kvm, &irq_event, |
2418 | ioctl == KVM_IRQ_LINE_STATUS); | ||
2308 | if (r) | 2419 | if (r) |
2309 | goto out; | 2420 | goto out; |
2310 | 2421 | ||
@@ -2318,6 +2429,54 @@ static long kvm_vm_ioctl(struct file *filp, | |||
2318 | break; | 2429 | break; |
2319 | } | 2430 | } |
2320 | #endif | 2431 | #endif |
2432 | #ifdef CONFIG_HAVE_KVM_IRQ_ROUTING | ||
2433 | case KVM_SET_GSI_ROUTING: { | ||
2434 | struct kvm_irq_routing routing; | ||
2435 | struct kvm_irq_routing __user *urouting; | ||
2436 | struct kvm_irq_routing_entry *entries; | ||
2437 | |||
2438 | r = -EFAULT; | ||
2439 | if (copy_from_user(&routing, argp, sizeof(routing))) | ||
2440 | goto out; | ||
2441 | r = -EINVAL; | ||
2442 | if (routing.nr >= KVM_MAX_IRQ_ROUTES) | ||
2443 | goto out; | ||
2444 | if (routing.flags) | ||
2445 | goto out; | ||
2446 | r = -ENOMEM; | ||
2447 | entries = vmalloc(routing.nr * sizeof(*entries)); | ||
2448 | if (!entries) | ||
2449 | goto out; | ||
2450 | r = -EFAULT; | ||
2451 | urouting = argp; | ||
2452 | if (copy_from_user(entries, urouting->entries, | ||
2453 | routing.nr * sizeof(*entries))) | ||
2454 | goto out_free_irq_routing; | ||
2455 | r = kvm_set_irq_routing(kvm, entries, routing.nr, | ||
2456 | routing.flags); | ||
2457 | out_free_irq_routing: | ||
2458 | vfree(entries); | ||
2459 | break; | ||
2460 | } | ||
2461 | #endif /* CONFIG_HAVE_KVM_IRQ_ROUTING */ | ||
2462 | case KVM_CREATE_DEVICE: { | ||
2463 | struct kvm_create_device cd; | ||
2464 | |||
2465 | r = -EFAULT; | ||
2466 | if (copy_from_user(&cd, argp, sizeof(cd))) | ||
2467 | goto out; | ||
2468 | |||
2469 | r = kvm_ioctl_create_device(kvm, &cd); | ||
2470 | if (r) | ||
2471 | goto out; | ||
2472 | |||
2473 | r = -EFAULT; | ||
2474 | if (copy_to_user(argp, &cd, sizeof(cd))) | ||
2475 | goto out; | ||
2476 | |||
2477 | r = 0; | ||
2478 | break; | ||
2479 | } | ||
2321 | default: | 2480 | default: |
2322 | r = kvm_arch_vm_ioctl(filp, ioctl, arg); | 2481 | r = kvm_arch_vm_ioctl(filp, ioctl, arg); |
2323 | if (r == -ENOTTY) | 2482 | if (r == -ENOTTY) |
@@ -2447,8 +2606,11 @@ static long kvm_dev_ioctl_check_extension_generic(long arg) | |||
2447 | #ifdef CONFIG_HAVE_KVM_MSI | 2606 | #ifdef CONFIG_HAVE_KVM_MSI |
2448 | case KVM_CAP_SIGNAL_MSI: | 2607 | case KVM_CAP_SIGNAL_MSI: |
2449 | #endif | 2608 | #endif |
2609 | #ifdef CONFIG_HAVE_KVM_IRQ_ROUTING | ||
2610 | case KVM_CAP_IRQFD_RESAMPLE: | ||
2611 | #endif | ||
2450 | return 1; | 2612 | return 1; |
2451 | #ifdef KVM_CAP_IRQ_ROUTING | 2613 | #ifdef CONFIG_HAVE_KVM_IRQ_ROUTING |
2452 | case KVM_CAP_IRQ_ROUTING: | 2614 | case KVM_CAP_IRQ_ROUTING: |
2453 | return KVM_MAX_IRQ_ROUTES; | 2615 | return KVM_MAX_IRQ_ROUTES; |
2454 | #endif | 2616 | #endif |
@@ -2618,14 +2780,6 @@ static int kvm_cpu_hotplug(struct notifier_block *notifier, unsigned long val, | |||
2618 | return NOTIFY_OK; | 2780 | return NOTIFY_OK; |
2619 | } | 2781 | } |
2620 | 2782 | ||
2621 | |||
2622 | asmlinkage void kvm_spurious_fault(void) | ||
2623 | { | ||
2624 | /* Fault while not rebooting. We want the trace. */ | ||
2625 | BUG(); | ||
2626 | } | ||
2627 | EXPORT_SYMBOL_GPL(kvm_spurious_fault); | ||
2628 | |||
2629 | static int kvm_reboot(struct notifier_block *notifier, unsigned long val, | 2783 | static int kvm_reboot(struct notifier_block *notifier, unsigned long val, |
2630 | void *v) | 2784 | void *v) |
2631 | { | 2785 | { |
@@ -2658,7 +2812,7 @@ static void kvm_io_bus_destroy(struct kvm_io_bus *bus) | |||
2658 | kfree(bus); | 2812 | kfree(bus); |
2659 | } | 2813 | } |
2660 | 2814 | ||
2661 | int kvm_io_bus_sort_cmp(const void *p1, const void *p2) | 2815 | static int kvm_io_bus_sort_cmp(const void *p1, const void *p2) |
2662 | { | 2816 | { |
2663 | const struct kvm_io_range *r1 = p1; | 2817 | const struct kvm_io_range *r1 = p1; |
2664 | const struct kvm_io_range *r2 = p2; | 2818 | const struct kvm_io_range *r2 = p2; |
@@ -2670,7 +2824,7 @@ int kvm_io_bus_sort_cmp(const void *p1, const void *p2) | |||
2670 | return 0; | 2824 | return 0; |
2671 | } | 2825 | } |
2672 | 2826 | ||
2673 | int kvm_io_bus_insert_dev(struct kvm_io_bus *bus, struct kvm_io_device *dev, | 2827 | static int kvm_io_bus_insert_dev(struct kvm_io_bus *bus, struct kvm_io_device *dev, |
2674 | gpa_t addr, int len) | 2828 | gpa_t addr, int len) |
2675 | { | 2829 | { |
2676 | bus->range[bus->dev_count++] = (struct kvm_io_range) { | 2830 | bus->range[bus->dev_count++] = (struct kvm_io_range) { |
@@ -2685,7 +2839,7 @@ int kvm_io_bus_insert_dev(struct kvm_io_bus *bus, struct kvm_io_device *dev, | |||
2685 | return 0; | 2839 | return 0; |
2686 | } | 2840 | } |
2687 | 2841 | ||
2688 | int kvm_io_bus_get_first_dev(struct kvm_io_bus *bus, | 2842 | static int kvm_io_bus_get_first_dev(struct kvm_io_bus *bus, |
2689 | gpa_t addr, int len) | 2843 | gpa_t addr, int len) |
2690 | { | 2844 | { |
2691 | struct kvm_io_range *range, key; | 2845 | struct kvm_io_range *range, key; |
@@ -2929,6 +3083,8 @@ struct kvm_vcpu *preempt_notifier_to_vcpu(struct preempt_notifier *pn) | |||
2929 | static void kvm_sched_in(struct preempt_notifier *pn, int cpu) | 3083 | static void kvm_sched_in(struct preempt_notifier *pn, int cpu) |
2930 | { | 3084 | { |
2931 | struct kvm_vcpu *vcpu = preempt_notifier_to_vcpu(pn); | 3085 | struct kvm_vcpu *vcpu = preempt_notifier_to_vcpu(pn); |
3086 | if (vcpu->preempted) | ||
3087 | vcpu->preempted = false; | ||
2932 | 3088 | ||
2933 | kvm_arch_vcpu_load(vcpu, cpu); | 3089 | kvm_arch_vcpu_load(vcpu, cpu); |
2934 | } | 3090 | } |
@@ -2938,6 +3094,8 @@ static void kvm_sched_out(struct preempt_notifier *pn, | |||
2938 | { | 3094 | { |
2939 | struct kvm_vcpu *vcpu = preempt_notifier_to_vcpu(pn); | 3095 | struct kvm_vcpu *vcpu = preempt_notifier_to_vcpu(pn); |
2940 | 3096 | ||
3097 | if (current->state == TASK_RUNNING) | ||
3098 | vcpu->preempted = true; | ||
2941 | kvm_arch_vcpu_put(vcpu); | 3099 | kvm_arch_vcpu_put(vcpu); |
2942 | } | 3100 | } |
2943 | 3101 | ||
@@ -2947,6 +3105,9 @@ int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align, | |||
2947 | int r; | 3105 | int r; |
2948 | int cpu; | 3106 | int cpu; |
2949 | 3107 | ||
3108 | r = kvm_irqfd_init(); | ||
3109 | if (r) | ||
3110 | goto out_irqfd; | ||
2950 | r = kvm_arch_init(opaque); | 3111 | r = kvm_arch_init(opaque); |
2951 | if (r) | 3112 | if (r) |
2952 | goto out_fail; | 3113 | goto out_fail; |
@@ -3027,6 +3188,8 @@ out_free_0a: | |||
3027 | out_free_0: | 3188 | out_free_0: |
3028 | kvm_arch_exit(); | 3189 | kvm_arch_exit(); |
3029 | out_fail: | 3190 | out_fail: |
3191 | kvm_irqfd_exit(); | ||
3192 | out_irqfd: | ||
3030 | return r; | 3193 | return r; |
3031 | } | 3194 | } |
3032 | EXPORT_SYMBOL_GPL(kvm_init); | 3195 | EXPORT_SYMBOL_GPL(kvm_init); |
@@ -3043,6 +3206,7 @@ void kvm_exit(void) | |||
3043 | on_each_cpu(hardware_disable_nolock, NULL, 1); | 3206 | on_each_cpu(hardware_disable_nolock, NULL, 1); |
3044 | kvm_arch_hardware_unsetup(); | 3207 | kvm_arch_hardware_unsetup(); |
3045 | kvm_arch_exit(); | 3208 | kvm_arch_exit(); |
3209 | kvm_irqfd_exit(); | ||
3046 | free_cpumask_var(cpus_hardware_enabled); | 3210 | free_cpumask_var(cpus_hardware_enabled); |
3047 | } | 3211 | } |
3048 | EXPORT_SYMBOL_GPL(kvm_exit); | 3212 | EXPORT_SYMBOL_GPL(kvm_exit); |