Diffstat (limited to 'virt/kvm/kvm_main.c')
-rw-r--r--  virt/kvm/kvm_main.c | 630
1 file changed, 458 insertions(+), 172 deletions(-)
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 5186e728c53e..96ebc0679415 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -5,7 +5,7 @@
5 * machines without emulation or binary translation. 5 * machines without emulation or binary translation.
6 * 6 *
7 * Copyright (C) 2006 Qumranet, Inc. 7 * Copyright (C) 2006 Qumranet, Inc.
8 * Copyright 2010 Red Hat, Inc. and/or its affilates. 8 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
9 * 9 *
10 * Authors: 10 * Authors:
11 * Avi Kivity <avi@qumranet.com> 11 * Avi Kivity <avi@qumranet.com>
@@ -30,7 +30,7 @@
30#include <linux/debugfs.h> 30#include <linux/debugfs.h>
31#include <linux/highmem.h> 31#include <linux/highmem.h>
32#include <linux/file.h> 32#include <linux/file.h>
33#include <linux/sysdev.h> 33#include <linux/syscore_ops.h>
34#include <linux/cpu.h> 34#include <linux/cpu.h>
35#include <linux/sched.h> 35#include <linux/sched.h>
36#include <linux/cpumask.h> 36#include <linux/cpumask.h>
@@ -52,9 +52,9 @@
52#include <asm/io.h> 52#include <asm/io.h>
53#include <asm/uaccess.h> 53#include <asm/uaccess.h>
54#include <asm/pgtable.h> 54#include <asm/pgtable.h>
55#include <asm-generic/bitops/le.h>
56 55
57#include "coalesced_mmio.h" 56#include "coalesced_mmio.h"
57#include "async_pf.h"
58 58
59#define CREATE_TRACE_POINTS 59#define CREATE_TRACE_POINTS
60#include <trace/events/kvm.h> 60#include <trace/events/kvm.h>
@@ -68,7 +68,7 @@ MODULE_LICENSE("GPL");
68 * kvm->lock --> kvm->slots_lock --> kvm->irq_lock 68 * kvm->lock --> kvm->slots_lock --> kvm->irq_lock
69 */ 69 */
70 70
71DEFINE_SPINLOCK(kvm_lock); 71DEFINE_RAW_SPINLOCK(kvm_lock);
72LIST_HEAD(vm_list); 72LIST_HEAD(vm_list);
73 73
74static cpumask_var_t cpus_hardware_enabled; 74static cpumask_var_t cpus_hardware_enabled;
@@ -89,7 +89,8 @@ static void hardware_disable_all(void);
89 89
90static void kvm_io_bus_destroy(struct kvm_io_bus *bus); 90static void kvm_io_bus_destroy(struct kvm_io_bus *bus);
91 91
92static bool kvm_rebooting; 92bool kvm_rebooting;
93EXPORT_SYMBOL_GPL(kvm_rebooting);
93 94
94static bool largepages_enabled = true; 95static bool largepages_enabled = true;
95 96
@@ -102,8 +103,26 @@ static pfn_t fault_pfn;
102inline int kvm_is_mmio_pfn(pfn_t pfn) 103inline int kvm_is_mmio_pfn(pfn_t pfn)
103{ 104{
104 if (pfn_valid(pfn)) { 105 if (pfn_valid(pfn)) {
105 struct page *page = compound_head(pfn_to_page(pfn)); 106 int reserved;
106 return PageReserved(page); 107 struct page *tail = pfn_to_page(pfn);
108 struct page *head = compound_trans_head(tail);
109 reserved = PageReserved(head);
110 if (head != tail) {
111 /*
112 * "head" is not a dangling pointer
113 * (compound_trans_head takes care of that)
114 * but the hugepage may have been splitted
115 * from under us (and we may not hold a
116 * reference count on the head page so it can
117 * be reused before we run PageReferenced), so
118 * we've to check PageTail before returning
119 * what we just read.
120 */
121 smp_rmb();
122 if (PageTail(tail))
123 return reserved;
124 }
125 return PageReserved(tail);
107 } 126 }
108 127
109 return true; 128 return true;
@@ -117,6 +136,14 @@ void vcpu_load(struct kvm_vcpu *vcpu)
117 int cpu; 136 int cpu;
118 137
119 mutex_lock(&vcpu->mutex); 138 mutex_lock(&vcpu->mutex);
139 if (unlikely(vcpu->pid != current->pids[PIDTYPE_PID].pid)) {
140 /* The thread running this VCPU changed. */
141 struct pid *oldpid = vcpu->pid;
142 struct pid *newpid = get_task_pid(current, PIDTYPE_PID);
143 rcu_assign_pointer(vcpu->pid, newpid);
144 synchronize_rcu();
145 put_pid(oldpid);
146 }
120 cpu = get_cpu(); 147 cpu = get_cpu();
121 preempt_notifier_register(&vcpu->preempt_notifier); 148 preempt_notifier_register(&vcpu->preempt_notifier);
122 kvm_arch_vcpu_load(vcpu, cpu); 149 kvm_arch_vcpu_load(vcpu, cpu);
@@ -145,13 +172,16 @@ static bool make_all_cpus_request(struct kvm *kvm, unsigned int req)
145 172
146 zalloc_cpumask_var(&cpus, GFP_ATOMIC); 173 zalloc_cpumask_var(&cpus, GFP_ATOMIC);
147 174
148 raw_spin_lock(&kvm->requests_lock); 175 me = get_cpu();
149 me = smp_processor_id();
150 kvm_for_each_vcpu(i, vcpu, kvm) { 176 kvm_for_each_vcpu(i, vcpu, kvm) {
151 if (kvm_make_check_request(req, vcpu)) 177 kvm_make_request(req, vcpu);
152 continue;
153 cpu = vcpu->cpu; 178 cpu = vcpu->cpu;
154 if (cpus != NULL && cpu != -1 && cpu != me) 179
180 /* Set ->requests bit before we read ->mode */
181 smp_mb();
182
183 if (cpus != NULL && cpu != -1 && cpu != me &&
184 kvm_vcpu_exiting_guest_mode(vcpu) != OUTSIDE_GUEST_MODE)
155 cpumask_set_cpu(cpu, cpus); 185 cpumask_set_cpu(cpu, cpus);
156 } 186 }
157 if (unlikely(cpus == NULL)) 187 if (unlikely(cpus == NULL))
@@ -160,15 +190,19 @@ static bool make_all_cpus_request(struct kvm *kvm, unsigned int req)
160 smp_call_function_many(cpus, ack_flush, NULL, 1); 190 smp_call_function_many(cpus, ack_flush, NULL, 1);
161 else 191 else
162 called = false; 192 called = false;
163 raw_spin_unlock(&kvm->requests_lock); 193 put_cpu();
164 free_cpumask_var(cpus); 194 free_cpumask_var(cpus);
165 return called; 195 return called;
166} 196}
167 197
168void kvm_flush_remote_tlbs(struct kvm *kvm) 198void kvm_flush_remote_tlbs(struct kvm *kvm)
169{ 199{
200 int dirty_count = kvm->tlbs_dirty;
201
202 smp_mb();
170 if (make_all_cpus_request(kvm, KVM_REQ_TLB_FLUSH)) 203 if (make_all_cpus_request(kvm, KVM_REQ_TLB_FLUSH))
171 ++kvm->stat.remote_tlb_flush; 204 ++kvm->stat.remote_tlb_flush;
205 cmpxchg(&kvm->tlbs_dirty, dirty_count, 0);
172} 206}
173 207
174void kvm_reload_remote_mmus(struct kvm *kvm) 208void kvm_reload_remote_mmus(struct kvm *kvm)
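The smp_mb() added above pairs with a barrier on the VCPU entry path: the requester sets the request bit before reading vcpu->mode, while the VCPU sets IN_GUEST_MODE before rechecking pending requests, so the IPI can only be skipped when the target is guaranteed to see the request before entering the guest. A minimal sketch of that pairing, modelled with C11 atomics in userspace (assumption: the entry-path side paraphrases the arch code and is illustrative, not code from this patch):

#include <stdatomic.h>
#include <stdbool.h>

static atomic_int vcpu_mode;        /* 0 = OUTSIDE_GUEST_MODE, 1 = IN_GUEST_MODE */
static atomic_uint vcpu_requests;   /* pending request bits */

/* requester side, as in make_all_cpus_request() */
static bool need_ipi(unsigned int req)
{
        atomic_fetch_or(&vcpu_requests, 1u << req);   /* kvm_make_request() */
        atomic_thread_fence(memory_order_seq_cst);    /* the added smp_mb() */
        return atomic_load(&vcpu_mode) != 0;          /* IPI only if in guest mode */
}

/* VCPU entry side (paraphrased) */
static bool may_enter_guest(void)
{
        atomic_store(&vcpu_mode, 1);                  /* IN_GUEST_MODE */
        atomic_thread_fence(memory_order_seq_cst);    /* matching barrier */
        return atomic_load(&vcpu_requests) == 0;      /* bail out if a request slipped in */
}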
@@ -185,7 +219,9 @@ int kvm_vcpu_init(struct kvm_vcpu *vcpu, struct kvm *kvm, unsigned id)
185 vcpu->cpu = -1; 219 vcpu->cpu = -1;
186 vcpu->kvm = kvm; 220 vcpu->kvm = kvm;
187 vcpu->vcpu_id = id; 221 vcpu->vcpu_id = id;
222 vcpu->pid = NULL;
188 init_waitqueue_head(&vcpu->wq); 223 init_waitqueue_head(&vcpu->wq);
224 kvm_async_pf_vcpu_init(vcpu);
189 225
190 page = alloc_page(GFP_KERNEL | __GFP_ZERO); 226 page = alloc_page(GFP_KERNEL | __GFP_ZERO);
191 if (!page) { 227 if (!page) {
@@ -208,6 +244,7 @@ EXPORT_SYMBOL_GPL(kvm_vcpu_init);
208 244
209void kvm_vcpu_uninit(struct kvm_vcpu *vcpu) 245void kvm_vcpu_uninit(struct kvm_vcpu *vcpu)
210{ 246{
247 put_pid(vcpu->pid);
211 kvm_arch_vcpu_uninit(vcpu); 248 kvm_arch_vcpu_uninit(vcpu);
212 free_page((unsigned long)vcpu->run); 249 free_page((unsigned long)vcpu->run);
213} 250}
@@ -247,7 +284,7 @@ static void kvm_mmu_notifier_invalidate_page(struct mmu_notifier *mn,
247 idx = srcu_read_lock(&kvm->srcu); 284 idx = srcu_read_lock(&kvm->srcu);
248 spin_lock(&kvm->mmu_lock); 285 spin_lock(&kvm->mmu_lock);
249 kvm->mmu_notifier_seq++; 286 kvm->mmu_notifier_seq++;
250 need_tlb_flush = kvm_unmap_hva(kvm, address); 287 need_tlb_flush = kvm_unmap_hva(kvm, address) | kvm->tlbs_dirty;
251 spin_unlock(&kvm->mmu_lock); 288 spin_unlock(&kvm->mmu_lock);
252 srcu_read_unlock(&kvm->srcu, idx); 289 srcu_read_unlock(&kvm->srcu, idx);
253 290
@@ -291,6 +328,7 @@ static void kvm_mmu_notifier_invalidate_range_start(struct mmu_notifier *mn,
291 kvm->mmu_notifier_count++; 328 kvm->mmu_notifier_count++;
292 for (; start < end; start += PAGE_SIZE) 329 for (; start < end; start += PAGE_SIZE)
293 need_tlb_flush |= kvm_unmap_hva(kvm, start); 330 need_tlb_flush |= kvm_unmap_hva(kvm, start);
331 need_tlb_flush |= kvm->tlbs_dirty;
294 spin_unlock(&kvm->mmu_lock); 332 spin_unlock(&kvm->mmu_lock);
295 srcu_read_unlock(&kvm->srcu, idx); 333 srcu_read_unlock(&kvm->srcu, idx);
296 334
@@ -344,6 +382,22 @@ static int kvm_mmu_notifier_clear_flush_young(struct mmu_notifier *mn,
344 return young; 382 return young;
345} 383}
346 384
385static int kvm_mmu_notifier_test_young(struct mmu_notifier *mn,
386 struct mm_struct *mm,
387 unsigned long address)
388{
389 struct kvm *kvm = mmu_notifier_to_kvm(mn);
390 int young, idx;
391
392 idx = srcu_read_lock(&kvm->srcu);
393 spin_lock(&kvm->mmu_lock);
394 young = kvm_test_age_hva(kvm, address);
395 spin_unlock(&kvm->mmu_lock);
396 srcu_read_unlock(&kvm->srcu, idx);
397
398 return young;
399}
400
347static void kvm_mmu_notifier_release(struct mmu_notifier *mn, 401static void kvm_mmu_notifier_release(struct mmu_notifier *mn,
348 struct mm_struct *mm) 402 struct mm_struct *mm)
349{ 403{
@@ -360,6 +414,7 @@ static const struct mmu_notifier_ops kvm_mmu_notifier_ops = {
360 .invalidate_range_start = kvm_mmu_notifier_invalidate_range_start, 414 .invalidate_range_start = kvm_mmu_notifier_invalidate_range_start,
361 .invalidate_range_end = kvm_mmu_notifier_invalidate_range_end, 415 .invalidate_range_end = kvm_mmu_notifier_invalidate_range_end,
362 .clear_flush_young = kvm_mmu_notifier_clear_flush_young, 416 .clear_flush_young = kvm_mmu_notifier_clear_flush_young,
417 .test_young = kvm_mmu_notifier_test_young,
363 .change_pte = kvm_mmu_notifier_change_pte, 418 .change_pte = kvm_mmu_notifier_change_pte,
364 .release = kvm_mmu_notifier_release, 419 .release = kvm_mmu_notifier_release,
365}; 420};
@@ -381,11 +436,15 @@ static int kvm_init_mmu_notifier(struct kvm *kvm)
381 436
382static struct kvm *kvm_create_vm(void) 437static struct kvm *kvm_create_vm(void)
383{ 438{
384 int r = 0, i; 439 int r, i;
385 struct kvm *kvm = kvm_arch_create_vm(); 440 struct kvm *kvm = kvm_arch_alloc_vm();
386 441
387 if (IS_ERR(kvm)) 442 if (!kvm)
388 goto out; 443 return ERR_PTR(-ENOMEM);
444
445 r = kvm_arch_init_vm(kvm);
446 if (r)
447 goto out_err_nodisable;
389 448
390 r = hardware_enable_all(); 449 r = hardware_enable_all();
391 if (r) 450 if (r)
@@ -399,49 +458,61 @@ static struct kvm *kvm_create_vm(void)
399 r = -ENOMEM; 458 r = -ENOMEM;
400 kvm->memslots = kzalloc(sizeof(struct kvm_memslots), GFP_KERNEL); 459 kvm->memslots = kzalloc(sizeof(struct kvm_memslots), GFP_KERNEL);
401 if (!kvm->memslots) 460 if (!kvm->memslots)
402 goto out_err; 461 goto out_err_nosrcu;
403 if (init_srcu_struct(&kvm->srcu)) 462 if (init_srcu_struct(&kvm->srcu))
404 goto out_err; 463 goto out_err_nosrcu;
405 for (i = 0; i < KVM_NR_BUSES; i++) { 464 for (i = 0; i < KVM_NR_BUSES; i++) {
406 kvm->buses[i] = kzalloc(sizeof(struct kvm_io_bus), 465 kvm->buses[i] = kzalloc(sizeof(struct kvm_io_bus),
407 GFP_KERNEL); 466 GFP_KERNEL);
408 if (!kvm->buses[i]) { 467 if (!kvm->buses[i])
409 cleanup_srcu_struct(&kvm->srcu);
410 goto out_err; 468 goto out_err;
411 }
412 }
413
414 r = kvm_init_mmu_notifier(kvm);
415 if (r) {
416 cleanup_srcu_struct(&kvm->srcu);
417 goto out_err;
418 } 469 }
419 470
471 spin_lock_init(&kvm->mmu_lock);
420 kvm->mm = current->mm; 472 kvm->mm = current->mm;
421 atomic_inc(&kvm->mm->mm_count); 473 atomic_inc(&kvm->mm->mm_count);
422 spin_lock_init(&kvm->mmu_lock);
423 raw_spin_lock_init(&kvm->requests_lock);
424 kvm_eventfd_init(kvm); 474 kvm_eventfd_init(kvm);
425 mutex_init(&kvm->lock); 475 mutex_init(&kvm->lock);
426 mutex_init(&kvm->irq_lock); 476 mutex_init(&kvm->irq_lock);
427 mutex_init(&kvm->slots_lock); 477 mutex_init(&kvm->slots_lock);
428 atomic_set(&kvm->users_count, 1); 478 atomic_set(&kvm->users_count, 1);
429 spin_lock(&kvm_lock); 479
480 r = kvm_init_mmu_notifier(kvm);
481 if (r)
482 goto out_err;
483
484 raw_spin_lock(&kvm_lock);
430 list_add(&kvm->vm_list, &vm_list); 485 list_add(&kvm->vm_list, &vm_list);
431 spin_unlock(&kvm_lock); 486 raw_spin_unlock(&kvm_lock);
432out: 487
433 return kvm; 488 return kvm;
434 489
435out_err: 490out_err:
491 cleanup_srcu_struct(&kvm->srcu);
492out_err_nosrcu:
436 hardware_disable_all(); 493 hardware_disable_all();
437out_err_nodisable: 494out_err_nodisable:
438 for (i = 0; i < KVM_NR_BUSES; i++) 495 for (i = 0; i < KVM_NR_BUSES; i++)
439 kfree(kvm->buses[i]); 496 kfree(kvm->buses[i]);
440 kfree(kvm->memslots); 497 kfree(kvm->memslots);
441 kfree(kvm); 498 kvm_arch_free_vm(kvm);
442 return ERR_PTR(r); 499 return ERR_PTR(r);
443} 500}
444 501
502static void kvm_destroy_dirty_bitmap(struct kvm_memory_slot *memslot)
503{
504 if (!memslot->dirty_bitmap)
505 return;
506
507 if (2 * kvm_dirty_bitmap_bytes(memslot) > PAGE_SIZE)
508 vfree(memslot->dirty_bitmap_head);
509 else
510 kfree(memslot->dirty_bitmap_head);
511
512 memslot->dirty_bitmap = NULL;
513 memslot->dirty_bitmap_head = NULL;
514}
515
445/* 516/*
446 * Free any memory in @free but not in @dont. 517 * Free any memory in @free but not in @dont.
447 */ 518 */
@@ -454,7 +525,7 @@ static void kvm_free_physmem_slot(struct kvm_memory_slot *free,
454 vfree(free->rmap); 525 vfree(free->rmap);
455 526
456 if (!dont || free->dirty_bitmap != dont->dirty_bitmap) 527 if (!dont || free->dirty_bitmap != dont->dirty_bitmap)
457 vfree(free->dirty_bitmap); 528 kvm_destroy_dirty_bitmap(free);
458 529
459 530
460 for (i = 0; i < KVM_NR_PAGE_SIZES - 1; ++i) { 531 for (i = 0; i < KVM_NR_PAGE_SIZES - 1; ++i) {
@@ -465,7 +536,6 @@ static void kvm_free_physmem_slot(struct kvm_memory_slot *free,
465 } 536 }
466 537
467 free->npages = 0; 538 free->npages = 0;
468 free->dirty_bitmap = NULL;
469 free->rmap = NULL; 539 free->rmap = NULL;
470} 540}
471 541
@@ -486,9 +556,9 @@ static void kvm_destroy_vm(struct kvm *kvm)
486 struct mm_struct *mm = kvm->mm; 556 struct mm_struct *mm = kvm->mm;
487 557
488 kvm_arch_sync_events(kvm); 558 kvm_arch_sync_events(kvm);
489 spin_lock(&kvm_lock); 559 raw_spin_lock(&kvm_lock);
490 list_del(&kvm->vm_list); 560 list_del(&kvm->vm_list);
491 spin_unlock(&kvm_lock); 561 raw_spin_unlock(&kvm_lock);
492 kvm_free_irq_routing(kvm); 562 kvm_free_irq_routing(kvm);
493 for (i = 0; i < KVM_NR_BUSES; i++) 563 for (i = 0; i < KVM_NR_BUSES; i++)
494 kvm_io_bus_destroy(kvm->buses[i]); 564 kvm_io_bus_destroy(kvm->buses[i]);
@@ -499,6 +569,9 @@ static void kvm_destroy_vm(struct kvm *kvm)
499 kvm_arch_flush_shadow(kvm); 569 kvm_arch_flush_shadow(kvm);
500#endif 570#endif
501 kvm_arch_destroy_vm(kvm); 571 kvm_arch_destroy_vm(kvm);
572 kvm_free_physmem(kvm);
573 cleanup_srcu_struct(&kvm->srcu);
574 kvm_arch_free_vm(kvm);
502 hardware_disable_all(); 575 hardware_disable_all();
503 mmdrop(mm); 576 mmdrop(mm);
504} 577}
@@ -527,6 +600,29 @@ static int kvm_vm_release(struct inode *inode, struct file *filp)
527 return 0; 600 return 0;
528} 601}
529 602
603#ifndef CONFIG_S390
604/*
605 * Allocation size is twice as large as the actual dirty bitmap size.
606 * This makes it possible to do double buffering: see x86's
607 * kvm_vm_ioctl_get_dirty_log().
608 */
609static int kvm_create_dirty_bitmap(struct kvm_memory_slot *memslot)
610{
611 unsigned long dirty_bytes = 2 * kvm_dirty_bitmap_bytes(memslot);
612
613 if (dirty_bytes > PAGE_SIZE)
614 memslot->dirty_bitmap = vzalloc(dirty_bytes);
615 else
616 memslot->dirty_bitmap = kzalloc(dirty_bytes, GFP_KERNEL);
617
618 if (!memslot->dirty_bitmap)
619 return -ENOMEM;
620
621 memslot->dirty_bitmap_head = memslot->dirty_bitmap;
622 return 0;
623}
624#endif /* !CONFIG_S390 */
625
530/* 626/*
531 * Allocate some memory and give it an address in the guest physical address 627 * Allocate some memory and give it an address in the guest physical address
532 * space. 628 * space.
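The double buffering mentioned in kvm_create_dirty_bitmap() above is consumed by x86's kvm_vm_ioctl_get_dirty_log(): because the allocation is twice kvm_dirty_bitmap_bytes(), the slot can flip between the two halves of dirty_bitmap_head, handing one half to userspace while new dirty bits land in the other. A rough sketch of that flip (an illustration only, with the slots_lock/SRCU publication omitted; not the actual x86 code):

static unsigned long *flip_dirty_bitmap(struct kvm_memory_slot *memslot)
{
        unsigned long n = kvm_dirty_bitmap_bytes(memslot);
        unsigned long *old = memslot->dirty_bitmap;
        unsigned long *fresh = memslot->dirty_bitmap_head;

        /* pick the half that is not currently collecting bits */
        if (fresh == old)
                fresh += n / sizeof(long);

        memset(fresh, 0, n);
        memslot->dirty_bitmap = fresh;  /* really published under slots_lock + srcu */
        return old;                     /* caller copies this half to userspace */
}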
@@ -539,7 +635,7 @@ int __kvm_set_memory_region(struct kvm *kvm,
539 struct kvm_userspace_memory_region *mem, 635 struct kvm_userspace_memory_region *mem,
540 int user_alloc) 636 int user_alloc)
541{ 637{
542 int r, flush_shadow = 0; 638 int r;
543 gfn_t base_gfn; 639 gfn_t base_gfn;
544 unsigned long npages; 640 unsigned long npages;
545 unsigned long i; 641 unsigned long i;
@@ -553,7 +649,12 @@ int __kvm_set_memory_region(struct kvm *kvm,
553 goto out; 649 goto out;
554 if (mem->guest_phys_addr & (PAGE_SIZE - 1)) 650 if (mem->guest_phys_addr & (PAGE_SIZE - 1))
555 goto out; 651 goto out;
556 if (user_alloc && (mem->userspace_addr & (PAGE_SIZE - 1))) 652 /* We can read the guest memory with __xxx_user() later on. */
653 if (user_alloc &&
654 ((mem->userspace_addr & (PAGE_SIZE - 1)) ||
655 !access_ok(VERIFY_WRITE,
656 (void __user *)(unsigned long)mem->userspace_addr,
657 mem->memory_size)))
557 goto out; 658 goto out;
558 if (mem->slot >= KVM_MEMORY_SLOTS + KVM_PRIVATE_MEM_SLOTS) 659 if (mem->slot >= KVM_MEMORY_SLOTS + KVM_PRIVATE_MEM_SLOTS)
559 goto out; 660 goto out;
@@ -604,13 +705,11 @@ int __kvm_set_memory_region(struct kvm *kvm,
604 /* Allocate if a slot is being created */ 705 /* Allocate if a slot is being created */
605#ifndef CONFIG_S390 706#ifndef CONFIG_S390
606 if (npages && !new.rmap) { 707 if (npages && !new.rmap) {
607 new.rmap = vmalloc(npages * sizeof(*new.rmap)); 708 new.rmap = vzalloc(npages * sizeof(*new.rmap));
608 709
609 if (!new.rmap) 710 if (!new.rmap)
610 goto out_free; 711 goto out_free;
611 712
612 memset(new.rmap, 0, npages * sizeof(*new.rmap));
613
614 new.user_alloc = user_alloc; 713 new.user_alloc = user_alloc;
615 new.userspace_addr = mem->userspace_addr; 714 new.userspace_addr = mem->userspace_addr;
616 } 715 }
@@ -633,14 +732,11 @@ int __kvm_set_memory_region(struct kvm *kvm,
633 >> KVM_HPAGE_GFN_SHIFT(level)); 732 >> KVM_HPAGE_GFN_SHIFT(level));
634 lpages -= base_gfn >> KVM_HPAGE_GFN_SHIFT(level); 733 lpages -= base_gfn >> KVM_HPAGE_GFN_SHIFT(level);
635 734
636 new.lpage_info[i] = vmalloc(lpages * sizeof(*new.lpage_info[i])); 735 new.lpage_info[i] = vzalloc(lpages * sizeof(*new.lpage_info[i]));
637 736
638 if (!new.lpage_info[i]) 737 if (!new.lpage_info[i])
639 goto out_free; 738 goto out_free;
640 739
641 memset(new.lpage_info[i], 0,
642 lpages * sizeof(*new.lpage_info[i]));
643
644 if (base_gfn & (KVM_PAGES_PER_HPAGE(level) - 1)) 740 if (base_gfn & (KVM_PAGES_PER_HPAGE(level) - 1))
645 new.lpage_info[i][0].write_count = 1; 741 new.lpage_info[i][0].write_count = 1;
646 if ((base_gfn+npages) & (KVM_PAGES_PER_HPAGE(level) - 1)) 742 if ((base_gfn+npages) & (KVM_PAGES_PER_HPAGE(level) - 1))
@@ -661,15 +757,9 @@ skip_lpage:
661 757
662 /* Allocate page dirty bitmap if needed */ 758 /* Allocate page dirty bitmap if needed */
663 if ((new.flags & KVM_MEM_LOG_DIRTY_PAGES) && !new.dirty_bitmap) { 759 if ((new.flags & KVM_MEM_LOG_DIRTY_PAGES) && !new.dirty_bitmap) {
664 unsigned long dirty_bytes = kvm_dirty_bitmap_bytes(&new); 760 if (kvm_create_dirty_bitmap(&new) < 0)
665
666 new.dirty_bitmap = vmalloc(dirty_bytes);
667 if (!new.dirty_bitmap)
668 goto out_free; 761 goto out_free;
669 memset(new.dirty_bitmap, 0, dirty_bytes);
670 /* destroy any largepage mappings for dirty tracking */ 762 /* destroy any largepage mappings for dirty tracking */
671 if (old.npages)
672 flush_shadow = 1;
673 } 763 }
674#else /* not defined CONFIG_S390 */ 764#else /* not defined CONFIG_S390 */
675 new.user_alloc = user_alloc; 765 new.user_alloc = user_alloc;
@@ -685,6 +775,7 @@ skip_lpage:
685 memcpy(slots, kvm->memslots, sizeof(struct kvm_memslots)); 775 memcpy(slots, kvm->memslots, sizeof(struct kvm_memslots));
686 if (mem->slot >= slots->nmemslots) 776 if (mem->slot >= slots->nmemslots)
687 slots->nmemslots = mem->slot + 1; 777 slots->nmemslots = mem->slot + 1;
778 slots->generation++;
688 slots->memslots[mem->slot].flags |= KVM_MEMSLOT_INVALID; 779 slots->memslots[mem->slot].flags |= KVM_MEMSLOT_INVALID;
689 780
690 old_memslots = kvm->memslots; 781 old_memslots = kvm->memslots;
@@ -705,14 +796,12 @@ skip_lpage:
705 if (r) 796 if (r)
706 goto out_free; 797 goto out_free;
707 798
708#ifdef CONFIG_DMAR
709 /* map the pages in iommu page table */ 799 /* map the pages in iommu page table */
710 if (npages) { 800 if (npages) {
711 r = kvm_iommu_map_pages(kvm, &new); 801 r = kvm_iommu_map_pages(kvm, &new);
712 if (r) 802 if (r)
713 goto out_free; 803 goto out_free;
714 } 804 }
715#endif
716 805
717 r = -ENOMEM; 806 r = -ENOMEM;
718 slots = kzalloc(sizeof(struct kvm_memslots), GFP_KERNEL); 807 slots = kzalloc(sizeof(struct kvm_memslots), GFP_KERNEL);
@@ -721,6 +810,7 @@ skip_lpage:
721 memcpy(slots, kvm->memslots, sizeof(struct kvm_memslots)); 810 memcpy(slots, kvm->memslots, sizeof(struct kvm_memslots));
722 if (mem->slot >= slots->nmemslots) 811 if (mem->slot >= slots->nmemslots)
723 slots->nmemslots = mem->slot + 1; 812 slots->nmemslots = mem->slot + 1;
813 slots->generation++;
724 814
725 /* actual memory is freed via old in kvm_free_physmem_slot below */ 815 /* actual memory is freed via old in kvm_free_physmem_slot below */
726 if (!npages) { 816 if (!npages) {
@@ -740,9 +830,6 @@ skip_lpage:
740 kvm_free_physmem_slot(&old, &new); 830 kvm_free_physmem_slot(&old, &new);
741 kfree(old_memslots); 831 kfree(old_memslots);
742 832
743 if (flush_shadow)
744 kvm_arch_flush_shadow(kvm);
745
746 return 0; 833 return 0;
747 834
748out_free: 835out_free:
@@ -851,10 +938,10 @@ int kvm_is_error_hva(unsigned long addr)
851} 938}
852EXPORT_SYMBOL_GPL(kvm_is_error_hva); 939EXPORT_SYMBOL_GPL(kvm_is_error_hva);
853 940
854struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn) 941static struct kvm_memory_slot *__gfn_to_memslot(struct kvm_memslots *slots,
942 gfn_t gfn)
855{ 943{
856 int i; 944 int i;
857 struct kvm_memslots *slots = kvm_memslots(kvm);
858 945
859 for (i = 0; i < slots->nmemslots; ++i) { 946 for (i = 0; i < slots->nmemslots; ++i) {
860 struct kvm_memory_slot *memslot = &slots->memslots[i]; 947 struct kvm_memory_slot *memslot = &slots->memslots[i];
@@ -865,6 +952,11 @@ struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn)
865 } 952 }
866 return NULL; 953 return NULL;
867} 954}
955
956struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn)
957{
958 return __gfn_to_memslot(kvm_memslots(kvm), gfn);
959}
868EXPORT_SYMBOL_GPL(gfn_to_memslot); 960EXPORT_SYMBOL_GPL(gfn_to_memslot);
869 961
870int kvm_is_visible_gfn(struct kvm *kvm, gfn_t gfn) 962int kvm_is_visible_gfn(struct kvm *kvm, gfn_t gfn)
@@ -910,97 +1002,203 @@ out:
910 return size; 1002 return size;
911} 1003}
912 1004
913int memslot_id(struct kvm *kvm, gfn_t gfn) 1005static unsigned long gfn_to_hva_many(struct kvm_memory_slot *slot, gfn_t gfn,
1006 gfn_t *nr_pages)
914{ 1007{
915 int i; 1008 if (!slot || slot->flags & KVM_MEMSLOT_INVALID)
916 struct kvm_memslots *slots = kvm_memslots(kvm); 1009 return bad_hva();
917 struct kvm_memory_slot *memslot = NULL;
918 1010
919 for (i = 0; i < slots->nmemslots; ++i) { 1011 if (nr_pages)
920 memslot = &slots->memslots[i]; 1012 *nr_pages = slot->npages - (gfn - slot->base_gfn);
921 1013
922 if (gfn >= memslot->base_gfn 1014 return gfn_to_hva_memslot(slot, gfn);
923 && gfn < memslot->base_gfn + memslot->npages) 1015}
924 break;
925 }
926 1016
927 return memslot - slots->memslots; 1017unsigned long gfn_to_hva(struct kvm *kvm, gfn_t gfn)
1018{
1019 return gfn_to_hva_many(gfn_to_memslot(kvm, gfn), gfn, NULL);
928} 1020}
1021EXPORT_SYMBOL_GPL(gfn_to_hva);
929 1022
930static unsigned long gfn_to_hva_memslot(struct kvm_memory_slot *slot, gfn_t gfn) 1023static pfn_t get_fault_pfn(void)
931{ 1024{
932 return slot->userspace_addr + (gfn - slot->base_gfn) * PAGE_SIZE; 1025 get_page(fault_page);
1026 return fault_pfn;
933} 1027}
934 1028
935unsigned long gfn_to_hva(struct kvm *kvm, gfn_t gfn) 1029int get_user_page_nowait(struct task_struct *tsk, struct mm_struct *mm,
1030 unsigned long start, int write, struct page **page)
936{ 1031{
937 struct kvm_memory_slot *slot; 1032 int flags = FOLL_TOUCH | FOLL_NOWAIT | FOLL_HWPOISON | FOLL_GET;
938 1033
939 slot = gfn_to_memslot(kvm, gfn); 1034 if (write)
940 if (!slot || slot->flags & KVM_MEMSLOT_INVALID) 1035 flags |= FOLL_WRITE;
941 return bad_hva(); 1036
942 return gfn_to_hva_memslot(slot, gfn); 1037 return __get_user_pages(tsk, mm, start, 1, flags, page, NULL, NULL);
943} 1038}
944EXPORT_SYMBOL_GPL(gfn_to_hva);
945 1039
946static pfn_t hva_to_pfn(struct kvm *kvm, unsigned long addr) 1040static inline int check_user_page_hwpoison(unsigned long addr)
1041{
1042 int rc, flags = FOLL_TOUCH | FOLL_HWPOISON | FOLL_WRITE;
1043
1044 rc = __get_user_pages(current, current->mm, addr, 1,
1045 flags, NULL, NULL, NULL);
1046 return rc == -EHWPOISON;
1047}
1048
1049static pfn_t hva_to_pfn(struct kvm *kvm, unsigned long addr, bool atomic,
1050 bool *async, bool write_fault, bool *writable)
947{ 1051{
948 struct page *page[1]; 1052 struct page *page[1];
949 int npages; 1053 int npages = 0;
950 pfn_t pfn; 1054 pfn_t pfn;
951 1055
952 might_sleep(); 1056 /* we can do it either atomically or asynchronously, not both */
1057 BUG_ON(atomic && async);
1058
1059 BUG_ON(!write_fault && !writable);
953 1060
954 npages = get_user_pages_fast(addr, 1, 1, page); 1061 if (writable)
1062 *writable = true;
1063
1064 if (atomic || async)
1065 npages = __get_user_pages_fast(addr, 1, 1, page);
1066
1067 if (unlikely(npages != 1) && !atomic) {
1068 might_sleep();
1069
1070 if (writable)
1071 *writable = write_fault;
1072
1073 if (async) {
1074 down_read(&current->mm->mmap_sem);
1075 npages = get_user_page_nowait(current, current->mm,
1076 addr, write_fault, page);
1077 up_read(&current->mm->mmap_sem);
1078 } else
1079 npages = get_user_pages_fast(addr, 1, write_fault,
1080 page);
1081
1082 /* map read fault as writable if possible */
1083 if (unlikely(!write_fault) && npages == 1) {
1084 struct page *wpage[1];
1085
1086 npages = __get_user_pages_fast(addr, 1, 1, wpage);
1087 if (npages == 1) {
1088 *writable = true;
1089 put_page(page[0]);
1090 page[0] = wpage[0];
1091 }
1092 npages = 1;
1093 }
1094 }
955 1095
956 if (unlikely(npages != 1)) { 1096 if (unlikely(npages != 1)) {
957 struct vm_area_struct *vma; 1097 struct vm_area_struct *vma;
958 1098
1099 if (atomic)
1100 return get_fault_pfn();
1101
959 down_read(&current->mm->mmap_sem); 1102 down_read(&current->mm->mmap_sem);
960 if (is_hwpoison_address(addr)) { 1103 if (npages == -EHWPOISON ||
1104 (!async && check_user_page_hwpoison(addr))) {
961 up_read(&current->mm->mmap_sem); 1105 up_read(&current->mm->mmap_sem);
962 get_page(hwpoison_page); 1106 get_page(hwpoison_page);
963 return page_to_pfn(hwpoison_page); 1107 return page_to_pfn(hwpoison_page);
964 } 1108 }
965 1109
966 vma = find_vma(current->mm, addr); 1110 vma = find_vma_intersection(current->mm, addr, addr+1);
967 1111
968 if (vma == NULL || addr < vma->vm_start || 1112 if (vma == NULL)
969 !(vma->vm_flags & VM_PFNMAP)) { 1113 pfn = get_fault_pfn();
970 up_read(&current->mm->mmap_sem); 1114 else if ((vma->vm_flags & VM_PFNMAP)) {
971 get_page(fault_page); 1115 pfn = ((addr - vma->vm_start) >> PAGE_SHIFT) +
972 return page_to_pfn(fault_page); 1116 vma->vm_pgoff;
1117 BUG_ON(!kvm_is_mmio_pfn(pfn));
1118 } else {
1119 if (async && (vma->vm_flags & VM_WRITE))
1120 *async = true;
1121 pfn = get_fault_pfn();
973 } 1122 }
974
975 pfn = ((addr - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
976 up_read(&current->mm->mmap_sem); 1123 up_read(&current->mm->mmap_sem);
977 BUG_ON(!kvm_is_mmio_pfn(pfn));
978 } else 1124 } else
979 pfn = page_to_pfn(page[0]); 1125 pfn = page_to_pfn(page[0]);
980 1126
981 return pfn; 1127 return pfn;
982} 1128}
983 1129
984pfn_t gfn_to_pfn(struct kvm *kvm, gfn_t gfn) 1130pfn_t hva_to_pfn_atomic(struct kvm *kvm, unsigned long addr)
1131{
1132 return hva_to_pfn(kvm, addr, true, NULL, true, NULL);
1133}
1134EXPORT_SYMBOL_GPL(hva_to_pfn_atomic);
1135
1136static pfn_t __gfn_to_pfn(struct kvm *kvm, gfn_t gfn, bool atomic, bool *async,
1137 bool write_fault, bool *writable)
985{ 1138{
986 unsigned long addr; 1139 unsigned long addr;
987 1140
1141 if (async)
1142 *async = false;
1143
988 addr = gfn_to_hva(kvm, gfn); 1144 addr = gfn_to_hva(kvm, gfn);
989 if (kvm_is_error_hva(addr)) { 1145 if (kvm_is_error_hva(addr)) {
990 get_page(bad_page); 1146 get_page(bad_page);
991 return page_to_pfn(bad_page); 1147 return page_to_pfn(bad_page);
992 } 1148 }
993 1149
994 return hva_to_pfn(kvm, addr); 1150 return hva_to_pfn(kvm, addr, atomic, async, write_fault, writable);
1151}
1152
1153pfn_t gfn_to_pfn_atomic(struct kvm *kvm, gfn_t gfn)
1154{
1155 return __gfn_to_pfn(kvm, gfn, true, NULL, true, NULL);
1156}
1157EXPORT_SYMBOL_GPL(gfn_to_pfn_atomic);
1158
1159pfn_t gfn_to_pfn_async(struct kvm *kvm, gfn_t gfn, bool *async,
1160 bool write_fault, bool *writable)
1161{
1162 return __gfn_to_pfn(kvm, gfn, false, async, write_fault, writable);
1163}
1164EXPORT_SYMBOL_GPL(gfn_to_pfn_async);
1165
1166pfn_t gfn_to_pfn(struct kvm *kvm, gfn_t gfn)
1167{
1168 return __gfn_to_pfn(kvm, gfn, false, NULL, true, NULL);
995} 1169}
996EXPORT_SYMBOL_GPL(gfn_to_pfn); 1170EXPORT_SYMBOL_GPL(gfn_to_pfn);
997 1171
1172pfn_t gfn_to_pfn_prot(struct kvm *kvm, gfn_t gfn, bool write_fault,
1173 bool *writable)
1174{
1175 return __gfn_to_pfn(kvm, gfn, false, NULL, write_fault, writable);
1176}
1177EXPORT_SYMBOL_GPL(gfn_to_pfn_prot);
1178
998pfn_t gfn_to_pfn_memslot(struct kvm *kvm, 1179pfn_t gfn_to_pfn_memslot(struct kvm *kvm,
999 struct kvm_memory_slot *slot, gfn_t gfn) 1180 struct kvm_memory_slot *slot, gfn_t gfn)
1000{ 1181{
1001 unsigned long addr = gfn_to_hva_memslot(slot, gfn); 1182 unsigned long addr = gfn_to_hva_memslot(slot, gfn);
1002 return hva_to_pfn(kvm, addr); 1183 return hva_to_pfn(kvm, addr, false, NULL, true, NULL);
1184}
1185
1186int gfn_to_page_many_atomic(struct kvm *kvm, gfn_t gfn, struct page **pages,
1187 int nr_pages)
1188{
1189 unsigned long addr;
1190 gfn_t entry;
1191
1192 addr = gfn_to_hva_many(gfn_to_memslot(kvm, gfn), gfn, &entry);
1193 if (kvm_is_error_hva(addr))
1194 return -1;
1195
1196 if (entry < nr_pages)
1197 return 0;
1198
1199 return __get_user_pages_fast(addr, nr_pages, 1, pages);
1003} 1200}
1201EXPORT_SYMBOL_GPL(gfn_to_page_many_atomic);
1004 1202
1005struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn) 1203struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn)
1006{ 1204{
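gfn_to_pfn() now has atomic, async and writable-aware variants. A hedged sketch of how a fault path might choose between them (the caller below is illustrative and not part of this patch):

static pfn_t fault_in_pfn(struct kvm *kvm, gfn_t gfn, bool write_fault,
                          bool can_sleep)
{
        bool async = false;
        bool writable;
        pfn_t pfn;

        if (!can_sleep)
                /* atomic context: never sleeps, may hand back the fault pfn */
                return gfn_to_pfn_atomic(kvm, gfn);

        /* try without blocking; *async is set when the page must be swapped in */
        pfn = gfn_to_pfn_async(kvm, gfn, &async, write_fault, &writable);
        if (async)
                /* either queue an async page fault here, or fall back to a
                 * synchronous fault-in that may sleep */
                pfn = gfn_to_pfn_prot(kvm, gfn, write_fault, &writable);

        return pfn;
}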
@@ -1091,7 +1289,7 @@ int kvm_read_guest_page(struct kvm *kvm, gfn_t gfn, void *data, int offset,
1091 addr = gfn_to_hva(kvm, gfn); 1289 addr = gfn_to_hva(kvm, gfn);
1092 if (kvm_is_error_hva(addr)) 1290 if (kvm_is_error_hva(addr))
1093 return -EFAULT; 1291 return -EFAULT;
1094 r = copy_from_user(data, (void __user *)addr + offset, len); 1292 r = __copy_from_user(data, (void __user *)addr + offset, len);
1095 if (r) 1293 if (r)
1096 return -EFAULT; 1294 return -EFAULT;
1097 return 0; 1295 return 0;
@@ -1175,9 +1373,51 @@ int kvm_write_guest(struct kvm *kvm, gpa_t gpa, const void *data,
1175 return 0; 1373 return 0;
1176} 1374}
1177 1375
1376int kvm_gfn_to_hva_cache_init(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
1377 gpa_t gpa)
1378{
1379 struct kvm_memslots *slots = kvm_memslots(kvm);
1380 int offset = offset_in_page(gpa);
1381 gfn_t gfn = gpa >> PAGE_SHIFT;
1382
1383 ghc->gpa = gpa;
1384 ghc->generation = slots->generation;
1385 ghc->memslot = __gfn_to_memslot(slots, gfn);
1386 ghc->hva = gfn_to_hva_many(ghc->memslot, gfn, NULL);
1387 if (!kvm_is_error_hva(ghc->hva))
1388 ghc->hva += offset;
1389 else
1390 return -EFAULT;
1391
1392 return 0;
1393}
1394EXPORT_SYMBOL_GPL(kvm_gfn_to_hva_cache_init);
1395
1396int kvm_write_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
1397 void *data, unsigned long len)
1398{
1399 struct kvm_memslots *slots = kvm_memslots(kvm);
1400 int r;
1401
1402 if (slots->generation != ghc->generation)
1403 kvm_gfn_to_hva_cache_init(kvm, ghc, ghc->gpa);
1404
1405 if (kvm_is_error_hva(ghc->hva))
1406 return -EFAULT;
1407
1408 r = copy_to_user((void __user *)ghc->hva, data, len);
1409 if (r)
1410 return -EFAULT;
1411 mark_page_dirty_in_slot(kvm, ghc->memslot, ghc->gpa >> PAGE_SHIFT);
1412
1413 return 0;
1414}
1415EXPORT_SYMBOL_GPL(kvm_write_guest_cached);
1416
1178int kvm_clear_guest_page(struct kvm *kvm, gfn_t gfn, int offset, int len) 1417int kvm_clear_guest_page(struct kvm *kvm, gfn_t gfn, int offset, int len)
1179{ 1418{
1180 return kvm_write_guest_page(kvm, gfn, empty_zero_page, offset, len); 1419 return kvm_write_guest_page(kvm, gfn, (const void *) empty_zero_page,
1420 offset, len);
1181} 1421}
1182EXPORT_SYMBOL_GPL(kvm_clear_guest_page); 1422EXPORT_SYMBOL_GPL(kvm_clear_guest_page);
1183 1423
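The gfn_to_hva_cache API added above lets a caller resolve a fixed guest address once and then write to it cheaply; the cached translation is re-validated against slots->generation on every use. A minimal usage sketch (the record structure and caller names are made up for illustration):

struct guest_record {
        u64 flags;
        u64 counter;
};

static struct gfn_to_hva_cache ghc;

static int record_init(struct kvm *kvm, gpa_t gpa)
{
        /* translate gpa -> hva once, up front */
        return kvm_gfn_to_hva_cache_init(kvm, &ghc, gpa);
}

static int record_update(struct kvm *kvm, struct guest_record *rec)
{
        /* fast path: reuses the cached hva and marks the page dirty */
        return kvm_write_guest_cached(kvm, &ghc, rec, sizeof(*rec));
}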
@@ -1200,18 +1440,24 @@ int kvm_clear_guest(struct kvm *kvm, gpa_t gpa, unsigned long len)
1200} 1440}
1201EXPORT_SYMBOL_GPL(kvm_clear_guest); 1441EXPORT_SYMBOL_GPL(kvm_clear_guest);
1202 1442
1203void mark_page_dirty(struct kvm *kvm, gfn_t gfn) 1443void mark_page_dirty_in_slot(struct kvm *kvm, struct kvm_memory_slot *memslot,
1444 gfn_t gfn)
1204{ 1445{
1205 struct kvm_memory_slot *memslot;
1206
1207 memslot = gfn_to_memslot(kvm, gfn);
1208 if (memslot && memslot->dirty_bitmap) { 1446 if (memslot && memslot->dirty_bitmap) {
1209 unsigned long rel_gfn = gfn - memslot->base_gfn; 1447 unsigned long rel_gfn = gfn - memslot->base_gfn;
1210 1448
1211 generic___set_le_bit(rel_gfn, memslot->dirty_bitmap); 1449 __set_bit_le(rel_gfn, memslot->dirty_bitmap);
1212 } 1450 }
1213} 1451}
1214 1452
1453void mark_page_dirty(struct kvm *kvm, gfn_t gfn)
1454{
1455 struct kvm_memory_slot *memslot;
1456
1457 memslot = gfn_to_memslot(kvm, gfn);
1458 mark_page_dirty_in_slot(kvm, memslot, gfn);
1459}
1460
1215/* 1461/*
1216 * The vCPU has executed a HLT instruction with in-kernel mode enabled. 1462 * The vCPU has executed a HLT instruction with in-kernel mode enabled.
1217 */ 1463 */
@@ -1245,18 +1491,55 @@ void kvm_resched(struct kvm_vcpu *vcpu)
1245} 1491}
1246EXPORT_SYMBOL_GPL(kvm_resched); 1492EXPORT_SYMBOL_GPL(kvm_resched);
1247 1493
1248void kvm_vcpu_on_spin(struct kvm_vcpu *vcpu) 1494void kvm_vcpu_on_spin(struct kvm_vcpu *me)
1249{ 1495{
1250 ktime_t expires; 1496 struct kvm *kvm = me->kvm;
1251 DEFINE_WAIT(wait); 1497 struct kvm_vcpu *vcpu;
1252 1498 int last_boosted_vcpu = me->kvm->last_boosted_vcpu;
1253 prepare_to_wait(&vcpu->wq, &wait, TASK_INTERRUPTIBLE); 1499 int yielded = 0;
1254 1500 int pass;
1255 /* Sleep for 100 us, and hope lock-holder got scheduled */ 1501 int i;
1256 expires = ktime_add_ns(ktime_get(), 100000UL);
1257 schedule_hrtimeout(&expires, HRTIMER_MODE_ABS);
1258 1502
1259 finish_wait(&vcpu->wq, &wait); 1503 /*
1504 * We boost the priority of a VCPU that is runnable but not
1505 * currently running, because it got preempted by something
1506 * else and called schedule in __vcpu_run. Hopefully that
1507 * VCPU is holding the lock that we need and will release it.
1508 * We approximate round-robin by starting at the last boosted VCPU.
1509 */
1510 for (pass = 0; pass < 2 && !yielded; pass++) {
1511 kvm_for_each_vcpu(i, vcpu, kvm) {
1512 struct task_struct *task = NULL;
1513 struct pid *pid;
1514 if (!pass && i < last_boosted_vcpu) {
1515 i = last_boosted_vcpu;
1516 continue;
1517 } else if (pass && i > last_boosted_vcpu)
1518 break;
1519 if (vcpu == me)
1520 continue;
1521 if (waitqueue_active(&vcpu->wq))
1522 continue;
1523 rcu_read_lock();
1524 pid = rcu_dereference(vcpu->pid);
1525 if (pid)
1526 task = get_pid_task(vcpu->pid, PIDTYPE_PID);
1527 rcu_read_unlock();
1528 if (!task)
1529 continue;
1530 if (task->flags & PF_VCPU) {
1531 put_task_struct(task);
1532 continue;
1533 }
1534 if (yield_to(task, 1)) {
1535 put_task_struct(task);
1536 kvm->last_boosted_vcpu = i;
1537 yielded = 1;
1538 break;
1539 }
1540 put_task_struct(task);
1541 }
1542 }
1260} 1543}
1261EXPORT_SYMBOL_GPL(kvm_vcpu_on_spin); 1544EXPORT_SYMBOL_GPL(kvm_vcpu_on_spin);
1262 1545
@@ -1305,6 +1588,7 @@ static struct file_operations kvm_vcpu_fops = {
1305 .unlocked_ioctl = kvm_vcpu_ioctl, 1588 .unlocked_ioctl = kvm_vcpu_ioctl,
1306 .compat_ioctl = kvm_vcpu_ioctl, 1589 .compat_ioctl = kvm_vcpu_ioctl,
1307 .mmap = kvm_vcpu_mmap, 1590 .mmap = kvm_vcpu_mmap,
1591 .llseek = noop_llseek,
1308}; 1592};
1309 1593
1310/* 1594/*
@@ -1412,6 +1696,7 @@ static long kvm_vcpu_ioctl(struct file *filp,
1412 if (arg) 1696 if (arg)
1413 goto out; 1697 goto out;
1414 r = kvm_arch_vcpu_ioctl_run(vcpu, vcpu->run); 1698 r = kvm_arch_vcpu_ioctl_run(vcpu, vcpu->run);
1699 trace_kvm_userspace_exit(vcpu->run->exit_reason, r);
1415 break; 1700 break;
1416 case KVM_GET_REGS: { 1701 case KVM_GET_REGS: {
1417 struct kvm_regs *kvm_regs; 1702 struct kvm_regs *kvm_regs;
@@ -1774,11 +2059,12 @@ static struct file_operations kvm_vm_fops = {
1774 .compat_ioctl = kvm_vm_compat_ioctl, 2059 .compat_ioctl = kvm_vm_compat_ioctl,
1775#endif 2060#endif
1776 .mmap = kvm_vm_mmap, 2061 .mmap = kvm_vm_mmap,
2062 .llseek = noop_llseek,
1777}; 2063};
1778 2064
1779static int kvm_dev_ioctl_create_vm(void) 2065static int kvm_dev_ioctl_create_vm(void)
1780{ 2066{
1781 int fd, r; 2067 int r;
1782 struct kvm *kvm; 2068 struct kvm *kvm;
1783 2069
1784 kvm = kvm_create_vm(); 2070 kvm = kvm_create_vm();
@@ -1791,11 +2077,11 @@ static int kvm_dev_ioctl_create_vm(void)
1791 return r; 2077 return r;
1792 } 2078 }
1793#endif 2079#endif
1794 fd = anon_inode_getfd("kvm-vm", &kvm_vm_fops, kvm, O_RDWR); 2080 r = anon_inode_getfd("kvm-vm", &kvm_vm_fops, kvm, O_RDWR);
1795 if (fd < 0) 2081 if (r < 0)
1796 kvm_put_kvm(kvm); 2082 kvm_put_kvm(kvm);
1797 2083
1798 return fd; 2084 return r;
1799} 2085}
1800 2086
1801static long kvm_dev_ioctl_check_extension_generic(long arg) 2087static long kvm_dev_ioctl_check_extension_generic(long arg)
@@ -1867,6 +2153,7 @@ out:
1867static struct file_operations kvm_chardev_ops = { 2153static struct file_operations kvm_chardev_ops = {
1868 .unlocked_ioctl = kvm_dev_ioctl, 2154 .unlocked_ioctl = kvm_dev_ioctl,
1869 .compat_ioctl = kvm_dev_ioctl, 2155 .compat_ioctl = kvm_dev_ioctl,
2156 .llseek = noop_llseek,
1870}; 2157};
1871 2158
1872static struct miscdevice kvm_dev = { 2159static struct miscdevice kvm_dev = {
@@ -1875,7 +2162,7 @@ static struct miscdevice kvm_dev = {
1875 &kvm_chardev_ops, 2162 &kvm_chardev_ops,
1876}; 2163};
1877 2164
1878static void hardware_enable(void *junk) 2165static void hardware_enable_nolock(void *junk)
1879{ 2166{
1880 int cpu = raw_smp_processor_id(); 2167 int cpu = raw_smp_processor_id();
1881 int r; 2168 int r;
@@ -1895,7 +2182,14 @@ static void hardware_enable(void *junk)
1895 } 2182 }
1896} 2183}
1897 2184
1898static void hardware_disable(void *junk) 2185static void hardware_enable(void *junk)
2186{
2187 raw_spin_lock(&kvm_lock);
2188 hardware_enable_nolock(junk);
2189 raw_spin_unlock(&kvm_lock);
2190}
2191
2192static void hardware_disable_nolock(void *junk)
1899{ 2193{
1900 int cpu = raw_smp_processor_id(); 2194 int cpu = raw_smp_processor_id();
1901 2195
@@ -1905,32 +2199,39 @@ static void hardware_disable(void *junk)
1905 kvm_arch_hardware_disable(NULL); 2199 kvm_arch_hardware_disable(NULL);
1906} 2200}
1907 2201
2202static void hardware_disable(void *junk)
2203{
2204 raw_spin_lock(&kvm_lock);
2205 hardware_disable_nolock(junk);
2206 raw_spin_unlock(&kvm_lock);
2207}
2208
1908static void hardware_disable_all_nolock(void) 2209static void hardware_disable_all_nolock(void)
1909{ 2210{
1910 BUG_ON(!kvm_usage_count); 2211 BUG_ON(!kvm_usage_count);
1911 2212
1912 kvm_usage_count--; 2213 kvm_usage_count--;
1913 if (!kvm_usage_count) 2214 if (!kvm_usage_count)
1914 on_each_cpu(hardware_disable, NULL, 1); 2215 on_each_cpu(hardware_disable_nolock, NULL, 1);
1915} 2216}
1916 2217
1917static void hardware_disable_all(void) 2218static void hardware_disable_all(void)
1918{ 2219{
1919 spin_lock(&kvm_lock); 2220 raw_spin_lock(&kvm_lock);
1920 hardware_disable_all_nolock(); 2221 hardware_disable_all_nolock();
1921 spin_unlock(&kvm_lock); 2222 raw_spin_unlock(&kvm_lock);
1922} 2223}
1923 2224
1924static int hardware_enable_all(void) 2225static int hardware_enable_all(void)
1925{ 2226{
1926 int r = 0; 2227 int r = 0;
1927 2228
1928 spin_lock(&kvm_lock); 2229 raw_spin_lock(&kvm_lock);
1929 2230
1930 kvm_usage_count++; 2231 kvm_usage_count++;
1931 if (kvm_usage_count == 1) { 2232 if (kvm_usage_count == 1) {
1932 atomic_set(&hardware_enable_failed, 0); 2233 atomic_set(&hardware_enable_failed, 0);
1933 on_each_cpu(hardware_enable, NULL, 1); 2234 on_each_cpu(hardware_enable_nolock, NULL, 1);
1934 2235
1935 if (atomic_read(&hardware_enable_failed)) { 2236 if (atomic_read(&hardware_enable_failed)) {
1936 hardware_disable_all_nolock(); 2237 hardware_disable_all_nolock();
@@ -1938,7 +2239,7 @@ static int hardware_enable_all(void)
1938 } 2239 }
1939 } 2240 }
1940 2241
1941 spin_unlock(&kvm_lock); 2242 raw_spin_unlock(&kvm_lock);
1942 2243
1943 return r; 2244 return r;
1944} 2245}
@@ -1968,18 +2269,12 @@ static int kvm_cpu_hotplug(struct notifier_block *notifier, unsigned long val,
1968} 2269}
1969 2270
1970 2271
1971asmlinkage void kvm_handle_fault_on_reboot(void) 2272asmlinkage void kvm_spurious_fault(void)
1972{ 2273{
1973 if (kvm_rebooting) {
1974 /* spin while reset goes on */
1975 local_irq_enable();
1976 while (true)
1977 ;
1978 }
1979 /* Fault while not rebooting. We want the trace. */ 2274 /* Fault while not rebooting. We want the trace. */
1980 BUG(); 2275 BUG();
1981} 2276}
1982EXPORT_SYMBOL_GPL(kvm_handle_fault_on_reboot); 2277EXPORT_SYMBOL_GPL(kvm_spurious_fault);
1983 2278
1984static int kvm_reboot(struct notifier_block *notifier, unsigned long val, 2279static int kvm_reboot(struct notifier_block *notifier, unsigned long val,
1985 void *v) 2280 void *v)
@@ -1992,7 +2287,7 @@ static int kvm_reboot(struct notifier_block *notifier, unsigned long val,
1992 */ 2287 */
1993 printk(KERN_INFO "kvm: exiting hardware virtualization\n"); 2288 printk(KERN_INFO "kvm: exiting hardware virtualization\n");
1994 kvm_rebooting = true; 2289 kvm_rebooting = true;
1995 on_each_cpu(hardware_disable, NULL, 1); 2290 on_each_cpu(hardware_disable_nolock, NULL, 1);
1996 return NOTIFY_OK; 2291 return NOTIFY_OK;
1997} 2292}
1998 2293
@@ -2106,10 +2401,10 @@ static int vm_stat_get(void *_offset, u64 *val)
2106 struct kvm *kvm; 2401 struct kvm *kvm;
2107 2402
2108 *val = 0; 2403 *val = 0;
2109 spin_lock(&kvm_lock); 2404 raw_spin_lock(&kvm_lock);
2110 list_for_each_entry(kvm, &vm_list, vm_list) 2405 list_for_each_entry(kvm, &vm_list, vm_list)
2111 *val += *(u32 *)((void *)kvm + offset); 2406 *val += *(u32 *)((void *)kvm + offset);
2112 spin_unlock(&kvm_lock); 2407 raw_spin_unlock(&kvm_lock);
2113 return 0; 2408 return 0;
2114} 2409}
2115 2410
@@ -2123,12 +2418,12 @@ static int vcpu_stat_get(void *_offset, u64 *val)
2123 int i; 2418 int i;
2124 2419
2125 *val = 0; 2420 *val = 0;
2126 spin_lock(&kvm_lock); 2421 raw_spin_lock(&kvm_lock);
2127 list_for_each_entry(kvm, &vm_list, vm_list) 2422 list_for_each_entry(kvm, &vm_list, vm_list)
2128 kvm_for_each_vcpu(i, vcpu, kvm) 2423 kvm_for_each_vcpu(i, vcpu, kvm)
2129 *val += *(u32 *)((void *)vcpu + offset); 2424 *val += *(u32 *)((void *)vcpu + offset);
2130 2425
2131 spin_unlock(&kvm_lock); 2426 raw_spin_unlock(&kvm_lock);
2132 return 0; 2427 return 0;
2133} 2428}
2134 2429
@@ -2159,31 +2454,26 @@ static void kvm_exit_debug(void)
2159 debugfs_remove(kvm_debugfs_dir); 2454 debugfs_remove(kvm_debugfs_dir);
2160} 2455}
2161 2456
2162static int kvm_suspend(struct sys_device *dev, pm_message_t state) 2457static int kvm_suspend(void)
2163{ 2458{
2164 if (kvm_usage_count) 2459 if (kvm_usage_count)
2165 hardware_disable(NULL); 2460 hardware_disable_nolock(NULL);
2166 return 0; 2461 return 0;
2167} 2462}
2168 2463
2169static int kvm_resume(struct sys_device *dev) 2464static void kvm_resume(void)
2170{ 2465{
2171 if (kvm_usage_count) 2466 if (kvm_usage_count) {
2172 hardware_enable(NULL); 2467 WARN_ON(raw_spin_is_locked(&kvm_lock));
2173 return 0; 2468 hardware_enable_nolock(NULL);
2469 }
2174} 2470}
2175 2471
2176static struct sysdev_class kvm_sysdev_class = { 2472static struct syscore_ops kvm_syscore_ops = {
2177 .name = "kvm",
2178 .suspend = kvm_suspend, 2473 .suspend = kvm_suspend,
2179 .resume = kvm_resume, 2474 .resume = kvm_resume,
2180}; 2475};
2181 2476
2182static struct sys_device kvm_sysdev = {
2183 .id = 0,
2184 .cls = &kvm_sysdev_class,
2185};
2186
2187struct page *bad_page; 2477struct page *bad_page;
2188pfn_t bad_pfn; 2478pfn_t bad_pfn;
2189 2479
@@ -2267,14 +2557,6 @@ int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
2267 goto out_free_2; 2557 goto out_free_2;
2268 register_reboot_notifier(&kvm_reboot_notifier); 2558 register_reboot_notifier(&kvm_reboot_notifier);
2269 2559
2270 r = sysdev_class_register(&kvm_sysdev_class);
2271 if (r)
2272 goto out_free_3;
2273
2274 r = sysdev_register(&kvm_sysdev);
2275 if (r)
2276 goto out_free_4;
2277
2278 /* A kmem cache lets us meet the alignment requirements of fx_save. */ 2560 /* A kmem cache lets us meet the alignment requirements of fx_save. */
2279 if (!vcpu_align) 2561 if (!vcpu_align)
2280 vcpu_align = __alignof__(struct kvm_vcpu); 2562 vcpu_align = __alignof__(struct kvm_vcpu);
@@ -2282,9 +2564,13 @@ int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
2282 0, NULL); 2564 0, NULL);
2283 if (!kvm_vcpu_cache) { 2565 if (!kvm_vcpu_cache) {
2284 r = -ENOMEM; 2566 r = -ENOMEM;
2285 goto out_free_5; 2567 goto out_free_3;
2286 } 2568 }
2287 2569
2570 r = kvm_async_pf_init();
2571 if (r)
2572 goto out_free;
2573
2288 kvm_chardev_ops.owner = module; 2574 kvm_chardev_ops.owner = module;
2289 kvm_vm_fops.owner = module; 2575 kvm_vm_fops.owner = module;
2290 kvm_vcpu_fops.owner = module; 2576 kvm_vcpu_fops.owner = module;
@@ -2292,9 +2578,11 @@ int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
2292 r = misc_register(&kvm_dev); 2578 r = misc_register(&kvm_dev);
2293 if (r) { 2579 if (r) {
2294 printk(KERN_ERR "kvm: misc device register failed\n"); 2580 printk(KERN_ERR "kvm: misc device register failed\n");
2295 goto out_free; 2581 goto out_unreg;
2296 } 2582 }
2297 2583
2584 register_syscore_ops(&kvm_syscore_ops);
2585
2298 kvm_preempt_ops.sched_in = kvm_sched_in; 2586 kvm_preempt_ops.sched_in = kvm_sched_in;
2299 kvm_preempt_ops.sched_out = kvm_sched_out; 2587 kvm_preempt_ops.sched_out = kvm_sched_out;
2300 2588
@@ -2302,12 +2590,10 @@ int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
2302 2590
2303 return 0; 2591 return 0;
2304 2592
2593out_unreg:
2594 kvm_async_pf_deinit();
2305out_free: 2595out_free:
2306 kmem_cache_destroy(kvm_vcpu_cache); 2596 kmem_cache_destroy(kvm_vcpu_cache);
2307out_free_5:
2308 sysdev_unregister(&kvm_sysdev);
2309out_free_4:
2310 sysdev_class_unregister(&kvm_sysdev_class);
2311out_free_3: 2597out_free_3:
2312 unregister_reboot_notifier(&kvm_reboot_notifier); 2598 unregister_reboot_notifier(&kvm_reboot_notifier);
2313 unregister_cpu_notifier(&kvm_cpu_notifier); 2599 unregister_cpu_notifier(&kvm_cpu_notifier);
@@ -2334,11 +2620,11 @@ void kvm_exit(void)
2334 kvm_exit_debug(); 2620 kvm_exit_debug();
2335 misc_deregister(&kvm_dev); 2621 misc_deregister(&kvm_dev);
2336 kmem_cache_destroy(kvm_vcpu_cache); 2622 kmem_cache_destroy(kvm_vcpu_cache);
2337 sysdev_unregister(&kvm_sysdev); 2623 kvm_async_pf_deinit();
2338 sysdev_class_unregister(&kvm_sysdev_class); 2624 unregister_syscore_ops(&kvm_syscore_ops);
2339 unregister_reboot_notifier(&kvm_reboot_notifier); 2625 unregister_reboot_notifier(&kvm_reboot_notifier);
2340 unregister_cpu_notifier(&kvm_cpu_notifier); 2626 unregister_cpu_notifier(&kvm_cpu_notifier);
2341 on_each_cpu(hardware_disable, NULL, 1); 2627 on_each_cpu(hardware_disable_nolock, NULL, 1);
2342 kvm_arch_hardware_unsetup(); 2628 kvm_arch_hardware_unsetup();
2343 kvm_arch_exit(); 2629 kvm_arch_exit();
2344 free_cpumask_var(cpus_hardware_enabled); 2630 free_cpumask_var(cpus_hardware_enabled);