Diffstat (limited to 'virt/kvm')
-rw-r--r--	virt/kvm/eventfd.c	 4
-rw-r--r--	virt/kvm/kvm_main.c	60
2 files changed, 32 insertions(+), 32 deletions(-)
diff --git a/virt/kvm/eventfd.c b/virt/kvm/eventfd.c
index 3656849f78a0..73358d256fa2 100644
--- a/virt/kvm/eventfd.c
+++ b/virt/kvm/eventfd.c
@@ -90,7 +90,7 @@ irqfd_shutdown(struct work_struct *work)
 	 * We know no new events will be scheduled at this point, so block
 	 * until all previously outstanding events have completed
 	 */
-	flush_work(&irqfd->inject);
+	flush_work_sync(&irqfd->inject);
 
 	/*
 	 * It is now safe to release the object's resources
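
Note on the flush_work() -> flush_work_sync() change: irqfd_shutdown() must not tear the object down while its injection work is still queued or running, and flush_work_sync() blocks until any previously submitted instance has finished. A minimal sketch of the same teardown pattern, using hypothetical names outside KVM:

#include <linux/workqueue.h>
#include <linux/slab.h>

struct foo {
	struct work_struct inject;
	/* ... other state used by the work function ... */
};

static void foo_shutdown(struct foo *foo)
{
	/*
	 * The caller guarantees nothing queues foo->inject after this
	 * point; flush_work_sync() then waits for any already queued
	 * or running instance to complete before the object is freed.
	 */
	flush_work_sync(&foo->inject);
	kfree(foo);
}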
@@ -578,7 +578,7 @@ kvm_assign_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args)
 
 	mutex_lock(&kvm->slots_lock);
 
-	/* Verify that there isnt a match already */
+	/* Verify that there isn't a match already */
 	if (ioeventfd_check_collision(kvm, p)) {
 		ret = -EEXIST;
 		goto unlock_fail;
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 1fa0d292119a..6330653480e4 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -30,7 +30,7 @@
 #include <linux/debugfs.h>
 #include <linux/highmem.h>
 #include <linux/file.h>
-#include <linux/sysdev.h>
+#include <linux/syscore_ops.h>
 #include <linux/cpu.h>
 #include <linux/sched.h>
 #include <linux/cpumask.h>
@@ -52,7 +52,6 @@
 #include <asm/io.h>
 #include <asm/uaccess.h>
 #include <asm/pgtable.h>
-#include <asm-generic/bitops/le.h>
 
 #include "coalesced_mmio.h"
 #include "async_pf.h"
@@ -1038,6 +1037,17 @@ static pfn_t get_fault_pfn(void)
 	return fault_pfn;
 }
 
+int get_user_page_nowait(struct task_struct *tsk, struct mm_struct *mm,
+	unsigned long start, int write, struct page **page)
+{
+	int flags = FOLL_TOUCH | FOLL_NOWAIT | FOLL_HWPOISON | FOLL_GET;
+
+	if (write)
+		flags |= FOLL_WRITE;
+
+	return __get_user_pages(tsk, mm, start, 1, flags, page, NULL, NULL);
+}
+
 static inline int check_user_page_hwpoison(unsigned long addr)
 {
 	int rc, flags = FOLL_TOUCH | FOLL_HWPOISON | FOLL_WRITE;
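
The new helper pins a single page with FOLL_NOWAIT, so __get_user_pages() returns instead of sleeping on a major fault; it must be called with mmap_sem held for read. A minimal caller sketch (hypothetical wrapper, mirroring the hva_to_pfn() change below):

/* Hypothetical wrapper illustrating the calling convention. */
static int try_pin_page_nowait(unsigned long addr, int write_fault,
			       struct page **page)
{
	int npages;

	down_read(&current->mm->mmap_sem);
	npages = get_user_page_nowait(current, current->mm, addr,
				      write_fault, page);
	up_read(&current->mm->mmap_sem);

	/* 1 on success; 0 or a negative errno (e.g. -EHWPOISON) otherwise. */
	return npages;
}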
@@ -1071,7 +1081,14 @@ static pfn_t hva_to_pfn(struct kvm *kvm, unsigned long addr, bool atomic,
 	if (writable)
 		*writable = write_fault;
 
-	npages = get_user_pages_fast(addr, 1, write_fault, page);
+	if (async) {
+		down_read(&current->mm->mmap_sem);
+		npages = get_user_page_nowait(current, current->mm,
+					addr, write_fault, page);
+		up_read(&current->mm->mmap_sem);
+	} else
+		npages = get_user_pages_fast(addr, 1, write_fault,
+					     page);
 
 	/* map read fault as writable if possible */
 	if (unlikely(!write_fault) && npages == 1) {
@@ -1094,7 +1111,8 @@ static pfn_t hva_to_pfn(struct kvm *kvm, unsigned long addr, bool atomic,
 		return get_fault_pfn();
 
 	down_read(&current->mm->mmap_sem);
-	if (check_user_page_hwpoison(addr)) {
+	if (npages == -EHWPOISON ||
+		(!async && check_user_page_hwpoison(addr))) {
 		up_read(&current->mm->mmap_sem);
 		get_page(hwpoison_page);
 		return page_to_pfn(hwpoison_page);
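
Because the nowait path above passes FOLL_HWPOISON, a poisoned page is reported directly as -EHWPOISON, and the slower check_user_page_hwpoison() probe is only needed when the synchronous fast path was taken. Roughly, the condition amounts to the following (illustrative helper, not part of the patch):

/* Illustrative only: classify a failed pin as a hardware-poison hit. */
static bool pin_failed_on_poison(int npages, bool async, unsigned long addr)
{
	if (npages == -EHWPOISON)	/* reported via FOLL_HWPOISON */
		return true;
	/* get_user_pages_fast() cannot report poison, so probe explicitly. */
	return !async && check_user_page_hwpoison(addr);
}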
@@ -1439,7 +1457,7 @@ void mark_page_dirty_in_slot(struct kvm *kvm, struct kvm_memory_slot *memslot,
 	if (memslot && memslot->dirty_bitmap) {
 		unsigned long rel_gfn = gfn - memslot->base_gfn;
 
-		generic___set_le_bit(rel_gfn, memslot->dirty_bitmap);
+		__set_bit_le(rel_gfn, memslot->dirty_bitmap);
 	}
 }
 
@@ -2447,33 +2465,26 @@ static void kvm_exit_debug(void)
 	debugfs_remove(kvm_debugfs_dir);
 }
 
-static int kvm_suspend(struct sys_device *dev, pm_message_t state)
+static int kvm_suspend(void)
 {
 	if (kvm_usage_count)
 		hardware_disable_nolock(NULL);
 	return 0;
 }
 
-static int kvm_resume(struct sys_device *dev)
+static void kvm_resume(void)
 {
 	if (kvm_usage_count) {
 		WARN_ON(raw_spin_is_locked(&kvm_lock));
 		hardware_enable_nolock(NULL);
 	}
-	return 0;
 }
 
-static struct sysdev_class kvm_sysdev_class = {
-	.name = "kvm",
+static struct syscore_ops kvm_syscore_ops = {
 	.suspend = kvm_suspend,
 	.resume = kvm_resume,
 };
 
-static struct sys_device kvm_sysdev = {
-	.id = 0,
-	.cls = &kvm_sysdev_class,
-};
-
 struct page *bad_page;
 pfn_t bad_pfn;
 
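
struct syscore_ops replaces the old sysdev class/device pair: a single register_syscore_ops() call hooks into system suspend/resume, the callbacks take no device argument, and resume cannot fail (hence the void return). A minimal sketch of the pattern for a hypothetical subsystem:

#include <linux/init.h>
#include <linux/syscore_ops.h>

static int foo_syscore_suspend(void)
{
	/* Quiesce state; a nonzero return aborts the system suspend. */
	return 0;
}

static void foo_syscore_resume(void)
{
	/* Restore state; runs early in resume and cannot fail. */
}

static struct syscore_ops foo_syscore_ops = {
	.suspend = foo_syscore_suspend,
	.resume  = foo_syscore_resume,
};

static int __init foo_init(void)
{
	register_syscore_ops(&foo_syscore_ops);
	return 0;
}

static void __exit foo_exit(void)
{
	unregister_syscore_ops(&foo_syscore_ops);
}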
@@ -2557,14 +2568,6 @@ int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
 		goto out_free_2;
 	register_reboot_notifier(&kvm_reboot_notifier);
 
-	r = sysdev_class_register(&kvm_sysdev_class);
-	if (r)
-		goto out_free_3;
-
-	r = sysdev_register(&kvm_sysdev);
-	if (r)
-		goto out_free_4;
-
 	/* A kmem cache lets us meet the alignment requirements of fx_save. */
 	if (!vcpu_align)
 		vcpu_align = __alignof__(struct kvm_vcpu);
@@ -2572,7 +2575,7 @@ int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
 					   0, NULL);
 	if (!kvm_vcpu_cache) {
 		r = -ENOMEM;
-		goto out_free_5;
+		goto out_free_3;
 	}
 
 	r = kvm_async_pf_init();
@@ -2589,6 +2592,8 @@ int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
 		goto out_unreg;
 	}
 
+	register_syscore_ops(&kvm_syscore_ops);
+
 	kvm_preempt_ops.sched_in = kvm_sched_in;
 	kvm_preempt_ops.sched_out = kvm_sched_out;
 
@@ -2600,10 +2605,6 @@ out_unreg:
 	kvm_async_pf_deinit();
 out_free:
 	kmem_cache_destroy(kvm_vcpu_cache);
-out_free_5:
-	sysdev_unregister(&kvm_sysdev);
-out_free_4:
-	sysdev_class_unregister(&kvm_sysdev_class);
 out_free_3:
 	unregister_reboot_notifier(&kvm_reboot_notifier);
 	unregister_cpu_notifier(&kvm_cpu_notifier);
@@ -2631,8 +2632,7 @@ void kvm_exit(void)
 	misc_deregister(&kvm_dev);
 	kmem_cache_destroy(kvm_vcpu_cache);
 	kvm_async_pf_deinit();
-	sysdev_unregister(&kvm_sysdev);
-	sysdev_class_unregister(&kvm_sysdev_class);
+	unregister_syscore_ops(&kvm_syscore_ops);
 	unregister_reboot_notifier(&kvm_reboot_notifier);
 	unregister_cpu_notifier(&kvm_cpu_notifier);
 	on_each_cpu(hardware_disable_nolock, NULL, 1);