Diffstat (limited to 'virt/kvm/kvm_main.c')
-rw-r--r--  virt/kvm/kvm_main.c | 61
1 file changed, 36 insertions(+), 25 deletions(-)
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 887f3b0c2b60..c6a91b044d8d 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -1855,8 +1855,7 @@ void kvm_set_pfn_dirty(kvm_pfn_t pfn)
 	if (!kvm_is_reserved_pfn(pfn)) {
 		struct page *page = pfn_to_page(pfn);
 
-		if (!PageReserved(page))
-			SetPageDirty(page);
+		SetPageDirty(page);
 	}
 }
 EXPORT_SYMBOL_GPL(kvm_set_pfn_dirty);
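
For orientation, kvm_set_pfn_dirty() after this hunk reads as below (reconstructed from the hunk and its header; nothing here is new code). The dropped inner test was redundant: in this era of the tree kvm_is_reserved_pfn() itself rejects reserved pages for valid pfns, so PageReserved() could never be true inside this branch.

void kvm_set_pfn_dirty(kvm_pfn_t pfn)
{
	if (!kvm_is_reserved_pfn(pfn)) {
		struct page *page = pfn_to_page(pfn);

		/* pfn is backed by an ordinary page; mark it dirty */
		SetPageDirty(page);
	}
}
EXPORT_SYMBOL_GPL(kvm_set_pfn_dirty);
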
@@ -2477,6 +2476,29 @@ static bool kvm_vcpu_eligible_for_directed_yield(struct kvm_vcpu *vcpu)
 #endif
 }
 
+/*
+ * Unlike kvm_arch_vcpu_runnable, this function is called outside
+ * a vcpu_load/vcpu_put pair.  However, for most architectures
+ * kvm_arch_vcpu_runnable does not require vcpu_load.
+ */
+bool __weak kvm_arch_dy_runnable(struct kvm_vcpu *vcpu)
+{
+	return kvm_arch_vcpu_runnable(vcpu);
+}
+
+static bool vcpu_dy_runnable(struct kvm_vcpu *vcpu)
+{
+	if (kvm_arch_dy_runnable(vcpu))
+		return true;
+
+#ifdef CONFIG_KVM_ASYNC_PF
+	if (!list_empty_careful(&vcpu->async_pf.done))
+		return true;
+#endif
+
+	return false;
+}
+
 void kvm_vcpu_on_spin(struct kvm_vcpu *me, bool yield_to_kernel_mode)
 {
 	struct kvm *kvm = me->kvm;
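
Because kvm_arch_dy_runnable() is defined __weak, an architecture can supply its own definition and the linker will prefer it over this default. The comment explains the constraint: directed yield runs without vcpu_load(), so only state that can be inspected safely from another CPU may be consulted. A hypothetical override might look like the sketch below; arch_has_pending_interrupt() is an illustrative name, not a function in this tree.

/* Hypothetical arch override of the __weak default above.
 * arch_has_pending_interrupt() is an illustrative helper name. */
bool kvm_arch_dy_runnable(struct kvm_vcpu *vcpu)
{
	/* Cheap checks only: this runs without vcpu_load(). */
	return kvm_arch_vcpu_runnable(vcpu) ||
	       arch_has_pending_interrupt(vcpu);
}
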
@@ -2506,9 +2528,10 @@ void kvm_vcpu_on_spin(struct kvm_vcpu *me, bool yield_to_kernel_mode)
 				continue;
 			if (vcpu == me)
 				continue;
-			if (swait_active(&vcpu->wq) && !kvm_arch_vcpu_runnable(vcpu))
+			if (swait_active(&vcpu->wq) && !vcpu_dy_runnable(vcpu))
 				continue;
-			if (yield_to_kernel_mode && !kvm_arch_vcpu_in_kernel(vcpu))
+			if (READ_ONCE(vcpu->preempted) && yield_to_kernel_mode &&
+			    !kvm_arch_vcpu_in_kernel(vcpu))
 				continue;
 			if (!kvm_vcpu_eligible_for_directed_yield(vcpu))
 				continue;
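
Restated outside the loop, the candidate filter after these two hunks behaves like the illustrative predicate below (a restatement of the hunk, not a function in kvm_main.c). The two behavioral changes: an idle vCPU with a completed async page fault now counts as runnable via vcpu_dy_runnable(), and the in-kernel-mode test is only applied to vCPUs that were actually preempted, as recorded in vcpu->preempted.

/* Illustrative restatement of the skip conditions; not in the tree. */
static bool yield_candidate(struct kvm_vcpu *me, struct kvm_vcpu *vcpu,
			    bool yield_to_kernel_mode)
{
	if (vcpu == me)
		return false;
	if (swait_active(&vcpu->wq) && !vcpu_dy_runnable(vcpu))
		return false;	/* idle with nothing pending */
	if (READ_ONCE(vcpu->preempted) && yield_to_kernel_mode &&
	    !kvm_arch_vcpu_in_kernel(vcpu))
		return false;	/* preempted while in user mode */
	return kvm_vcpu_eligible_for_directed_yield(vcpu);
}
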
@@ -2591,30 +2614,20 @@ static int create_vcpu_fd(struct kvm_vcpu *vcpu)
 	return anon_inode_getfd(name, &kvm_vcpu_fops, vcpu, O_RDWR | O_CLOEXEC);
 }
 
-static int kvm_create_vcpu_debugfs(struct kvm_vcpu *vcpu)
+static void kvm_create_vcpu_debugfs(struct kvm_vcpu *vcpu)
 {
+#ifdef __KVM_HAVE_ARCH_VCPU_DEBUGFS
 	char dir_name[ITOA_MAX_LEN * 2];
-	int ret;
-
-	if (!kvm_arch_has_vcpu_debugfs())
-		return 0;
 
 	if (!debugfs_initialized())
-		return 0;
+		return;
 
 	snprintf(dir_name, sizeof(dir_name), "vcpu%d", vcpu->vcpu_id);
 	vcpu->debugfs_dentry = debugfs_create_dir(dir_name,
 						  vcpu->kvm->debugfs_dentry);
-	if (!vcpu->debugfs_dentry)
-		return -ENOMEM;
-
-	ret = kvm_arch_create_vcpu_debugfs(vcpu);
-	if (ret < 0) {
-		debugfs_remove_recursive(vcpu->debugfs_dentry);
-		return ret;
-	}
 
-	return 0;
+	kvm_arch_create_vcpu_debugfs(vcpu);
+#endif
 }
 
 /*
@@ -2649,9 +2662,7 @@ static int kvm_vm_ioctl_create_vcpu(struct kvm *kvm, u32 id)
 	if (r)
 		goto vcpu_destroy;
 
-	r = kvm_create_vcpu_debugfs(vcpu);
-	if (r)
-		goto vcpu_destroy;
+	kvm_create_vcpu_debugfs(vcpu);
 
 	mutex_lock(&kvm->lock);
 	if (kvm_get_vcpu_by_id(kvm, id)) {
@@ -4205,7 +4216,7 @@ static void kvm_sched_in(struct preempt_notifier *pn, int cpu)
 {
 	struct kvm_vcpu *vcpu = preempt_notifier_to_vcpu(pn);
 
-	vcpu->preempted = false;
+	WRITE_ONCE(vcpu->preempted, false);
 	WRITE_ONCE(vcpu->ready, false);
 
 	kvm_arch_sched_in(vcpu, cpu);
@@ -4219,7 +4230,7 @@ static void kvm_sched_out(struct preempt_notifier *pn,
 	struct kvm_vcpu *vcpu = preempt_notifier_to_vcpu(pn);
 
 	if (current->state == TASK_RUNNING) {
-		vcpu->preempted = true;
+		WRITE_ONCE(vcpu->preempted, true);
 		WRITE_ONCE(vcpu->ready, true);
 	}
 	kvm_arch_vcpu_put(vcpu);
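
The last two hunks make every store to vcpu->preempted use the marked accessor: the preempt notifiers write it with WRITE_ONCE() here, pairing with the new READ_ONCE() that kvm_vcpu_on_spin() performs from another CPU (third hunk above). A minimal sketch of the pattern, outside any KVM context, follows; the struct and function names are illustrative only.

/* Illustrative sketch of the lock-free flag pattern used for
 * vcpu->preempted: one CPU publishes with WRITE_ONCE(), another
 * polls with READ_ONCE(), preventing the compiler from tearing,
 * fusing, or caching the accesses.  Names are not from the tree. */
struct flag_demo {
	bool preempted;
};

static void demo_sched_out(struct flag_demo *d)
{
	WRITE_ONCE(d->preempted, true);		/* publisher side */
}

static bool demo_spin_check(struct flag_demo *d)
{
	return READ_ONCE(d->preempted);		/* lock-free reader */
}
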