-rw-r--r--  arch/x86/kvm/lapic.c       2
-rw-r--r--  arch/x86/kvm/mmu.c         6
-rw-r--r--  arch/x86/kvm/svm.c         2
-rw-r--r--  arch/x86/kvm/timer.c       2
-rw-r--r--  arch/x86/kvm/vmx.c         2
-rw-r--r--  arch/x86/kvm/x86.c        27
-rw-r--r--  include/linux/kvm_host.h  15
-rw-r--r--  virt/kvm/kvm_main.c        4
8 files changed, 37 insertions(+), 23 deletions(-)
diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
index 49573c78c24b..77d8c0f4817d 100644
--- a/arch/x86/kvm/lapic.c
+++ b/arch/x86/kvm/lapic.c
@@ -534,7 +534,7 @@ static void __report_tpr_access(struct kvm_lapic *apic, bool write)
 	struct kvm_vcpu *vcpu = apic->vcpu;
 	struct kvm_run *run = vcpu->run;
 
-	set_bit(KVM_REQ_REPORT_TPR_ACCESS, &vcpu->requests);
+	kvm_make_request(KVM_REQ_REPORT_TPR_ACCESS, vcpu);
 	run->tpr_access.rip = kvm_rip_read(vcpu);
 	run->tpr_access.is_write = write;
 }
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index c5501bc10106..690a7fc58c17 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -1378,7 +1378,7 @@ static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
 
 			mmu_page_add_parent_pte(vcpu, sp, parent_pte);
 			if (sp->unsync_children) {
-				set_bit(KVM_REQ_MMU_SYNC, &vcpu->requests);
+				kvm_make_request(KVM_REQ_MMU_SYNC, vcpu);
 				kvm_mmu_mark_parents_unsync(sp);
 			} else if (sp->unsync)
 				kvm_mmu_mark_parents_unsync(sp);
@@ -2131,7 +2131,7 @@ static int mmu_check_root(struct kvm_vcpu *vcpu, gfn_t root_gfn)
 	int ret = 0;
 
 	if (!kvm_is_visible_gfn(vcpu->kvm, root_gfn)) {
-		set_bit(KVM_REQ_TRIPLE_FAULT, &vcpu->requests);
+		kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu);
 		ret = 1;
 	}
 
@@ -2329,7 +2329,7 @@ static int nonpaging_init_context(struct kvm_vcpu *vcpu)
 void kvm_mmu_flush_tlb(struct kvm_vcpu *vcpu)
 {
 	++vcpu->stat.tlb_flush;
-	set_bit(KVM_REQ_TLB_FLUSH, &vcpu->requests);
+	kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
 }
 
 static void paging_new_cr3(struct kvm_vcpu *vcpu)
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index f7a6fdcf8ef3..587b99d37d44 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -1494,7 +1494,7 @@ static void svm_handle_mce(struct vcpu_svm *svm)
 		 */
 		pr_err("KVM: Guest triggered AMD Erratum 383\n");
 
-		set_bit(KVM_REQ_TRIPLE_FAULT, &svm->vcpu.requests);
+		kvm_make_request(KVM_REQ_TRIPLE_FAULT, &svm->vcpu);
 
 		return;
 	}
diff --git a/arch/x86/kvm/timer.c b/arch/x86/kvm/timer.c
index 564548fbb3d6..e16a0dbe74d8 100644
--- a/arch/x86/kvm/timer.c
+++ b/arch/x86/kvm/timer.c
@@ -32,7 +32,7 @@ static int __kvm_timer_fn(struct kvm_vcpu *vcpu, struct kvm_timer *ktimer)
 	if (ktimer->reinject || !atomic_read(&ktimer->pending)) {
 		atomic_inc(&ktimer->pending);
 		/* FIXME: this code should not know anything about vcpus */
-		set_bit(KVM_REQ_PENDING_TIMER, &vcpu->requests);
+		kvm_make_request(KVM_REQ_PENDING_TIMER, vcpu);
 	}
 
 	if (waitqueue_active(q))
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 345a35470511..661c6e199b4a 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -899,7 +899,7 @@ static void vmx_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 		unsigned long sysenter_esp;
 
 		kvm_migrate_timers(vcpu);
-		set_bit(KVM_REQ_TLB_FLUSH, &vcpu->requests);
+		kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
 		local_irq_disable();
 		list_add(&vmx->local_vcpus_link,
 			 &per_cpu(vcpus_on_cpu, cpu));
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 9be6e4e5e8ee..7ef44107a14a 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -296,7 +296,7 @@ static void kvm_multiple_exception(struct kvm_vcpu *vcpu,
 		prev_nr = vcpu->arch.exception.nr;
 		if (prev_nr == DF_VECTOR) {
 			/* triple fault -> shutdown */
-			set_bit(KVM_REQ_TRIPLE_FAULT, &vcpu->requests);
+			kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu);
 			return;
 		}
 		class1 = exception_class(prev_nr);
@@ -948,7 +948,7 @@ static int kvm_request_guest_time_update(struct kvm_vcpu *v)
 
 	if (!vcpu->time_page)
 		return 0;
-	set_bit(KVM_REQ_KVMCLOCK_UPDATE, &v->requests);
+	kvm_make_request(KVM_REQ_KVMCLOCK_UPDATE, v);
 	return 1;
 }
 
@@ -2253,7 +2253,7 @@ static int kvm_vcpu_ioctl_x86_set_mce(struct kvm_vcpu *vcpu,
 			printk(KERN_DEBUG "kvm: set_mce: "
 			       "injects mce exception while "
 			       "previous one is in progress!\n");
-			set_bit(KVM_REQ_TRIPLE_FAULT, &vcpu->requests);
+			kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu);
 			return 0;
 		}
 		if (banks[1] & MCI_STATUS_VAL)
@@ -4617,7 +4617,7 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
 		vcpu->run->request_interrupt_window;
 
 	if (vcpu->requests)
-		if (test_and_clear_bit(KVM_REQ_MMU_RELOAD, &vcpu->requests))
+		if (kvm_check_request(KVM_REQ_MMU_RELOAD, vcpu))
 			kvm_mmu_unload(vcpu);
 
 	r = kvm_mmu_reload(vcpu);
@@ -4625,26 +4625,25 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
 		goto out;
 
 	if (vcpu->requests) {
-		if (test_and_clear_bit(KVM_REQ_MIGRATE_TIMER, &vcpu->requests))
+		if (kvm_check_request(KVM_REQ_MIGRATE_TIMER, vcpu))
 			__kvm_migrate_timers(vcpu);
-		if (test_and_clear_bit(KVM_REQ_KVMCLOCK_UPDATE, &vcpu->requests))
+		if (kvm_check_request(KVM_REQ_KVMCLOCK_UPDATE, vcpu))
 			kvm_write_guest_time(vcpu);
-		if (test_and_clear_bit(KVM_REQ_MMU_SYNC, &vcpu->requests))
+		if (kvm_check_request(KVM_REQ_MMU_SYNC, vcpu))
 			kvm_mmu_sync_roots(vcpu);
-		if (test_and_clear_bit(KVM_REQ_TLB_FLUSH, &vcpu->requests))
+		if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu))
 			kvm_x86_ops->tlb_flush(vcpu);
-		if (test_and_clear_bit(KVM_REQ_REPORT_TPR_ACCESS,
-				       &vcpu->requests)) {
+		if (kvm_check_request(KVM_REQ_REPORT_TPR_ACCESS, vcpu)) {
 			vcpu->run->exit_reason = KVM_EXIT_TPR_ACCESS;
 			r = 0;
 			goto out;
 		}
-		if (test_and_clear_bit(KVM_REQ_TRIPLE_FAULT, &vcpu->requests)) {
+		if (kvm_check_request(KVM_REQ_TRIPLE_FAULT, vcpu)) {
 			vcpu->run->exit_reason = KVM_EXIT_SHUTDOWN;
 			r = 0;
 			goto out;
 		}
-		if (test_and_clear_bit(KVM_REQ_DEACTIVATE_FPU, &vcpu->requests)) {
+		if (kvm_check_request(KVM_REQ_DEACTIVATE_FPU, vcpu)) {
 			vcpu->fpu_active = 0;
 			kvm_x86_ops->fpu_deactivate(vcpu);
 		}
@@ -4773,7 +4772,7 @@ static int __vcpu_run(struct kvm_vcpu *vcpu)
 			srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx);
 			kvm_vcpu_block(vcpu);
 			vcpu->srcu_idx = srcu_read_lock(&kvm->srcu);
-			if (test_and_clear_bit(KVM_REQ_UNHALT, &vcpu->requests))
+			if (kvm_check_request(KVM_REQ_UNHALT, vcpu))
 			{
 				switch(vcpu->arch.mp_state) {
 				case KVM_MP_STATE_HALTED:
@@ -5255,7 +5254,7 @@ void kvm_put_guest_fpu(struct kvm_vcpu *vcpu)
 	vcpu->guest_fpu_loaded = 0;
 	fpu_save_init(&vcpu->arch.guest_fpu);
 	++vcpu->stat.fpu_reload;
-	set_bit(KVM_REQ_DEACTIVATE_FPU, &vcpu->requests);
+	kvm_make_request(KVM_REQ_DEACTIVATE_FPU, vcpu);
 	trace_kvm_fpu(0);
 }
 
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index 240e460777bc..c8a9d628898e 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -624,5 +624,20 @@ static inline long kvm_vm_ioctl_assigned_device(struct kvm *kvm, unsigned ioctl,
 
 #endif
 
+static inline void kvm_make_request(int req, struct kvm_vcpu *vcpu)
+{
+	set_bit(req, &vcpu->requests);
+}
+
+static inline bool kvm_make_check_request(int req, struct kvm_vcpu *vcpu)
+{
+	return test_and_set_bit(req, &vcpu->requests);
+}
+
+static inline bool kvm_check_request(int req, struct kvm_vcpu *vcpu)
+{
+	return test_and_clear_bit(req, &vcpu->requests);
+}
+
 #endif
 
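Note: the three helpers added to include/linux/kvm_host.h above are thin wrappers around the existing atomic bitops on vcpu->requests. They pair up as the producer side (kvm_make_request, or kvm_make_check_request when the caller wants to skip work if the request was already pending) and the consumer side (kvm_check_request), as used in vcpu_enter_guest() and make_all_cpus_request() in the hunks above and below. The following is a minimal user-space sketch of that protocol, not kernel code: the mock_vcpu struct, the non-atomic bit operations, and the request numbers are illustrative stand-ins for struct kvm_vcpu and the kernel's atomic bitops.

/* build with: cc -o req_sketch req_sketch.c */
#include <stdbool.h>
#include <stdio.h>

#define REQ_TLB_FLUSH     0	/* illustrative request numbers */
#define REQ_TRIPLE_FAULT  1

struct mock_vcpu {
	unsigned long requests;	/* stands in for vcpu->requests */
};

/* set_bit() stand-in: mark the request pending (kvm_make_request) */
static void make_request(int req, struct mock_vcpu *vcpu)
{
	vcpu->requests |= 1UL << req;
}

/* test_and_set_bit() stand-in: set the bit, report whether it was
 * already pending (kvm_make_check_request) */
static bool make_check_request(int req, struct mock_vcpu *vcpu)
{
	bool was_set = vcpu->requests & (1UL << req);
	vcpu->requests |= 1UL << req;
	return was_set;
}

/* test_and_clear_bit() stand-in: consume the request exactly once
 * (kvm_check_request) */
static bool check_request(int req, struct mock_vcpu *vcpu)
{
	bool was_set = vcpu->requests & (1UL << req);
	vcpu->requests &= ~(1UL << req);
	return was_set;
}

int main(void)
{
	struct mock_vcpu vcpu = { .requests = 0 };

	make_request(REQ_TLB_FLUSH, &vcpu);		/* producer */
	if (make_check_request(REQ_TLB_FLUSH, &vcpu))	/* duplicate request */
		printf("TLB flush already pending, skip the IPI\n");

	/* consumer, as in the vcpu entry path */
	if (check_request(REQ_TLB_FLUSH, &vcpu))
		printf("flushing TLB\n");
	if (!check_request(REQ_TRIPLE_FAULT, &vcpu))
		printf("no triple fault requested\n");
	return 0;
}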
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 65417e3d8462..5bd2f34ba576 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -145,7 +145,7 @@ static bool make_all_cpus_request(struct kvm *kvm, unsigned int req)
 	raw_spin_lock(&kvm->requests_lock);
 	me = smp_processor_id();
 	kvm_for_each_vcpu(i, vcpu, kvm) {
-		if (test_and_set_bit(req, &vcpu->requests))
+		if (kvm_make_check_request(req, vcpu))
 			continue;
 		cpu = vcpu->cpu;
 		if (cpus != NULL && cpu != -1 && cpu != me)
@@ -1212,7 +1212,7 @@ void kvm_vcpu_block(struct kvm_vcpu *vcpu)
 		prepare_to_wait(&vcpu->wq, &wait, TASK_INTERRUPTIBLE);
 
 		if (kvm_arch_vcpu_runnable(vcpu)) {
-			set_bit(KVM_REQ_UNHALT, &vcpu->requests);
+			kvm_make_request(KVM_REQ_UNHALT, vcpu);
 			break;
 		}
 		if (kvm_cpu_has_pending_timer(vcpu))