Diffstat (limited to 'arch/x86')
-rw-r--r--  arch/x86/kvm/i8254.c   2
-rw-r--r--  arch/x86/kvm/lapic.c  16
-rw-r--r--  arch/x86/kvm/x86.c    18
3 files changed, 18 insertions, 18 deletions
diff --git a/arch/x86/kvm/i8254.c b/arch/x86/kvm/i8254.c
index ed1af80432b3..361e31611276 100644
--- a/arch/x86/kvm/i8254.c
+++ b/arch/x86/kvm/i8254.c
@@ -202,7 +202,7 @@ int __pit_timer_fn(struct kvm_kpit_state *ps)
 	smp_mb__after_atomic_inc();
 	/* FIXME: handle case where the guest is in guest mode */
 	if (vcpu0 && waitqueue_active(&vcpu0->wq)) {
-		vcpu0->arch.mp_state = VCPU_MP_STATE_RUNNABLE;
+		vcpu0->arch.mp_state = KVM_MP_STATE_RUNNABLE;
 		wake_up_interruptible(&vcpu0->wq);
 	}
 
diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
index debf58211bdd..2ccf994dfc16 100644
--- a/arch/x86/kvm/lapic.c
+++ b/arch/x86/kvm/lapic.c
@@ -338,10 +338,10 @@ static int __apic_accept_irq(struct kvm_lapic *apic, int delivery_mode,
 		} else
 			apic_clear_vector(vector, apic->regs + APIC_TMR);
 
-		if (vcpu->arch.mp_state == VCPU_MP_STATE_RUNNABLE)
+		if (vcpu->arch.mp_state == KVM_MP_STATE_RUNNABLE)
 			kvm_vcpu_kick(vcpu);
-		else if (vcpu->arch.mp_state == VCPU_MP_STATE_HALTED) {
-			vcpu->arch.mp_state = VCPU_MP_STATE_RUNNABLE;
+		else if (vcpu->arch.mp_state == KVM_MP_STATE_HALTED) {
+			vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
 			if (waitqueue_active(&vcpu->wq))
 				wake_up_interruptible(&vcpu->wq);
 		}
@@ -362,11 +362,11 @@ static int __apic_accept_irq(struct kvm_lapic *apic, int delivery_mode,
 
 	case APIC_DM_INIT:
 		if (level) {
-			if (vcpu->arch.mp_state == VCPU_MP_STATE_RUNNABLE)
+			if (vcpu->arch.mp_state == KVM_MP_STATE_RUNNABLE)
 				printk(KERN_DEBUG
 				       "INIT on a runnable vcpu %d\n",
 				       vcpu->vcpu_id);
-			vcpu->arch.mp_state = VCPU_MP_STATE_INIT_RECEIVED;
+			vcpu->arch.mp_state = KVM_MP_STATE_INIT_RECEIVED;
 			kvm_vcpu_kick(vcpu);
 		} else {
 			printk(KERN_DEBUG
@@ -379,9 +379,9 @@ static int __apic_accept_irq(struct kvm_lapic *apic, int delivery_mode,
 	case APIC_DM_STARTUP:
 		printk(KERN_DEBUG "SIPI to vcpu %d vector 0x%02x\n",
 		       vcpu->vcpu_id, vector);
-		if (vcpu->arch.mp_state == VCPU_MP_STATE_INIT_RECEIVED) {
+		if (vcpu->arch.mp_state == KVM_MP_STATE_INIT_RECEIVED) {
 			vcpu->arch.sipi_vector = vector;
-			vcpu->arch.mp_state = VCPU_MP_STATE_SIPI_RECEIVED;
+			vcpu->arch.mp_state = KVM_MP_STATE_SIPI_RECEIVED;
 			if (waitqueue_active(&vcpu->wq))
 				wake_up_interruptible(&vcpu->wq);
 		}
@@ -940,7 +940,7 @@ static int __apic_timer_fn(struct kvm_lapic *apic)
 
 	atomic_inc(&apic->timer.pending);
 	if (waitqueue_active(q)) {
-		apic->vcpu->arch.mp_state = VCPU_MP_STATE_RUNNABLE;
+		apic->vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
 		wake_up_interruptible(q);
 	}
 	if (apic_lvtt_period(apic)) {
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index f070f0a9adee..b364d192896c 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -2433,11 +2433,11 @@ int kvm_emulate_halt(struct kvm_vcpu *vcpu)
 	++vcpu->stat.halt_exits;
 	KVMTRACE_0D(HLT, vcpu, handler);
 	if (irqchip_in_kernel(vcpu->kvm)) {
-		vcpu->arch.mp_state = VCPU_MP_STATE_HALTED;
+		vcpu->arch.mp_state = KVM_MP_STATE_HALTED;
 		up_read(&vcpu->kvm->slots_lock);
 		kvm_vcpu_block(vcpu);
 		down_read(&vcpu->kvm->slots_lock);
-		if (vcpu->arch.mp_state != VCPU_MP_STATE_RUNNABLE)
+		if (vcpu->arch.mp_state != KVM_MP_STATE_RUNNABLE)
 			return -EINTR;
 		return 1;
 	} else {
@@ -2726,14 +2726,14 @@ static int __vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 {
 	int r;
 
-	if (unlikely(vcpu->arch.mp_state == VCPU_MP_STATE_SIPI_RECEIVED)) {
+	if (unlikely(vcpu->arch.mp_state == KVM_MP_STATE_SIPI_RECEIVED)) {
 		pr_debug("vcpu %d received sipi with vector # %x\n",
 			 vcpu->vcpu_id, vcpu->arch.sipi_vector);
 		kvm_lapic_reset(vcpu);
 		r = kvm_x86_ops->vcpu_reset(vcpu);
 		if (r)
 			return r;
-		vcpu->arch.mp_state = VCPU_MP_STATE_RUNNABLE;
+		vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
 	}
 
 	down_read(&vcpu->kvm->slots_lock);
@@ -2891,7 +2891,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 
 	vcpu_load(vcpu);
 
-	if (unlikely(vcpu->arch.mp_state == VCPU_MP_STATE_UNINITIALIZED)) {
+	if (unlikely(vcpu->arch.mp_state == KVM_MP_STATE_UNINITIALIZED)) {
 		kvm_vcpu_block(vcpu);
 		vcpu_put(vcpu);
 		return -EAGAIN;
@@ -3794,9 +3794,9 @@ int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
 
 	vcpu->arch.mmu.root_hpa = INVALID_PAGE;
 	if (!irqchip_in_kernel(kvm) || vcpu->vcpu_id == 0)
-		vcpu->arch.mp_state = VCPU_MP_STATE_RUNNABLE;
+		vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
 	else
-		vcpu->arch.mp_state = VCPU_MP_STATE_UNINITIALIZED;
+		vcpu->arch.mp_state = KVM_MP_STATE_UNINITIALIZED;
 
 	page = alloc_page(GFP_KERNEL | __GFP_ZERO);
 	if (!page) {
@@ -3936,8 +3936,8 @@ int kvm_arch_set_memory_region(struct kvm *kvm,
 
 int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
 {
-	return vcpu->arch.mp_state == VCPU_MP_STATE_RUNNABLE
-	       || vcpu->arch.mp_state == VCPU_MP_STATE_SIPI_RECEIVED;
+	return vcpu->arch.mp_state == KVM_MP_STATE_RUNNABLE
+	       || vcpu->arch.mp_state == KVM_MP_STATE_SIPI_RECEIVED;
 }
 
 static void vcpu_kick_intr(void *info)
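
Note: every hunk above is the same mechanical rename, s/VCPU_MP_STATE_/KVM_MP_STATE_/; none of the control flow changes. For reference, a minimal sketch of the vcpu multiprocessing-state constants the renamed code refers to; the numeric values are an assumption based on later include/uapi/linux/kvm.h headers, not something this diff defines:

/* Sketch of the KVM vcpu mp_state constants after the rename.
 * Values assumed from later include/uapi/linux/kvm.h, not from this diff. */
#define KVM_MP_STATE_RUNNABLE          0  /* vcpu can execute guest code */
#define KVM_MP_STATE_UNINITIALIZED     1  /* AP waiting for INIT */
#define KVM_MP_STATE_INIT_RECEIVED     2  /* INIT seen, waiting for SIPI */
#define KVM_MP_STATE_HALTED            3  /* vcpu executed HLT */
#define KVM_MP_STATE_SIPI_RECEIVED     4  /* SIPI seen, vcpu will reset and run */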