-rw-r--r--  arch/ia64/kvm/kvm-ia64.c     | 26
-rw-r--r--  arch/x86/kvm/i8254.c         |  2
-rw-r--r--  arch/x86/kvm/lapic.c         | 16
-rw-r--r--  arch/x86/kvm/x86.c           | 18
-rw-r--r--  include/asm-ia64/kvm_host.h  |  8
-rw-r--r--  include/asm-x86/kvm_host.h   | 10
6 files changed, 40 insertions, 40 deletions
diff --git a/arch/ia64/kvm/kvm-ia64.c b/arch/ia64/kvm/kvm-ia64.c
index ca1cfb124d4f..f7589dba75ab 100644
--- a/arch/ia64/kvm/kvm-ia64.c
+++ b/arch/ia64/kvm/kvm-ia64.c
@@ -340,7 +340,7 @@ static int handle_ipi(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
                 regs->cr_iip = vcpu->kvm->arch.rdv_sal_data.boot_ip;
                 regs->r1 = vcpu->kvm->arch.rdv_sal_data.boot_gp;

-                target_vcpu->arch.mp_state = VCPU_MP_STATE_RUNNABLE;
+                target_vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
                 if (waitqueue_active(&target_vcpu->wq))
                         wake_up_interruptible(&target_vcpu->wq);
         } else {
@@ -386,7 +386,7 @@ static int handle_global_purge(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)

         for (i = 0; i < KVM_MAX_VCPUS; i++) {
                 if (!kvm->vcpus[i] || kvm->vcpus[i]->arch.mp_state ==
-                                VCPU_MP_STATE_UNINITIALIZED ||
+                                KVM_MP_STATE_UNINITIALIZED ||
                                 vcpu == kvm->vcpus[i])
                         continue;

@@ -437,12 +437,12 @@ int kvm_emulate_halt(struct kvm_vcpu *vcpu)
         hrtimer_start(p_ht, kt, HRTIMER_MODE_ABS);

         if (irqchip_in_kernel(vcpu->kvm)) {
-                vcpu->arch.mp_state = VCPU_MP_STATE_HALTED;
+                vcpu->arch.mp_state = KVM_MP_STATE_HALTED;
                 kvm_vcpu_block(vcpu);
                 hrtimer_cancel(p_ht);
                 vcpu->arch.ht_active = 0;

-                if (vcpu->arch.mp_state != VCPU_MP_STATE_RUNNABLE)
+                if (vcpu->arch.mp_state != KVM_MP_STATE_RUNNABLE)
                         return -EINTR;
                 return 1;
         } else {
@@ -668,7 +668,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)

         vcpu_load(vcpu);

-        if (unlikely(vcpu->arch.mp_state == VCPU_MP_STATE_UNINITIALIZED)) {
+        if (unlikely(vcpu->arch.mp_state == KVM_MP_STATE_UNINITIALIZED)) {
                 kvm_vcpu_block(vcpu);
                 vcpu_put(vcpu);
                 return -EAGAIN;
@@ -1127,12 +1127,12 @@ static enum hrtimer_restart hlt_timer_fn(struct hrtimer *data)
         wait_queue_head_t *q;

         vcpu = container_of(data, struct kvm_vcpu, arch.hlt_timer);
-        if (vcpu->arch.mp_state != VCPU_MP_STATE_HALTED)
+        if (vcpu->arch.mp_state != KVM_MP_STATE_HALTED)
                 goto out;

         q = &vcpu->wq;
         if (waitqueue_active(q)) {
-                vcpu->arch.mp_state = VCPU_MP_STATE_RUNNABLE;
+                vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
                 wake_up_interruptible(q);
         }
 out:
@@ -1159,7 +1159,7 @@ int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
                 return PTR_ERR(vmm_vcpu);

         if (vcpu->vcpu_id == 0) {
-                vcpu->arch.mp_state = VCPU_MP_STATE_RUNNABLE;
+                vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;

                 /*Set entry address for first run.*/
                 regs->cr_iip = PALE_RESET_ENTRY;
@@ -1172,7 +1172,7 @@ int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
                         v->arch.last_itc = 0;
                 }
         } else
-                vcpu->arch.mp_state = VCPU_MP_STATE_UNINITIALIZED;
+                vcpu->arch.mp_state = KVM_MP_STATE_UNINITIALIZED;

         r = -ENOMEM;
         vcpu->arch.apic = kzalloc(sizeof(struct kvm_lapic), GFP_KERNEL);
@@ -1704,10 +1704,10 @@ int kvm_apic_set_irq(struct kvm_vcpu *vcpu, u8 vec, u8 trig)

         if (!test_and_set_bit(vec, &vpd->irr[0])) {
                 vcpu->arch.irq_new_pending = 1;
-                if (vcpu->arch.mp_state == VCPU_MP_STATE_RUNNABLE)
+                if (vcpu->arch.mp_state == KVM_MP_STATE_RUNNABLE)
                         kvm_vcpu_kick(vcpu);
-                else if (vcpu->arch.mp_state == VCPU_MP_STATE_HALTED) {
-                        vcpu->arch.mp_state = VCPU_MP_STATE_RUNNABLE;
+                else if (vcpu->arch.mp_state == KVM_MP_STATE_HALTED) {
+                        vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
                         if (waitqueue_active(&vcpu->wq))
                                 wake_up_interruptible(&vcpu->wq);
                 }
@@ -1790,5 +1790,5 @@ gfn_t unalias_gfn(struct kvm *kvm, gfn_t gfn)

 int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
 {
-        return vcpu->arch.mp_state == VCPU_MP_STATE_RUNNABLE;
+        return vcpu->arch.mp_state == KVM_MP_STATE_RUNNABLE;
 }
diff --git a/arch/x86/kvm/i8254.c b/arch/x86/kvm/i8254.c
index ed1af80432b3..361e31611276 100644
--- a/arch/x86/kvm/i8254.c
+++ b/arch/x86/kvm/i8254.c
@@ -202,7 +202,7 @@ int __pit_timer_fn(struct kvm_kpit_state *ps)
         smp_mb__after_atomic_inc();
         /* FIXME: handle case where the guest is in guest mode */
         if (vcpu0 && waitqueue_active(&vcpu0->wq)) {
-                vcpu0->arch.mp_state = VCPU_MP_STATE_RUNNABLE;
+                vcpu0->arch.mp_state = KVM_MP_STATE_RUNNABLE;
                 wake_up_interruptible(&vcpu0->wq);
         }

diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
index debf58211bdd..2ccf994dfc16 100644
--- a/arch/x86/kvm/lapic.c
+++ b/arch/x86/kvm/lapic.c
@@ -338,10 +338,10 @@ static int __apic_accept_irq(struct kvm_lapic *apic, int delivery_mode,
                 } else
                         apic_clear_vector(vector, apic->regs + APIC_TMR);

-                if (vcpu->arch.mp_state == VCPU_MP_STATE_RUNNABLE)
+                if (vcpu->arch.mp_state == KVM_MP_STATE_RUNNABLE)
                         kvm_vcpu_kick(vcpu);
-                else if (vcpu->arch.mp_state == VCPU_MP_STATE_HALTED) {
-                        vcpu->arch.mp_state = VCPU_MP_STATE_RUNNABLE;
+                else if (vcpu->arch.mp_state == KVM_MP_STATE_HALTED) {
+                        vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
                         if (waitqueue_active(&vcpu->wq))
                                 wake_up_interruptible(&vcpu->wq);
                 }
@@ -362,11 +362,11 @@ static int __apic_accept_irq(struct kvm_lapic *apic, int delivery_mode,

         case APIC_DM_INIT:
                 if (level) {
-                        if (vcpu->arch.mp_state == VCPU_MP_STATE_RUNNABLE)
+                        if (vcpu->arch.mp_state == KVM_MP_STATE_RUNNABLE)
                                 printk(KERN_DEBUG
                                        "INIT on a runnable vcpu %d\n",
                                        vcpu->vcpu_id);
-                        vcpu->arch.mp_state = VCPU_MP_STATE_INIT_RECEIVED;
+                        vcpu->arch.mp_state = KVM_MP_STATE_INIT_RECEIVED;
                         kvm_vcpu_kick(vcpu);
                 } else {
                         printk(KERN_DEBUG
@@ -379,9 +379,9 @@ static int __apic_accept_irq(struct kvm_lapic *apic, int delivery_mode,
         case APIC_DM_STARTUP:
                 printk(KERN_DEBUG "SIPI to vcpu %d vector 0x%02x\n",
                        vcpu->vcpu_id, vector);
-                if (vcpu->arch.mp_state == VCPU_MP_STATE_INIT_RECEIVED) {
+                if (vcpu->arch.mp_state == KVM_MP_STATE_INIT_RECEIVED) {
                         vcpu->arch.sipi_vector = vector;
-                        vcpu->arch.mp_state = VCPU_MP_STATE_SIPI_RECEIVED;
+                        vcpu->arch.mp_state = KVM_MP_STATE_SIPI_RECEIVED;
                         if (waitqueue_active(&vcpu->wq))
                                 wake_up_interruptible(&vcpu->wq);
                 }
@@ -940,7 +940,7 @@ static int __apic_timer_fn(struct kvm_lapic *apic)

         atomic_inc(&apic->timer.pending);
         if (waitqueue_active(q)) {
-                apic->vcpu->arch.mp_state = VCPU_MP_STATE_RUNNABLE;
+                apic->vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
                 wake_up_interruptible(q);
         }
         if (apic_lvtt_period(apic)) {
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index f070f0a9adee..b364d192896c 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -2433,11 +2433,11 @@ int kvm_emulate_halt(struct kvm_vcpu *vcpu)
         ++vcpu->stat.halt_exits;
         KVMTRACE_0D(HLT, vcpu, handler);
         if (irqchip_in_kernel(vcpu->kvm)) {
-                vcpu->arch.mp_state = VCPU_MP_STATE_HALTED;
+                vcpu->arch.mp_state = KVM_MP_STATE_HALTED;
                 up_read(&vcpu->kvm->slots_lock);
                 kvm_vcpu_block(vcpu);
                 down_read(&vcpu->kvm->slots_lock);
-                if (vcpu->arch.mp_state != VCPU_MP_STATE_RUNNABLE)
+                if (vcpu->arch.mp_state != KVM_MP_STATE_RUNNABLE)
                         return -EINTR;
                 return 1;
         } else {
@@ -2726,14 +2726,14 @@ static int __vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 {
         int r;

-        if (unlikely(vcpu->arch.mp_state == VCPU_MP_STATE_SIPI_RECEIVED)) {
+        if (unlikely(vcpu->arch.mp_state == KVM_MP_STATE_SIPI_RECEIVED)) {
                 pr_debug("vcpu %d received sipi with vector # %x\n",
                          vcpu->vcpu_id, vcpu->arch.sipi_vector);
                 kvm_lapic_reset(vcpu);
                 r = kvm_x86_ops->vcpu_reset(vcpu);
                 if (r)
                         return r;
-                vcpu->arch.mp_state = VCPU_MP_STATE_RUNNABLE;
+                vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
         }

         down_read(&vcpu->kvm->slots_lock);
@@ -2891,7 +2891,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)

         vcpu_load(vcpu);

-        if (unlikely(vcpu->arch.mp_state == VCPU_MP_STATE_UNINITIALIZED)) {
+        if (unlikely(vcpu->arch.mp_state == KVM_MP_STATE_UNINITIALIZED)) {
                 kvm_vcpu_block(vcpu);
                 vcpu_put(vcpu);
                 return -EAGAIN;
@@ -3794,9 +3794,9 @@ int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)

         vcpu->arch.mmu.root_hpa = INVALID_PAGE;
         if (!irqchip_in_kernel(kvm) || vcpu->vcpu_id == 0)
-                vcpu->arch.mp_state = VCPU_MP_STATE_RUNNABLE;
+                vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
         else
-                vcpu->arch.mp_state = VCPU_MP_STATE_UNINITIALIZED;
+                vcpu->arch.mp_state = KVM_MP_STATE_UNINITIALIZED;

         page = alloc_page(GFP_KERNEL | __GFP_ZERO);
         if (!page) {
@@ -3936,8 +3936,8 @@ int kvm_arch_set_memory_region(struct kvm *kvm,

 int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
 {
-        return vcpu->arch.mp_state == VCPU_MP_STATE_RUNNABLE
-               || vcpu->arch.mp_state == VCPU_MP_STATE_SIPI_RECEIVED;
+        return vcpu->arch.mp_state == KVM_MP_STATE_RUNNABLE
+               || vcpu->arch.mp_state == KVM_MP_STATE_SIPI_RECEIVED;
 }

 static void vcpu_kick_intr(void *info)
diff --git a/include/asm-ia64/kvm_host.h b/include/asm-ia64/kvm_host.h
index d6d6e15c1924..c082c208c1f3 100644
--- a/include/asm-ia64/kvm_host.h
+++ b/include/asm-ia64/kvm_host.h
@@ -318,10 +318,10 @@ struct kvm_vcpu_arch {
         int vmm_tr_slot;
         int vm_tr_slot;

-#define VCPU_MP_STATE_RUNNABLE 0
-#define VCPU_MP_STATE_UNINITIALIZED 1
-#define VCPU_MP_STATE_INIT_RECEIVED 2
-#define VCPU_MP_STATE_HALTED 3
+#define KVM_MP_STATE_RUNNABLE 0
+#define KVM_MP_STATE_UNINITIALIZED 1
+#define KVM_MP_STATE_INIT_RECEIVED 2
+#define KVM_MP_STATE_HALTED 3
         int mp_state;

 #define MAX_PTC_G_NUM 3
diff --git a/include/asm-x86/kvm_host.h b/include/asm-x86/kvm_host.h
index 15169cb71c83..f35a6ad43c0a 100644
--- a/include/asm-x86/kvm_host.h
+++ b/include/asm-x86/kvm_host.h
@@ -227,11 +227,11 @@ struct kvm_vcpu_arch {
         u64 shadow_efer;
         u64 apic_base;
         struct kvm_lapic *apic; /* kernel irqchip context */
-#define VCPU_MP_STATE_RUNNABLE 0
-#define VCPU_MP_STATE_UNINITIALIZED 1
-#define VCPU_MP_STATE_INIT_RECEIVED 2
-#define VCPU_MP_STATE_SIPI_RECEIVED 3
-#define VCPU_MP_STATE_HALTED 4
+#define KVM_MP_STATE_RUNNABLE 0
+#define KVM_MP_STATE_UNINITIALIZED 1
+#define KVM_MP_STATE_INIT_RECEIVED 2
+#define KVM_MP_STATE_SIPI_RECEIVED 3
+#define KVM_MP_STATE_HALTED 4
         int mp_state;
         int sipi_vector;
         u64 ia32_misc_enable_msr;