author	Avi Kivity <avi@qumranet.com>	2008-04-13 10:54:35 -0400
committer	Avi Kivity <avi@qumranet.com>	2008-04-27 05:04:13 -0400
commit	a45352908b88d383bc40e1e4d1a6cc5bbcefc895 (patch)
tree	be0f519e05f8df4409b595928338b2939ed64f6a /arch
parent	3d80840d96127401ba6aeadd813c3a15b84e70fe (diff)
KVM: Rename VCPU_MP_STATE_* to KVM_MP_STATE_*
We wish to export it to userspace, so move it into the kvm namespace.

Signed-off-by: Avi Kivity <avi@qumranet.com>
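For context, the mp_state values renamed here are KVM_MP_STATE_RUNNABLE, KVM_MP_STATE_UNINITIALIZED, KVM_MP_STATE_INIT_RECEIVED, KVM_MP_STATE_HALTED and KVM_MP_STATE_SIPI_RECEIVED. A minimal sketch of how such constants could later be exposed in a userspace-visible header follows; the header location, numeric values and struct shown are illustrative assumptions, not part of this patch.

	/* Illustrative sketch only: possible userspace-visible definitions,
	 * e.g. in include/linux/kvm.h. Numeric values here are assumed. */
	#define KVM_MP_STATE_RUNNABLE		0
	#define KVM_MP_STATE_UNINITIALIZED	1
	#define KVM_MP_STATE_INIT_RECEIVED	2
	#define KVM_MP_STATE_HALTED		3
	#define KVM_MP_STATE_SIPI_RECEIVED	4

	/* A follow-up interface could then carry the state to/from userspace: */
	struct kvm_mp_state {
		__u32 mp_state;
	};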
Diffstat (limited to 'arch')
-rw-r--r--	arch/ia64/kvm/kvm-ia64.c	26
-rw-r--r--	arch/x86/kvm/i8254.c	2
-rw-r--r--	arch/x86/kvm/lapic.c	16
-rw-r--r--	arch/x86/kvm/x86.c	18
4 files changed, 31 insertions(+), 31 deletions(-)
diff --git a/arch/ia64/kvm/kvm-ia64.c b/arch/ia64/kvm/kvm-ia64.c
index ca1cfb124d4f..f7589dba75ab 100644
--- a/arch/ia64/kvm/kvm-ia64.c
+++ b/arch/ia64/kvm/kvm-ia64.c
@@ -340,7 +340,7 @@ static int handle_ipi(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 		regs->cr_iip = vcpu->kvm->arch.rdv_sal_data.boot_ip;
 		regs->r1 = vcpu->kvm->arch.rdv_sal_data.boot_gp;
 
-		target_vcpu->arch.mp_state = VCPU_MP_STATE_RUNNABLE;
+		target_vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
 		if (waitqueue_active(&target_vcpu->wq))
 			wake_up_interruptible(&target_vcpu->wq);
 	} else {
@@ -386,7 +386,7 @@ static int handle_global_purge(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 
 	for (i = 0; i < KVM_MAX_VCPUS; i++) {
 		if (!kvm->vcpus[i] || kvm->vcpus[i]->arch.mp_state ==
-				VCPU_MP_STATE_UNINITIALIZED ||
+				KVM_MP_STATE_UNINITIALIZED ||
 			vcpu == kvm->vcpus[i])
 			continue;
 
@@ -437,12 +437,12 @@ int kvm_emulate_halt(struct kvm_vcpu *vcpu)
 	hrtimer_start(p_ht, kt, HRTIMER_MODE_ABS);
 
 	if (irqchip_in_kernel(vcpu->kvm)) {
-		vcpu->arch.mp_state = VCPU_MP_STATE_HALTED;
+		vcpu->arch.mp_state = KVM_MP_STATE_HALTED;
 		kvm_vcpu_block(vcpu);
 		hrtimer_cancel(p_ht);
 		vcpu->arch.ht_active = 0;
 
-		if (vcpu->arch.mp_state != VCPU_MP_STATE_RUNNABLE)
+		if (vcpu->arch.mp_state != KVM_MP_STATE_RUNNABLE)
 			return -EINTR;
 		return 1;
 	} else {
@@ -668,7 +668,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 
 	vcpu_load(vcpu);
 
-	if (unlikely(vcpu->arch.mp_state == VCPU_MP_STATE_UNINITIALIZED)) {
+	if (unlikely(vcpu->arch.mp_state == KVM_MP_STATE_UNINITIALIZED)) {
 		kvm_vcpu_block(vcpu);
 		vcpu_put(vcpu);
 		return -EAGAIN;
@@ -1127,12 +1127,12 @@ static enum hrtimer_restart hlt_timer_fn(struct hrtimer *data)
 	wait_queue_head_t *q;
 
 	vcpu = container_of(data, struct kvm_vcpu, arch.hlt_timer);
-	if (vcpu->arch.mp_state != VCPU_MP_STATE_HALTED)
+	if (vcpu->arch.mp_state != KVM_MP_STATE_HALTED)
 		goto out;
 
 	q = &vcpu->wq;
 	if (waitqueue_active(q)) {
-		vcpu->arch.mp_state = VCPU_MP_STATE_RUNNABLE;
+		vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
 		wake_up_interruptible(q);
 	}
 out:
@@ -1159,7 +1159,7 @@ int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
 		return PTR_ERR(vmm_vcpu);
 
 	if (vcpu->vcpu_id == 0) {
-		vcpu->arch.mp_state = VCPU_MP_STATE_RUNNABLE;
+		vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
 
 		/*Set entry address for first run.*/
 		regs->cr_iip = PALE_RESET_ENTRY;
@@ -1172,7 +1172,7 @@ int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
 			v->arch.last_itc = 0;
 		}
 	} else
-		vcpu->arch.mp_state = VCPU_MP_STATE_UNINITIALIZED;
+		vcpu->arch.mp_state = KVM_MP_STATE_UNINITIALIZED;
 
 	r = -ENOMEM;
 	vcpu->arch.apic = kzalloc(sizeof(struct kvm_lapic), GFP_KERNEL);
@@ -1704,10 +1704,10 @@ int kvm_apic_set_irq(struct kvm_vcpu *vcpu, u8 vec, u8 trig)
 
 	if (!test_and_set_bit(vec, &vpd->irr[0])) {
 		vcpu->arch.irq_new_pending = 1;
-		if (vcpu->arch.mp_state == VCPU_MP_STATE_RUNNABLE)
+		if (vcpu->arch.mp_state == KVM_MP_STATE_RUNNABLE)
 			kvm_vcpu_kick(vcpu);
-		else if (vcpu->arch.mp_state == VCPU_MP_STATE_HALTED) {
-			vcpu->arch.mp_state = VCPU_MP_STATE_RUNNABLE;
+		else if (vcpu->arch.mp_state == KVM_MP_STATE_HALTED) {
+			vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
 			if (waitqueue_active(&vcpu->wq))
 				wake_up_interruptible(&vcpu->wq);
 		}
@@ -1790,5 +1790,5 @@ gfn_t unalias_gfn(struct kvm *kvm, gfn_t gfn)
 
 int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
 {
-	return vcpu->arch.mp_state == VCPU_MP_STATE_RUNNABLE;
+	return vcpu->arch.mp_state == KVM_MP_STATE_RUNNABLE;
 }
diff --git a/arch/x86/kvm/i8254.c b/arch/x86/kvm/i8254.c
index ed1af80432b3..361e31611276 100644
--- a/arch/x86/kvm/i8254.c
+++ b/arch/x86/kvm/i8254.c
@@ -202,7 +202,7 @@ int __pit_timer_fn(struct kvm_kpit_state *ps)
 	smp_mb__after_atomic_inc();
 	/* FIXME: handle case where the guest is in guest mode */
 	if (vcpu0 && waitqueue_active(&vcpu0->wq)) {
-		vcpu0->arch.mp_state = VCPU_MP_STATE_RUNNABLE;
+		vcpu0->arch.mp_state = KVM_MP_STATE_RUNNABLE;
 		wake_up_interruptible(&vcpu0->wq);
 	}
 
diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
index debf58211bdd..2ccf994dfc16 100644
--- a/arch/x86/kvm/lapic.c
+++ b/arch/x86/kvm/lapic.c
@@ -338,10 +338,10 @@ static int __apic_accept_irq(struct kvm_lapic *apic, int delivery_mode,
 		} else
 			apic_clear_vector(vector, apic->regs + APIC_TMR);
 
-		if (vcpu->arch.mp_state == VCPU_MP_STATE_RUNNABLE)
+		if (vcpu->arch.mp_state == KVM_MP_STATE_RUNNABLE)
 			kvm_vcpu_kick(vcpu);
-		else if (vcpu->arch.mp_state == VCPU_MP_STATE_HALTED) {
-			vcpu->arch.mp_state = VCPU_MP_STATE_RUNNABLE;
+		else if (vcpu->arch.mp_state == KVM_MP_STATE_HALTED) {
+			vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
 			if (waitqueue_active(&vcpu->wq))
 				wake_up_interruptible(&vcpu->wq);
 		}
@@ -362,11 +362,11 @@ static int __apic_accept_irq(struct kvm_lapic *apic, int delivery_mode,
 
 	case APIC_DM_INIT:
 		if (level) {
-			if (vcpu->arch.mp_state == VCPU_MP_STATE_RUNNABLE)
+			if (vcpu->arch.mp_state == KVM_MP_STATE_RUNNABLE)
 				printk(KERN_DEBUG
 				       "INIT on a runnable vcpu %d\n",
 				       vcpu->vcpu_id);
-			vcpu->arch.mp_state = VCPU_MP_STATE_INIT_RECEIVED;
+			vcpu->arch.mp_state = KVM_MP_STATE_INIT_RECEIVED;
 			kvm_vcpu_kick(vcpu);
 		} else {
 			printk(KERN_DEBUG
@@ -379,9 +379,9 @@ static int __apic_accept_irq(struct kvm_lapic *apic, int delivery_mode,
 	case APIC_DM_STARTUP:
 		printk(KERN_DEBUG "SIPI to vcpu %d vector 0x%02x\n",
 		       vcpu->vcpu_id, vector);
-		if (vcpu->arch.mp_state == VCPU_MP_STATE_INIT_RECEIVED) {
+		if (vcpu->arch.mp_state == KVM_MP_STATE_INIT_RECEIVED) {
 			vcpu->arch.sipi_vector = vector;
-			vcpu->arch.mp_state = VCPU_MP_STATE_SIPI_RECEIVED;
+			vcpu->arch.mp_state = KVM_MP_STATE_SIPI_RECEIVED;
 			if (waitqueue_active(&vcpu->wq))
 				wake_up_interruptible(&vcpu->wq);
 		}
@@ -940,7 +940,7 @@ static int __apic_timer_fn(struct kvm_lapic *apic)
 
 	atomic_inc(&apic->timer.pending);
 	if (waitqueue_active(q)) {
-		apic->vcpu->arch.mp_state = VCPU_MP_STATE_RUNNABLE;
+		apic->vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
 		wake_up_interruptible(q);
 	}
 	if (apic_lvtt_period(apic)) {
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index f070f0a9adee..b364d192896c 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -2433,11 +2433,11 @@ int kvm_emulate_halt(struct kvm_vcpu *vcpu)
 	++vcpu->stat.halt_exits;
 	KVMTRACE_0D(HLT, vcpu, handler);
 	if (irqchip_in_kernel(vcpu->kvm)) {
-		vcpu->arch.mp_state = VCPU_MP_STATE_HALTED;
+		vcpu->arch.mp_state = KVM_MP_STATE_HALTED;
 		up_read(&vcpu->kvm->slots_lock);
 		kvm_vcpu_block(vcpu);
 		down_read(&vcpu->kvm->slots_lock);
-		if (vcpu->arch.mp_state != VCPU_MP_STATE_RUNNABLE)
+		if (vcpu->arch.mp_state != KVM_MP_STATE_RUNNABLE)
 			return -EINTR;
 		return 1;
 	} else {
@@ -2726,14 +2726,14 @@ static int __vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 {
 	int r;
 
-	if (unlikely(vcpu->arch.mp_state == VCPU_MP_STATE_SIPI_RECEIVED)) {
+	if (unlikely(vcpu->arch.mp_state == KVM_MP_STATE_SIPI_RECEIVED)) {
 		pr_debug("vcpu %d received sipi with vector # %x\n",
 			 vcpu->vcpu_id, vcpu->arch.sipi_vector);
 		kvm_lapic_reset(vcpu);
 		r = kvm_x86_ops->vcpu_reset(vcpu);
 		if (r)
 			return r;
-		vcpu->arch.mp_state = VCPU_MP_STATE_RUNNABLE;
+		vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
 	}
 
 	down_read(&vcpu->kvm->slots_lock);
@@ -2891,7 +2891,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 
 	vcpu_load(vcpu);
 
-	if (unlikely(vcpu->arch.mp_state == VCPU_MP_STATE_UNINITIALIZED)) {
+	if (unlikely(vcpu->arch.mp_state == KVM_MP_STATE_UNINITIALIZED)) {
 		kvm_vcpu_block(vcpu);
 		vcpu_put(vcpu);
 		return -EAGAIN;
@@ -3794,9 +3794,9 @@ int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
 
 	vcpu->arch.mmu.root_hpa = INVALID_PAGE;
 	if (!irqchip_in_kernel(kvm) || vcpu->vcpu_id == 0)
-		vcpu->arch.mp_state = VCPU_MP_STATE_RUNNABLE;
+		vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
 	else
-		vcpu->arch.mp_state = VCPU_MP_STATE_UNINITIALIZED;
+		vcpu->arch.mp_state = KVM_MP_STATE_UNINITIALIZED;
 
 	page = alloc_page(GFP_KERNEL | __GFP_ZERO);
 	if (!page) {
@@ -3936,8 +3936,8 @@ int kvm_arch_set_memory_region(struct kvm *kvm,
 
 int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
 {
-	return vcpu->arch.mp_state == VCPU_MP_STATE_RUNNABLE
-		|| vcpu->arch.mp_state == VCPU_MP_STATE_SIPI_RECEIVED;
+	return vcpu->arch.mp_state == KVM_MP_STATE_RUNNABLE
+		|| vcpu->arch.mp_state == KVM_MP_STATE_SIPI_RECEIVED;
 }
 
 static void vcpu_kick_intr(void *info)