author	Avi Kivity <avi@redhat.com>	2009-12-29 11:07:30 -0500
committer	Marcelo Tosatti <mtosatti@redhat.com>	2010-03-01 10:35:50 -0500
commit	4d4ec0874583b127caac1d0f84033c8971b2fd2a (patch)
tree	2ed5d1edff6a5253561fff0593e89d1c49518b1b /arch/x86
parent	a1f83a74feaa9718a5c61587256ea6cc1b993d16 (diff)
KVM: Replace read accesses of vcpu->arch.cr0 by an accessor

Since we'd like to allow the guest to own a few bits of cr0 at times, we
need to know when we access those bits.

Signed-off-by: Avi Kivity <avi@redhat.com>
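The helpers this patch introduces are deliberately trivial for now; the point is that every read goes through one choke point. A minimal sketch of the pattern as added to kvm_cache_regs.h, where the caller at the end is hypothetical usage, not a hunk from this patch:

	static inline ulong kvm_read_cr0_bits(struct kvm_vcpu *vcpu, ulong mask)
	{
		/* For now the cache is always current, so just mask it. */
		return vcpu->arch.cr0 & mask;
	}

	static inline ulong kvm_read_cr0(struct kvm_vcpu *vcpu)
	{
		/* A full read is a read of all bits. */
		return kvm_read_cr0_bits(vcpu, ~0UL);
	}

	/* Hypothetical caller: test CR0.PE through the accessor. */
	if (!kvm_read_cr0_bits(vcpu, X86_CR0_PE))
		return emulate_real_mode(vcpu);	/* assumed helper */

Once some cr0 bits become guest-owned, only kvm_read_cr0_bits() has to learn to refresh stale bits from hardware; the call sites converted below never change again.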
Diffstat (limited to 'arch/x86')
-rw-r--r--	arch/x86/kvm/emulate.c		6
-rw-r--r--	arch/x86/kvm/kvm_cache_regs.h	10
-rw-r--r--	arch/x86/kvm/mmu.c		2
-rw-r--r--	arch/x86/kvm/mmu.h		2
-rw-r--r--	arch/x86/kvm/svm.c		9
-rw-r--r--	arch/x86/kvm/vmx.c		16
-rw-r--r--	arch/x86/kvm/x86.c		20
7 files changed, 38 insertions(+), 27 deletions(-)
diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
index 7e8faea4651e..0f89e320bc96 100644
--- a/arch/x86/kvm/emulate.c
+++ b/arch/x86/kvm/emulate.c
@@ -1515,7 +1515,7 @@ emulate_syscall(struct x86_emulate_ctxt *ctxt)
 
 	/* syscall is not available in real mode */
 	if (c->lock_prefix || ctxt->mode == X86EMUL_MODE_REAL
-	    || !(ctxt->vcpu->arch.cr0 & X86_CR0_PE))
+	    || !kvm_read_cr0_bits(ctxt->vcpu, X86_CR0_PE))
 		return -1;
 
 	setup_syscalls_segments(ctxt, &cs, &ss);
@@ -1569,7 +1569,7 @@ emulate_sysenter(struct x86_emulate_ctxt *ctxt)
 
 	/* inject #GP if in real mode or paging is disabled */
 	if (ctxt->mode == X86EMUL_MODE_REAL ||
-	    !(ctxt->vcpu->arch.cr0 & X86_CR0_PE)) {
+	    !kvm_read_cr0_bits(ctxt->vcpu, X86_CR0_PE)) {
 		kvm_inject_gp(ctxt->vcpu, 0);
 		return -1;
 	}
@@ -1635,7 +1635,7 @@ emulate_sysexit(struct x86_emulate_ctxt *ctxt)
 
 	/* inject #GP if in real mode or paging is disabled */
 	if (ctxt->mode == X86EMUL_MODE_REAL
-	    || !(ctxt->vcpu->arch.cr0 & X86_CR0_PE)) {
+	    || !kvm_read_cr0_bits(ctxt->vcpu, X86_CR0_PE)) {
 		kvm_inject_gp(ctxt->vcpu, 0);
 		return -1;
 	}
diff --git a/arch/x86/kvm/kvm_cache_regs.h b/arch/x86/kvm/kvm_cache_regs.h
index 35acc36e1782..f46859751b30 100644
--- a/arch/x86/kvm/kvm_cache_regs.h
+++ b/arch/x86/kvm/kvm_cache_regs.h
@@ -38,6 +38,16 @@ static inline u64 kvm_pdptr_read(struct kvm_vcpu *vcpu, int index)
 	return vcpu->arch.pdptrs[index];
 }
 
+static inline ulong kvm_read_cr0_bits(struct kvm_vcpu *vcpu, ulong mask)
+{
+	return vcpu->arch.cr0 & mask;
+}
+
+static inline ulong kvm_read_cr0(struct kvm_vcpu *vcpu)
+{
+	return kvm_read_cr0_bits(vcpu, ~0UL);
+}
+
 static inline ulong kvm_read_cr4_bits(struct kvm_vcpu *vcpu, ulong mask)
 {
 	if (mask & vcpu->arch.cr4_guest_owned_bits)
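Note the asymmetry with the cr4 helper in the surrounding context, which already consults cr4_guest_owned_bits before trusting the cached value. A sketch of how the cr0 reader could grow the same check once guest-owned cr0 bits exist; the cr0_guest_owned_bits field and the decache hook are assumptions modeled on the cr4 path, not part of this patch:

	static inline ulong kvm_read_cr0_bits(struct kvm_vcpu *vcpu, ulong mask)
	{
		/* Assumed future field, mirroring cr4_guest_owned_bits. */
		if (mask & vcpu->arch.cr0_guest_owned_bits)
			/* Assumed hook: pull guest-owned bits from VMCB/VMCS. */
			kvm_x86_ops->decache_cr0_guest_bits(vcpu);
		return vcpu->arch.cr0 & mask;
	}

The call sites converted in this patch would then need no further changes.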
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 4f5508c35100..276bf7497c36 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -226,7 +226,7 @@ EXPORT_SYMBOL_GPL(kvm_mmu_set_mask_ptes);
 
 static int is_write_protection(struct kvm_vcpu *vcpu)
 {
-	return vcpu->arch.cr0 & X86_CR0_WP;
+	return kvm_read_cr0_bits(vcpu, X86_CR0_WP);
 }
 
 static int is_cpuid_PSE36(void)
diff --git a/arch/x86/kvm/mmu.h b/arch/x86/kvm/mmu.h
index ff583423968d..599159f728b9 100644
--- a/arch/x86/kvm/mmu.h
+++ b/arch/x86/kvm/mmu.h
@@ -79,7 +79,7 @@ static inline int is_pse(struct kvm_vcpu *vcpu)
 
 static inline int is_paging(struct kvm_vcpu *vcpu)
 {
-	return vcpu->arch.cr0 & X86_CR0_PG;
+	return kvm_read_cr0_bits(vcpu, X86_CR0_PG);
 }
 
 static inline int is_present_gpte(unsigned long pte)
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index cf64fc026e3e..d3246ce70ae8 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -980,7 +980,7 @@ static void svm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
 	if (npt_enabled)
 		goto set;
 
-	if ((vcpu->arch.cr0 & X86_CR0_TS) && !(cr0 & X86_CR0_TS)) {
+	if (kvm_read_cr0_bits(vcpu, X86_CR0_TS) && !(cr0 & X86_CR0_TS)) {
 		svm->vmcb->control.intercept_exceptions &= ~(1 << NM_VECTOR);
 		vcpu->fpu_active = 1;
 	}
@@ -1244,7 +1244,7 @@ static int ud_interception(struct vcpu_svm *svm)
 static int nm_interception(struct vcpu_svm *svm)
 {
 	svm->vmcb->control.intercept_exceptions &= ~(1 << NM_VECTOR);
-	if (!(svm->vcpu.arch.cr0 & X86_CR0_TS))
+	if (!kvm_read_cr0_bits(&svm->vcpu, X86_CR0_TS))
 		svm->vmcb->save.cr0 &= ~X86_CR0_TS;
 	svm->vcpu.fpu_active = 1;
 
@@ -1743,7 +1743,7 @@ static bool nested_svm_vmrun(struct vcpu_svm *svm)
 	hsave->save.gdtr = vmcb->save.gdtr;
 	hsave->save.idtr = vmcb->save.idtr;
 	hsave->save.efer = svm->vcpu.arch.shadow_efer;
-	hsave->save.cr0 = svm->vcpu.arch.cr0;
+	hsave->save.cr0 = kvm_read_cr0(&svm->vcpu);
 	hsave->save.cr4 = svm->vcpu.arch.cr4;
 	hsave->save.rflags = vmcb->save.rflags;
 	hsave->save.rip = svm->next_rip;
@@ -2387,7 +2387,8 @@ static int handle_exit(struct kvm_vcpu *vcpu)
 
 	if (npt_enabled) {
 		int mmu_reload = 0;
-		if ((vcpu->arch.cr0 ^ svm->vmcb->save.cr0) & X86_CR0_PG) {
+		if ((kvm_read_cr0_bits(vcpu, X86_CR0_PG) ^ svm->vmcb->save.cr0)
+		    & X86_CR0_PG) {
 			svm_set_cr0(vcpu, svm->vmcb->save.cr0);
 			mmu_reload = 1;
 		}
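The handle_exit() rewrite above looks asymmetric but preserves behavior: XOR distributes over AND, so ((a & m) ^ b) & m == (a ^ b) & m, and masking the cached cr0 down to PG before the XOR cannot change the tested bit; the accessor form merely declares which bit the caller needs. A self-contained check of the identity with stand-in values (the constants are illustrative, not taken from KVM code):

	#include <stdio.h>

	int main(void)
	{
		unsigned long cr0 = 0x80000011UL;	/* PG | ET | PE */
		unsigned long vmcb_cr0 = 0x00000011UL;	/* same, PG clear */
		unsigned long pg = 0x80000000UL;	/* X86_CR0_PG */

		/* Old form: XOR the full registers, then mask. */
		unsigned long old_form = (cr0 ^ vmcb_cr0) & pg;
		/* New form: mask cr0 first, as the accessor call does. */
		unsigned long new_form = ((cr0 & pg) ^ vmcb_cr0) & pg;

		printf("%#lx %#lx\n", old_form, new_form);	/* both 0x80000000 */
		return 0;
	}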
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 7c7b2eeea5d0..4c7177c489ac 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -799,7 +799,7 @@ static void vmx_fpu_activate(struct kvm_vcpu *vcpu)
 		return;
 	vcpu->fpu_active = 1;
 	vmcs_clear_bits(GUEST_CR0, X86_CR0_TS);
-	if (vcpu->arch.cr0 & X86_CR0_TS)
+	if (kvm_read_cr0_bits(vcpu, X86_CR0_TS))
 		vmcs_set_bits(GUEST_CR0, X86_CR0_TS);
 	update_exception_bitmap(vcpu);
 }
@@ -1785,7 +1785,7 @@ static void vmx_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
 
 	vmx_flush_tlb(vcpu);
 	vmcs_writel(GUEST_CR3, guest_cr3);
-	if (vcpu->arch.cr0 & X86_CR0_PE)
+	if (kvm_read_cr0_bits(vcpu, X86_CR0_PE))
 		vmx_fpu_deactivate(vcpu);
 }
 
@@ -1840,7 +1840,7 @@ static void vmx_get_segment(struct kvm_vcpu *vcpu,
 
 static int vmx_get_cpl(struct kvm_vcpu *vcpu)
 {
-	if (!(vcpu->arch.cr0 & X86_CR0_PE)) /* if real mode */
+	if (!kvm_read_cr0_bits(vcpu, X86_CR0_PE)) /* if real mode */
 		return 0;
 
 	if (vmx_get_rflags(vcpu) & X86_EFLAGS_VM) /* if virtual 8086 */
@@ -2095,7 +2095,7 @@ static bool cs_ss_rpl_check(struct kvm_vcpu *vcpu)
 static bool guest_state_valid(struct kvm_vcpu *vcpu)
 {
 	/* real mode guest state checks */
-	if (!(vcpu->arch.cr0 & X86_CR0_PE)) {
+	if (!kvm_read_cr0_bits(vcpu, X86_CR0_PE)) {
 		if (!rmode_segment_valid(vcpu, VCPU_SREG_CS))
 			return false;
 		if (!rmode_segment_valid(vcpu, VCPU_SREG_SS))
@@ -2580,7 +2580,7 @@ static int vmx_vcpu_reset(struct kvm_vcpu *vcpu)
 	vmcs_write16(VIRTUAL_PROCESSOR_ID, vmx->vpid);
 
 	vmx->vcpu.arch.cr0 = X86_CR0_NW | X86_CR0_CD | X86_CR0_ET;
-	vmx_set_cr0(&vmx->vcpu, vmx->vcpu.arch.cr0); /* enter rmode */
+	vmx_set_cr0(&vmx->vcpu, kvm_read_cr0(vcpu)); /* enter rmode */
 	vmx_set_cr4(&vmx->vcpu, 0);
 	vmx_set_efer(&vmx->vcpu, 0);
 	vmx_fpu_activate(&vmx->vcpu);
@@ -2996,8 +2996,8 @@ static int handle_cr(struct kvm_vcpu *vcpu)
 	case 2: /* clts */
 		vmx_fpu_deactivate(vcpu);
 		vcpu->arch.cr0 &= ~X86_CR0_TS;
-		vmcs_writel(CR0_READ_SHADOW, vcpu->arch.cr0);
-		trace_kvm_cr_write(0, vcpu->arch.cr0);
+		vmcs_writel(CR0_READ_SHADOW, kvm_read_cr0(vcpu));
+		trace_kvm_cr_write(0, kvm_read_cr0(vcpu));
 		vmx_fpu_activate(vcpu);
 		skip_emulated_instruction(vcpu);
 		return 1;
@@ -3018,7 +3018,7 @@ static int handle_cr(struct kvm_vcpu *vcpu)
 		break;
 	case 3: /* lmsw */
 		val = (exit_qualification >> LMSW_SOURCE_DATA_SHIFT) & 0x0f;
-		trace_kvm_cr_write(0, (vcpu->arch.cr0 & ~0xful) | val);
+		trace_kvm_cr_write(0, (kvm_read_cr0(vcpu) & ~0xful) | val);
 		kvm_lmsw(vcpu, val);
 
 		skip_emulated_instruction(vcpu);
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index c990424d86d0..748b15d8e46d 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -430,7 +430,7 @@ void kvm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
 {
 	if (cr0 & CR0_RESERVED_BITS) {
 		printk(KERN_DEBUG "set_cr0: 0x%lx #GP, reserved bits 0x%lx\n",
-		       cr0, vcpu->arch.cr0);
+		       cr0, kvm_read_cr0(vcpu));
 		kvm_inject_gp(vcpu, 0);
 		return;
 	}
@@ -488,7 +488,7 @@ EXPORT_SYMBOL_GPL(kvm_set_cr0);
 
 void kvm_lmsw(struct kvm_vcpu *vcpu, unsigned long msw)
 {
-	kvm_set_cr0(vcpu, (vcpu->arch.cr0 & ~0x0ful) | (msw & 0x0f));
+	kvm_set_cr0(vcpu, kvm_read_cr0_bits(vcpu, ~0x0ful) | (msw & 0x0f));
 }
 EXPORT_SYMBOL_GPL(kvm_lmsw);
 
@@ -3095,7 +3095,7 @@ int emulate_invlpg(struct kvm_vcpu *vcpu, gva_t address)
 
 int emulate_clts(struct kvm_vcpu *vcpu)
 {
-	kvm_x86_ops->set_cr0(vcpu, vcpu->arch.cr0 & ~X86_CR0_TS);
+	kvm_x86_ops->set_cr0(vcpu, kvm_read_cr0_bits(vcpu, ~X86_CR0_TS));
 	return X86EMUL_CONTINUE;
 }
 
@@ -3714,7 +3714,7 @@ unsigned long realmode_get_cr(struct kvm_vcpu *vcpu, int cr)
 
 	switch (cr) {
 	case 0:
-		value = vcpu->arch.cr0;
+		value = kvm_read_cr0(vcpu);
 		break;
 	case 2:
 		value = vcpu->arch.cr2;
@@ -3741,7 +3741,7 @@ void realmode_set_cr(struct kvm_vcpu *vcpu, int cr, unsigned long val,
 {
 	switch (cr) {
 	case 0:
-		kvm_set_cr0(vcpu, mk_cr_64(vcpu->arch.cr0, val));
+		kvm_set_cr0(vcpu, mk_cr_64(kvm_read_cr0(vcpu), val));
 		*rflags = kvm_get_rflags(vcpu);
 		break;
 	case 2:
@@ -4335,7 +4335,7 @@ int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
 	sregs->gdt.limit = dt.limit;
 	sregs->gdt.base = dt.base;
 
-	sregs->cr0 = vcpu->arch.cr0;
+	sregs->cr0 = kvm_read_cr0(vcpu);
 	sregs->cr2 = vcpu->arch.cr2;
 	sregs->cr3 = vcpu->arch.cr3;
 	sregs->cr4 = kvm_read_cr4(vcpu);
@@ -4521,7 +4521,7 @@ int kvm_load_segment_descriptor(struct kvm_vcpu *vcpu, u16 selector,
 {
 	struct kvm_segment kvm_seg;
 
-	if (is_vm86_segment(vcpu, seg) || !(vcpu->arch.cr0 & X86_CR0_PE))
+	if (is_vm86_segment(vcpu, seg) || !(kvm_read_cr0_bits(vcpu, X86_CR0_PE)))
 		return kvm_load_realmode_segment(vcpu, selector, seg);
 	if (load_segment_descriptor_to_kvm_desct(vcpu, selector, &kvm_seg))
 		return 1;
@@ -4799,7 +4799,7 @@ int kvm_task_switch(struct kvm_vcpu *vcpu, u16 tss_selector, int reason)
 				 &nseg_desc);
 	}
 
-	kvm_x86_ops->set_cr0(vcpu, vcpu->arch.cr0 | X86_CR0_TS);
+	kvm_x86_ops->set_cr0(vcpu, kvm_read_cr0(vcpu) | X86_CR0_TS);
 	seg_desct_to_kvm_desct(&nseg_desc, tss_selector, &tr_seg);
 	tr_seg.type = 11;
 	kvm_set_segment(vcpu, &tr_seg, VCPU_SREG_TR);
@@ -4834,7 +4834,7 @@ int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
 	kvm_x86_ops->set_efer(vcpu, sregs->efer);
 	kvm_set_apic_base(vcpu, sregs->apic_base);
 
-	mmu_reset_needed |= vcpu->arch.cr0 != sregs->cr0;
+	mmu_reset_needed |= kvm_read_cr0(vcpu) != sregs->cr0;
 	kvm_x86_ops->set_cr0(vcpu, sregs->cr0);
 	vcpu->arch.cr0 = sregs->cr0;
 
@@ -4873,7 +4873,7 @@ int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
 	/* Older userspace won't unhalt the vcpu on reset. */
 	if (kvm_vcpu_is_bsp(vcpu) && kvm_rip_read(vcpu) == 0xfff0 &&
 	    sregs->cs.selector == 0xf000 && sregs->cs.base == 0xffff0000 &&
-	    !(vcpu->arch.cr0 & X86_CR0_PE))
+	    !(kvm_read_cr0_bits(vcpu, X86_CR0_PE)))
 		vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
 
 	vcpu_put(vcpu);