author		Avi Kivity <avi@redhat.com>	2010-11-18 06:12:52 -0500
committer	Avi Kivity <avi@redhat.com>	2011-01-12 04:29:37 -0500
commit		104f226bfd0a607ca0e804ae4907555374f72cd9 (patch)
tree		e3a792eef60ee304fc797397db52519a709a9f41 /arch/x86/kvm/vmx.c
parent		30b31ab6823988263c72a215fb875edec6161250 (diff)
KVM: VMX: Fold __vmx_vcpu_run() into vmx_vcpu_run()
cea15c2 ("KVM: Move KVM context switch into own function") split vmx_vcpu_run()
to prevent multiple copies of the context switch from being generated (causing
problems due to a label). This patch folds them back together again and adds
the __noclone attribute to prevent the label from being duplicated.

Signed-off-by: Avi Kivity <avi@redhat.com>
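As a side note, the label problem the message refers to can be sketched outside KVM. The snippet below is illustrative only and not kernel code; run_guest() and the fake_vmentry_return label are made-up names. The point it demonstrates: an asm block that defines an assembler label may be emitted only once per object file, so the function containing it must never be duplicated. Marking a helper noinline stops inlining but not gcc's interprocedural clones (e.g. constprop copies); __noclone stops cloning, and a function reached only through a pointer, as vmx_vcpu_run() is via KVM's ops table, is not a candidate for inlining in the first place.

/* Illustration only -- names are hypothetical, not KVM's. */
#include <stdio.h>

#define __noclone __attribute__((__noclone__))

/*
 * Stand-in for the folded vmx_vcpu_run(): the asm block defines a
 * non-local assembler label, so this body may exist exactly once in
 * the object file.  If gcc cloned the function, the assembler would
 * reject the duplicate label definition.
 */
static void __noclone run_guest(void)
{
	asm volatile(
		"fake_vmentry_return:\n\t"	/* must stay unique */
		"nop\n\t"
		::: "memory");
}

int main(void)
{
	/* Call through a pointer, mirroring how KVM reaches vcpu_run
	 * via its ops table rather than by a direct call. */
	void (*run)(void) = run_guest;

	run();
	puts("stub vmentry ran exactly once");
	return 0;
}

Building this with gcc and -O2 works as shown; removing __noclone is harmless here only because nothing provokes a clone, which is exactly why the kernel spells the guarantee out with the attribute.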
Diffstat (limited to 'arch/x86/kvm/vmx.c')
-rw-r--r--	arch/x86/kvm/vmx.c	63
1 file changed, 25 insertions(+), 38 deletions(-)
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 92612fb162db..6bf807adbeba 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -3904,17 +3904,33 @@ static void vmx_cancel_injection(struct kvm_vcpu *vcpu)
 #define Q "l"
 #endif
 
-/*
- * We put this into a separate noinline function to prevent the compiler
- * from duplicating the code. This is needed because this code
- * uses non local labels that cannot be duplicated.
- * Do not put any flow control into this function.
- * Better would be to put this whole monstrosity into a .S file.
- */
-static void noinline do_vmx_vcpu_run(struct kvm_vcpu *vcpu)
+static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
 {
 	struct vcpu_vmx *vmx = to_vmx(vcpu);
-	asm volatile(
+
+	/* Record the guest's net vcpu time for enforced NMI injections. */
+	if (unlikely(!cpu_has_virtual_nmis() && vmx->soft_vnmi_blocked))
+		vmx->entry_time = ktime_get();
+
+	/* Don't enter VMX if guest state is invalid, let the exit handler
+	   start emulation until we arrive back to a valid state */
+	if (vmx->emulation_required && emulate_invalid_guest_state)
+		return;
+
+	if (test_bit(VCPU_REGS_RSP, (unsigned long *)&vcpu->arch.regs_dirty))
+		vmcs_writel(GUEST_RSP, vcpu->arch.regs[VCPU_REGS_RSP]);
+	if (test_bit(VCPU_REGS_RIP, (unsigned long *)&vcpu->arch.regs_dirty))
+		vmcs_writel(GUEST_RIP, vcpu->arch.regs[VCPU_REGS_RIP]);
+
+	/* When single-stepping over STI and MOV SS, we must clear the
+	 * corresponding interruptibility bits in the guest state. Otherwise
+	 * vmentry fails as it then expects bit 14 (BS) in pending debug
+	 * exceptions being set, but that's not correct for the guest debugging
+	 * case. */
+	if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP)
+		vmx_set_interrupt_shadow(vcpu, 0);
+
+	asm(
 		/* Store host registers */
 		"push %%"R"dx; push %%"R"bp;"
 		"push %%"R"cx \n\t"
@@ -4009,35 +4025,6 @@ static void noinline do_vmx_vcpu_run(struct kvm_vcpu *vcpu)
4009 , "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15" 4025 , "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15"
4010#endif 4026#endif
4011 ); 4027 );
4012}
4013
4014static void vmx_vcpu_run(struct kvm_vcpu *vcpu)
4015{
4016 struct vcpu_vmx *vmx = to_vmx(vcpu);
4017
4018 /* Record the guest's net vcpu time for enforced NMI injections. */
4019 if (unlikely(!cpu_has_virtual_nmis() && vmx->soft_vnmi_blocked))
4020 vmx->entry_time = ktime_get();
4021
4022 /* Don't enter VMX if guest state is invalid, let the exit handler
4023 start emulation until we arrive back to a valid state */
4024 if (vmx->emulation_required && emulate_invalid_guest_state)
4025 return;
4026
4027 if (test_bit(VCPU_REGS_RSP, (unsigned long *)&vcpu->arch.regs_dirty))
4028 vmcs_writel(GUEST_RSP, vcpu->arch.regs[VCPU_REGS_RSP]);
4029 if (test_bit(VCPU_REGS_RIP, (unsigned long *)&vcpu->arch.regs_dirty))
4030 vmcs_writel(GUEST_RIP, vcpu->arch.regs[VCPU_REGS_RIP]);
4031
4032 /* When single-stepping over STI and MOV SS, we must clear the
4033 * corresponding interruptibility bits in the guest state. Otherwise
4034 * vmentry fails as it then expects bit 14 (BS) in pending debug
4035 * exceptions being set, but that's not correct for the guest debugging
4036 * case. */
4037 if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP)
4038 vmx_set_interrupt_shadow(vcpu, 0);
4039
4040 do_vmx_vcpu_run(vcpu);
4041 4028
4042 vcpu->arch.regs_avail = ~((1 << VCPU_REGS_RIP) | (1 << VCPU_REGS_RSP) 4029 vcpu->arch.regs_avail = ~((1 << VCPU_REGS_RIP) | (1 << VCPU_REGS_RSP)
4043 | (1 << VCPU_EXREG_PDPTR)); 4030 | (1 << VCPU_EXREG_PDPTR));