author	Gleb Natapov <gleb@redhat.com>	2013-01-21 08:36:49 -0500
committer	Marcelo Tosatti <mtosatti@redhat.com>	2013-01-23 21:40:31 -0500
commit	141687869fb904e912568c6b94a6b1fa2114f6ed (patch)
tree	6c2c62b50e8b499db77ecb5f1b1d1570f06d49a8
parent	378a8b099fc207ddcb91b19a8c1457667e0af398 (diff)
KVM: VMX: set vmx->emulation_required only when needed.
If emulate_invalid_guest_state=false, vmx->emulation_required is never actually used, but it ends up always set to true, since handle_invalid_guest_state(), the only place it is reset back to false, is never called. Besides being not very clean, this makes the vmexit and vmentry paths check emulate_invalid_guest_state needlessly. The patch fixes that by keeping emulation_required coherent with the emulate_invalid_guest_state setting.

Signed-off-by: Gleb Natapov <gleb@redhat.com>
Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
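For context, the invariant the patch establishes can be shown in a minimal, standalone sketch (plain userspace C, not kernel code): emulation_required only becomes true when emulate_invalid_guest_state is enabled and the guest state is actually invalid, so the vmentry/vmexit paths can test a single cached flag. The vcpu_stub type, the guest_state_valid() stub, and the main() driver below are hypothetical stand-ins for the real KVM structures and call sites.

/*
 * Standalone illustration of the predicate added by this patch.
 * Not kernel code; vcpu_stub is a hypothetical stand-in for struct vcpu_vmx.
 */
#include <stdbool.h>
#include <stdio.h>

static bool emulate_invalid_guest_state = false;	/* module parameter in real KVM */

struct vcpu_stub {
	bool state_valid;		/* stand-in for guest_state_valid(vcpu) */
	bool emulation_required;	/* mirrors vmx->emulation_required */
};

static bool guest_state_valid(struct vcpu_stub *vcpu)
{
	return vcpu->state_valid;
}

/* Same shape as the helper the patch introduces. */
static bool emulation_required(struct vcpu_stub *vcpu)
{
	return emulate_invalid_guest_state && !guest_state_valid(vcpu);
}

int main(void)
{
	struct vcpu_stub vcpu = { .state_valid = false };

	/* e.g. on a CR0 or segment register write, recompute the cached flag */
	vcpu.emulation_required = emulation_required(&vcpu);

	/* vmexit/vmentry fast path: one flag, no extra module-parameter check */
	if (vcpu.emulation_required)
		printf("would emulate invalid guest state\n");
	else
		printf("enter/exit guest normally\n");
	return 0;
}

With emulate_invalid_guest_state=false the flag can never become true, which is exactly why the "&& emulate_invalid_guest_state" checks removed below are no longer needed.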
-rw-r--r--	arch/x86/kvm/vmx.c	19
1 file changed, 12 insertions(+), 7 deletions(-)
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 9bc68c7bca7d..02eeba86328d 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -2759,6 +2759,11 @@ static __exit void hardware_unsetup(void)
 	free_kvm_area();
 }
 
+static bool emulation_required(struct kvm_vcpu *vcpu)
+{
+	return emulate_invalid_guest_state && !guest_state_valid(vcpu);
+}
+
 static void fix_pmode_seg(struct kvm_vcpu *vcpu, int seg,
 			struct kvm_segment *save)
 {
@@ -2794,7 +2799,6 @@ static void enter_pmode(struct kvm_vcpu *vcpu)
 	vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_SS], VCPU_SREG_SS);
 	vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_CS], VCPU_SREG_CS);
 
-	vmx->emulation_required = 1;
 	vmx->rmode.vm86_active = 0;
 
 	vmx_segment_cache_clear(vmx);
@@ -2885,7 +2889,6 @@ static void enter_rmode(struct kvm_vcpu *vcpu)
 	vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_SS], VCPU_SREG_SS);
 	vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_CS], VCPU_SREG_CS);
 
-	vmx->emulation_required = 1;
 	vmx->rmode.vm86_active = 1;
 
 	/*
@@ -3111,6 +3114,9 @@ static void vmx_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
 	vmcs_writel(CR0_READ_SHADOW, cr0);
 	vmcs_writel(GUEST_CR0, hw_cr0);
 	vcpu->arch.cr0 = cr0;
+
+	/* depends on vcpu->arch.cr0 to be set to a new value */
+	vmx->emulation_required = emulation_required(vcpu);
 }
 
 static u64 construct_eptp(unsigned long root_hpa)
@@ -3298,8 +3304,7 @@ static void vmx_set_segment(struct kvm_vcpu *vcpu,
 	vmcs_write32(sf->ar_bytes, vmx_segment_access_rights(var));
 
 out:
-	if (!vmx->emulation_required)
-		vmx->emulation_required = !guest_state_valid(vcpu);
+	vmx->emulation_required |= emulation_required(vcpu);
 }
 
 static void vmx_get_cs_db_l_bits(struct kvm_vcpu *vcpu, int *db, int *l)
@@ -5027,7 +5032,7 @@ static int handle_invalid_guest_state(struct kvm_vcpu *vcpu)
 		schedule();
 	}
 
-	vmx->emulation_required = !guest_state_valid(vcpu);
+	vmx->emulation_required = emulation_required(vcpu);
 out:
 	return ret;
 }
@@ -5970,7 +5975,7 @@ static int vmx_handle_exit(struct kvm_vcpu *vcpu)
 	u32 vectoring_info = vmx->idt_vectoring_info;
 
 	/* If guest state is invalid, start emulating */
-	if (vmx->emulation_required && emulate_invalid_guest_state)
+	if (vmx->emulation_required)
 		return handle_invalid_guest_state(vcpu);
 
 	/*
@@ -6253,7 +6258,7 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
 
 	/* Don't enter VMX if guest state is invalid, let the exit handler
 	   start emulation until we arrive back to a valid state */
-	if (vmx->emulation_required && emulate_invalid_guest_state)
+	if (vmx->emulation_required)
 		return;
 
 	if (test_bit(VCPU_REGS_RSP, (unsigned long *)&vcpu->arch.regs_dirty))