author     Kyle Huey <me@kylehuey.com>            2016-11-29 15:40:39 -0500
committer  Paolo Bonzini <pbonzini@redhat.com>    2016-12-08 09:31:04 -0500
commit     eb2775621701e6ee3ea2a474437d04e93ccdcb2f
tree       8fc49dc116f75a2e045c9b39dbdf425e59a8ca7d
parent     09ca3f20492fb0b03060b56a47afa211476740a8
KVM: VMX: Move skip_emulated_instruction out of nested_vmx_check_vmcs12
We can't return both the pass/fail boolean for the vmcs12 check and the upcoming
continue/exit-to-userspace boolean for skip_emulated_instruction out of
nested_vmx_check_vmcs12, so move the skip_emulated_instruction call out of it
and into the callers instead.

Additionally, VMLAUNCH/VMRESUME only trigger single-step exceptions when they
advance the IP to the following instruction, not when they a) succeed,
b) fail MSR validation, or c) throw an exception. Add a separate call to
skip_emulated_instruction that will later not be converted to the variant
that checks the single-step flag.
Signed-off-by: Kyle Huey <khuey@kylehuey.com>
Signed-off-by: Radim Krčmář <rkrcmar@redhat.com>
 arch/x86/kvm/vmx.c | 53
 1 file changed, 33 insertions(+), 20 deletions(-)
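To make the resulting control flow easier to follow before reading the hunks, here is a minimal sketch of the caller shape this patch produces, using handle_vmread() as the example (handle_vmwrite() ends up with the same structure; the handler body is elided, so this is illustrative rather than compilable):

static int handle_vmread(struct kvm_vcpu *vcpu)
{
	/*
	 * A failed permission check raises an exception (case (c) in the
	 * message above), which does not advance the IP, so no skip is
	 * wanted on this path.
	 */
	if (!nested_vmx_check_permission(vcpu))
		return 1;

	/*
	 * The vmcs12 check no longer skips internally; the caller now
	 * advances the IP itself before continuing.
	 */
	if (!nested_vmx_check_vmcs12(vcpu)) {
		skip_emulated_instruction(vcpu);
		return 1;
	}

	/* ... decode the instruction and read the requested field ... */
	return 1;
}

nested_vmx_run() follows the same idea but funnels every VMfail path through a single "out:" label that performs the skip, and only skips on the success path once a vmcs02 has actually been allocated.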
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index f2f9cf595c07..f4f6304f9583 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -7324,7 +7324,6 @@ static int nested_vmx_check_vmcs12(struct kvm_vcpu *vcpu)
 	struct vcpu_vmx *vmx = to_vmx(vcpu);
 	if (vmx->nested.current_vmptr == -1ull) {
 		nested_vmx_failInvalid(vcpu);
-		skip_emulated_instruction(vcpu);
 		return 0;
 	}
 	return 1;
@@ -7338,9 +7337,13 @@ static int handle_vmread(struct kvm_vcpu *vcpu)
 	u32 vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO);
 	gva_t gva = 0;
 
-	if (!nested_vmx_check_permission(vcpu) ||
-	    !nested_vmx_check_vmcs12(vcpu))
+	if (!nested_vmx_check_permission(vcpu))
+		return 1;
+
+	if (!nested_vmx_check_vmcs12(vcpu)) {
+		skip_emulated_instruction(vcpu);
 		return 1;
+	}
 
 	/* Decode instruction info and find the field to read */
 	field = kvm_register_readl(vcpu, (((vmx_instruction_info) >> 28) & 0xf));
@@ -7388,10 +7391,14 @@ static int handle_vmwrite(struct kvm_vcpu *vcpu)
 	u64 field_value = 0;
 	struct x86_exception e;
 
-	if (!nested_vmx_check_permission(vcpu) ||
-	    !nested_vmx_check_vmcs12(vcpu))
+	if (!nested_vmx_check_permission(vcpu))
 		return 1;
 
+	if (!nested_vmx_check_vmcs12(vcpu)) {
+		skip_emulated_instruction(vcpu);
+		return 1;
+	}
+
 	if (vmx_instruction_info & (1u << 10))
 		field_value = kvm_register_readl(vcpu,
 			(((vmx_instruction_info) >> 3) & 0xf));
@@ -10046,11 +10053,12 @@ static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch)
 	bool ia32e;
 	u32 msr_entry_idx;
 
-	if (!nested_vmx_check_permission(vcpu) ||
-	    !nested_vmx_check_vmcs12(vcpu))
+	if (!nested_vmx_check_permission(vcpu))
 		return 1;
 
-	skip_emulated_instruction(vcpu);
+	if (!nested_vmx_check_vmcs12(vcpu))
+		goto out;
+
 	vmcs12 = get_vmcs12(vcpu);
 
 	if (enable_shadow_vmcs)
@@ -10070,33 +10078,33 @@ static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch)
 		nested_vmx_failValid(vcpu,
 			launch ? VMXERR_VMLAUNCH_NONCLEAR_VMCS
			       : VMXERR_VMRESUME_NONLAUNCHED_VMCS);
-		return 1;
+		goto out;
 	}
 
 	if (vmcs12->guest_activity_state != GUEST_ACTIVITY_ACTIVE &&
 	    vmcs12->guest_activity_state != GUEST_ACTIVITY_HLT) {
 		nested_vmx_failValid(vcpu, VMXERR_ENTRY_INVALID_CONTROL_FIELD);
-		return 1;
+		goto out;
 	}
 
 	if (!nested_get_vmcs12_pages(vcpu, vmcs12)) {
 		nested_vmx_failValid(vcpu, VMXERR_ENTRY_INVALID_CONTROL_FIELD);
-		return 1;
+		goto out;
 	}
 
 	if (nested_vmx_check_msr_bitmap_controls(vcpu, vmcs12)) {
 		nested_vmx_failValid(vcpu, VMXERR_ENTRY_INVALID_CONTROL_FIELD);
-		return 1;
+		goto out;
 	}
 
 	if (nested_vmx_check_apicv_controls(vcpu, vmcs12)) {
 		nested_vmx_failValid(vcpu, VMXERR_ENTRY_INVALID_CONTROL_FIELD);
-		return 1;
+		goto out;
 	}
 
 	if (nested_vmx_check_msr_switch_controls(vcpu, vmcs12)) {
 		nested_vmx_failValid(vcpu, VMXERR_ENTRY_INVALID_CONTROL_FIELD);
-		return 1;
+		goto out;
 	}
 
 	if (!vmx_control_verify(vmcs12->cpu_based_vm_exec_control,
@@ -10116,26 +10124,26 @@ static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch)
 				vmx->nested.nested_vmx_entry_ctls_high))
 	{
 		nested_vmx_failValid(vcpu, VMXERR_ENTRY_INVALID_CONTROL_FIELD);
-		return 1;
+		goto out;
 	}
 
 	if (((vmcs12->host_cr0 & VMXON_CR0_ALWAYSON) != VMXON_CR0_ALWAYSON) ||
 	    ((vmcs12->host_cr4 & VMXON_CR4_ALWAYSON) != VMXON_CR4_ALWAYSON)) {
 		nested_vmx_failValid(vcpu,
 			VMXERR_ENTRY_INVALID_HOST_STATE_FIELD);
-		return 1;
+		goto out;
 	}
 
 	if (!nested_cr0_valid(vcpu, vmcs12->guest_cr0) ||
 	    ((vmcs12->guest_cr4 & VMXON_CR4_ALWAYSON) != VMXON_CR4_ALWAYSON)) {
 		nested_vmx_entry_failure(vcpu, vmcs12,
 			EXIT_REASON_INVALID_STATE, ENTRY_FAIL_DEFAULT);
-		return 1;
+		goto out;
 	}
 	if (vmcs12->vmcs_link_pointer != -1ull) {
 		nested_vmx_entry_failure(vcpu, vmcs12,
 			EXIT_REASON_INVALID_STATE, ENTRY_FAIL_VMCS_LINK_PTR);
-		return 1;
+		goto out;
 	}
 
 	/*
@@ -10155,7 +10163,7 @@ static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch)
 		    ia32e != !!(vmcs12->guest_ia32_efer & EFER_LME))) {
 			nested_vmx_entry_failure(vcpu, vmcs12,
 				EXIT_REASON_INVALID_STATE, ENTRY_FAIL_DEFAULT);
-			return 1;
+			goto out;
 		}
 	}
 
@@ -10173,7 +10181,7 @@ static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch)
 		    ia32e != !!(vmcs12->host_ia32_efer & EFER_LME)) {
 			nested_vmx_entry_failure(vcpu, vmcs12,
 				EXIT_REASON_INVALID_STATE, ENTRY_FAIL_DEFAULT);
-			return 1;
+			goto out;
 		}
 	}
 
@@ -10186,6 +10194,7 @@ static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch)
 	if (!vmcs02)
 		return -ENOMEM;
 
+	skip_emulated_instruction(vcpu);
 	enter_guest_mode(vcpu);
 
 	if (!(vmcs12->vm_entry_controls & VM_ENTRY_LOAD_DEBUG_CONTROLS))
@@ -10227,6 +10236,10 @@ static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch)
 	 * the success flag) when L2 exits (see nested_vmx_vmexit()).
 	 */
 	return 1;
+
+out:
+	skip_emulated_instruction(vcpu);
+	return 1;
 }
 
 /*
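The "variant that checks the single-step flag" referenced in the commit message is the kvm_skip_emulated_instruction() helper added by follow-up patches in this series. As a rough, illustrative sketch of the idea only (not the upstream implementation; kvm_singlestep_trap() below is a hypothetical stand-in for the real trap-injection path):

static int sketch_skip_emulated_instruction(struct kvm_vcpu *vcpu)
{
	unsigned long rflags = kvm_get_rflags(vcpu);

	/* Advance the IP past the emulated instruction, as before. */
	skip_emulated_instruction(vcpu);

	/*
	 * If the guest was single-stepping (EFLAGS.TF), advancing the IP is
	 * exactly the case that must raise the trap; deliver it and let the
	 * caller decide whether to exit to userspace (return 0) or keep
	 * running the guest (return 1).
	 */
	if (rflags & X86_EFLAGS_TF)
		return kvm_singlestep_trap(vcpu);	/* hypothetical helper */

	return 1;
}

Read together with the message, the skip added right before enter_guest_mode() is the one meant to stay as plain skip_emulated_instruction(): on a successful VMLAUNCH/VMRESUME the vCPU enters L2 rather than falling through to the next L1 instruction, so there is no single-step trap to deliver on that path.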