Diffstat (limited to 'arch/x86/kvm/vmx.c')
 arch/x86/kvm/vmx.c | 54 +++++++++++++++++++++++++++++++++++-------------------
 1 file changed, 35 insertions(+), 19 deletions(-)
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index a13c603bdefb..7207e6cc07c1 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -2874,6 +2874,17 @@ static void setup_msrs(struct vcpu_vmx *vmx)
 		vmx_update_msr_bitmap(&vmx->vcpu);
 }
 
+static u64 vmx_read_l1_tsc_offset(struct kvm_vcpu *vcpu)
+{
+	struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
+
+	if (is_guest_mode(vcpu) &&
+	    (vmcs12->cpu_based_vm_exec_control & CPU_BASED_USE_TSC_OFFSETING))
+		return vcpu->arch.tsc_offset - vmcs12->tsc_offset;
+
+	return vcpu->arch.tsc_offset;
+}
+
 /*
  * reads and returns guest's timestamp counter "register"
  * guest_tsc = (host_tsc * tsc multiplier) >> 48 + tsc_offset
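Note: while L2 is running, vcpu->arch.tsc_offset now holds the combined offset (L1's own offset plus the one L1 programmed for L2), so the new helper recovers L1's offset by subtracting vmcs12->tsc_offset. A standalone sketch of that arithmetic, together with the scaling formula quoted in the comment above; all values are made up, the 48 fractional bits are taken from the comment, and __int128 is the GCC/Clang extension:

    /* Standalone sketch, not kernel code; all values are hypothetical. */
    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
    	uint64_t l01 = 1000, l12 = 250;  /* L0->L1 and L1->L2 offsets */
    	uint64_t cached = l01 + l12;     /* vcpu->arch.tsc_offset while L2 runs */
    	uint64_t host_tsc = 100000;
    	uint64_t ratio = 1ULL << 48;     /* multiplier 1.0 in 16.48 fixed point */

    	/* vmx_read_l1_tsc_offset() while in guest mode: */
    	printf("L1 offset: %llu\n", (unsigned long long)(cached - l12));

    	/* guest_tsc = (host_tsc * tsc multiplier) >> 48 + tsc_offset */
    	uint64_t guest = (uint64_t)(((unsigned __int128)host_tsc * ratio) >> 48)
    			 + cached;
    	printf("guest TSC: %llu\n", (unsigned long long)guest);
    	return 0;
    }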
@@ -11175,11 +11186,8 @@ static int prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12,
 		vmcs_write64(GUEST_IA32_PAT, vmx->vcpu.arch.pat);
 	}
 
-	if (vmcs12->cpu_based_vm_exec_control & CPU_BASED_USE_TSC_OFFSETING)
-		vmcs_write64(TSC_OFFSET,
-			vcpu->arch.tsc_offset + vmcs12->tsc_offset);
-	else
-		vmcs_write64(TSC_OFFSET, vcpu->arch.tsc_offset);
+	vmcs_write64(TSC_OFFSET, vcpu->arch.tsc_offset);
+
 	if (kvm_has_tsc_control)
 		decache_tsc_multiplier(vmx);
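Note: prepare_vmcs02() no longer composes the field itself. By the time it runs, enter_vmx_non_root_mode() (next hunks) has already folded vmcs12->tsc_offset into vcpu->arch.tsc_offset, so the cached value is written unconditionally. The shape of the change, as a sketch with hypothetical helper names:

    #include <stdbool.h>
    #include <stdint.h>

    /* Old scheme: the cache held only L1's offset; L2's share was added here. */
    uint64_t tsc_field_old(uint64_t l1_off, uint64_t l12, bool l2_offsetting)
    {
    	return l2_offsetting ? l1_off + l12 : l1_off;
    }

    /* New scheme: the cache tracks the running guest; the write just mirrors it. */
    uint64_t tsc_field_new(uint64_t cached)
    {
    	return cached;
    }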
@@ -11427,6 +11435,7 @@ static int enter_vmx_non_root_mode(struct kvm_vcpu *vcpu, bool from_vmentry)
 	struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
 	u32 msr_entry_idx;
 	u32 exit_qual;
+	int r;
 
 	enter_guest_mode(vcpu);
 
@@ -11436,26 +11445,21 @@ static int enter_vmx_non_root_mode(struct kvm_vcpu *vcpu, bool from_vmentry)
 	vmx_switch_vmcs(vcpu, &vmx->nested.vmcs02);
 	vmx_segment_cache_clear(vmx);
 
-	if (prepare_vmcs02(vcpu, vmcs12, from_vmentry, &exit_qual)) {
-		leave_guest_mode(vcpu);
-		vmx_switch_vmcs(vcpu, &vmx->vmcs01);
-		nested_vmx_entry_failure(vcpu, vmcs12,
-					 EXIT_REASON_INVALID_STATE, exit_qual);
-		return 1;
-	}
+	if (vmcs12->cpu_based_vm_exec_control & CPU_BASED_USE_TSC_OFFSETING)
+		vcpu->arch.tsc_offset += vmcs12->tsc_offset;
+
+	r = EXIT_REASON_INVALID_STATE;
+	if (prepare_vmcs02(vcpu, vmcs12, from_vmentry, &exit_qual))
+		goto fail;
 
 	nested_get_vmcs12_pages(vcpu, vmcs12);
 
+	r = EXIT_REASON_MSR_LOAD_FAIL;
 	msr_entry_idx = nested_vmx_load_msr(vcpu,
 					    vmcs12->vm_entry_msr_load_addr,
 					    vmcs12->vm_entry_msr_load_count);
-	if (msr_entry_idx) {
-		leave_guest_mode(vcpu);
-		vmx_switch_vmcs(vcpu, &vmx->vmcs01);
-		nested_vmx_entry_failure(vcpu, vmcs12,
-					 EXIT_REASON_MSR_LOAD_FAIL, msr_entry_idx);
-		return 1;
-	}
+	if (msr_entry_idx)
+		goto fail;
 
 	/*
 	 * Note no nested_vmx_succeed or nested_vmx_fail here. At this point
@@ -11464,6 +11468,14 @@ static int enter_vmx_non_root_mode(struct kvm_vcpu *vcpu, bool from_vmentry)
 	 * the success flag) when L2 exits (see nested_vmx_vmexit()).
 	 */
 	return 0;
+
+fail:
+	if (vmcs12->cpu_based_vm_exec_control & CPU_BASED_USE_TSC_OFFSETING)
+		vcpu->arch.tsc_offset -= vmcs12->tsc_offset;
+	leave_guest_mode(vcpu);
+	vmx_switch_vmcs(vcpu, &vmx->vmcs01);
+	nested_vmx_entry_failure(vcpu, vmcs12, r, exit_qual);
+	return 1;
 }
 
 /*
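Note: the entry path and the new fail: label keep the cached offset balanced: L2's share is added exactly once on the way in and subtracted on every way out, whether entry fails here or L2 exits normally in nested_vmx_vmexit() below. A compact, runnable model of that bookkeeping (all names are hypothetical):

    #include <assert.h>
    #include <stdint.h>

    struct vcpu_model { uint64_t tsc_offset; };	/* stands in for vcpu->arch */

    /* Nested entry: fold L2's offset in; undo it if entry fails. */
    int model_enter(struct vcpu_model *v, uint64_t l12, int entry_fails)
    {
    	v->tsc_offset += l12;
    	if (entry_fails) {
    		v->tsc_offset -= l12;	/* the fail: label's adjustment */
    		return 1;
    	}
    	return 0;
    }

    /* Nested exit: remove L2's offset again. */
    void model_exit(struct vcpu_model *v, uint64_t l12)
    {
    	v->tsc_offset -= l12;
    }

    int main(void)
    {
    	struct vcpu_model v = { .tsc_offset = 1000 };

    	/* A failed entry leaves the cached offset untouched... */
    	model_enter(&v, 250, 1);
    	assert(v.tsc_offset == 1000);

    	/* ...and a successful entry/exit pair round-trips it. */
    	model_enter(&v, 250, 0);
    	assert(v.tsc_offset == 1250);
    	model_exit(&v, 250);
    	assert(v.tsc_offset == 1000);
    	return 0;
    }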
@@ -12035,6 +12047,9 @@ static void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 exit_reason,
 
 	leave_guest_mode(vcpu);
 
+	if (vmcs12->cpu_based_vm_exec_control & CPU_BASED_USE_TSC_OFFSETING)
+		vcpu->arch.tsc_offset -= vmcs12->tsc_offset;
+
 	if (likely(!vmx->fail)) {
 		if (exit_reason == -1)
 			sync_vmcs12(vcpu, vmcs12);
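Note: this is the normal-exit counterpart of the adjustment made on nested entry. Once it runs, an intercepted TSC read is again computed with L1's own offset. What each level observes, assuming a 1.0 scaling ratio so the multiplier drops out (hypothetical numbers):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
    	uint64_t host_tsc = 100000, l01 = 1000, l12 = 250;

    	/* While L2 runs: cached offset = l01 + l12. */
    	printf("L2 sees: %llu\n", (unsigned long long)(host_tsc + l01 + l12));

    	/* After nested_vmx_vmexit(): cached offset is back to l01. */
    	printf("L1 sees: %llu\n", (unsigned long long)(host_tsc + l01));
    	return 0;
    }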
@@ -12725,6 +12740,7 @@ static struct kvm_x86_ops vmx_x86_ops __ro_after_init = {
 
 	.has_wbinvd_exit = cpu_has_vmx_wbinvd_exit,
 
+	.read_l1_tsc_offset = vmx_read_l1_tsc_offset,
 	.write_tsc_offset = vmx_write_tsc_offset,
 
 	.set_tdp_cr3 = vmx_set_cr3,
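Note: with .read_l1_tsc_offset wired into kvm_x86_ops, common x86 code can ask for L1's offset without peeking at vmcs12. A plausible generic caller, sketched on the assumption that kvm_read_l1_tsc() in arch/x86/kvm/x86.c is converted to use the hook (kvm_scale_tsc() as found in that file); the conversion itself is outside this vmx.c-only diffstat:

    /* Sketch of a converted common-code caller; not part of this diff. */
    u64 kvm_read_l1_tsc(struct kvm_vcpu *vcpu, u64 host_tsc)
    {
    	u64 tsc_offset = kvm_x86_ops->read_l1_tsc_offset(vcpu);

    	return tsc_offset + kvm_scale_tsc(vcpu, host_tsc);
    }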