 arch/x86/include/asm/kvm_host.h |  1 +
 arch/x86/kvm/svm.c              | 17 ++++++++++++++++-
 arch/x86/kvm/vmx.c              | 54 +++++++++++++++++++++++++++++++++++-------------------
 arch/x86/kvm/x86.c              |  6 ++++--
 4 files changed, 56 insertions(+), 22 deletions(-)
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 949c977bc4c9..c25775fad4ed 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -1013,6 +1013,7 @@ struct kvm_x86_ops {
 
         bool (*has_wbinvd_exit)(void);
 
+        u64 (*read_l1_tsc_offset)(struct kvm_vcpu *vcpu);
         void (*write_tsc_offset)(struct kvm_vcpu *vcpu, u64 offset);
 
         void (*get_exit_info)(struct kvm_vcpu *vcpu, u64 *info1, u64 *info2);
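
The new op slots into the kvm_x86_ops vtable next to write_tsc_offset: common x86 code dispatches through the function pointer, and each vendor module (SVM and VMX, below) supplies its own notion of "L1's TSC offset". A minimal standalone sketch of that dispatch pattern; only the op name read_l1_tsc_offset mirrors the patch, everything else is hypothetical scaffolding:

#include <stdint.h>
#include <stdio.h>

/* Toy stand-ins for the kernel types; only the op name mirrors the patch. */
struct kvm_vcpu_model { uint64_t tsc_offset; };

struct x86_ops_model {
        uint64_t (*read_l1_tsc_offset)(struct kvm_vcpu_model *vcpu);
};

/* A vendor module supplies its own implementation... */
static uint64_t vendor_read_l1_tsc_offset(struct kvm_vcpu_model *vcpu)
{
        return vcpu->tsc_offset; /* trivial case: no nested guest in this model */
}

static struct x86_ops_model ops_model = {
        .read_l1_tsc_offset = vendor_read_l1_tsc_offset,
};

/* ...and common code dispatches through the function pointer. */
int main(void)
{
        struct kvm_vcpu_model vcpu = { .tsc_offset = 42 };
        printf("L1 tsc offset: %llu\n",
               (unsigned long long)ops_model.read_l1_tsc_offset(&vcpu));
        return 0;
}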
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index b3ebc8ad6891..e77a536d0b7c 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -1424,12 +1424,23 @@ static void init_sys_seg(struct vmcb_seg *seg, uint32_t type)
         seg->base = 0;
 }
 
+static u64 svm_read_l1_tsc_offset(struct kvm_vcpu *vcpu)
+{
+        struct vcpu_svm *svm = to_svm(vcpu);
+
+        if (is_guest_mode(vcpu))
+                return svm->nested.hsave->control.tsc_offset;
+
+        return vcpu->arch.tsc_offset;
+}
+
 static void svm_write_tsc_offset(struct kvm_vcpu *vcpu, u64 offset)
 {
         struct vcpu_svm *svm = to_svm(vcpu);
         u64 g_tsc_offset = 0;
 
         if (is_guest_mode(vcpu)) {
+                /* Write L1's TSC offset.  */
                 g_tsc_offset = svm->vmcb->control.tsc_offset -
                                svm->nested.hsave->control.tsc_offset;
                 svm->nested.hsave->control.tsc_offset = offset;
@@ -3323,6 +3334,7 @@ static int nested_svm_vmexit(struct vcpu_svm *svm)
         /* Restore the original control entries */
         copy_vmcb_control_area(vmcb, hsave);
 
+        svm->vcpu.arch.tsc_offset = svm->vmcb->control.tsc_offset;
         kvm_clear_exception_queue(&svm->vcpu);
         kvm_clear_interrupt_queue(&svm->vcpu);
 
@@ -3483,10 +3495,12 @@ static void enter_svm_guest_mode(struct vcpu_svm *svm, u64 vmcb_gpa,
         /* We don't want to see VMMCALLs from a nested guest */
         clr_intercept(svm, INTERCEPT_VMMCALL);
 
+        svm->vcpu.arch.tsc_offset += nested_vmcb->control.tsc_offset;
+        svm->vmcb->control.tsc_offset = svm->vcpu.arch.tsc_offset;
+
         svm->vmcb->control.virt_ext = nested_vmcb->control.virt_ext;
         svm->vmcb->control.int_vector = nested_vmcb->control.int_vector;
         svm->vmcb->control.int_state = nested_vmcb->control.int_state;
-        svm->vmcb->control.tsc_offset += nested_vmcb->control.tsc_offset;
         svm->vmcb->control.event_inj = nested_vmcb->control.event_inj;
         svm->vmcb->control.event_inj_err = nested_vmcb->control.event_inj_err;
 
@@ -7102,6 +7116,7 @@ static struct kvm_x86_ops svm_x86_ops __ro_after_init = {
 
         .has_wbinvd_exit = svm_has_wbinvd_exit,
 
+        .read_l1_tsc_offset = svm_read_l1_tsc_offset,
         .write_tsc_offset = svm_write_tsc_offset,
 
         .set_tdp_cr3 = set_tdp_cr3,
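
The svm.c hunks above maintain one invariant: vcpu->arch.tsc_offset always holds the offset of the guest that is actually running, while hsave keeps L1's offset across a nested VMRUN/#VMEXIT pair; that is exactly what svm_read_l1_tsc_offset() returns in guest mode. A standalone model of that bookkeeping follows; all names and numbers are illustrative, not kernel code:

#include <stdio.h>
#include <stdint.h>

struct model {
        uint64_t arch_tsc_offset;   /* vcpu->arch.tsc_offset: offset of the *running* guest */
        uint64_t hsave_tsc_offset;  /* svm->nested.hsave->control.tsc_offset: L1's offset */
        int guest_mode;             /* is_guest_mode() */
};

static void nested_vmrun(struct model *m, uint64_t vmcb12_tsc_offset)
{
        m->hsave_tsc_offset = m->arch_tsc_offset;   /* stash L1's offset */
        m->arch_tsc_offset += vmcb12_tsc_offset;    /* L2 = L1 + vmcb12  */
        m->guest_mode = 1;
}

static void nested_vmexit(struct model *m)
{
        m->arch_tsc_offset = m->hsave_tsc_offset;   /* back to L1's offset */
        m->guest_mode = 0;
}

static uint64_t read_l1_tsc_offset(const struct model *m)
{
        return m->guest_mode ? m->hsave_tsc_offset : m->arch_tsc_offset;
}

int main(void)
{
        struct model m = { .arch_tsc_offset = 1000 };

        nested_vmrun(&m, 250);
        printf("in L2: running offset=%llu, L1 offset=%llu\n",
               (unsigned long long)m.arch_tsc_offset,
               (unsigned long long)read_l1_tsc_offset(&m)); /* 1250, 1000 */
        nested_vmexit(&m);
        printf("back in L1: offset=%llu\n",
               (unsigned long long)m.arch_tsc_offset);       /* 1000 */
        return 0;
}

Running it prints a 1250/1000 split while "L2" runs and 1000 again after the exit, mirroring the adjust/restore pairs in enter_svm_guest_mode() and nested_svm_vmexit().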
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index a13c603bdefb..7207e6cc07c1 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -2874,6 +2874,17 @@ static void setup_msrs(struct vcpu_vmx *vmx)
         vmx_update_msr_bitmap(&vmx->vcpu);
 }
 
+static u64 vmx_read_l1_tsc_offset(struct kvm_vcpu *vcpu)
+{
+        struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
+
+        if (is_guest_mode(vcpu) &&
+            (vmcs12->cpu_based_vm_exec_control & CPU_BASED_USE_TSC_OFFSETING))
+                return vcpu->arch.tsc_offset - vmcs12->tsc_offset;
+
+        return vcpu->arch.tsc_offset;
+}
+
 /*
  * reads and returns guest's timestamp counter "register"
  * guest_tsc = (host_tsc * tsc multiplier) >> 48 + tsc_offset
@@ -11175,11 +11186,8 @@ static int prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12,
                 vmcs_write64(GUEST_IA32_PAT, vmx->vcpu.arch.pat);
         }
 
-        if (vmcs12->cpu_based_vm_exec_control & CPU_BASED_USE_TSC_OFFSETING)
-                vmcs_write64(TSC_OFFSET,
-                        vcpu->arch.tsc_offset + vmcs12->tsc_offset);
-        else
-                vmcs_write64(TSC_OFFSET, vcpu->arch.tsc_offset);
+        vmcs_write64(TSC_OFFSET, vcpu->arch.tsc_offset);
+
         if (kvm_has_tsc_control)
                 decache_tsc_multiplier(vmx);
 
@@ -11427,6 +11435,7 @@ static int enter_vmx_non_root_mode(struct kvm_vcpu *vcpu, bool from_vmentry)
         struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
         u32 msr_entry_idx;
         u32 exit_qual;
+        int r;
 
         enter_guest_mode(vcpu);
 
@@ -11436,26 +11445,21 @@ static int enter_vmx_non_root_mode(struct kvm_vcpu *vcpu, bool from_vmentry)
         vmx_switch_vmcs(vcpu, &vmx->nested.vmcs02);
         vmx_segment_cache_clear(vmx);
 
-        if (prepare_vmcs02(vcpu, vmcs12, from_vmentry, &exit_qual)) {
-                leave_guest_mode(vcpu);
-                vmx_switch_vmcs(vcpu, &vmx->vmcs01);
-                nested_vmx_entry_failure(vcpu, vmcs12,
-                                EXIT_REASON_INVALID_STATE, exit_qual);
-                return 1;
-        }
+        if (vmcs12->cpu_based_vm_exec_control & CPU_BASED_USE_TSC_OFFSETING)
+                vcpu->arch.tsc_offset += vmcs12->tsc_offset;
+
+        r = EXIT_REASON_INVALID_STATE;
+        if (prepare_vmcs02(vcpu, vmcs12, from_vmentry, &exit_qual))
+                goto fail;
 
         nested_get_vmcs12_pages(vcpu, vmcs12);
 
+        r = EXIT_REASON_MSR_LOAD_FAIL;
         msr_entry_idx = nested_vmx_load_msr(vcpu,
                                             vmcs12->vm_entry_msr_load_addr,
                                             vmcs12->vm_entry_msr_load_count);
-        if (msr_entry_idx) {
-                leave_guest_mode(vcpu);
-                vmx_switch_vmcs(vcpu, &vmx->vmcs01);
-                nested_vmx_entry_failure(vcpu, vmcs12,
-                                EXIT_REASON_MSR_LOAD_FAIL, msr_entry_idx);
-                return 1;
-        }
+        if (msr_entry_idx)
+                goto fail;
 
         /*
          * Note no nested_vmx_succeed or nested_vmx_fail here. At this point
@@ -11464,6 +11468,14 @@ static int enter_vmx_non_root_mode(struct kvm_vcpu *vcpu, bool from_vmentry)
          * the success flag) when L2 exits (see nested_vmx_vmexit()).
          */
         return 0;
+
+fail:
+        if (vmcs12->cpu_based_vm_exec_control & CPU_BASED_USE_TSC_OFFSETING)
+                vcpu->arch.tsc_offset -= vmcs12->tsc_offset;
+        leave_guest_mode(vcpu);
+        vmx_switch_vmcs(vcpu, &vmx->vmcs01);
+        nested_vmx_entry_failure(vcpu, vmcs12, r, exit_qual);
+        return 1;
 }
 
 /*
@@ -12035,6 +12047,9 @@ static void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 exit_reason,
 
         leave_guest_mode(vcpu);
 
+        if (vmcs12->cpu_based_vm_exec_control & CPU_BASED_USE_TSC_OFFSETING)
+                vcpu->arch.tsc_offset -= vmcs12->tsc_offset;
+
         if (likely(!vmx->fail)) {
                 if (exit_reason == -1)
                         sync_vmcs12(vcpu, vmcs12);
@@ -12725,6 +12740,7 @@ static struct kvm_x86_ops vmx_x86_ops __ro_after_init = {
 
         .has_wbinvd_exit = cpu_has_vmx_wbinvd_exit,
 
+        .read_l1_tsc_offset = vmx_read_l1_tsc_offset,
         .write_tsc_offset = vmx_write_tsc_offset,
 
         .set_tdp_cr3 = vmx_set_cr3,
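
The VMX side keeps the same invariant as SVM, but only when L1 actually enabled TSC offsetting for L2: vcpu->arch.tsc_offset gains vmcs12->tsc_offset on nested entry and loses it again on a normal exit or a failed entry, so vmx_read_l1_tsc_offset() can recover L1's offset by subtraction. A compact sketch of that recovery, with illustrative parameter names (not kernel code):

#include <stdint.h>
#include <stdio.h>

/* Model of vmx_read_l1_tsc_offset(): while L2 runs with TSC offsetting
 * enabled, arch_offset == l1_offset + vmcs12_offset, so subtracting
 * vmcs12_offset yields L1's value again. */
static uint64_t l1_tsc_offset(uint64_t arch_offset, uint64_t vmcs12_offset,
                              int guest_mode, int uses_tsc_offsetting)
{
        if (guest_mode && uses_tsc_offsetting)
                return arch_offset - vmcs12_offset;
        return arch_offset;
}

int main(void)
{
        /* L1 offset 1000; L1 grants L2 an extra 250 via vmcs12. */
        printf("%llu\n", (unsigned long long)l1_tsc_offset(1250, 250, 1, 1)); /* 1000 */
        printf("%llu\n", (unsigned long long)l1_tsc_offset(1000, 250, 0, 0)); /* 1000 */
        return 0;
}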
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 0334b250e102..3f3fba58c960 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -1490,7 +1490,7 @@ static void kvm_track_tsc_matching(struct kvm_vcpu *vcpu)
 
 static void update_ia32_tsc_adjust_msr(struct kvm_vcpu *vcpu, s64 offset)
 {
-        u64 curr_offset = vcpu->arch.tsc_offset;
+        u64 curr_offset = kvm_x86_ops->read_l1_tsc_offset(vcpu);
         vcpu->arch.ia32_tsc_adjust_msr += offset - curr_offset;
 }
 
@@ -1532,7 +1532,9 @@ static u64 kvm_compute_tsc_offset(struct kvm_vcpu *vcpu, u64 target_tsc)
 
 u64 kvm_read_l1_tsc(struct kvm_vcpu *vcpu, u64 host_tsc)
 {
-        return vcpu->arch.tsc_offset + kvm_scale_tsc(vcpu, host_tsc);
+        u64 tsc_offset = kvm_x86_ops->read_l1_tsc_offset(vcpu);
+
+        return tsc_offset + kvm_scale_tsc(vcpu, host_tsc);
 }
 EXPORT_SYMBOL_GPL(kvm_read_l1_tsc);
 
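
Both common-code callers need L1's offset specifically: update_ia32_tsc_adjust_msr() tracks how far L1 has moved its own TSC, and kvm_read_l1_tsc() computes L1's TSC as scale(host_tsc) + offset, matching the "(host_tsc * tsc multiplier) >> 48 + tsc_offset" comment in vmx.c. A worked example of that arithmetic with an identity multiplier; this is a standalone sketch, and the 128-bit intermediate relies on a GCC/Clang extension:

#include <stdint.h>
#include <stdio.h>

/* Fixed-point scaling as described by the vmx.c comment:
 * guest_tsc = ((host_tsc * multiplier) >> frac_bits) + tsc_offset. */
static uint64_t scale_tsc(uint64_t host_tsc, uint64_t ratio, unsigned frac_bits)
{
        return (uint64_t)(((unsigned __int128)host_tsc * ratio) >> frac_bits);
}

int main(void)
{
        uint64_t host_tsc  = 1000000;
        uint64_t ratio     = 1ull << 48; /* identity multiplier at 48 fractional bits */
        uint64_t l1_offset = 5000;       /* what read_l1_tsc_offset() would return */

        /* kvm_read_l1_tsc() analogue: 1000000 + 5000 = 1005000 */
        printf("L1 tsc = %llu\n",
               (unsigned long long)(scale_tsc(host_tsc, ratio, 48) + l1_offset));
        return 0;
}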
