author     Zachary Amsden <zamsden@redhat.com>       2010-08-20 04:07:16 -0400
committer  Avi Kivity <avi@redhat.com>               2010-10-24 04:51:22 -0400
commit     f4e1b3c8bd2a044cd0ccf80595bfd088a49fe60b (patch)
tree       0f494d950f3582d0ed4bb8218cdf6541578a2d46
parent     ae38436b78a8abff767e2ac10e2cd663a7eef476 (diff)
KVM: x86: Convert TSC writes to TSC offset writes
Change svm / vmx to be the same internally and write the TSC offset
instead of the bare TSC value in their helper functions. Isolated as a
single patch to contain the code movement.
Signed-off-by: Zachary Amsden <zamsden@redhat.com>
Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
-rw-r--r--  arch/x86/kvm/svm.c | 31
-rw-r--r--  arch/x86/kvm/vmx.c | 11
2 files changed, 22 insertions(+), 20 deletions(-)
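
For readers skimming the diff: the conversion replaces writes of an absolute guest TSC value with writes of a TSC offset, using the identity already noted in the vmx.c comment (guest_tsc = host_tsc + tsc_offset, hence tsc_offset = guest_tsc - host_tsc). The stand-alone C sketch below is not part of the patch; the names vcpu_model, write_tsc_offset and set_guest_tsc are invented here purely to model what svm_write_tsc_offset() / vmx_write_tsc_offset() and the MSR_IA32_TSC write path do with that identity.

/*
 * Illustrative user-space model only (not kernel code): a guest write of an
 * absolute TSC value is converted into a TSC-offset write, per the identity
 *   guest_tsc = host_tsc + tsc_offset  ==>  tsc_offset = guest_tsc - host_tsc
 * The struct and function names below are invented for this sketch.
 */
#include <stdint.h>
#include <stdio.h>

struct vcpu_model {
        uint64_t tsc_offset;    /* stands in for the vmcb / VMCS TSC_OFFSET field */
};

/* stand-in for svm_write_tsc_offset()/vmx_write_tsc_offset(): store only the offset */
static void write_tsc_offset(struct vcpu_model *vcpu, uint64_t offset)
{
        vcpu->tsc_offset = offset;
}

/* stand-in for the MSR_IA32_TSC write path: convert the absolute value to an offset */
static void set_guest_tsc(struct vcpu_model *vcpu, uint64_t data, uint64_t host_tsc)
{
        /* unsigned wrap-around gives the right result even when data < host_tsc */
        write_tsc_offset(vcpu, data - host_tsc);
}

int main(void)
{
        struct vcpu_model vcpu = { 0 };
        uint64_t host_tsc = 1000000;              /* pretend rdtsc() result */

        set_guest_tsc(&vcpu, 2500000, host_tsc);  /* guest writes MSR_IA32_TSC = 2500000 */

        /* a guest rdtsc now observes host_tsc + tsc_offset */
        printf("guest tsc = %llu\n",
               (unsigned long long)(host_tsc + vcpu.tsc_offset));
        return 0;
}
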
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index af5b9ea51965..e06f00d1f15c 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -701,6 +701,20 @@ static void init_sys_seg(struct vmcb_seg *seg, uint32_t type)
 	seg->base = 0;
 }
 
+static void svm_write_tsc_offset(struct kvm_vcpu *vcpu, u64 offset)
+{
+	struct vcpu_svm *svm = to_svm(vcpu);
+	u64 g_tsc_offset = 0;
+
+	if (is_nested(svm)) {
+		g_tsc_offset = svm->vmcb->control.tsc_offset -
+			       svm->nested.hsave->control.tsc_offset;
+		svm->nested.hsave->control.tsc_offset = offset;
+	}
+
+	svm->vmcb->control.tsc_offset = offset + g_tsc_offset;
+}
+
 static void init_vmcb(struct vcpu_svm *svm)
 {
 	struct vmcb_control_area *control = &svm->vmcb->control;
@@ -901,7 +915,7 @@ static struct kvm_vcpu *svm_create_vcpu(struct kvm *kvm, unsigned int id)
 	svm->vmcb_pa = page_to_pfn(page) << PAGE_SHIFT;
 	svm->asid_generation = 0;
 	init_vmcb(svm);
-	svm->vmcb->control.tsc_offset = 0-native_read_tsc();
+	svm_write_tsc_offset(&svm->vcpu, 0-native_read_tsc());
 
 	err = fx_init(&svm->vcpu);
 	if (err)
@@ -2566,20 +2580,9 @@ static int svm_set_msr(struct kvm_vcpu *vcpu, unsigned ecx, u64 data)
 	struct vcpu_svm *svm = to_svm(vcpu);
 
 	switch (ecx) {
-	case MSR_IA32_TSC: {
-		u64 tsc_offset = data - native_read_tsc();
-		u64 g_tsc_offset = 0;
-
-		if (is_nested(svm)) {
-			g_tsc_offset = svm->vmcb->control.tsc_offset -
-				       svm->nested.hsave->control.tsc_offset;
-			svm->nested.hsave->control.tsc_offset = tsc_offset;
-		}
-
-		svm->vmcb->control.tsc_offset = tsc_offset + g_tsc_offset;
-
+	case MSR_IA32_TSC:
+		svm_write_tsc_offset(vcpu, data - native_read_tsc());
 		break;
-	}
 	case MSR_STAR:
 		svm->vmcb->save.star = data;
 		break;
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 4fbab2469bf9..d9bec5ee38b8 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -1149,9 +1149,9 @@ static u64 guest_read_tsc(void)
  * writes 'guest_tsc' into guest's timestamp counter "register"
  * guest_tsc = host_tsc + tsc_offset ==> tsc_offset = guest_tsc - host_tsc
  */
-static void guest_write_tsc(u64 guest_tsc, u64 host_tsc)
+static void vmx_write_tsc_offset(u64 offset)
 {
-	vmcs_write64(TSC_OFFSET, guest_tsc - host_tsc);
+	vmcs_write64(TSC_OFFSET, offset);
 }
 
 /*
@@ -1255,7 +1255,7 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data)
 		break;
 	case MSR_IA32_TSC:
 		rdtscll(host_tsc);
-		guest_write_tsc(data, host_tsc);
+		vmx_write_tsc_offset(data - host_tsc);
 		break;
 	case MSR_IA32_CR_PAT:
 		if (vmcs_config.vmentry_ctrl & VM_ENTRY_LOAD_IA32_PAT) {
@@ -2512,7 +2512,7 @@ static int vmx_vcpu_setup(struct vcpu_vmx *vmx)
 {
 	u32 host_sysenter_cs, msr_low, msr_high;
 	u32 junk;
-	u64 host_pat, tsc_this;
+	u64 host_pat;
 	unsigned long a;
 	struct desc_ptr dt;
 	int i;
@@ -2653,8 +2653,7 @@ static int vmx_vcpu_setup(struct vcpu_vmx *vmx)
 	vmx->vcpu.arch.cr4_guest_owned_bits |= X86_CR4_PGE;
 	vmcs_writel(CR4_GUEST_HOST_MASK, ~vmx->vcpu.arch.cr4_guest_owned_bits);
 
-	tsc_this = native_read_tsc();
-	guest_write_tsc(0, tsc_this);
+	vmx_write_tsc_offset(0-native_read_tsc());
 
 	return 0;
 }