author     Zachary Amsden <zamsden@redhat.com>       2010-08-20 04:07:17 -0400
committer  Avi Kivity <avi@redhat.com>               2010-10-24 04:51:22 -0400
commit     99e3e30aee1a326a98bf3a5f47b8622219c685f3 (patch)
tree       1b67fc70af33988080784d32725f72b5ce7c07d1 /arch/x86/kvm/vmx.c
parent     f4e1b3c8bd2a044cd0ccf80595bfd088a49fe60b (diff)
KVM: x86: Move TSC offset writes to common code
Also, ensure that the storing of the offset and the reading of the TSC
are never preempted by taking a spinlock. While the lock is overkill
now, it is useful later in this patch series.
Signed-off-by: Zachary Amsden <zamsden@redhat.com>
Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
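
For context, the common-code counterpart that this VMX change plugs into is kvm_write_tsc() in arch/x86/kvm/x86.c, introduced elsewhere in this series: it takes the spinlock mentioned above, reads the host TSC, computes the offset, and then calls the new vendor hook. The sketch below is reconstructed from the callers visible in this diff and is illustrative only; the lock name kvm->arch.tsc_write_lock and the exact body are assumptions, not quoted from the patch.

```c
/*
 * Illustrative sketch of the common kvm_write_tsc() this patch calls
 * into (arch/x86/kvm/x86.c).  Field and lock names such as
 * kvm->arch.tsc_write_lock are assumptions.
 */
void kvm_write_tsc(struct kvm_vcpu *vcpu, u64 data)
{
	struct kvm *kvm = vcpu->kvm;
	unsigned long flags;
	u64 offset;

	/*
	 * Hold the lock so reading the host TSC and storing the offset
	 * cannot be preempted or raced by another TSC write.
	 */
	spin_lock_irqsave(&kvm->arch.tsc_write_lock, flags);
	offset = data - native_read_tsc();	/* guest_tsc = host_tsc + offset */
	kvm_x86_ops->write_tsc_offset(vcpu, offset);
	spin_unlock_irqrestore(&kvm->arch.tsc_write_lock, flags);
}
```

With this in place, the VMX MSR handler and vCPU setup below no longer compute the offset themselves; they pass the desired guest TSC value to kvm_write_tsc() and only supply the vmx_write_tsc_offset() hook.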
Diffstat (limited to 'arch/x86/kvm/vmx.c')
-rw-r--r--  arch/x86/kvm/vmx.c | 13
1 file changed, 6 insertions(+), 7 deletions(-)
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index d9bec5ee38b8..138746d3afe9 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -1146,10 +1146,9 @@ static u64 guest_read_tsc(void)
 }
 
 /*
- * writes 'guest_tsc' into guest's timestamp counter "register"
- * guest_tsc = host_tsc + tsc_offset ==> tsc_offset = guest_tsc - host_tsc
+ * writes 'offset' into guest's timestamp counter offset register
  */
-static void vmx_write_tsc_offset(u64 offset)
+static void vmx_write_tsc_offset(struct kvm_vcpu *vcpu, u64 offset)
 {
 	vmcs_write64(TSC_OFFSET, offset);
 }
@@ -1224,7 +1223,6 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data)
 {
 	struct vcpu_vmx *vmx = to_vmx(vcpu);
 	struct shared_msr_entry *msr;
-	u64 host_tsc;
 	int ret = 0;
 
 	switch (msr_index) {
@@ -1254,8 +1252,7 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data)
 		vmcs_writel(GUEST_SYSENTER_ESP, data);
 		break;
 	case MSR_IA32_TSC:
-		rdtscll(host_tsc);
-		vmx_write_tsc_offset(data - host_tsc);
+		kvm_write_tsc(vcpu, data);
 		break;
 	case MSR_IA32_CR_PAT:
 		if (vmcs_config.vmentry_ctrl & VM_ENTRY_LOAD_IA32_PAT) {
@@ -2653,7 +2650,7 @@ static int vmx_vcpu_setup(struct vcpu_vmx *vmx)
 	vmx->vcpu.arch.cr4_guest_owned_bits |= X86_CR4_PGE;
 	vmcs_writel(CR4_GUEST_HOST_MASK, ~vmx->vcpu.arch.cr4_guest_owned_bits);
 
-	vmx_write_tsc_offset(0-native_read_tsc());
+	kvm_write_tsc(&vmx->vcpu, 0);
 
 	return 0;
 }
@@ -4348,6 +4345,8 @@ static struct kvm_x86_ops vmx_x86_ops = {
 	.set_supported_cpuid = vmx_set_supported_cpuid,
 
 	.has_wbinvd_exit = cpu_has_vmx_wbinvd_exit,
+
+	.write_tsc_offset = vmx_write_tsc_offset,
 };
 
 static int __init vmx_init(void)