author	Zachary Amsden <zamsden@redhat.com>	2010-08-20 04:07:17 -0400
committer	Avi Kivity <avi@redhat.com>	2010-10-24 04:51:22 -0400
commit	99e3e30aee1a326a98bf3a5f47b8622219c685f3 (patch)
tree	1b67fc70af33988080784d32725f72b5ce7c07d1
parent	f4e1b3c8bd2a044cd0ccf80595bfd088a49fe60b (diff)
KVM: x86: Move TSC offset writes to common code
Also, ensure that the storing of the offset and the reading of the TSC
are never preempted by taking a spinlock. While the lock is overkill
now, it is useful later in this patch series.

Signed-off-by: Zachary Amsden <zamsden@redhat.com>
Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
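The arithmetic being centralized here is guest_tsc = host_tsc + tsc_offset,
so to make the guest observe a requested value 'data', the offset must be
computed as data - host_tsc at the instant it is stored. A toy user-space
model of that invariant and of the preemption hazard the lock closes is
sketched below (illustration only, not kernel code; tsc_offset_for() is a
made-up name):

	/* toy_tsc_offset.c - model of guest_tsc = host_tsc + tsc_offset */
	#include <stdio.h>
	#include <stdint.h>

	/*
	 * Offset that makes the guest read 'data' when the host TSC is
	 * 'host_tsc_now'.  Unsigned wraparound is intentional: the
	 * hardware adds the offset modulo 2^64.
	 */
	static uint64_t tsc_offset_for(uint64_t data, uint64_t host_tsc_now)
	{
		return data - host_tsc_now;
	}

	int main(void)
	{
		uint64_t host_tsc = 1000000;	/* host TSC at write time */
		uint64_t offset = tsc_offset_for(0, host_tsc);	/* guest resets to 0 */

		/* 500 host cycles later, the guest reads its TSC: 500. */
		printf("guest tsc: %llu\n",
		       (unsigned long long)(host_tsc + 500 + offset));

		/*
		 * Had the writer been preempted for P cycles between reading
		 * the host TSC and storing the offset, the guest TSC would
		 * start at P instead of 0 - hence the irq-disabling spinlock
		 * around the read/store pair in kvm_write_tsc() below.
		 */
		return 0;
	}

The lock also serializes concurrent guest TSC writes, which the later
patches in this series build on.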
-rw-r--r--	arch/x86/include/asm/kvm_host.h	 3
-rw-r--r--	arch/x86/kvm/svm.c	 6
-rw-r--r--	arch/x86/kvm/vmx.c	13
-rw-r--r--	arch/x86/kvm/x86.c	18
-rw-r--r--	arch/x86/kvm/x86.h	 2
5 files changed, 33 insertions(+), 9 deletions(-)
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 6056a23dc4cf..a215153f1ff6 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -395,6 +395,7 @@ struct kvm_arch {
 
 	unsigned long irq_sources_bitmap;
 	s64 kvmclock_offset;
+	spinlock_t tsc_write_lock;
 
 	struct kvm_xen_hvm_config xen_hvm_config;
 
@@ -521,6 +522,8 @@ struct kvm_x86_ops {
 
 	bool (*has_wbinvd_exit)(void);
 
+	void (*write_tsc_offset)(struct kvm_vcpu *vcpu, u64 offset);
+
 	const struct trace_print_flags *exit_reasons_str;
 };
 
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index e06f00d1f15c..ea41c551fa44 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -915,7 +915,7 @@ static struct kvm_vcpu *svm_create_vcpu(struct kvm *kvm, unsigned int id)
 	svm->vmcb_pa = page_to_pfn(page) << PAGE_SHIFT;
 	svm->asid_generation = 0;
 	init_vmcb(svm);
-	svm_write_tsc_offset(&svm->vcpu, 0-native_read_tsc());
+	kvm_write_tsc(&svm->vcpu, 0);
 
 	err = fx_init(&svm->vcpu);
 	if (err)
@@ -2581,7 +2581,7 @@ static int svm_set_msr(struct kvm_vcpu *vcpu, unsigned ecx, u64 data)
 
 	switch (ecx) {
 	case MSR_IA32_TSC:
-		svm_write_tsc_offset(vcpu, data - native_read_tsc());
+		kvm_write_tsc(vcpu, data);
 		break;
 	case MSR_STAR:
 		svm->vmcb->save.star = data;
@@ -3551,6 +3551,8 @@ static struct kvm_x86_ops svm_x86_ops = {
 	.set_supported_cpuid = svm_set_supported_cpuid,
 
 	.has_wbinvd_exit = svm_has_wbinvd_exit,
+
+	.write_tsc_offset = svm_write_tsc_offset,
 };
 
 static int __init svm_init(void)
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index d9bec5ee38b8..138746d3afe9 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -1146,10 +1146,9 @@ static u64 guest_read_tsc(void)
 }
 
 /*
- * writes 'guest_tsc' into guest's timestamp counter "register"
- * guest_tsc = host_tsc + tsc_offset ==> tsc_offset = guest_tsc - host_tsc
+ * writes 'offset' into guest's timestamp counter offset register
  */
-static void vmx_write_tsc_offset(u64 offset)
+static void vmx_write_tsc_offset(struct kvm_vcpu *vcpu, u64 offset)
 {
 	vmcs_write64(TSC_OFFSET, offset);
 }
@@ -1224,7 +1223,6 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data)
 {
 	struct vcpu_vmx *vmx = to_vmx(vcpu);
 	struct shared_msr_entry *msr;
-	u64 host_tsc;
 	int ret = 0;
 
 	switch (msr_index) {
@@ -1254,8 +1252,7 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data)
 		vmcs_writel(GUEST_SYSENTER_ESP, data);
 		break;
 	case MSR_IA32_TSC:
-		rdtscll(host_tsc);
-		vmx_write_tsc_offset(data - host_tsc);
+		kvm_write_tsc(vcpu, data);
 		break;
 	case MSR_IA32_CR_PAT:
 		if (vmcs_config.vmentry_ctrl & VM_ENTRY_LOAD_IA32_PAT) {
@@ -2653,7 +2650,7 @@ static int vmx_vcpu_setup(struct vcpu_vmx *vmx)
 	vmx->vcpu.arch.cr4_guest_owned_bits |= X86_CR4_PGE;
 	vmcs_writel(CR4_GUEST_HOST_MASK, ~vmx->vcpu.arch.cr4_guest_owned_bits);
 
-	vmx_write_tsc_offset(0-native_read_tsc());
+	kvm_write_tsc(&vmx->vcpu, 0);
 
 	return 0;
 }
@@ -4348,6 +4345,8 @@ static struct kvm_x86_ops vmx_x86_ops = {
 	.set_supported_cpuid = vmx_set_supported_cpuid,
 
 	.has_wbinvd_exit = cpu_has_vmx_wbinvd_exit,
+
+	.write_tsc_offset = vmx_write_tsc_offset,
 };
 
 static int __init vmx_init(void)
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 8b0c51a1adaa..886132b6ef14 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -895,6 +895,22 @@ static void kvm_set_time_scale(uint32_t tsc_khz, struct pvclock_vcpu_time_info *
 
 static DEFINE_PER_CPU(unsigned long, cpu_tsc_khz);
 
+void kvm_write_tsc(struct kvm_vcpu *vcpu, u64 data)
+{
+	struct kvm *kvm = vcpu->kvm;
+	u64 offset;
+	unsigned long flags;
+
+	spin_lock_irqsave(&kvm->arch.tsc_write_lock, flags);
+	offset = data - native_read_tsc();
+	kvm_x86_ops->write_tsc_offset(vcpu, offset);
+	spin_unlock_irqrestore(&kvm->arch.tsc_write_lock, flags);
+
+	/* Reset of TSC must disable overshoot protection below */
+	vcpu->arch.hv_clock.tsc_timestamp = 0;
+}
+EXPORT_SYMBOL_GPL(kvm_write_tsc);
+
 static void kvm_write_guest_time(struct kvm_vcpu *v)
 {
 	struct timespec ts;
@@ -5495,6 +5511,8 @@ struct kvm *kvm_arch_create_vm(void)
 	/* Reserve bit 0 of irq_sources_bitmap for userspace irq source */
 	set_bit(KVM_USERSPACE_IRQ_SOURCE_ID, &kvm->arch.irq_sources_bitmap);
 
+	spin_lock_init(&kvm->arch.tsc_write_lock);
+
 	return kvm;
 }
 
diff --git a/arch/x86/kvm/x86.h b/arch/x86/kvm/x86.h
index b7a404722d2b..2d6385e44ccf 100644
--- a/arch/x86/kvm/x86.h
+++ b/arch/x86/kvm/x86.h
@@ -68,4 +68,6 @@ static inline int is_paging(struct kvm_vcpu *vcpu)
 void kvm_before_handle_nmi(struct kvm_vcpu *vcpu);
 void kvm_after_handle_nmi(struct kvm_vcpu *vcpu);
 
+void kvm_write_tsc(struct kvm_vcpu *vcpu, u64 data);
+
 #endif