author     Avi Kivity <avi@qumranet.com>    2007-03-25 06:07:27 -0400
committer  Avi Kivity <avi@qumranet.com>    2007-05-03 03:52:27 -0400
commit     0cc5064d335543a72c5ef904a3f528966fa3f2d2
tree       4e6f8b4e013fc24821cb0f1af26d33d906e4d870
parent     d28c6cfbbc5e2d4fccfe6d733995ed5971ca87f6
KVM: SVM: Ensure timestamp counter monotonicity
When a vcpu is migrated from one cpu to another, its timestamp counter
may lose its monotonic property if the host has unsynced timestamp
counters.  This can confuse the guest, sometimes to the point of
refusing to boot.

As the rdtsc instruction is rather fast on AMD processors (7-10
cycles), we can simply record the last host tsc when we drop the cpu,
and adjust the vcpu tsc offset when we detect that we've migrated to a
different cpu.

Signed-off-by: Avi Kivity <avi@qumranet.com>
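For illustration, the adjustment amounts to carrying the guest-visible TSC across
the migration.  Below is a minimal user-space sketch of that arithmetic only, not
kernel code; fake_vcpu, fake_put() and fake_load() are hypothetical stand-ins for
struct kvm_vcpu, svm_vcpu_put() and svm_vcpu_load():

#include <stdio.h>
#include <stdint.h>

/* Hypothetical user-space stand-ins for struct kvm_vcpu and rdtscll(). */
struct fake_vcpu {
	int cpu;             /* CPU the vcpu last ran on */
	uint64_t host_tsc;   /* host TSC sampled on unload */
	uint64_t tsc_offset; /* guest TSC = host TSC + tsc_offset */
};

/* Mirrors svm_vcpu_put(): remember the departing CPU's TSC. */
static void fake_put(struct fake_vcpu *v, uint64_t tsc_now)
{
	v->host_tsc = tsc_now;
}

/* Mirrors svm_vcpu_load(): on migration, shift the offset so the guest
 * TSC resumes from the value it last saw instead of jumping backwards. */
static void fake_load(struct fake_vcpu *v, int cpu, uint64_t tsc_now)
{
	if (cpu != v->cpu) {
		uint64_t delta = v->host_tsc - tsc_now; /* may wrap; that is intended */
		v->tsc_offset += delta;
		v->cpu = cpu;
	}
}

int main(void)
{
	struct fake_vcpu v = { .cpu = 0, .host_tsc = 0, .tsc_offset = 0 };

	fake_put(&v, 1000000);      /* unloaded from CPU 0 at host TSC 1000000 */
	fake_load(&v, 1, 400000);   /* loaded on CPU 1 whose TSC lags at 400000 */

	/* Guest-visible TSC continues at 1000000 rather than dropping to 400000. */
	printf("guest tsc after migration: %llu\n",
	       (unsigned long long)(400000 + v.tsc_offset));
	return 0;
}

If the new CPU's TSC is instead ahead of the recorded host_tsc, the delta wraps
around in unsigned 64-bit arithmetic and effectively lowers the offset, so the
guest TSC again resumes at the value it last observed; in either direction it
never goes backwards.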
Diffstat (limited to 'drivers/kvm')
-rw-r--r--   drivers/kvm/kvm.h    1
-rw-r--r--   drivers/kvm/svm.c   21
2 files changed, 18 insertions, 4 deletions
diff --git a/drivers/kvm/kvm.h b/drivers/kvm/kvm.h
index f5e343cb06b0..6d0bd7aab92e 100644
--- a/drivers/kvm/kvm.h
+++ b/drivers/kvm/kvm.h
@@ -244,6 +244,7 @@ struct kvm_vcpu {
 	struct mutex mutex;
 	int cpu;
 	int launched;
+	u64 host_tsc;
 	struct kvm_run *run;
 	int interrupt_window_open;
 	unsigned long irq_summary; /* bit vector: 1 per word in irq_pending */
diff --git a/drivers/kvm/svm.c b/drivers/kvm/svm.c
index 0542d3357ce1..ca2642fc091f 100644
--- a/drivers/kvm/svm.c
+++ b/drivers/kvm/svm.c
@@ -459,7 +459,6 @@ static void init_vmcb(struct vmcb *vmcb)
 {
 	struct vmcb_control_area *control = &vmcb->control;
 	struct vmcb_save_area *save = &vmcb->save;
-	u64 tsc;
 
 	control->intercept_cr_read = 	INTERCEPT_CR0_MASK |
 					INTERCEPT_CR3_MASK |
@@ -517,8 +516,7 @@ static void init_vmcb(struct vmcb *vmcb)
 
 	control->iopm_base_pa = iopm_base;
 	control->msrpm_base_pa = msrpm_base;
-	rdtscll(tsc);
-	control->tsc_offset = -tsc;
+	control->tsc_offset = 0;
 	control->int_ctl = V_INTR_MASKING_MASK;
 
 	init_seg(&save->es);
@@ -606,11 +604,26 @@ static void svm_free_vcpu(struct kvm_vcpu *vcpu)
 
 static void svm_vcpu_load(struct kvm_vcpu *vcpu)
 {
-	get_cpu();
+	int cpu;
+
+	cpu = get_cpu();
+	if (unlikely(cpu != vcpu->cpu)) {
+		u64 tsc_this, delta;
+
+		/*
+		 * Make sure that the guest sees a monotonically
+		 * increasing TSC.
+		 */
+		rdtscll(tsc_this);
+		delta = vcpu->host_tsc - tsc_this;
+		vcpu->svm->vmcb->control.tsc_offset += delta;
+		vcpu->cpu = cpu;
+	}
 }
 
 static void svm_vcpu_put(struct kvm_vcpu *vcpu)
 {
+	rdtscll(vcpu->host_tsc);
 	put_cpu();
 }
 