author		Zachary Amsden <zamsden@redhat.com>	2010-08-20 04:07:23 -0400
committer	Avi Kivity <avi@redhat.com>		2010-10-24 04:51:23 -0400
commit		e48672fa25e879f7ae21785c7efd187738139593 (patch)
tree		bec27bad04ba6e933e72439cc565d2c752a31928 /arch/x86/kvm/svm.c
parent		6755bae8e69093b2994b6f29cd3eaecdf610374e (diff)
KVM: x86: Unify TSC logic
Move the TSC control logic from the vendor backends into x86.c
by adding adjust_tsc_offset to x86 ops. Now all TSC decisions
can be done in one place.
Signed-off-by: Zachary Amsden <zamsden@redhat.com>
Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
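The diff below contains only the SVM half of this change. For orientation, here is a minimal sketch of the common-code half the message describes: the new adjust_tsc_offset member of kvm_x86_ops and a generic caller in x86.c. This is a reconstruction under assumptions (the kvm_x86_ops pointer, the vcpu->arch.host_tsc field carried over from the svm.c code removed below, and the native_read_tsc()/check_tsc_unstable() helpers of this era), not the verbatim upstream patch:

	/*
	 * Sketch, not the verbatim commit: a vendor-neutral hook lets
	 * x86.c adjust the guest TSC offset without knowing any
	 * VMCB/VMCS details.
	 */
	struct kvm_x86_ops {
		/* ... */
		void (*write_tsc_offset)(struct kvm_vcpu *vcpu, u64 offset);
		void (*adjust_tsc_offset)(struct kvm_vcpu *vcpu, s64 adjustment);
		/* ... */
	};

	void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
	{
		kvm_x86_ops->vcpu_load(vcpu, cpu);
		if (unlikely(vcpu->cpu != cpu)) {
			if (check_tsc_unstable()) {
				/*
				 * Same compensation the svm.c hunk below
				 * deletes, now done once for all vendor
				 * backends: keep the guest TSC monotonic
				 * across host-CPU migration.
				 */
				s64 delta = vcpu->arch.host_tsc - native_read_tsc();
				kvm_x86_ops->adjust_tsc_offset(vcpu, delta);
			}
			kvm_migrate_timers(vcpu);
			vcpu->cpu = cpu;
		}
	}

The payoff is that each backend now only needs to know how to apply an offset delta to its own control structure, while the policy of when to compensate lives in one place.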
Diffstat (limited to 'arch/x86/kvm/svm.c')
-rw-r--r--	arch/x86/kvm/svm.c	26
1 file changed, 10 insertions, 16 deletions
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index ea41c551fa44..ff28f6521065 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -715,6 +715,15 @@ static void svm_write_tsc_offset(struct kvm_vcpu *vcpu, u64 offset)
 	svm->vmcb->control.tsc_offset = offset + g_tsc_offset;
 }
 
+static void svm_adjust_tsc_offset(struct kvm_vcpu *vcpu, s64 adjustment)
+{
+	struct vcpu_svm *svm = to_svm(vcpu);
+
+	svm->vmcb->control.tsc_offset += adjustment;
+	if (is_nested(svm))
+		svm->nested.hsave->control.tsc_offset += adjustment;
+}
+
 static void init_vmcb(struct vcpu_svm *svm)
 {
 	struct vmcb_control_area *control = &svm->vmcb->control;
@@ -961,20 +970,6 @@ static void svm_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 	int i;
 
 	if (unlikely(cpu != vcpu->cpu)) {
-		u64 delta;
-
-		if (check_tsc_unstable()) {
-			/*
-			 * Make sure that the guest sees a monotonically
-			 * increasing TSC.
-			 */
-			delta = vcpu->arch.host_tsc - native_read_tsc();
-			svm->vmcb->control.tsc_offset += delta;
-			if (is_nested(svm))
-				svm->nested.hsave->control.tsc_offset += delta;
-		}
-		vcpu->cpu = cpu;
-		kvm_migrate_timers(vcpu);
 		svm->asid_generation = 0;
 	}
 
@@ -990,8 +985,6 @@ static void svm_vcpu_put(struct kvm_vcpu *vcpu)
 	++vcpu->stat.host_state_reload;
 	for (i = 0; i < NR_HOST_SAVE_USER_MSRS; i++)
 		wrmsrl(host_save_user_msrs[i], svm->host_user_msrs[i]);
-
-	vcpu->arch.host_tsc = native_read_tsc();
 }
 
 static unsigned long svm_get_rflags(struct kvm_vcpu *vcpu)
@@ -3553,6 +3546,7 @@ static struct kvm_x86_ops svm_x86_ops = {
 	.has_wbinvd_exit = svm_has_wbinvd_exit,
 
 	.write_tsc_offset = svm_write_tsc_offset,
+	.adjust_tsc_offset = svm_adjust_tsc_offset,
 };
 
 static int __init svm_init(void)
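For comparison, the VMX counterpart of the new hook needs no nested-state handling, since the adjustment is applied straight to a VMCS field; a sketch under the same caveats (the vmcs_read64()/vmcs_write64() accessors and TSC_OFFSET field constant of this era), not quoted from the commit:

	static void vmx_adjust_tsc_offset(struct kvm_vcpu *vcpu, s64 adjustment)
	{
		/* Read-modify-write the TSC offset in the current VMCS. */
		u64 offset = vmcs_read64(TSC_OFFSET);
		vmcs_write64(TSC_OFFSET, offset + adjustment);
	}

The SVM version above additionally patches nested.hsave->control.tsc_offset when a nested guest is running, so the adjustment is not lost when host state is restored on the next nested #VMEXIT.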