author     Haozhong Zhang <haozhong.zhang@intel.com>    2015-10-20 03:39:05 -0400
committer  Paolo Bonzini <pbonzini@redhat.com>          2015-11-10 06:06:16 -0500
commit     07c1419a32bbba08cf1efb6d1ecaf24f174fa4c3 (patch)
tree       af346dcf95cb8a00fc2090f095a9b43c4d71ead4
parent     381d585c80e34988269bd7901ad910981e900be1 (diff)
KVM: x86: Replace call-back compute_tsc_offset() with a common function
Both VMX and SVM calculate the tsc-offset in the same way, so this
patch removes the call-back compute_tsc_offset() and replaces it with a
common function kvm_compute_tsc_offset().
Signed-off-by: Haozhong Zhang <haozhong.zhang@intel.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
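
The arithmetic behind the common helper is simple: the guest observes scale(host_tsc) + tsc_offset, so to make the guest's TSC read target_tsc at this instant, the offset must be target_tsc - kvm_scale_tsc(vcpu, rdtsc()). With the default 1:1 ratio the scaling step leaves the value unchanged, which is why the helper can also stand in for VMX's old raw `target_tsc - rdtsc()`. The user-space sketch below illustrates that computation only; `scale_tsc()`, `compute_tsc_offset()`, the `frac_bits` parameter, and the values in `main()` are hypothetical stand-ins for `kvm_scale_tsc()`/`rdtsc()`, not kernel APIs.

```c
#include <stdint.h>
#include <stdio.h>

/*
 * Hypothetical stand-in for kvm_scale_tsc(): multiply the host TSC by a
 * fixed-point frequency ratio. The fraction width is vendor-specific in
 * KVM (SVM's TSC_RATIO MSR has a 32-bit fraction); with the default 1:1
 * ratio this is a no-op. unsigned __int128 is a GCC/Clang extension,
 * used here to keep the 64x64-bit multiply from overflowing.
 */
static uint64_t scale_tsc(uint64_t host_tsc, uint64_t ratio, unsigned frac_bits)
{
	return (uint64_t)(((unsigned __int128)host_tsc * ratio) >> frac_bits);
}

/* Mirrors the arithmetic of kvm_compute_tsc_offset() from this patch. */
static uint64_t compute_tsc_offset(uint64_t host_tsc, uint64_t target_tsc,
				   uint64_t ratio, unsigned frac_bits)
{
	/* The guest reads scale(host_tsc) + offset; solve for offset. */
	return target_tsc - scale_tsc(host_tsc, ratio, frac_bits);
}

int main(void)
{
	unsigned frac_bits = 32;            /* illustrative fraction width */
	uint64_t ratio = 1ULL << frac_bits; /* 1.0: guest runs at host rate */
	uint64_t host_tsc = 5000000;        /* pretend rdtsc() result */
	uint64_t target = 1200000;          /* TSC the guest should now read */
	uint64_t off = compute_tsc_offset(host_tsc, target, ratio, frac_bits);

	/* Wraps modulo 2^64 when target < host_tsc, as on real hardware. */
	printf("offset    = %llu\n", (unsigned long long)off);
	printf("guest tsc = %llu\n", (unsigned long long)
	       (scale_tsc(host_tsc, ratio, frac_bits) + off));
	return 0;
}
```

Note the deliberately unsigned arithmetic: when the target lies behind the scaled host TSC, the offset wraps modulo 2^64, and adding it back wraps again to the intended value, which matches how the hardware applies the TSC offset, so "negative" offsets need no special casing.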
 arch/x86/include/asm/kvm_host.h |  1 -
 arch/x86/kvm/svm.c              | 10 ----------
 arch/x86/kvm/vmx.c              |  6 ------
 arch/x86/kvm/x86.c              | 15 ++++++++++++---
 4 files changed, 12 insertions, 20 deletions
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index c5a3f3d66e90..672f960e8144 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -856,7 +856,6 @@ struct kvm_x86_ops {
 	u64 (*read_tsc_offset)(struct kvm_vcpu *vcpu);
 	void (*write_tsc_offset)(struct kvm_vcpu *vcpu, u64 offset);
 
-	u64 (*compute_tsc_offset)(struct kvm_vcpu *vcpu, u64 target_tsc);
 	u64 (*read_l1_tsc)(struct kvm_vcpu *vcpu, u64 host_tsc);
 
 	void (*get_exit_info)(struct kvm_vcpu *vcpu, u64 *info1, u64 *info2);
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index f6e49a6c9ab0..d99b175ffbea 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -1004,15 +1004,6 @@ static void svm_adjust_tsc_offset(struct kvm_vcpu *vcpu, s64 adjustment, bool ho
 	mark_dirty(svm->vmcb, VMCB_INTERCEPTS);
 }
 
-static u64 svm_compute_tsc_offset(struct kvm_vcpu *vcpu, u64 target_tsc)
-{
-	u64 tsc;
-
-	tsc = kvm_scale_tsc(vcpu, rdtsc());
-
-	return target_tsc - tsc;
-}
-
 static void init_vmcb(struct vcpu_svm *svm)
 {
 	struct vmcb_control_area *control = &svm->vmcb->control;
@@ -4370,7 +4361,6 @@ static struct kvm_x86_ops svm_x86_ops = {
 	.read_tsc_offset = svm_read_tsc_offset,
 	.write_tsc_offset = svm_write_tsc_offset,
 	.adjust_tsc_offset = svm_adjust_tsc_offset,
-	.compute_tsc_offset = svm_compute_tsc_offset,
 	.read_l1_tsc = svm_read_l1_tsc,
 
 	.set_tdp_cr3 = set_tdp_cr3,
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index baee46893899..2d4782ce9a93 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -2426,11 +2426,6 @@ static void vmx_adjust_tsc_offset(struct kvm_vcpu *vcpu, s64 adjustment, bool ho
 					   offset + adjustment);
 }
 
-static u64 vmx_compute_tsc_offset(struct kvm_vcpu *vcpu, u64 target_tsc)
-{
-	return target_tsc - rdtsc();
-}
-
 static bool guest_cpuid_has_vmx(struct kvm_vcpu *vcpu)
 {
 	struct kvm_cpuid_entry2 *best = kvm_find_cpuid_entry(vcpu, 1, 0);
@@ -10813,7 +10808,6 @@ static struct kvm_x86_ops vmx_x86_ops = {
 	.read_tsc_offset = vmx_read_tsc_offset,
 	.write_tsc_offset = vmx_write_tsc_offset,
 	.adjust_tsc_offset = vmx_adjust_tsc_offset,
-	.compute_tsc_offset = vmx_compute_tsc_offset,
 	.read_l1_tsc = vmx_read_l1_tsc,
 
 	.set_tdp_cr3 = vmx_set_cr3,
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index c314e8d22a67..bb46066e125b 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -1392,6 +1392,15 @@ u64 kvm_scale_tsc(struct kvm_vcpu *vcpu, u64 tsc)
 }
 EXPORT_SYMBOL_GPL(kvm_scale_tsc);
 
+static u64 kvm_compute_tsc_offset(struct kvm_vcpu *vcpu, u64 target_tsc)
+{
+	u64 tsc;
+
+	tsc = kvm_scale_tsc(vcpu, rdtsc());
+
+	return target_tsc - tsc;
+}
+
 void kvm_write_tsc(struct kvm_vcpu *vcpu, struct msr_data *msr)
 {
 	struct kvm *kvm = vcpu->kvm;
@@ -1403,7 +1412,7 @@ void kvm_write_tsc(struct kvm_vcpu *vcpu, struct msr_data *msr)
 	u64 data = msr->data;
 
 	raw_spin_lock_irqsave(&kvm->arch.tsc_write_lock, flags);
-	offset = kvm_x86_ops->compute_tsc_offset(vcpu, data);
+	offset = kvm_compute_tsc_offset(vcpu, data);
 	ns = get_kernel_ns();
 	elapsed = ns - kvm->arch.last_tsc_nsec;
 
@@ -1460,7 +1469,7 @@ void kvm_write_tsc(struct kvm_vcpu *vcpu, struct msr_data *msr)
 	} else {
 		u64 delta = nsec_to_cycles(vcpu, elapsed);
 		data += delta;
-		offset = kvm_x86_ops->compute_tsc_offset(vcpu, data);
+		offset = kvm_compute_tsc_offset(vcpu, data);
 		pr_debug("kvm: adjusted tsc offset by %llu\n", delta);
 	}
 	matched = true;
@@ -2687,7 +2696,7 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 		if (tsc_delta < 0)
 			mark_tsc_unstable("KVM discovered backwards TSC");
 		if (check_tsc_unstable()) {
-			u64 offset = kvm_x86_ops->compute_tsc_offset(vcpu,
+			u64 offset = kvm_compute_tsc_offset(vcpu,
 						vcpu->arch.last_guest_tsc);
 			kvm_x86_ops->write_tsc_offset(vcpu, offset);
 			vcpu->arch.tsc_catchup = 1;