author    Zachary Amsden <zamsden@redhat.com>    2010-09-18 20:38:13 -0400
committer Avi Kivity <avi@redhat.com>    2010-10-24 04:53:04 -0400
commit    5f4e3f882731c65b5d64a2ff743fda96eaebb9ee (patch)
tree      25d538b6a58066d01711daa505acbabf47aaf125 /arch/x86/kvm
parent    624d84cfe63b5afdd087bf5b2075a8a8cac5c83f (diff)
KVM: x86: Make math work for other scales
The math in kvm_get_time_scale relies on the fact that NSEC_PER_SEC < 2^32. To
use the same function to compute arbitrary time scales, we must extend the
first reduction step to shrink the base rate to a 32-bit value, and possibly
reduce the scaled rate to a 32-bit value as well. Note we must take care to
avoid an arithmetic overflow when scaling up the tps32 value (this could not
happen with the fixed scaled value of NSEC_PER_SEC, but can happen with scaled
rates above 2^31).

Signed-off-by: Zachary Amsden <zamsden@redhat.com>
Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
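To see the reworked reduction concretely, here is a minimal user-space sketch
of the new kvm_get_time_scale() logic. It is illustrative only: div_frac is
rewritten as a plain-C equivalent of the kernel's inline-asm divide, the
s8/u32 kernel types are spelled as int8_t/uint32_t, and main() is a
hypothetical caller, not part of the patch.

#include <stdint.h>
#include <stdio.h>

/* Plain-C stand-in for the kernel's div_frac(): the 32.32 fixed-point
 * quotient (dividend << 32) / divisor. */
static uint32_t div_frac(uint32_t dividend, uint32_t divisor)
{
	return (uint32_t)(((uint64_t)dividend << 32) / divisor);
}

static void get_time_scale(uint32_t scaled_khz, uint32_t base_khz,
			   int8_t *pshift, uint32_t *pmultiplier)
{
	uint64_t scaled64 = scaled_khz * 1000LL;
	uint64_t tps64 = base_khz * 1000LL;
	int32_t shift = 0;
	uint32_t tps32;

	/* First reduction: halve the base rate until it fits in 32 bits
	 * and is at most twice the scaled rate. */
	while (tps64 > scaled64 * 2 || tps64 & 0xffffffff00000000ULL) {
		tps64 >>= 1;
		shift--;
	}

	tps32 = (uint32_t)tps64;
	/* Second reduction: double the base rate until it exceeds the
	 * scaled rate, halving the scaled rate instead whenever doubling
	 * tps32 would overflow 32 bits (the overflow case noted in the
	 * commit message). */
	while (tps32 <= scaled64 || scaled64 & 0xffffffff00000000ULL) {
		if (scaled64 & 0xffffffff00000000ULL || tps32 & 0x80000000)
			scaled64 >>= 1;
		else
			tps32 <<= 1;
		shift++;
	}

	*pshift = shift;
	*pmultiplier = div_frac((uint32_t)scaled64, tps32);
}

int main(void)
{
	int8_t shift;
	uint32_t mul;

	/* Hypothetical example: scale a 2.8 GHz TSC to nanoseconds,
	 * i.e. to NSEC_PER_SEC / 1000 = 1000000 kHz. */
	get_time_scale(1000000, 2800000, &shift, &mul);
	printf("shift %d, mul %u\n", shift, mul); /* shift -1, mul ~0.71 * 2^32 */
	return 0;
}

With these example values, (2800000000 >> 1) * mul / 2^32 comes back to
roughly 10^9: one second of TSC ticks maps to one second of nanoseconds.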
Diffstat (limited to 'arch/x86/kvm')
-rw-r--r--  arch/x86/kvm/x86.c  30
1 file changed, 18 insertions(+), 12 deletions(-)
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 7d2880500fa3..6666af840190 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -920,31 +920,35 @@ static uint32_t div_frac(uint32_t dividend, uint32_t divisor)
 	return quotient;
 }
 
-static void kvm_set_time_scale(uint32_t tsc_khz, struct pvclock_vcpu_time_info *hv_clock)
+static void kvm_get_time_scale(uint32_t scaled_khz, uint32_t base_khz,
+			       s8 *pshift, u32 *pmultiplier)
 {
-	uint64_t nsecs = 1000000000LL;
+	uint64_t scaled64;
 	int32_t shift = 0;
 	uint64_t tps64;
 	uint32_t tps32;
 
-	tps64 = tsc_khz * 1000LL;
-	while (tps64 > nsecs*2) {
+	tps64 = base_khz * 1000LL;
+	scaled64 = scaled_khz * 1000LL;
+	while (tps64 > scaled64*2 || tps64 & 0xffffffff00000000UL) {
 		tps64 >>= 1;
 		shift--;
 	}
 
 	tps32 = (uint32_t)tps64;
-	while (tps32 <= (uint32_t)nsecs) {
-		tps32 <<= 1;
+	while (tps32 <= scaled64 || scaled64 & 0xffffffff00000000UL) {
+		if (scaled64 & 0xffffffff00000000UL || tps32 & 0x80000000)
+			scaled64 >>= 1;
+		else
+			tps32 <<= 1;
 		shift++;
 	}
 
-	hv_clock->tsc_shift = shift;
-	hv_clock->tsc_to_system_mul = div_frac(nsecs, tps32);
+	*pshift = shift;
+	*pmultiplier = div_frac(scaled64, tps32);
 
-	pr_debug("%s: tsc_khz %u, tsc_shift %d, tsc_mul %u\n",
-		 __func__, tsc_khz, hv_clock->tsc_shift,
-		 hv_clock->tsc_to_system_mul);
+	pr_debug("%s: base_khz %u => %u, shift %d, mul %u\n",
+		 __func__, base_khz, scaled_khz, shift, *pmultiplier);
 }
 
 static inline u64 get_kernel_ns(void)
@@ -1084,7 +1088,9 @@ static int kvm_write_guest_time(struct kvm_vcpu *v)
 	}
 
 	if (unlikely(vcpu->hw_tsc_khz != this_tsc_khz)) {
-		kvm_set_time_scale(this_tsc_khz, &vcpu->hv_clock);
+		kvm_get_time_scale(NSEC_PER_SEC / 1000, this_tsc_khz,
+				   &vcpu->hv_clock.tsc_shift,
+				   &vcpu->hv_clock.tsc_to_system_mul);
 		vcpu->hw_tsc_khz = this_tsc_khz;
 	}
 
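The (tsc_shift, tsc_to_system_mul) pair stored here is consumed by pvclock
readers, which apply the shift first and the 32.32 fixed-point multiplier
second. Below is a hedged sketch of that conversion, assuming a compiler with
the unsigned __int128 extension to keep the intermediate product from
overflowing; the scale_delta name is ours for illustration, not the kernel's.

#include <stdint.h>

/* Illustrative only: turn a TSC delta into scaled units using the
 * (shift, mul) pair computed by kvm_get_time_scale(). */
static uint64_t scale_delta(uint64_t delta, uint32_t mul, int8_t shift)
{
	if (shift < 0)
		delta >>= -shift;	/* base rate was reduced */
	else
		delta <<= shift;	/* base rate was scaled up */
	/* 32.32 fixed-point multiply; the 128-bit intermediate avoids
	 * overflow of delta * mul. */
	return (uint64_t)(((unsigned __int128)delta * mul) >> 32);
}

For the 2.8 GHz example above, scale_delta(2800000000ULL, mul, shift) yields
approximately 1000000000, confirming that one second of TSC ticks converts to
one second of nanoseconds.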