 -rw-r--r--  arch/x86/include/asm/kvm_host.h |  2
 -rw-r--r--  arch/x86/kvm/svm.c              | 48
 -rw-r--r--  arch/x86/kvm/x86.c              | 40
 -rw-r--r--  include/linux/kvm_host.h        |  1
 -rw-r--r--  include/linux/math64.h          | 51
 5 files changed, 97 insertions(+), 45 deletions(-)
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index f3354bd92364..52d1419968eb 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -1238,6 +1238,8 @@ void kvm_arch_mmu_notifier_invalidate_page(struct kvm *kvm,
 void kvm_define_shared_msr(unsigned index, u32 msr);
 int kvm_set_shared_msr(unsigned index, u64 val, u64 mask);
 
+u64 kvm_scale_tsc(struct kvm_vcpu *vcpu, u64 tsc);
+
 unsigned long kvm_get_linear_rip(struct kvm_vcpu *vcpu);
 bool kvm_is_linear_rip(struct kvm_vcpu *vcpu, unsigned long linear_rip);
 
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index 9c92e6f429d0..65f4f1947a62 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -212,7 +212,6 @@ static int nested_svm_intercept(struct vcpu_svm *svm);
 static int nested_svm_vmexit(struct vcpu_svm *svm);
 static int nested_svm_check_exception(struct vcpu_svm *svm, unsigned nr,
				       bool has_error_code, u32 error_code);
-static u64 __scale_tsc(u64 ratio, u64 tsc);
 
 enum {
	VMCB_INTERCEPTS,	/* Intercept vectors, TSC offset,
@@ -892,21 +891,7 @@ static __init int svm_hardware_setup(void)
 		kvm_enable_efer_bits(EFER_FFXSR);
 
 	if (boot_cpu_has(X86_FEATURE_TSCRATEMSR)) {
-		u64 max;
-
 		kvm_has_tsc_control = true;
-
-		/*
-		 * Make sure the user can only configure tsc_khz values that
-		 * fit into a signed integer.
-		 * A min value is not calculated needed because it will always
-		 * be 1 on all machines and a value of 0 is used to disable
-		 * tsc-scaling for the vcpu.
-		 */
-		max = min(0x7fffffffULL, __scale_tsc(tsc_khz, TSC_RATIO_MAX));
-
-		kvm_max_guest_tsc_khz = max;
-
 		kvm_max_tsc_scaling_ratio = TSC_RATIO_MAX;
 		kvm_tsc_scaling_ratio_frac_bits = 32;
 	}
@@ -972,31 +957,6 @@ static void init_sys_seg(struct vmcb_seg *seg, uint32_t type)
 	seg->base = 0;
 }
 
-static u64 __scale_tsc(u64 ratio, u64 tsc)
-{
-	u64 mult, frac, _tsc;
-
-	mult = ratio >> 32;
-	frac = ratio & ((1ULL << 32) - 1);
-
-	_tsc = tsc;
-	_tsc *= mult;
-	_tsc += (tsc >> 32) * frac;
-	_tsc += ((tsc & ((1ULL << 32) - 1)) * frac) >> 32;
-
-	return _tsc;
-}
-
-static u64 svm_scale_tsc(struct kvm_vcpu *vcpu, u64 tsc)
-{
-	u64 _tsc = tsc;
-
-	if (vcpu->arch.tsc_scaling_ratio != TSC_RATIO_DEFAULT)
-		_tsc = __scale_tsc(vcpu->arch.tsc_scaling_ratio, tsc);
-
-	return _tsc;
-}
-
 static void svm_set_tsc_khz(struct kvm_vcpu *vcpu, u32 user_tsc_khz, bool scale)
 {
 	u64 ratio;
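
Note on the removal above: the open-coded __scale_tsc() is exactly a 64x64-bit multiply whose 128-bit product is shifted right by 32 and truncated to 64 bits; it is split into 32-bit partial products only because SVM could not assume __int128. A minimal user-space sketch (function names and test values are illustrative, not part of the patch) showing that the removed formula and a widening multiply agree:

/* Standalone check: the removed SVM helper vs. a widening multiply.
 * Build with a compiler that provides unsigned __int128, e.g.
 *   gcc -O2 scale_check.c && ./a.out
 */
#include <stdint.h>
#include <stdio.h>

/* The arithmetic of the removed __scale_tsc(): ratio is 8.32 fixed point. */
static uint64_t scale_tsc_open_coded(uint64_t ratio, uint64_t tsc)
{
	uint64_t mult = ratio >> 32;           /* integer part of the ratio    */
	uint64_t frac = ratio & 0xffffffffULL; /* fractional part of the ratio */
	uint64_t _tsc = tsc * mult;

	_tsc += (tsc >> 32) * frac;
	_tsc += ((tsc & 0xffffffffULL) * frac) >> 32;
	return _tsc;
}

/* What the generic helper computes on __int128-capable targets. */
static uint64_t scale_tsc_int128(uint64_t ratio, uint64_t tsc)
{
	return (uint64_t)(((unsigned __int128)tsc * ratio) >> 32);
}

int main(void)
{
	uint64_t ratio = 0x180000000ULL;      /* 1.5 in 8.32 fixed point */
	uint64_t tsc = 0x123456789abcdefULL;  /* arbitrary TSC reading   */

	/* Both print the same value: the two formulations are equivalent. */
	printf("open-coded: %llu\n",
	       (unsigned long long)scale_tsc_open_coded(ratio, tsc));
	printf("int128:     %llu\n",
	       (unsigned long long)scale_tsc_int128(ratio, tsc));
	return 0;
}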
@@ -1065,7 +1025,7 @@ static void svm_adjust_tsc_offset(struct kvm_vcpu *vcpu, s64 adjustment, bool ho
 	if (host) {
 		if (vcpu->arch.tsc_scaling_ratio != TSC_RATIO_DEFAULT)
 			WARN_ON(adjustment < 0);
-		adjustment = svm_scale_tsc(vcpu, (u64)adjustment);
+		adjustment = kvm_scale_tsc(vcpu, (u64)adjustment);
 	}
 
 	svm->vmcb->control.tsc_offset += adjustment;
@@ -1083,7 +1043,7 @@ static u64 svm_compute_tsc_offset(struct kvm_vcpu *vcpu, u64 target_tsc)
1083{ 1043{
1084 u64 tsc; 1044 u64 tsc;
1085 1045
1086 tsc = svm_scale_tsc(vcpu, rdtsc()); 1046 tsc = kvm_scale_tsc(vcpu, rdtsc());
1087 1047
1088 return target_tsc - tsc; 1048 return target_tsc - tsc;
1089} 1049}
@@ -3075,7 +3035,7 @@ static u64 svm_read_l1_tsc(struct kvm_vcpu *vcpu, u64 host_tsc)
3075{ 3035{
3076 struct vmcb *vmcb = get_host_vmcb(to_svm(vcpu)); 3036 struct vmcb *vmcb = get_host_vmcb(to_svm(vcpu));
3077 return vmcb->control.tsc_offset + 3037 return vmcb->control.tsc_offset +
3078 svm_scale_tsc(vcpu, host_tsc); 3038 kvm_scale_tsc(vcpu, host_tsc);
3079} 3039}
3080 3040
3081static int svm_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info) 3041static int svm_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
@@ -3085,7 +3045,7 @@ static int svm_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 	switch (msr_info->index) {
 	case MSR_IA32_TSC: {
 		msr_info->data = svm->vmcb->control.tsc_offset +
-			svm_scale_tsc(vcpu, rdtsc());
+			kvm_scale_tsc(vcpu, rdtsc());
 
 		break;
 	}
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index ef5b9d66cd71..1473e64cb744 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -1329,6 +1329,33 @@ static void update_ia32_tsc_adjust_msr(struct kvm_vcpu *vcpu, s64 offset)
 	vcpu->arch.ia32_tsc_adjust_msr += offset - curr_offset;
 }
 
+/*
+ * Multiply tsc by a fixed point number represented by ratio.
+ *
+ * The most significant 64-N bits (mult) of ratio represent the
+ * integral part of the fixed point number; the remaining N bits
+ * (frac) represent the fractional part, i.e. ratio represents a fixed
+ * point number (mult + frac * 2^(-N)).
+ *
+ * N equals kvm_tsc_scaling_ratio_frac_bits.
+ */
+static inline u64 __scale_tsc(u64 ratio, u64 tsc)
+{
+	return mul_u64_u64_shr(tsc, ratio, kvm_tsc_scaling_ratio_frac_bits);
+}
+
+u64 kvm_scale_tsc(struct kvm_vcpu *vcpu, u64 tsc)
+{
+	u64 _tsc = tsc;
+	u64 ratio = vcpu->arch.tsc_scaling_ratio;
+
+	if (ratio != kvm_default_tsc_scaling_ratio)
+		_tsc = __scale_tsc(ratio, tsc);
+
+	return _tsc;
+}
+EXPORT_SYMBOL_GPL(kvm_scale_tsc);
+
 void kvm_write_tsc(struct kvm_vcpu *vcpu, struct msr_data *msr)
 {
 	struct kvm *kvm = vcpu->kvm;
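
To make the fixed-point convention above concrete: with N = 32 fractional bits (the SVM case), a guest that should see a 1.5 GHz TSC on a 1 GHz host gets ratio = (1,500,000 << 32) / 1,000,000 = 0x1_8000_0000, i.e. mult = 1 and frac = 0x80000000 (one half). A minimal user-space sketch of that arithmetic; the khz inputs and variable names are made up for illustration, and the derivation of the ratio from the requested frequency is only sketched here, not taken from this hunk:

/* Worked example of the 32.32 fixed-point TSC scaling ratio. */
#include <stdint.h>
#include <stdio.h>

#define FRAC_BITS 32 /* kvm_tsc_scaling_ratio_frac_bits on SVM */

int main(void)
{
	uint64_t user_tsc_khz = 1500000; /* guest wants a 1.5 GHz TSC */
	uint64_t host_tsc_khz = 1000000; /* host TSC runs at 1 GHz    */

	/* guest ratio = guest_khz / host_khz in 32.32 fixed point */
	uint64_t ratio = (user_tsc_khz << FRAC_BITS) / host_tsc_khz;

	uint64_t host_tsc = 10000000; /* 10 ms of host cycles at 1 GHz */
	uint64_t guest_tsc =
		(uint64_t)(((unsigned __int128)host_tsc * ratio) >> FRAC_BITS);

	printf("ratio = %#llx\n", (unsigned long long)ratio);        /* 0x180000000 */
	printf("guest_tsc = %llu\n", (unsigned long long)guest_tsc); /* 15000000    */
	return 0;
}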
@@ -7371,8 +7398,19 @@ int kvm_arch_hardware_setup(void)
 	if (r != 0)
 		return r;
 
-	if (kvm_has_tsc_control)
+	if (kvm_has_tsc_control) {
+		/*
+		 * Make sure the user can only configure tsc_khz values that
+		 * fit into a signed integer.
+		 * A min value is not calculated because it will always
+		 * be 1 on all machines.
+		 */
+		u64 max = min(0x7fffffffULL,
+			      __scale_tsc(kvm_max_tsc_scaling_ratio, tsc_khz));
+		kvm_max_guest_tsc_khz = max;
+
 		kvm_default_tsc_scaling_ratio = 1ULL << kvm_tsc_scaling_ratio_frac_bits;
+	}
 
 	kvm_init_msr_list();
 	return 0;
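
A quick sanity check on the clamp that now lives in common code: on SVM the maximum ratio has an 8-bit integer part (just under 256), so the min() against 0x7fffffff only bites on hosts where roughly 256 × tsc_khz would overflow a signed 32-bit value, i.e. hosts above about 8.4 GHz; on typical hardware the scaled maximum is taken as-is. A small user-space sketch of the same arithmetic, with the SVM TSC_RATIO_MAX value pulled in as an assumption (it comes from svm.c, not from this hunk):

/* What the clamp above evaluates to on a hypothetical 2.6 GHz AMD host. */
#include <stdint.h>
#include <stdio.h>

#define TSC_RATIO_MAX 0x000000ffffffffffULL /* just under 256.0 in 8.32 format */

static uint64_t scale(uint64_t ratio, uint64_t tsc_khz)
{
	return (uint64_t)(((unsigned __int128)tsc_khz * ratio) >> 32);
}

int main(void)
{
	uint64_t tsc_khz = 2600000; /* 2.6 GHz host TSC */
	uint64_t max = scale(TSC_RATIO_MAX, tsc_khz);

	if (max > 0x7fffffffULL)
		max = 0x7fffffffULL; /* the min(0x7fffffffULL, ...) clamp */

	/* Prints 665599999: ~256x the host rate, still below INT_MAX. */
	printf("kvm_max_guest_tsc_khz = %llu\n", (unsigned long long)max);
	return 0;
}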
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index 242a6d2b53ff..5706a2108f0a 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -1183,4 +1183,5 @@ void kvm_arch_irq_bypass_start(struct irq_bypass_consumer *);
 int kvm_arch_update_irqfd_routing(struct kvm *kvm, unsigned int host_irq,
				   uint32_t guest_irq, bool set);
 #endif /* CONFIG_HAVE_KVM_IRQ_BYPASS */
+
 #endif
diff --git a/include/linux/math64.h b/include/linux/math64.h
index c45c089bfdac..44282ec7b682 100644
--- a/include/linux/math64.h
+++ b/include/linux/math64.h
@@ -142,6 +142,13 @@ static inline u64 mul_u64_u32_shr(u64 a, u32 mul, unsigned int shift)
 }
 #endif /* mul_u64_u32_shr */
 
+#ifndef mul_u64_u64_shr
+static inline u64 mul_u64_u64_shr(u64 a, u64 mul, unsigned int shift)
+{
+	return (u64)(((unsigned __int128)a * mul) >> shift);
+}
+#endif /* mul_u64_u64_shr */
+
 #else
 
 #ifndef mul_u64_u32_shr
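
On configurations where the compiler provides unsigned __int128, the new helper is just a widening multiply followed by a right shift, so identities such as mul_u64_u64_shr(x, 1ULL << 32, 32) == x hold trivially. A stand-alone usage sketch (not kernel code; the typedef is only there to keep the snippet self-contained):

/* Usage sketch of the __int128-based mul_u64_u64_shr(). */
#include <assert.h>
#include <stdint.h>

typedef uint64_t u64;

static inline u64 mul_u64_u64_shr(u64 a, u64 mul, unsigned int shift)
{
	return (u64)(((unsigned __int128)a * mul) >> shift);
}

int main(void)
{
	/* Multiplying by 1.0 in 32.32 fixed point is the identity. */
	assert(mul_u64_u64_shr(0xdeadbeefcafeULL, 1ULL << 32, 32) ==
	       0xdeadbeefcafeULL);

	/* Multiplying by 1.5 in 32.32 fixed point. */
	assert(mul_u64_u64_shr(100, 0x180000000ULL, 32) == 150);
	return 0;
}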
@@ -161,6 +168,50 @@ static inline u64 mul_u64_u32_shr(u64 a, u32 mul, unsigned int shift)
 }
 #endif /* mul_u64_u32_shr */
 
+#ifndef mul_u64_u64_shr
+static inline u64 mul_u64_u64_shr(u64 a, u64 b, unsigned int shift)
+{
+	union {
+		u64 ll;
+		struct {
+#ifdef __BIG_ENDIAN
+			u32 high, low;
+#else
+			u32 low, high;
+#endif
+		} l;
+	} rl, rm, rn, rh, a0, b0;
+	u64 c;
+
+	a0.ll = a;
+	b0.ll = b;
+
+	rl.ll = (u64)a0.l.low * b0.l.low;
+	rm.ll = (u64)a0.l.low * b0.l.high;
+	rn.ll = (u64)a0.l.high * b0.l.low;
+	rh.ll = (u64)a0.l.high * b0.l.high;
+
+	/*
+	 * Each of these lines computes a 64-bit intermediate result into "c",
+	 * starting at bits 32-95.  The low 32-bits go into the result of the
+	 * multiplication, the high 32-bits are carried into the next step.
+	 */
+	rl.l.high = c = (u64)rl.l.high + rm.l.low + rn.l.low;
+	rh.l.low = c = (c >> 32) + rm.l.high + rn.l.high + rh.l.low;
+	rh.l.high = (c >> 32) + rh.l.high;
+
+	/*
+	 * The 128-bit result of the multiplication is in rl.ll and rh.ll,
+	 * shift it right and throw away the high part of the result.
+	 */
+	if (shift == 0)
+		return rl.ll;
+	if (shift < 64)
+		return (rl.ll >> shift) | (rh.ll << (64 - shift));
+	return rh.ll >> (shift & 63);
+}
+#endif /* mul_u64_u64_shr */
+
 #endif
 
 #endif /* _LINUX_MATH64_H */
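
The fallback variant above does the 64x64->128 multiplication by hand: it splits both operands into 32-bit halves, forms the four partial products, and propagates the carries through a 64-bit accumulator, which is why each intermediate sits at bits 32-95 of the full product. The same schoolbook scheme, written with explicit hi/lo words instead of the union and cross-checked against __int128 (purely an illustrative test, not kernel code; __int128 is assumed only for the reference value):

/* Schoolbook 64x64 -> 128 multiply with explicit 32-bit limbs,
 * cross-checked against unsigned __int128.
 */
#include <assert.h>
#include <stdint.h>

static void mul_64x64(uint64_t a, uint64_t b, uint64_t *hi, uint64_t *lo)
{
	uint64_t a_lo = (uint32_t)a, a_hi = a >> 32;
	uint64_t b_lo = (uint32_t)b, b_hi = b >> 32;

	uint64_t ll = a_lo * b_lo;	/* contributes at bits   0..63  */
	uint64_t lh = a_lo * b_hi;	/* contributes at bits  32..95  */
	uint64_t hl = a_hi * b_lo;	/* contributes at bits  32..95  */
	uint64_t hh = a_hi * b_hi;	/* contributes at bits  64..127 */

	/* Sum everything that starts at bit 32; the low half of "carry"
	 * becomes bits 32..63 of the result, the high half carries into hi.
	 */
	uint64_t carry = (ll >> 32) + (uint32_t)lh + (uint32_t)hl;

	*lo = (carry << 32) | (uint32_t)ll;
	*hi = hh + (lh >> 32) + (hl >> 32) + (carry >> 32);
}

int main(void)
{
	uint64_t a = 0x123456789abcdef0ULL, b = 0xfedcba9876543210ULL;
	uint64_t hi, lo;

	mul_64x64(a, b, &hi, &lo);

	unsigned __int128 ref = (unsigned __int128)a * b;
	assert(lo == (uint64_t)ref);
	assert(hi == (uint64_t)(ref >> 64));
	return 0;
}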