author     Haozhong Zhang <haozhong.zhang@intel.com>  2015-10-20 03:39:04 -0400
committer  Paolo Bonzini <pbonzini@redhat.com>        2015-11-10 06:06:16 -0500
commit     381d585c80e34988269bd7901ad910981e900be1 (patch)
tree       771900b748040495069fdad4c517f658159e3d6c
parent     35181e86df97e4223f4a28fb33e2bcf3b73de141 (diff)
KVM: x86: Replace call-back set_tsc_khz() with a common function
Both VMX and SVM propagate virtual_tsc_khz in the same way, so this patch
removes the call-back set_tsc_khz() and replaces it with a common function.

Signed-off-by: Haozhong Zhang <haozhong.zhang@intel.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
 arch/x86/include/asm/kvm_host.h |  1 -
 arch/x86/kvm/svm.c              | 36 ------------------------------------
 arch/x86/kvm/vmx.c              | 17 -----------------
 arch/x86/kvm/x86.c              | 46 +++++++++++++++++++++++++++++++++++-----
 include/linux/math64.h          | 29 +++++++++++++++++++++++++
 5 files changed, 70 insertions(+), 59 deletions(-)
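
A note on the representation involved: the new common function encodes the
guest/host frequency quotient as a binary fixed-point ratio with a
hardware-defined number of fractional bits (32 on SVM, whose TSC_RATIO MSR
the ratio is written into). A minimal standalone sketch of that encoding,
with a hard-coded FRAC_BITS standing in for the kernel's
kvm_tsc_scaling_ratio_frac_bits; the naive shift-and-divide here is safe at
32 fractional bits but can overflow u64 for larger counts, which is what the
new mul_u64_u32_div() helper below avoids:

#include <stdint.h>
#include <stdio.h>

/* Illustrative only: 32 is SVM's fractional-bit count; the kernel
 * takes this from kvm_tsc_scaling_ratio_frac_bits instead. */
#define FRAC_BITS 32

/* Hardware then computes: guest_tsc = (host_tsc * ratio) >> FRAC_BITS */
static uint64_t tsc_ratio(uint32_t guest_khz, uint32_t host_khz)
{
	return ((uint64_t)guest_khz << FRAC_BITS) / host_khz;
}

int main(void)
{
	/* A 1.5 GHz guest on a 3 GHz host: ratio 0.5, i.e. 0x80000000. */
	printf("%#llx\n", (unsigned long long)tsc_ratio(1500000, 3000000));
	return 0;
}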
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 52d1419968eb..c5a3f3d66e90 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -853,7 +853,6 @@ struct kvm_x86_ops {
 
 	bool (*has_wbinvd_exit)(void);
 
-	void (*set_tsc_khz)(struct kvm_vcpu *vcpu, u32 user_tsc_khz, bool scale);
 	u64 (*read_tsc_offset)(struct kvm_vcpu *vcpu);
 	void (*write_tsc_offset)(struct kvm_vcpu *vcpu, u64 offset);
 
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index 65f4f1947a62..f6e49a6c9ab0 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -957,41 +957,6 @@ static void init_sys_seg(struct vmcb_seg *seg, uint32_t type)
 	seg->base = 0;
 }
 
-static void svm_set_tsc_khz(struct kvm_vcpu *vcpu, u32 user_tsc_khz, bool scale)
-{
-	u64 ratio;
-	u64 khz;
-
-	/* Guest TSC same frequency as host TSC? */
-	if (!scale) {
-		vcpu->arch.tsc_scaling_ratio = TSC_RATIO_DEFAULT;
-		return;
-	}
-
-	/* TSC scaling supported? */
-	if (!boot_cpu_has(X86_FEATURE_TSCRATEMSR)) {
-		if (user_tsc_khz > tsc_khz) {
-			vcpu->arch.tsc_catchup = 1;
-			vcpu->arch.tsc_always_catchup = 1;
-		} else
-			WARN(1, "user requested TSC rate below hardware speed\n");
-		return;
-	}
-
-	khz = user_tsc_khz;
-
-	/* TSC scaling required - calculate ratio */
-	ratio = khz << 32;
-	do_div(ratio, tsc_khz);
-
-	if (ratio == 0 || ratio & TSC_RATIO_RSVD) {
-		WARN_ONCE(1, "Invalid TSC ratio - virtual-tsc-khz=%u\n",
-				user_tsc_khz);
-		return;
-	}
-	vcpu->arch.tsc_scaling_ratio = ratio;
-}
-
 static u64 svm_read_tsc_offset(struct kvm_vcpu *vcpu)
 {
 	struct vcpu_svm *svm = to_svm(vcpu);
@@ -4402,7 +4367,6 @@ static struct kvm_x86_ops svm_x86_ops = {
 
 	.has_wbinvd_exit = svm_has_wbinvd_exit,
 
-	.set_tsc_khz = svm_set_tsc_khz,
 	.read_tsc_offset = svm_read_tsc_offset,
 	.write_tsc_offset = svm_write_tsc_offset,
 	.adjust_tsc_offset = svm_adjust_tsc_offset,
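
For context on the constants the removed SVM code used: AMD's TSC_RATIO MSR
holds an 8.32 fixed-point value (integer part in bits 39:32, fraction in
bits 31:0), and TSC_RATIO_RSVD masks the reserved bits 63:40, which is why
the removed code rejected any ratio with those bits set. A small
illustrative check, with the mask written out literally rather than taken
from svm.c:

#include <stdint.h>
#include <stdio.h>

/* Reserved bits 63:40 of AMD's TSC_RATIO MSR, as defined in svm.c. */
#define TSC_RATIO_RSVD	0xffffff0000000000ULL

static int tsc_ratio_valid(uint64_t ratio)
{
	return ratio != 0 && !(ratio & TSC_RATIO_RSVD);
}

int main(void)
{
	uint64_t half = 1ULL << 31;       /* 0.5: fraction bits only */
	uint64_t too_big = 256ULL << 32;  /* integer part needs 9 bits */

	printf("0.5   -> %d\n", tsc_ratio_valid(half));    /* 1 */
	printf("256.0 -> %d\n", tsc_ratio_valid(too_big)); /* 0 */
	return 0;
}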
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index a26ed285931b..baee46893899 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -2382,22 +2382,6 @@ static u64 vmx_read_l1_tsc(struct kvm_vcpu *vcpu, u64 host_tsc)
 	return host_tsc + tsc_offset;
 }
 
-/*
- * Engage any workarounds for mis-matched TSC rates.  Currently limited to
- * software catchup for faster rates on slower CPUs.
- */
-static void vmx_set_tsc_khz(struct kvm_vcpu *vcpu, u32 user_tsc_khz, bool scale)
-{
-	if (!scale)
-		return;
-
-	if (user_tsc_khz > tsc_khz) {
-		vcpu->arch.tsc_catchup = 1;
-		vcpu->arch.tsc_always_catchup = 1;
-	} else
-		WARN(1, "user requested TSC rate below hardware speed\n");
-}
-
 static u64 vmx_read_tsc_offset(struct kvm_vcpu *vcpu)
 {
 	return vmcs_read64(TSC_OFFSET);
@@ -10826,7 +10810,6 @@ static struct kvm_x86_ops vmx_x86_ops = {
 
 	.has_wbinvd_exit = cpu_has_vmx_wbinvd_exit,
 
-	.set_tsc_khz = vmx_set_tsc_khz,
 	.read_tsc_offset = vmx_read_tsc_offset,
 	.write_tsc_offset = vmx_write_tsc_offset,
 	.adjust_tsc_offset = vmx_adjust_tsc_offset,
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 1473e64cb744..c314e8d22a67 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -1253,7 +1253,43 @@ static u32 adjust_tsc_khz(u32 khz, s32 ppm)
 	return v;
 }
 
-static void kvm_set_tsc_khz(struct kvm_vcpu *vcpu, u32 this_tsc_khz)
+static int set_tsc_khz(struct kvm_vcpu *vcpu, u32 user_tsc_khz, bool scale)
+{
+	u64 ratio;
+
+	/* Guest TSC same frequency as host TSC? */
+	if (!scale) {
+		vcpu->arch.tsc_scaling_ratio = kvm_default_tsc_scaling_ratio;
+		return 0;
+	}
+
+	/* TSC scaling supported? */
+	if (!kvm_has_tsc_control) {
+		if (user_tsc_khz > tsc_khz) {
+			vcpu->arch.tsc_catchup = 1;
+			vcpu->arch.tsc_always_catchup = 1;
+			return 0;
+		} else {
+			WARN(1, "user requested TSC rate below hardware speed\n");
+			return -1;
+		}
+	}
+
+	/* TSC scaling required - calculate ratio */
+	ratio = mul_u64_u32_div(1ULL << kvm_tsc_scaling_ratio_frac_bits,
+				user_tsc_khz, tsc_khz);
+
+	if (ratio == 0 || ratio >= kvm_max_tsc_scaling_ratio) {
+		WARN_ONCE(1, "Invalid TSC scaling ratio - virtual-tsc-khz=%u\n",
+			  user_tsc_khz);
+		return -1;
+	}
+
+	vcpu->arch.tsc_scaling_ratio = ratio;
+	return 0;
+}
+
+static int kvm_set_tsc_khz(struct kvm_vcpu *vcpu, u32 this_tsc_khz)
 {
 	u32 thresh_lo, thresh_hi;
 	int use_scaling = 0;
@@ -1262,7 +1298,7 @@ static void kvm_set_tsc_khz(struct kvm_vcpu *vcpu, u32 this_tsc_khz)
 	if (this_tsc_khz == 0) {
 		/* set tsc_scaling_ratio to a safe value */
 		vcpu->arch.tsc_scaling_ratio = kvm_default_tsc_scaling_ratio;
-		return;
+		return -1;
 	}
 
 	/* Compute a scale to convert nanoseconds in TSC cycles */
@@ -1283,7 +1319,7 @@ static void kvm_set_tsc_khz(struct kvm_vcpu *vcpu, u32 this_tsc_khz)
 		pr_debug("kvm: requested TSC rate %u falls outside tolerance [%u,%u]\n", this_tsc_khz, thresh_lo, thresh_hi);
 		use_scaling = 1;
 	}
-	kvm_x86_ops->set_tsc_khz(vcpu, this_tsc_khz, use_scaling);
+	return set_tsc_khz(vcpu, this_tsc_khz, use_scaling);
 }
 
 static u64 compute_guest_tsc(struct kvm_vcpu *vcpu, s64 kernel_ns)
@@ -3353,9 +3389,9 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
 		if (user_tsc_khz == 0)
 			user_tsc_khz = tsc_khz;
 
-		kvm_set_tsc_khz(vcpu, user_tsc_khz);
+		if (!kvm_set_tsc_khz(vcpu, user_tsc_khz))
+			r = 0;
 
-		r = 0;
 		goto out;
 	}
 	case KVM_GET_TSC_KHZ: {
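
The net user-visible change from the x86.c hunks above: a KVM_SET_TSC_KHZ
request the host cannot honor is no longer silently accepted, since r keeps
its default error value unless kvm_set_tsc_khz() returns 0. A hedged
userspace sketch of how that surfaces (request_guest_tsc_khz() is a
hypothetical helper; vcpu_fd is assumed to be a vCPU descriptor from
KVM_CREATE_VM + KVM_CREATE_VCPU):

#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

static int request_guest_tsc_khz(int vcpu_fd, unsigned int khz)
{
	/*
	 * After this patch, an unsatisfiable rate (no TSC scaling
	 * hardware and khz below the host rate, or an out-of-range
	 * scaling ratio) fails with EINVAL instead of succeeding
	 * silently.
	 */
	if (ioctl(vcpu_fd, KVM_SET_TSC_KHZ, khz) < 0) {
		perror("KVM_SET_TSC_KHZ");
		return -1;
	}
	return 0;
}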
diff --git a/include/linux/math64.h b/include/linux/math64.h
index 44282ec7b682..6e8b5b270ffe 100644
--- a/include/linux/math64.h
+++ b/include/linux/math64.h
@@ -214,4 +214,33 @@ static inline u64 mul_u64_u64_shr(u64 a, u64 b, unsigned int shift)
 
 #endif
 
+#ifndef mul_u64_u32_div
+static inline u64 mul_u64_u32_div(u64 a, u32 mul, u32 divisor)
+{
+	union {
+		u64 ll;
+		struct {
+#ifdef __BIG_ENDIAN
+			u32 high, low;
+#else
+			u32 low, high;
+#endif
+		} l;
+	} u, rl, rh;
+
+	u.ll = a;
+	rl.ll = (u64)u.l.low * mul;
+	rh.ll = (u64)u.l.high * mul + rl.l.high;
+
+	/* Bits 32-63 of the result will be in rh.l.low. */
+	rl.l.high = do_div(rh.ll, divisor);
+
+	/* Bits 0-31 of the result will be in rl.l.low. */
+	do_div(rl.ll, divisor);
+
+	rl.l.high = rh.l.low;
+	return rl.ll;
+}
+#endif /* mul_u64_u32_div */
+
 #endif /* _LINUX_MATH64_H */
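
This helper exists because set_tsc_khz() computes
(1 << frac_bits) * user_tsc_khz / tsc_khz, and the intermediate product can
exceed 64 bits once the fractional-bit count grows; the union-based long
division above keeps the full 96-bit intermediate instead. A rough
userspace harness of the same scheme, assuming nothing beyond the patch
itself: the do_div32() shim mimics the kernel's do_div() macro (divide in
place, return the remainder), and the 48 fractional bits in main() are
purely illustrative.

#include <stdint.h>
#include <stdio.h>

/* Userspace stand-in for the kernel's do_div(). */
static uint32_t do_div32(uint64_t *n, uint32_t base)
{
	uint32_t rem = (uint32_t)(*n % base);

	*n /= base;
	return rem;
}

/* Same long-division scheme as the helper above: split a into 32-bit
 * halves, multiply each half by mul, then divide the two 64-bit
 * partial products by divisor, carrying the high remainder down. */
static uint64_t mul_u64_u32_div(uint64_t a, uint32_t mul, uint32_t divisor)
{
	uint64_t lo = (uint64_t)(uint32_t)a * mul;
	uint64_t hi = (a >> 32) * mul + (lo >> 32);
	uint32_t carry = do_div32(&hi, divisor);

	lo = ((uint64_t)carry << 32) | (uint32_t)lo;
	do_div32(&lo, divisor);

	return (hi << 32) | (uint32_t)lo;
}

int main(void)
{
	/*
	 * (1 << 48) * 3000000 / 1000000: the naive u64 multiply would
	 * overflow (the product is roughly 2^69), but the expected
	 * ratio, 3 << 48 = 0x3000000000000, comes out exactly.
	 */
	printf("%#llx\n", (unsigned long long)
	       mul_u64_u32_div(1ULL << 48, 3000000, 1000000));
	return 0;
}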