about summary refs log tree commit diff stats
path: root/arch/x86/kvm/x86.c
diff options
context:
space:
mode:
authorZachary Amsden <zamsden@redhat.com>2010-08-20 04:07:25 -0400
committerAvi Kivity <avi@redhat.com>2010-10-24 04:51:23 -0400
commit759379dd68c2885d1fafa433083d4487e710a685 (patch)
treed20cf95eb9f65293baa4e42e360465a2fc606e8e /arch/x86/kvm/x86.c
parent48434c20e18d59001469699fcaaf9cf30b815a20 (diff)
KVM: x86: Add helper functions for time computation
Add a helper function to compute the kernel time and convert nanoseconds back to CPU specific cycles. Note that these must not be called in preemptible context, as that would mean the kernel could enter software suspend state, which would cause non-atomic operation. Also, convert the KVM_SET_CLOCK / KVM_GET_CLOCK ioctls to use the kernel time helper, these should be bootbased as well. Signed-off-by: Zachary Amsden <zamsden@redhat.com> Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
Diffstat (limited to 'arch/x86/kvm/x86.c')
-rw-r--r--arch/x86/kvm/x86.c48
1 file changed, 28 insertions, 20 deletions
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 9396b3f2c594..4bcb120cc76a 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -893,6 +893,16 @@ static void kvm_set_time_scale(uint32_t tsc_khz, struct pvclock_vcpu_time_info *
893 hv_clock->tsc_to_system_mul); 893 hv_clock->tsc_to_system_mul);
894} 894}
895 895
/*
 * get_kernel_ns - return the current kernel time, in nanoseconds,
 * on a boot-based timebase (monotonic time plus time spent suspended).
 *
 * Must not be called in preemptible context: between reading the clock
 * and using the result the kernel could otherwise be preempted (or even
 * enter software suspend), making the operation non-atomic — hence the
 * WARN_ON below.
 */
896static inline u64 get_kernel_ns(void)
897{
898	struct timespec ts;
899
900	WARN_ON(preemptible());		/* caller must have preemption disabled */
901	ktime_get_ts(&ts);		/* CLOCK_MONOTONIC reading */
902	monotonic_to_bootbased(&ts);	/* convert to boot-based (adds suspend time) */
903	return timespec_to_ns(&ts);
904}
905
896static DEFINE_PER_CPU(unsigned long, cpu_tsc_khz); 906static DEFINE_PER_CPU(unsigned long, cpu_tsc_khz);
897 907
898static inline int kvm_tsc_changes_freq(void) 908static inline int kvm_tsc_changes_freq(void)
@@ -904,18 +914,24 @@ static inline int kvm_tsc_changes_freq(void)
904 return ret; 914 return ret;
905} 915}
906 916
/*
 * nsec_to_cycles - convert a duration in nanoseconds into TSC cycles
 * using this CPU's current TSC frequency (per-CPU cpu_tsc_khz).
 *
 * Must not be called in preemptible context: the per-CPU frequency read
 * must come from the CPU the caller is actually running on.
 *
 * If the TSC rate is adjustable (cpufreq scaling without a constant
 * TSC), the conversion is only approximate; a one-time warning is
 * printed in that case.
 */
917static inline u64 nsec_to_cycles(u64 nsec)
918{
919	WARN_ON(preemptible());
920	if (kvm_tsc_changes_freq())
921		printk_once(KERN_WARNING
922		"kvm: unreliable cycle conversion on adjustable rate TSC\n");
	/* cycles = ns * (kHz * 1000) / 1e9 = ns * kHz / USEC_PER_SEC */
923	return (nsec * __get_cpu_var(cpu_tsc_khz)) / USEC_PER_SEC;
924}
925
907void kvm_write_tsc(struct kvm_vcpu *vcpu, u64 data) 926void kvm_write_tsc(struct kvm_vcpu *vcpu, u64 data)
908{ 927{
909 struct kvm *kvm = vcpu->kvm; 928 struct kvm *kvm = vcpu->kvm;
910 u64 offset, ns, elapsed; 929 u64 offset, ns, elapsed;
911 unsigned long flags; 930 unsigned long flags;
912 struct timespec ts;
913 931
914 spin_lock_irqsave(&kvm->arch.tsc_write_lock, flags); 932 spin_lock_irqsave(&kvm->arch.tsc_write_lock, flags);
915 offset = data - native_read_tsc(); 933 offset = data - native_read_tsc();
916 ktime_get_ts(&ts); 934 ns = get_kernel_ns();
917 monotonic_to_bootbased(&ts);
918 ns = timespec_to_ns(&ts);
919 elapsed = ns - kvm->arch.last_tsc_nsec; 935 elapsed = ns - kvm->arch.last_tsc_nsec;
920 936
921 /* 937 /*
@@ -931,10 +947,9 @@ void kvm_write_tsc(struct kvm_vcpu *vcpu, u64 data)
931 offset = kvm->arch.last_tsc_offset; 947 offset = kvm->arch.last_tsc_offset;
932 pr_debug("kvm: matched tsc offset for %llu\n", data); 948 pr_debug("kvm: matched tsc offset for %llu\n", data);
933 } else { 949 } else {
934 u64 tsc_delta = elapsed * __get_cpu_var(cpu_tsc_khz); 950 u64 delta = nsec_to_cycles(elapsed);
935 tsc_delta = tsc_delta / USEC_PER_SEC; 951 offset += delta;
936 offset += tsc_delta; 952 pr_debug("kvm: adjusted tsc offset by %llu\n", delta);
937 pr_debug("kvm: adjusted tsc offset by %llu\n", tsc_delta);
938 } 953 }
939 ns = kvm->arch.last_tsc_nsec; 954 ns = kvm->arch.last_tsc_nsec;
940 } 955 }
@@ -951,11 +966,11 @@ EXPORT_SYMBOL_GPL(kvm_write_tsc);
951 966
952static int kvm_write_guest_time(struct kvm_vcpu *v) 967static int kvm_write_guest_time(struct kvm_vcpu *v)
953{ 968{
954 struct timespec ts;
955 unsigned long flags; 969 unsigned long flags;
956 struct kvm_vcpu_arch *vcpu = &v->arch; 970 struct kvm_vcpu_arch *vcpu = &v->arch;
957 void *shared_kaddr; 971 void *shared_kaddr;
958 unsigned long this_tsc_khz; 972 unsigned long this_tsc_khz;
973 s64 kernel_ns;
959 974
960 if ((!vcpu->time_page)) 975 if ((!vcpu->time_page))
961 return 0; 976 return 0;
@@ -963,8 +978,7 @@ static int kvm_write_guest_time(struct kvm_vcpu *v)
963 /* Keep irq disabled to prevent changes to the clock */ 978 /* Keep irq disabled to prevent changes to the clock */
964 local_irq_save(flags); 979 local_irq_save(flags);
965 kvm_get_msr(v, MSR_IA32_TSC, &vcpu->hv_clock.tsc_timestamp); 980 kvm_get_msr(v, MSR_IA32_TSC, &vcpu->hv_clock.tsc_timestamp);
966 ktime_get_ts(&ts); 981 kernel_ns = get_kernel_ns();
967 monotonic_to_bootbased(&ts);
968 this_tsc_khz = __get_cpu_var(cpu_tsc_khz); 982 this_tsc_khz = __get_cpu_var(cpu_tsc_khz);
969 local_irq_restore(flags); 983 local_irq_restore(flags);
970 984
@@ -979,9 +993,7 @@ static int kvm_write_guest_time(struct kvm_vcpu *v)
979 } 993 }
980 994
981 /* With all the info we got, fill in the values */ 995 /* With all the info we got, fill in the values */
982 vcpu->hv_clock.system_time = ts.tv_nsec + 996 vcpu->hv_clock.system_time = kernel_ns + v->kvm->arch.kvmclock_offset;
983 (NSEC_PER_SEC * (u64)ts.tv_sec) + v->kvm->arch.kvmclock_offset;
984
985 vcpu->hv_clock.flags = 0; 997 vcpu->hv_clock.flags = 0;
986 998
987 /* 999 /*
@@ -3263,7 +3275,6 @@ long kvm_arch_vm_ioctl(struct file *filp,
3263 break; 3275 break;
3264 } 3276 }
3265 case KVM_SET_CLOCK: { 3277 case KVM_SET_CLOCK: {
3266 struct timespec now;
3267 struct kvm_clock_data user_ns; 3278 struct kvm_clock_data user_ns;
3268 u64 now_ns; 3279 u64 now_ns;
3269 s64 delta; 3280 s64 delta;
@@ -3277,19 +3288,16 @@ long kvm_arch_vm_ioctl(struct file *filp,
3277 goto out; 3288 goto out;
3278 3289
3279 r = 0; 3290 r = 0;
3280 ktime_get_ts(&now); 3291 now_ns = get_kernel_ns();
3281 now_ns = timespec_to_ns(&now);
3282 delta = user_ns.clock - now_ns; 3292 delta = user_ns.clock - now_ns;
3283 kvm->arch.kvmclock_offset = delta; 3293 kvm->arch.kvmclock_offset = delta;
3284 break; 3294 break;
3285 } 3295 }
3286 case KVM_GET_CLOCK: { 3296 case KVM_GET_CLOCK: {
3287 struct timespec now;
3288 struct kvm_clock_data user_ns; 3297 struct kvm_clock_data user_ns;
3289 u64 now_ns; 3298 u64 now_ns;
3290 3299
3291 ktime_get_ts(&now); 3300 now_ns = get_kernel_ns();
3292 now_ns = timespec_to_ns(&now);
3293 user_ns.clock = kvm->arch.kvmclock_offset + now_ns; 3301 user_ns.clock = kvm->arch.kvmclock_offset + now_ns;
3294 user_ns.flags = 0; 3302 user_ns.flags = 0;
3295 3303