 arch/x86/include/asm/kvm_guest.h |  6 ++++++
 arch/x86/kernel/kvm.c            | 13 +++++++++++++
 arch/x86/kernel/kvmclock.c       | 54 ++++++++++++++++++++++++++++++++++++++++++------------
 3 files changed, 61 insertions(+), 12 deletions(-)
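
Note: the kvmclock.c hunks below depend on struct pvclock_vsyscall_time_info, which this diff does not define; it is introduced elsewhere in this series in arch/x86/include/asm/pvclock.h. As a rough sketch of the shape the accesses below rely on (only the pvti member is implied by this diff; the alignment attribute is an assumption for illustration):

	/* sketch only -- the real definition lives in asm/pvclock.h,
	 * added by an earlier patch in this series */
	struct pvclock_vsyscall_time_info {
		struct pvclock_vcpu_time_info pvti;
	} __attribute__((__aligned__(SMP_CACHE_BYTES)));	/* alignment assumed */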
diff --git a/arch/x86/include/asm/kvm_guest.h b/arch/x86/include/asm/kvm_guest.h
new file mode 100644
index 000000000000..a92b1763c419
--- /dev/null
+++ b/arch/x86/include/asm/kvm_guest.h
@@ -0,0 +1,6 @@
+#ifndef _ASM_X86_KVM_GUEST_H
+#define _ASM_X86_KVM_GUEST_H
+
+int kvm_setup_vsyscall_timeinfo(void);
+
+#endif /* _ASM_X86_KVM_GUEST_H */
diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c
index 4180a874c764..a91c6b482b48 100644
--- a/arch/x86/kernel/kvm.c
+++ b/arch/x86/kernel/kvm.c
@@ -42,6 +42,7 @@
 #include <asm/apic.h>
 #include <asm/apicdef.h>
 #include <asm/hypervisor.h>
+#include <asm/kvm_guest.h>
 
 static int kvmapf = 1;
 
@@ -62,6 +63,15 @@ static int parse_no_stealacc(char *arg)
 
 early_param("no-steal-acc", parse_no_stealacc);
 
+static int kvmclock_vsyscall = 1;
+static int parse_no_kvmclock_vsyscall(char *arg)
+{
+	kvmclock_vsyscall = 0;
+	return 0;
+}
+
+early_param("no-kvmclock-vsyscall", parse_no_kvmclock_vsyscall);
+
 static DEFINE_PER_CPU(struct kvm_vcpu_pv_apf_data, apf_reason) __aligned(64);
 static DEFINE_PER_CPU(struct kvm_steal_time, steal_time) __aligned(64);
 static int has_steal_clock = 0;
@@ -471,6 +481,9 @@ void __init kvm_guest_init(void)
 	if (kvm_para_has_feature(KVM_FEATURE_PV_EOI))
 		apic_set_eoi_write(kvm_guest_apic_eoi_write);
 
+	if (kvmclock_vsyscall)
+		kvm_setup_vsyscall_timeinfo();
+
 #ifdef CONFIG_SMP
 	smp_ops.smp_prepare_boot_cpu = kvm_smp_prepare_boot_cpu;
 	register_cpu_notifier(&kvm_cpu_notifier);
diff --git a/arch/x86/kernel/kvmclock.c b/arch/x86/kernel/kvmclock.c
index c7d75678886e..220a360010f8 100644
--- a/arch/x86/kernel/kvmclock.c
+++ b/arch/x86/kernel/kvmclock.c
@@ -40,11 +40,7 @@ static int parse_no_kvmclock(char *arg)
 early_param("no-kvmclock", parse_no_kvmclock);
 
 /* The hypervisor will put information about time periodically here */
-struct pvclock_aligned_vcpu_time_info {
-	struct pvclock_vcpu_time_info clock;
-} __attribute__((__aligned__(SMP_CACHE_BYTES)));
-
-static struct pvclock_aligned_vcpu_time_info *hv_clock;
+static struct pvclock_vsyscall_time_info *hv_clock;
 static struct pvclock_wall_clock wall_clock;
 
 /*
@@ -67,7 +63,7 @@ static unsigned long kvm_get_wallclock(void)
 	preempt_disable();
 	cpu = smp_processor_id();
 
-	vcpu_time = &hv_clock[cpu].clock;
+	vcpu_time = &hv_clock[cpu].pvti;
 	pvclock_read_wallclock(&wall_clock, vcpu_time, &ts);
 
 	preempt_enable();
@@ -88,7 +84,7 @@ static cycle_t kvm_clock_read(void)
 
 	preempt_disable_notrace();
 	cpu = smp_processor_id();
-	src = &hv_clock[cpu].clock;
+	src = &hv_clock[cpu].pvti;
 	ret = pvclock_clocksource_read(src);
 	preempt_enable_notrace();
 	return ret;
@@ -116,7 +112,7 @@ static unsigned long kvm_get_tsc_khz(void)
 
 	preempt_disable();
 	cpu = smp_processor_id();
-	src = &hv_clock[cpu].clock;
+	src = &hv_clock[cpu].pvti;
 	tsc_khz = pvclock_tsc_khz(src);
 	preempt_enable();
 	return tsc_khz;
@@ -143,7 +139,7 @@ bool kvm_check_and_clear_guest_paused(void)
 	if (!hv_clock)
 		return ret;
 
-	src = &hv_clock[cpu].clock;
+	src = &hv_clock[cpu].pvti;
 	if ((src->flags & PVCLOCK_GUEST_STOPPED) != 0) {
 		src->flags &= ~PVCLOCK_GUEST_STOPPED;
 		ret = true;
@@ -164,7 +160,7 @@ int kvm_register_clock(char *txt)
 {
 	int cpu = smp_processor_id();
 	int low, high, ret;
-	struct pvclock_vcpu_time_info *src = &hv_clock[cpu].clock;
+	struct pvclock_vcpu_time_info *src = &hv_clock[cpu].pvti;
 
 	low = (int)__pa(src) | 1;
 	high = ((u64)__pa(src) >> 32);
@@ -235,7 +231,7 @@ void __init kvmclock_init(void)
 	printk(KERN_INFO "kvm-clock: Using msrs %x and %x",
 		msr_kvm_system_time, msr_kvm_wall_clock);
 
-	mem = memblock_alloc(sizeof(struct pvclock_aligned_vcpu_time_info) * NR_CPUS,
+	mem = memblock_alloc(sizeof(struct pvclock_vsyscall_time_info)*NR_CPUS,
 			     PAGE_SIZE);
 	if (!mem)
 		return;
@@ -244,7 +240,7 @@ void __init kvmclock_init(void)
 	if (kvm_register_clock("boot clock")) {
 		hv_clock = NULL;
 		memblock_free(mem,
-			sizeof(struct pvclock_aligned_vcpu_time_info)*NR_CPUS);
+			sizeof(struct pvclock_vsyscall_time_info)*NR_CPUS);
 		return;
 	}
 	pv_time_ops.sched_clock = kvm_clock_read;
@@ -269,3 +265,37 @@ void __init kvmclock_init(void)
 	if (kvm_para_has_feature(KVM_FEATURE_CLOCKSOURCE_STABLE_BIT))
 		pvclock_set_flags(PVCLOCK_TSC_STABLE_BIT);
 }
+
+int __init kvm_setup_vsyscall_timeinfo(void)
+{
+#ifdef CONFIG_X86_64
+	int cpu;
+	int ret;
+	u8 flags;
+	struct pvclock_vcpu_time_info *vcpu_time;
+	unsigned int size;
+
+	size = sizeof(struct pvclock_vsyscall_time_info)*NR_CPUS;
+
+	preempt_disable();
+	cpu = smp_processor_id();
+
+	vcpu_time = &hv_clock[cpu].pvti;
+	flags = pvclock_read_flags(vcpu_time);
+
+	if (!(flags & PVCLOCK_TSC_STABLE_BIT)) {
+		preempt_enable();
+		return 1;
+	}
+
+	if ((ret = pvclock_init_vsyscall(hv_clock, size))) {
+		preempt_enable();
+		return ret;
+	}
+
+	preempt_enable();
+
+	kvm_clock.archdata.vclock_mode = VCLOCK_PVCLOCK;
+#endif
+	return 0;
+}
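
For context on why kvm_setup_vsyscall_timeinfo() insists on PVCLOCK_TSC_STABLE_BIT: once the per-cpu pvti pages are exported via pvclock_init_vsyscall(), they are read locklessly using the standard pvclock protocol (sample the version, compute system_time plus a scaled TSC delta, retry if the version was odd or changed). A simplified, userspace-style sketch of that read protocol follows; it is illustrative only, uses the pvclock ABI field names, and replaces the kernel's 64x32-bit multiply helper and barriers with 128-bit arithmetic and compiler/CPU fences.

/*
 * Illustrative sketch (not kernel code) of the lockless pvclock read
 * protocol.  Field names follow the pvclock ABI
 * (struct pvclock_vcpu_time_info); assumes the hypervisor has set
 * PVCLOCK_TSC_STABLE_BIT, which is exactly what
 * kvm_setup_vsyscall_timeinfo() checks before enabling VCLOCK_PVCLOCK.
 */
#include <stdint.h>
#include <x86intrin.h>

struct pvti_sketch {
	uint32_t version;           /* odd while the host is updating the record */
	uint32_t pad0;
	uint64_t tsc_timestamp;     /* guest TSC value when system_time was sampled */
	uint64_t system_time;       /* nanoseconds of guest time at tsc_timestamp */
	uint32_t tsc_to_system_mul; /* fixed-point TSC-to-ns multiplier (>> 32 after use) */
	int8_t   tsc_shift;         /* binary shift applied to the TSC delta first */
	uint8_t  flags;             /* PVCLOCK_TSC_STABLE_BIT, PVCLOCK_GUEST_STOPPED, ... */
	uint8_t  pad[2];
};

static uint64_t pvclock_read_sketch(volatile struct pvti_sketch *pvti)
{
	uint32_t version;
	uint64_t delta, ns;

	do {
		version = pvti->version;                 /* snapshot the version */
		__atomic_thread_fence(__ATOMIC_ACQUIRE); /* read version before data */

		delta = __rdtsc() - pvti->tsc_timestamp;
		if (pvti->tsc_shift >= 0)
			delta <<= pvti->tsc_shift;
		else
			delta >>= -pvti->tsc_shift;

		/* the kernel uses a 64x32->96 bit multiply helper here;
		 * 128-bit arithmetic keeps the sketch short */
		ns = pvti->system_time +
		     (uint64_t)(((unsigned __int128)delta * pvti->tsc_to_system_mul) >> 32);

		__atomic_thread_fence(__ATOMIC_ACQUIRE); /* read data before re-checking */
	} while ((version & 1) || version != pvti->version);

	return ns;
}

The in-kernel equivalent is pvclock_clocksource_read(); the userspace consumer is the pvclock gettime support added to the x86-64 vDSO elsewhere in this series.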