Diffstat (limited to 'arch/x86/kernel/kvmclock.c')
 arch/x86/kernel/kvmclock.c | 54 ++++++++++++++++++++++++++++++++++++++++++------------
 1 file changed, 42 insertions(+), 12 deletions(-)
diff --git a/arch/x86/kernel/kvmclock.c b/arch/x86/kernel/kvmclock.c
index c7d75678886e..220a360010f8 100644
--- a/arch/x86/kernel/kvmclock.c
+++ b/arch/x86/kernel/kvmclock.c
@@ -40,11 +40,7 @@ static int parse_no_kvmclock(char *arg)
 early_param("no-kvmclock", parse_no_kvmclock);
 
 /* The hypervisor will put information about time periodically here */
-struct pvclock_aligned_vcpu_time_info {
-        struct pvclock_vcpu_time_info clock;
-} __attribute__((__aligned__(SMP_CACHE_BYTES)));
-
-static struct pvclock_aligned_vcpu_time_info *hv_clock;
+static struct pvclock_vsyscall_time_info *hv_clock;
 static struct pvclock_wall_clock wall_clock;
 
 /*
@@ -67,7 +63,7 @@ static unsigned long kvm_get_wallclock(void)
         preempt_disable();
         cpu = smp_processor_id();
 
-        vcpu_time = &hv_clock[cpu].clock;
+        vcpu_time = &hv_clock[cpu].pvti;
         pvclock_read_wallclock(&wall_clock, vcpu_time, &ts);
 
         preempt_enable();
@@ -88,7 +84,7 @@ static cycle_t kvm_clock_read(void)
 
         preempt_disable_notrace();
         cpu = smp_processor_id();
-        src = &hv_clock[cpu].clock;
+        src = &hv_clock[cpu].pvti;
         ret = pvclock_clocksource_read(src);
         preempt_enable_notrace();
         return ret;
@@ -116,7 +112,7 @@ static unsigned long kvm_get_tsc_khz(void)
 
         preempt_disable();
         cpu = smp_processor_id();
-        src = &hv_clock[cpu].clock;
+        src = &hv_clock[cpu].pvti;
         tsc_khz = pvclock_tsc_khz(src);
         preempt_enable();
         return tsc_khz;
@@ -143,7 +139,7 @@ bool kvm_check_and_clear_guest_paused(void)
         if (!hv_clock)
                 return ret;
 
-        src = &hv_clock[cpu].clock;
+        src = &hv_clock[cpu].pvti;
         if ((src->flags & PVCLOCK_GUEST_STOPPED) != 0) {
                 src->flags &= ~PVCLOCK_GUEST_STOPPED;
                 ret = true;
@@ -164,7 +160,7 @@ int kvm_register_clock(char *txt)
 {
         int cpu = smp_processor_id();
         int low, high, ret;
-        struct pvclock_vcpu_time_info *src = &hv_clock[cpu].clock;
+        struct pvclock_vcpu_time_info *src = &hv_clock[cpu].pvti;
 
         low = (int)__pa(src) | 1;
         high = ((u64)__pa(src) >> 32);
@@ -235,7 +231,7 @@ void __init kvmclock_init(void)
         printk(KERN_INFO "kvm-clock: Using msrs %x and %x",
                 msr_kvm_system_time, msr_kvm_wall_clock);
 
-        mem = memblock_alloc(sizeof(struct pvclock_aligned_vcpu_time_info) * NR_CPUS,
+        mem = memblock_alloc(sizeof(struct pvclock_vsyscall_time_info)*NR_CPUS,
                              PAGE_SIZE);
         if (!mem)
                 return;
@@ -244,7 +240,7 @@ void __init kvmclock_init(void)
         if (kvm_register_clock("boot clock")) {
                 hv_clock = NULL;
                 memblock_free(mem,
-                        sizeof(struct pvclock_aligned_vcpu_time_info)*NR_CPUS);
+                        sizeof(struct pvclock_vsyscall_time_info)*NR_CPUS);
                 return;
         }
         pv_time_ops.sched_clock = kvm_clock_read;
@@ -269,3 +265,37 @@ void __init kvmclock_init(void)
         if (kvm_para_has_feature(KVM_FEATURE_CLOCKSOURCE_STABLE_BIT))
                 pvclock_set_flags(PVCLOCK_TSC_STABLE_BIT);
 }
+
+int __init kvm_setup_vsyscall_timeinfo(void)
+{
+#ifdef CONFIG_X86_64
+        int cpu;
+        int ret;
+        u8 flags;
+        struct pvclock_vcpu_time_info *vcpu_time;
+        unsigned int size;
+
+        size = sizeof(struct pvclock_vsyscall_time_info)*NR_CPUS;
+
+        preempt_disable();
+        cpu = smp_processor_id();
+
+        vcpu_time = &hv_clock[cpu].pvti;
+        flags = pvclock_read_flags(vcpu_time);
+
+        if (!(flags & PVCLOCK_TSC_STABLE_BIT)) {
+                preempt_enable();
+                return 1;
+        }
+
+        if ((ret = pvclock_init_vsyscall(hv_clock, size))) {
+                preempt_enable();
+                return ret;
+        }
+
+        preempt_enable();
+
+        kvm_clock.archdata.vclock_mode = VCLOCK_PVCLOCK;
+#endif
+        return 0;
+}