Diffstat (limited to 'arch/x86/xen/time.c')
-rw-r--r--	arch/x86/xen/time.c	158
1 file changed, 28 insertions(+), 130 deletions(-)
diff --git a/arch/x86/xen/time.c b/arch/x86/xen/time.c
index c39e1a5aa241..64f0038b9558 100644
--- a/arch/x86/xen/time.c
+++ b/arch/x86/xen/time.c
@@ -12,7 +12,9 @@
 #include <linux/clocksource.h>
 #include <linux/clockchips.h>
 #include <linux/kernel_stat.h>
+#include <linux/math64.h>
 
+#include <asm/pvclock.h>
 #include <asm/xen/hypervisor.h>
 #include <asm/xen/hypercall.h>
 
@@ -30,17 +32,6 @@
 
 static cycle_t xen_clocksource_read(void);
 
-/* These are perodically updated in shared_info, and then copied here. */
-struct shadow_time_info {
-	u64 tsc_timestamp; /* TSC at last update of time vals. */
-	u64 system_timestamp; /* Time, in nanosecs, since boot. */
-	u32 tsc_to_nsec_mul;
-	int tsc_shift;
-	u32 version;
-};
-
-static DEFINE_PER_CPU(struct shadow_time_info, shadow_time);
-
 /* runstate info updated by Xen */
 static DEFINE_PER_CPU(struct vcpu_runstate_info, runstate);
 
@@ -150,11 +141,7 @@ static void do_stolen_accounting(void)
 	if (stolen < 0)
 		stolen = 0;
 
-	ticks = 0;
-	while (stolen >= NS_PER_TICK) {
-		ticks++;
-		stolen -= NS_PER_TICK;
-	}
+	ticks = iter_div_u64_rem(stolen, NS_PER_TICK, &stolen);
 	__get_cpu_var(residual_stolen) = stolen;
 	account_steal_time(NULL, ticks);
 
@@ -166,11 +153,7 @@ static void do_stolen_accounting(void)
 	if (blocked < 0)
 		blocked = 0;
 
-	ticks = 0;
-	while (blocked >= NS_PER_TICK) {
-		ticks++;
-		blocked -= NS_PER_TICK;
-	}
+	ticks = iter_div_u64_rem(blocked, NS_PER_TICK, &blocked);
 	__get_cpu_var(residual_blocked) = blocked;
 	account_steal_time(idle_task(smp_processor_id()), ticks);
 }
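The two do_stolen_accounting() hunks above swap identical open-coded division loops for iter_div_u64_rem() from <linux/math64.h>: the helper returns the number of whole ticks and writes the remainder back through its third argument, so residual_stolen and residual_blocked can still be assigned from the same variable afterwards. The stand-alone sketch below is illustrative only (plain C outside the kernel; the function names and the NS_PER_TICK value are made up here, the kernel's NS_PER_TICK being one timer tick in nanoseconds):

#include <assert.h>
#include <stdint.h>

#define NS_PER_TICK (1000000000ULL / 250)	/* illustrative; not the kernel's definition */

/* What the removed loops did: count whole ticks, keep the remainder. */
static uint64_t ticks_by_loop(uint64_t *ns)
{
	uint64_t ticks = 0;

	while (*ns >= NS_PER_TICK) {
		ticks++;
		*ns -= NS_PER_TICK;
	}
	return ticks;
}

/* What ticks = iter_div_u64_rem(stolen, NS_PER_TICK, &stolen) yields:
 * the quotient as the return value, the remainder written back. */
static uint64_t ticks_by_div(uint64_t *ns)
{
	uint64_t ticks = *ns / NS_PER_TICK;

	*ns %= NS_PER_TICK;
	return ticks;
}

int main(void)
{
	uint64_t a = 12345678901ULL, b = a;

	assert(ticks_by_loop(&a) == ticks_by_div(&b) && a == b);
	return 0;
}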
@@ -218,7 +201,7 @@ unsigned long long xen_sched_clock(void)
 unsigned long xen_cpu_khz(void)
 {
 	u64 xen_khz = 1000000ULL << 32;
-	const struct vcpu_time_info *info =
+	const struct pvclock_vcpu_time_info *info =
 		&HYPERVISOR_shared_info->vcpu_info[0].time;
 
 	do_div(xen_khz, info->tsc_to_system_mul);
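On the arithmetic around the renamed type: under the pvclock convention a TSC delta is turned into nanoseconds via (delta * tsc_to_system_mul) >> 32 (plus a tsc_shift correction), so one cycle lasts tsc_to_system_mul / 2^32 ns and the CPU frequency in kHz works out to (10^6 << 32) / tsc_to_system_mul, which is what the do_div() line above computes (the shift correction sits in the unshown lines before return xen_khz). A rough sketch of that relation, with the shift deliberately ignored and the example multiplier picked arbitrarily:

#include <stdint.h>
#include <stdio.h>

/* Sketch: recover kHz from a pvclock-style multiplier, tsc_shift ignored. */
static uint64_t khz_from_mul(uint32_t tsc_to_system_mul)
{
	/* ns per cycle = mul / 2^32  =>  kHz = cycles per ms = (10^6 << 32) / mul */
	return (1000000ULL << 32) / tsc_to_system_mul;
}

int main(void)
{
	/* A 2.4 GHz TSC corresponds to a multiplier of roughly 2^32 / 2.4. */
	printf("%llu kHz\n", (unsigned long long)khz_from_mul(1789569707u));
	return 0;
}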
@@ -230,121 +213,26 @@ unsigned long xen_cpu_khz(void)
 	return xen_khz;
 }
 
-/*
- * Reads a consistent set of time-base values from Xen, into a shadow data
- * area.
- */
-static unsigned get_time_values_from_xen(void)
-{
-	struct vcpu_time_info *src;
-	struct shadow_time_info *dst;
-
-	/* src is shared memory with the hypervisor, so we need to
-	   make sure we get a consistent snapshot, even in the face of
-	   being preempted. */
-	src = &__get_cpu_var(xen_vcpu)->time;
-	dst = &__get_cpu_var(shadow_time);
-
-	do {
-		dst->version = src->version;
-		rmb(); /* fetch version before data */
-		dst->tsc_timestamp = src->tsc_timestamp;
-		dst->system_timestamp = src->system_time;
-		dst->tsc_to_nsec_mul = src->tsc_to_system_mul;
-		dst->tsc_shift = src->tsc_shift;
-		rmb(); /* test version after fetching data */
-	} while ((src->version & 1) | (dst->version ^ src->version));
-
-	return dst->version;
-}
-
-/*
- * Scale a 64-bit delta by scaling and multiplying by a 32-bit fraction,
- * yielding a 64-bit result.
- */
-static inline u64 scale_delta(u64 delta, u32 mul_frac, int shift)
-{
-	u64 product;
-#ifdef __i386__
-	u32 tmp1, tmp2;
-#endif
-
-	if (shift < 0)
-		delta >>= -shift;
-	else
-		delta <<= shift;
-
-#ifdef __i386__
-	__asm__ (
-		"mul %5 ; "
-		"mov %4,%%eax ; "
-		"mov %%edx,%4 ; "
-		"mul %5 ; "
-		"xor %5,%5 ; "
-		"add %4,%%eax ; "
-		"adc %5,%%edx ; "
-		: "=A" (product), "=r" (tmp1), "=r" (tmp2)
-		: "a" ((u32)delta), "1" ((u32)(delta >> 32)), "2" (mul_frac) );
-#elif __x86_64__
-	__asm__ (
-		"mul %%rdx ; shrd $32,%%rdx,%%rax"
-		: "=a" (product) : "0" (delta), "d" ((u64)mul_frac) );
-#else
-#error implement me!
-#endif
-
-	return product;
-}
-
-static u64 get_nsec_offset(struct shadow_time_info *shadow)
-{
-	u64 now, delta;
-	now = native_read_tsc();
-	delta = now - shadow->tsc_timestamp;
-	return scale_delta(delta, shadow->tsc_to_nsec_mul, shadow->tsc_shift);
-}
-
 static cycle_t xen_clocksource_read(void)
 {
-	struct shadow_time_info *shadow = &get_cpu_var(shadow_time);
+	struct pvclock_vcpu_time_info *src;
 	cycle_t ret;
-	unsigned version;
-
-	do {
-		version = get_time_values_from_xen();
-		barrier();
-		ret = shadow->system_timestamp + get_nsec_offset(shadow);
-		barrier();
-	} while (version != __get_cpu_var(xen_vcpu)->time.version);
-
-	put_cpu_var(shadow_time);
 
+	src = &get_cpu_var(xen_vcpu)->time;
+	ret = pvclock_clocksource_read(src);
+	put_cpu_var(xen_vcpu);
 	return ret;
 }
 
 static void xen_read_wallclock(struct timespec *ts)
 {
-	const struct shared_info *s = HYPERVISOR_shared_info;
-	u32 version;
-	u64 delta;
-	struct timespec now;
-
-	/* get wallclock at system boot */
-	do {
-		version = s->wc_version;
-		rmb(); /* fetch version before time */
-		now.tv_sec = s->wc_sec;
-		now.tv_nsec = s->wc_nsec;
-		rmb(); /* fetch time before checking version */
-	} while ((s->wc_version & 1) | (version ^ s->wc_version));
-
-	delta = xen_clocksource_read(); /* time since system boot */
-	delta += now.tv_sec * (u64)NSEC_PER_SEC + now.tv_nsec;
-
-	now.tv_nsec = do_div(delta, NSEC_PER_SEC);
-	now.tv_sec = delta;
+	struct shared_info *s = HYPERVISOR_shared_info;
+	struct pvclock_wall_clock *wall_clock = &(s->wc);
+	struct pvclock_vcpu_time_info *vcpu_time;
 
-	set_normalized_timespec(ts, now.tv_sec, now.tv_nsec);
+	vcpu_time = &get_cpu_var(xen_vcpu)->time;
+	pvclock_read_wallclock(wall_clock, vcpu_time, ts);
+	put_cpu_var(xen_vcpu);
 }
 
 unsigned long xen_get_wallclock(void)
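The large hunk above drops this file's private copies of the snapshot and scaling logic (get_time_values_from_xen(), scale_delta(), get_nsec_offset(), and the open-coded wallclock loop) in favour of the shared helpers pvclock_clocksource_read() and pvclock_read_wallclock() pulled in via <asm/pvclock.h>. Every one of the removed loops implements the same seqcount-style protocol: read the version, read the data, re-read the version, and retry while the version is odd (writer in progress) or has changed. A self-contained sketch of just that retry rule follows; the struct and its fields are invented stand-ins for the pvclock records, not the real layout:

#include <stdint.h>

/* Illustrative record published by a writer that bumps 'version' to an odd
 * value before updating the payload and to an even value afterwards. */
struct snapshot {
	uint32_t version;
	uint64_t tsc_timestamp;
	uint64_t system_time;
};

static struct snapshot read_consistent(const volatile struct snapshot *src)
{
	struct snapshot dst;
	uint32_t version;

	do {
		version = src->version;
		__sync_synchronize();	/* fetch version before data (rmb() in the kernel) */
		dst.tsc_timestamp = src->tsc_timestamp;
		dst.system_time = src->system_time;
		__sync_synchronize();	/* fetch data before re-checking the version */
	} while ((version & 1) || version != src->version);

	dst.version = version;
	return dst;
}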
@@ -352,7 +240,6 @@ unsigned long xen_get_wallclock(void)
 	struct timespec ts;
 
 	xen_read_wallclock(&ts);
-
 	return ts.tv_sec;
 }
 
@@ -572,12 +459,23 @@ void xen_setup_cpu_clockevents(void)
 	clockevents_register_device(&__get_cpu_var(xen_clock_events));
 }
 
+void xen_timer_resume(void)
+{
+	int cpu;
+
+	if (xen_clockevent != &xen_vcpuop_clockevent)
+		return;
+
+	for_each_online_cpu(cpu) {
+		if (HYPERVISOR_vcpu_op(VCPUOP_stop_periodic_timer, cpu, NULL))
+			BUG();
+	}
+}
+
 __init void xen_time_init(void)
 {
 	int cpu = smp_processor_id();
 
-	get_time_values_from_xen();
-
 	clocksource_register(&xen_clocksource);
 
 	if (HYPERVISOR_vcpu_op(VCPUOP_stop_periodic_timer, cpu, NULL) == 0) {