author     Glenn Elliott <gelliott@cs.unc.edu>  2012-03-04 19:47:13 -0500
committer  Glenn Elliott <gelliott@cs.unc.edu>  2012-03-04 19:47:13 -0500
commit     c71c03bda1e86c9d5198c5d83f712e695c4f2a1e (patch)
tree       ecb166cb3e2b7e2adb3b5e292245fefd23381ac8 /arch/x86/xen/time.c
parent     ea53c912f8a86a8567697115b6a0d8152beee5c8 (diff)
parent     6a00f206debf8a5c8899055726ad127dbeeed098 (diff)
Merge branch 'mpi-master' into wip-k-fmlp
Conflicts:
litmus/sched_cedf.c
Diffstat (limited to 'arch/x86/xen/time.c')
-rw-r--r--  arch/x86/xen/time.c | 28 ++++++++++++++--------------
1 file changed, 14 insertions(+), 14 deletions(-)
diff --git a/arch/x86/xen/time.c b/arch/x86/xen/time.c
index b2bb5aa3b054..5158c505bef9 100644
--- a/arch/x86/xen/time.c
+++ b/arch/x86/xen/time.c
@@ -26,8 +26,6 @@
 
 #include "xen-ops.h"
 
-#define XEN_SHIFT 22
-
 /* Xen may fire a timer up to this many ns early */
 #define TIMER_SLOP 100000
 #define NS_PER_TICK (1000000000LL / HZ)
@@ -135,24 +133,24 @@ static void do_stolen_accounting(void)
 
 	/* Add the appropriate number of ticks of stolen time,
 	   including any left-overs from last time. */
-	stolen = runnable + offline + __get_cpu_var(xen_residual_stolen);
+	stolen = runnable + offline + __this_cpu_read(xen_residual_stolen);
 
 	if (stolen < 0)
 		stolen = 0;
 
 	ticks = iter_div_u64_rem(stolen, NS_PER_TICK, &stolen);
-	__get_cpu_var(xen_residual_stolen) = stolen;
+	__this_cpu_write(xen_residual_stolen, stolen);
 	account_steal_ticks(ticks);
 
 	/* Add the appropriate number of ticks of blocked time,
 	   including any left-overs from last time. */
-	blocked += __get_cpu_var(xen_residual_blocked);
+	blocked += __this_cpu_read(xen_residual_blocked);
 
 	if (blocked < 0)
 		blocked = 0;
 
 	ticks = iter_div_u64_rem(blocked, NS_PER_TICK, &blocked);
-	__get_cpu_var(xen_residual_blocked) = blocked;
+	__this_cpu_write(xen_residual_blocked, blocked);
 	account_idle_ticks(ticks);
 }
 
@@ -211,8 +209,6 @@ static struct clocksource xen_clocksource __read_mostly = {
 	.rating = 400,
 	.read = xen_clocksource_get_cycles,
 	.mask = ~0,
-	.mult = 1<<XEN_SHIFT,	/* time directly in nanoseconds */
-	.shift = XEN_SHIFT,
 	.flags = CLOCK_SOURCE_IS_CONTINUOUS,
 };
 
@@ -397,7 +393,9 @@ void xen_setup_timer(int cpu)
 		name = "<timer kasprintf failed>";
 
 	irq = bind_virq_to_irqhandler(VIRQ_TIMER, cpu, xen_timer_interrupt,
-				      IRQF_DISABLED|IRQF_PERCPU|IRQF_NOBALANCING|IRQF_TIMER,
+				      IRQF_DISABLED|IRQF_PERCPU|
+				      IRQF_NOBALANCING|IRQF_TIMER|
+				      IRQF_FORCE_RESUME,
 				      name, NULL);
 
 	evt = &per_cpu(xen_clock_events, cpu);
@@ -426,6 +424,8 @@ void xen_timer_resume(void)
 {
 	int cpu;
 
+	pvclock_resume();
+
 	if (xen_clockevent != &xen_vcpuop_clockevent)
 		return;
 
@@ -435,16 +435,16 @@ void xen_timer_resume(void)
 	}
 }
 
-static const struct pv_time_ops xen_time_ops __initdata = {
+static const struct pv_time_ops xen_time_ops __initconst = {
 	.sched_clock = xen_clocksource_read,
 };
 
-static __init void xen_time_init(void)
+static void __init xen_time_init(void)
 {
 	int cpu = smp_processor_id();
 	struct timespec tp;
 
-	clocksource_register(&xen_clocksource);
+	clocksource_register_hz(&xen_clocksource, NSEC_PER_SEC);
 
 	if (HYPERVISOR_vcpu_op(VCPUOP_stop_periodic_timer, cpu, NULL) == 0) {
 		/* Successfully turned off 100Hz tick, so we have the
@@ -464,7 +464,7 @@ static __init void xen_time_init(void)
 	xen_setup_cpu_clockevents();
 }
 
-__init void xen_init_time_ops(void)
+void __init xen_init_time_ops(void)
 {
 	pv_time_ops = xen_time_ops;
 
@@ -486,7 +486,7 @@ static void xen_hvm_setup_cpu_clockevents(void)
 	xen_setup_cpu_clockevents();
 }
 
-__init void xen_hvm_init_time_ops(void)
+void __init xen_hvm_init_time_ops(void)
 {
 	/* vector callback is needed otherwise we cannot receive interrupts
 	 * on cpu > 0 and at this point we don't know how many cpus are
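
Note on the per-CPU change in do_stolen_accounting() above: __get_cpu_var() yields an lvalue for the current CPU's slot, while __this_cpu_read()/__this_cpu_write() make the read and the write-back explicit. The sketch below is illustrative only, not part of this commit; the per-CPU variable "residual" and the function account_residual() are hypothetical stand-ins for xen_residual_stolen and the accounting logic in the diff.

#include <linux/percpu.h>

/* Hypothetical residual counter, analogous to xen_residual_stolen. */
static DEFINE_PER_CPU(s64, residual);

static void account_residual(s64 delta)
{
	s64 total;

	/* Read this CPU's slot and fold in the new delta... */
	total = delta + __this_cpu_read(residual);
	if (total < 0)
		total = 0;

	/* ...then write the remainder back to the same slot. */
	__this_cpu_write(residual, total);
}

On x86 the __this_cpu_* accessors can compile down to segment-prefixed instructions, avoiding the explicit per-CPU address computation that the older __get_cpu_var() lvalue form required.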