diff options
author | Peter Zijlstra <a.p.zijlstra@chello.nl> | 2008-08-11 02:59:03 -0400 |
---|---|---|
committer | Ingo Molnar <mingo@elte.hu> | 2008-08-11 02:59:03 -0400 |
commit | c1955a3d4762e7a9bf84035eb3c4886a900f0d15 (patch) | |
tree | a00dcd1736c612017df7094a91d8a6435b7a49c9 /kernel | |
parent | 4a273f209cc95d148f79b4c96d3d03997b44ffda (diff) |
sched_clock: delay using sched_clock()
Some architectures can't handle sched_clock() being called too early - delay
this until sched_clock_init() has been called.
Reported-by: Bill Gatliff <bgat@billgatliff.com>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Tested-by: Nishanth Aravamudan <nacc@us.ibm.com>
CC: Russell King - ARM Linux <linux@arm.linux.org.uk>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'kernel')
-rw-r--r-- | kernel/sched_clock.c | 19 |
1 files changed, 17 insertions, 2 deletions
diff --git a/kernel/sched_clock.c b/kernel/sched_clock.c index 074edc989379..204991a0bfa7 100644 --- a/kernel/sched_clock.c +++ b/kernel/sched_clock.c | |||
@@ -42,6 +42,8 @@ unsigned long long __attribute__((weak)) sched_clock(void) | |||
42 | return (unsigned long long)jiffies * (NSEC_PER_SEC / HZ); | 42 | return (unsigned long long)jiffies * (NSEC_PER_SEC / HZ); |
43 | } | 43 | } |
44 | 44 | ||
45 | static __read_mostly int sched_clock_running; | ||
46 | |||
45 | #ifdef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK | 47 | #ifdef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK |
46 | 48 | ||
47 | struct sched_clock_data { | 49 | struct sched_clock_data { |
@@ -70,8 +72,6 @@ static inline struct sched_clock_data *cpu_sdc(int cpu) | |||
70 | return &per_cpu(sched_clock_data, cpu); | 72 | return &per_cpu(sched_clock_data, cpu); |
71 | } | 73 | } |
72 | 74 | ||
73 | static __read_mostly int sched_clock_running; | ||
74 | |||
75 | void sched_clock_init(void) | 75 | void sched_clock_init(void) |
76 | { | 76 | { |
77 | u64 ktime_now = ktime_to_ns(ktime_get()); | 77 | u64 ktime_now = ktime_to_ns(ktime_get()); |
@@ -248,6 +248,21 @@ void sched_clock_idle_wakeup_event(u64 delta_ns) | |||
248 | } | 248 | } |
249 | EXPORT_SYMBOL_GPL(sched_clock_idle_wakeup_event); | 249 | EXPORT_SYMBOL_GPL(sched_clock_idle_wakeup_event); |
250 | 250 | ||
251 | #else /* CONFIG_HAVE_UNSTABLE_SCHED_CLOCK */ | ||
252 | |||
253 | void sched_clock_init(void) | ||
254 | { | ||
255 | sched_clock_running = 1; | ||
256 | } | ||
257 | |||
258 | u64 sched_clock_cpu(int cpu) | ||
259 | { | ||
260 | if (unlikely(!sched_clock_running)) | ||
261 | return 0; | ||
262 | |||
263 | return sched_clock(); | ||
264 | } | ||
265 | |||
251 | #endif | 266 | #endif |
252 | 267 | ||
253 | unsigned long long cpu_clock(int cpu) | 268 | unsigned long long cpu_clock(int cpu) |