author		Ingo Molnar <mingo@elte.hu>	2009-02-26 15:21:59 -0500
committer	Ingo Molnar <mingo@elte.hu>	2009-02-27 02:35:19 -0500
commit		1b49061d400c9e51e3ac2aac026a099fe599b9bb (patch)
tree		54c632cd7f0be2573897c1463a247e69fb769940 /kernel/sched_clock.c
parent		14131f2f98ac350ee9e73faed916d2238a8b6a0d (diff)
parent		83ce400928680a6c8123d492684b27857f5a2d95 (diff)
Merge branch 'sched/clock' into tracing/ftrace
Conflicts:
	kernel/sched_clock.c
Diffstat (limited to 'kernel/sched_clock.c')
-rw-r--r--	kernel/sched_clock.c	47
1 file changed, 23 insertions(+), 24 deletions(-)
diff --git a/kernel/sched_clock.c b/kernel/sched_clock.c
index db69174b1178..7ec82c1c61c5 100644
--- a/kernel/sched_clock.c
+++ b/kernel/sched_clock.c
@@ -24,12 +24,12 @@
  * The clock: sched_clock_cpu() is monotonic per cpu, and should be somewhat
  * consistent between cpus (never more than 2 jiffies difference).
  */
-#include <linux/sched.h>
-#include <linux/percpu.h>
 #include <linux/spinlock.h>
-#include <linux/ktime.h>
-#include <linux/module.h>
 #include <linux/hardirq.h>
+#include <linux/module.h>
+#include <linux/percpu.h>
+#include <linux/ktime.h>
+#include <linux/sched.h>
 
 /*
  * Scheduler clock - returns current time in nanosec units.
@@ -44,6 +44,10 @@ unsigned long long __attribute__((weak)) sched_clock(void)
 static __read_mostly int sched_clock_running;
 
 #ifdef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
+__read_mostly int sched_clock_stable;
+#else
+static const int sched_clock_stable = 1;
+#endif
 
 struct sched_clock_data {
 	/*
@@ -88,7 +92,7 @@ void sched_clock_init(void)
 }
 
 /*
- * min,max except they take wrapping into account
+ * min, max except they take wrapping into account
  */
 
 static inline u64 wrap_min(u64 x, u64 y)
@@ -117,10 +121,13 @@ static u64 __update_sched_clock(struct sched_clock_data *scd, u64 now)
 	if (unlikely(delta < 0))
 		delta = 0;
 
+	if (unlikely(!sched_clock_running))
+		return 0ull;
+
 	/*
 	 * scd->clock = clamp(scd->tick_gtod + delta,
 	 *		      max(scd->tick_gtod, scd->clock),
 	 *		      scd->tick_gtod + TICK_NSEC);
 	 */
 
 	clock = scd->tick_gtod + delta;
@@ -149,8 +156,13 @@ static void lock_double_clock(struct sched_clock_data *data1,
 
 u64 sched_clock_cpu(int cpu)
 {
-	struct sched_clock_data *scd = cpu_sdc(cpu);
 	u64 now, clock, this_clock, remote_clock;
+	struct sched_clock_data *scd;
+
+	if (sched_clock_stable)
+		return sched_clock();
+
+	scd = cpu_sdc(cpu);
 
 	/*
 	 * Normally this is not called in NMI context - but if it is,
@@ -201,6 +213,8 @@ u64 sched_clock_cpu(int cpu)
 	return clock;
 }
 
+#ifdef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
+
 void sched_clock_tick(void)
 {
 	struct sched_clock_data *scd = this_scd();
@@ -243,22 +257,7 @@ void sched_clock_idle_wakeup_event(u64 delta_ns)
 }
 EXPORT_SYMBOL_GPL(sched_clock_idle_wakeup_event);
 
-#else /* CONFIG_HAVE_UNSTABLE_SCHED_CLOCK */
-
-void sched_clock_init(void)
-{
-	sched_clock_running = 1;
-}
-
-u64 sched_clock_cpu(int cpu)
-{
-	if (unlikely(!sched_clock_running))
-		return 0;
-
-	return sched_clock();
-}
-
-#endif
+#endif /* CONFIG_HAVE_UNSTABLE_SCHED_CLOCK */
 
 unsigned long long cpu_clock(int cpu)
 {
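
For context on the clamp spelled out in the comment inside __update_sched_clock() above: each per-cpu reading is bounded so the clock never goes backwards (never below max(scd->tick_gtod, scd->clock)) and never runs more than TICK_NSEC ahead of the GTOD timestamp taken at the last tick. Below is a minimal userspace sketch of that logic; wrap_min()/wrap_max() mirror the helpers visible in the diff context, while update_clock(), the TICK_NSEC value, and main() are illustrative stand-ins rather than kernel code.

#include <stdio.h>
#include <stdint.h>

typedef uint64_t u64;
typedef int64_t s64;

#define TICK_NSEC 1000000ULL	/* illustrative; the real value derives from HZ */

/* min/max that stay correct across u64 wrap-around, as in the diff context */
static inline u64 wrap_min(u64 x, u64 y)
{
	return (s64)(x - y) < 0 ? x : y;
}

static inline u64 wrap_max(u64 x, u64 y)
{
	return (s64)(x - y) > 0 ? x : y;
}

/*
 * The clamp from the kernel comment:
 *   scd->clock = clamp(scd->tick_gtod + delta,
 *                      max(scd->tick_gtod, scd->clock),
 *                      scd->tick_gtod + TICK_NSEC);
 */
static u64 update_clock(u64 tick_gtod, u64 prev_clock, u64 delta)
{
	u64 clock = tick_gtod + delta;
	u64 min_clock = wrap_max(tick_gtod, prev_clock);	/* monotonic floor */
	u64 max_clock = tick_gtod + TICK_NSEC;			/* one-tick ceiling */

	clock = wrap_max(clock, min_clock);
	clock = wrap_min(clock, max_clock);
	return clock;
}

int main(void)
{
	/* A wild delta (5 ms) is clamped to one tick past tick_gtod: 1001000. */
	printf("%llu\n", (unsigned long long)update_clock(1000, 900, 5000000));
	/* A small delta passes through unclamped: 1500. */
	printf("%llu\n", (unsigned long long)update_clock(1000, 900, 500));
	return 0;
}

The other notable piece of this merge is the sched_clock_stable fast path: when CONFIG_HAVE_UNSTABLE_SCHED_CLOCK is unset, the flag is a static const 1, so the compiler can fold sched_clock_cpu() into a direct sched_clock() call and discard the clamping machinery as dead code.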