author		Peter Zijlstra <peterz@infradead.org>	2009-02-26 15:40:16 -0500
committer	Ingo Molnar <mingo@elte.hu>		2009-02-26 15:56:07 -0500
commit		8325d9c09dedf45476f4d6261d1b6a72e4a7453f (patch)
tree		5b7f6f3b125aec59ec6a60d22130844a3addc8e9 /kernel/sched_clock.c
parent		83ce400928680a6c8123d492684b27857f5a2d95 (diff)
sched_clock: cleanups
- remove superfluous checks in __update_sched_clock()
- skip sched_clock_tick() for sched_clock_stable
- reinstate the simple !HAVE_UNSTABLE_SCHED_CLOCK code to please the bloatwatch
Signed-off-by: Peter Zijlstra <peterz@infradead.org>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
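
For orientation, here is a condensed view of sched_clock_tick() as it reads after this patch, assembled from the third hunk below (the trailing scd bookkeeping is elided). The sched_clock_stable case now returns before any per-cpu state is touched, and this_scd() is only evaluated once the function knows it has work to do. Note also that the sched_clock_running check and the irqs_disabled() warning dropped from __update_sched_clock() in the second hunk survive here, on the path that leads into it, which is what makes them superfluous there.

	void sched_clock_tick(void)
	{
		struct sched_clock_data *scd;
		u64 now, now_gtod;

		if (sched_clock_stable)			/* new fast path */
			return;

		if (unlikely(!sched_clock_running))
			return;

		WARN_ON_ONCE(!irqs_disabled());

		scd = this_scd();			/* moved below the early returns */
		now_gtod = ktime_to_ns(ktime_get());
		now = sched_clock();
		/* ... scd->tick_raw/tick_gtod update elided ... */
	}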
Diffstat (limited to 'kernel/sched_clock.c')
-rw-r--r--	kernel/sched_clock.c | 31 ++++++++++++++++++++-----------
1 file changed, 20 insertions(+), 11 deletions(-)
diff --git a/kernel/sched_clock.c b/kernel/sched_clock.c
index a755d023805a..390f33234bd0 100644
--- a/kernel/sched_clock.c
+++ b/kernel/sched_clock.c
@@ -44,9 +44,6 @@ static __read_mostly int sched_clock_running;
 
 #ifdef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
 __read_mostly int sched_clock_stable;
-#else
-static const int sched_clock_stable = 1;
-#endif
 
 struct sched_clock_data {
 	/*
@@ -115,14 +112,9 @@ static u64 __update_sched_clock(struct sched_clock_data *scd, u64 now)
 	s64 delta = now - scd->tick_raw;
 	u64 clock, min_clock, max_clock;
 
-	WARN_ON_ONCE(!irqs_disabled());
-
 	if (unlikely(delta < 0))
 		delta = 0;
 
-	if (unlikely(!sched_clock_running))
-		return 0ull;
-
 	/*
 	 * scd->clock = clamp(scd->tick_gtod + delta,
 	 *		      max(scd->tick_gtod, scd->clock),
@@ -201,18 +193,20 @@ u64 sched_clock_cpu(int cpu)
 	return clock;
 }
 
-#ifdef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
-
 void sched_clock_tick(void)
 {
-	struct sched_clock_data *scd = this_scd();
+	struct sched_clock_data *scd;
 	u64 now, now_gtod;
 
+	if (sched_clock_stable)
+		return;
+
 	if (unlikely(!sched_clock_running))
 		return;
 
 	WARN_ON_ONCE(!irqs_disabled());
 
+	scd = this_scd();
 	now_gtod = ktime_to_ns(ktime_get());
 	now = sched_clock();
 
@@ -245,6 +239,21 @@ void sched_clock_idle_wakeup_event(u64 delta_ns)
 }
 EXPORT_SYMBOL_GPL(sched_clock_idle_wakeup_event);
 
+#else /* CONFIG_HAVE_UNSTABLE_SCHED_CLOCK */
+
+void sched_clock_init(void)
+{
+	sched_clock_running = 1;
+}
+
+u64 sched_clock_cpu(int cpu)
+{
+	if (unlikely(!sched_clock_running))
+		return 0;
+
+	return sched_clock();
+}
+
 #endif /* CONFIG_HAVE_UNSTABLE_SCHED_CLOCK */
 
 unsigned long long cpu_clock(int cpu)
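
The clamp comment preserved in the second hunk is the heart of __update_sched_clock(): the new per-cpu clock is the raw sched_clock() delta laid on top of the last tick's GTOD stamp, clamped so it never goes backwards and never runs too far ahead. A standalone sketch of that arithmetic in plain C follows; the struct, the helper name, and the TICK_NSEC upper bound are illustrative assumptions (the hunk's context cuts off before the comment's final argument), not taken from the file:

	#include <stdint.h>
	#include <stdio.h>

	typedef uint64_t u64;
	typedef int64_t s64;

	#define TICK_NSEC 1000000ULL	/* assumption: 1ms tick (HZ=1000) */

	/* Toy stand-in for the sched_clock_data fields used by the clamp. */
	struct scd {
		u64 tick_raw;	/* sched_clock() value at the last tick */
		u64 tick_gtod;	/* gtod timestamp at the last tick */
		u64 clock;	/* last clock value handed out */
	};

	/*
	 * scd->clock = clamp(scd->tick_gtod + delta,
	 *                    max(scd->tick_gtod, scd->clock),
	 *                    scd->tick_gtod + TICK_NSEC);	<- bound assumed
	 */
	static u64 update_clock(struct scd *scd, u64 now)
	{
		s64 delta = now - scd->tick_raw;
		u64 clock, min_clock, max_clock;

		if (delta < 0)			/* raw clock jumped backwards */
			delta = 0;

		clock = scd->tick_gtod + delta;
		min_clock = scd->clock > scd->tick_gtod ? scd->clock : scd->tick_gtod;
		max_clock = scd->tick_gtod + TICK_NSEC;

		if (clock < min_clock)		/* keep the clock monotonic */
			clock = min_clock;
		if (clock > max_clock)		/* don't run ahead of the next tick */
			clock = max_clock;

		return scd->clock = clock;
	}

	int main(void)
	{
		struct scd scd = { .tick_raw = 1000, .tick_gtod = 5000, .clock = 5200 };

		/* A small delta lands below the last value handed out: clamped up. */
		printf("%llu\n", (unsigned long long)update_clock(&scd, 1100));	/* 5200 */

		/* A huge delta is capped at one tick past the last gtod stamp. */
		printf("%llu\n", (unsigned long long)update_clock(&scd, 99999999));	/* 1005000 */
		return 0;
	}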