about summary refs log tree commit diff stats
path: root/kernel/sched
diff options
context:
space:
mode:
authorPeter Zijlstra <peterz@infradead.org>2013-11-28 13:31:23 -0500
committerIngo Molnar <mingo@kernel.org>2014-01-13 09:13:11 -0500
commitef08f0fff87630d4f67ceb09514d8b444df833f8 (patch)
treebe33021d329f9f2780ed8eac7cb8acb19a56a1b4 /kernel/sched
parent20d1c86a57762f0a33a78988e3fc8818316badd4 (diff)
sched/clock: Remove local_irq_disable() from the clocks
Now that x86 no longer requires IRQs disabled for sched_clock() and ia64 never had this requirement (it doesn't seem to do cpufreq at all), we can remove the requirement of disabling IRQs. MAINLINE PRE POST sched_clock_stable: 1 1 1 (cold) sched_clock: 329841 257223 221876 (cold) local_clock: 301773 309889 234692 (warm) sched_clock: 38375 25280 25602 (warm) local_clock: 100371 85268 33265 (warm) rdtsc: 27340 24247 24214 sched_clock_stable: 0 0 0 (cold) sched_clock: 382634 301224 235941 (cold) local_clock: 396890 399870 297017 (warm) sched_clock: 38194 25630 25233 (warm) local_clock: 143452 129629 71234 (warm) rdtsc: 27345 24307 24245 Signed-off-by: Peter Zijlstra <peterz@infradead.org> Cc: Linus Torvalds <torvalds@linux-foundation.org> Cc: Andrew Morton <akpm@linux-foundation.org> Link: http://lkml.kernel.org/n/tip-36e5kohiasnr106d077mgubp@git.kernel.org Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'kernel/sched')
-rw-r--r--kernel/sched/clock.c34
1 file changed, 6 insertions(+), 28 deletions(-)
diff --git a/kernel/sched/clock.c b/kernel/sched/clock.c
index c3ae1446461c..59371549ddf0 100644
--- a/kernel/sched/clock.c
+++ b/kernel/sched/clock.c
@@ -26,9 +26,10 @@
26 * at 0 on boot (but people really shouldn't rely on that). 26 * at 0 on boot (but people really shouldn't rely on that).
27 * 27 *
28 * cpu_clock(i) -- can be used from any context, including NMI. 28 * cpu_clock(i) -- can be used from any context, including NMI.
29 * sched_clock_cpu(i) -- must be used with local IRQs disabled (implied by NMI)
30 * local_clock() -- is cpu_clock() on the current cpu. 29 * local_clock() -- is cpu_clock() on the current cpu.
31 * 30 *
31 * sched_clock_cpu(i)
32 *
32 * How: 33 * How:
33 * 34 *
34 * The implementation either uses sched_clock() when 35 * The implementation either uses sched_clock() when
@@ -50,15 +51,6 @@
50 * Furthermore, explicit sleep and wakeup hooks allow us to account for time 51 * Furthermore, explicit sleep and wakeup hooks allow us to account for time
51 * that is otherwise invisible (TSC gets stopped). 52 * that is otherwise invisible (TSC gets stopped).
52 * 53 *
53 *
54 * Notes:
55 *
56 * The !IRQ-safetly of sched_clock() and sched_clock_cpu() comes from things
57 * like cpufreq interrupts that can change the base clock (TSC) multiplier
58 * and cause funny jumps in time -- although the filtering provided by
59 * sched_clock_cpu() should mitigate serious artifacts we cannot rely on it
60 * in general since for !CONFIG_HAVE_UNSTABLE_SCHED_CLOCK we fully rely on
61 * sched_clock().
62 */ 54 */
63#include <linux/spinlock.h> 55#include <linux/spinlock.h>
64#include <linux/hardirq.h> 56#include <linux/hardirq.h>
@@ -242,20 +234,20 @@ u64 sched_clock_cpu(int cpu)
242 struct sched_clock_data *scd; 234 struct sched_clock_data *scd;
243 u64 clock; 235 u64 clock;
244 236
245 WARN_ON_ONCE(!irqs_disabled());
246
247 if (sched_clock_stable) 237 if (sched_clock_stable)
248 return sched_clock(); 238 return sched_clock();
249 239
250 if (unlikely(!sched_clock_running)) 240 if (unlikely(!sched_clock_running))
251 return 0ull; 241 return 0ull;
252 242
243 preempt_disable();
253 scd = cpu_sdc(cpu); 244 scd = cpu_sdc(cpu);
254 245
255 if (cpu != smp_processor_id()) 246 if (cpu != smp_processor_id())
256 clock = sched_clock_remote(scd); 247 clock = sched_clock_remote(scd);
257 else 248 else
258 clock = sched_clock_local(scd); 249 clock = sched_clock_local(scd);
250 preempt_enable();
259 251
260 return clock; 252 return clock;
261} 253}
@@ -316,14 +308,7 @@ EXPORT_SYMBOL_GPL(sched_clock_idle_wakeup_event);
316 */ 308 */
317u64 cpu_clock(int cpu) 309u64 cpu_clock(int cpu)
318{ 310{
319 u64 clock; 311 return sched_clock_cpu(cpu);
320 unsigned long flags;
321
322 local_irq_save(flags);
323 clock = sched_clock_cpu(cpu);
324 local_irq_restore(flags);
325
326 return clock;
327} 312}
328 313
329/* 314/*
@@ -335,14 +320,7 @@ u64 cpu_clock(int cpu)
335 */ 320 */
336u64 local_clock(void) 321u64 local_clock(void)
337{ 322{
338 u64 clock; 323 return sched_clock_cpu(raw_smp_processor_id());
339 unsigned long flags;
340
341 local_irq_save(flags);
342 clock = sched_clock_cpu(smp_processor_id());
343 local_irq_restore(flags);
344
345 return clock;
346} 324}
347 325
348#else /* CONFIG_HAVE_UNSTABLE_SCHED_CLOCK */ 326#else /* CONFIG_HAVE_UNSTABLE_SCHED_CLOCK */