author    Richard Weinberger <richard@nod.at>    2012-03-25 17:00:04 -0400
committer Ingo Molnar <mingo@kernel.org>         2012-03-25 21:16:07 -0400
commit    90e240142bd31ff10aeda5a280a53153f4eff004
tree      ae612bacc2cfd2fedc4c5c22c29dd326f8b93ae7
parent    f5243d6de7ae232e1d81e44ae9756bbd8c988fcd
x86: Merge the x86_32 and x86_64 cpu_idle() functions
Both functions are mostly identical. The differences are:

- x86_32's cpu_idle() makes use of check_pgt_cache(), which is a nop on
  both x86_32 and x86_64.

- x86_64's cpu_idle() uses enter_idle()/__exit_idle(); on x86_32 these
  functions are a nop.

- In contrast to x86_32, x86_64 calls rcu_idle_enter()/rcu_idle_exit() in
  the innermost loop, because the idle notifications need RCU. Calling
  these functions on x86_32 in the innermost loop as well does not hurt.

So we can merge both functions.

Signed-off-by: Richard Weinberger <richard@nod.at>
Acked-by: Frederic Weisbecker <fweisbec@gmail.com>
Cc: paulmck@linux.vnet.ibm.com
Cc: josh@joshtriplett.org
Cc: tj@kernel.org
Link: http://lkml.kernel.org/r/1332709204-22496-1-git-send-email-richard@nod.at
Signed-off-by: Ingo Molnar <mingo@kernel.org>
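For context, here is a minimal sketch of a consumer of the idle notifier
API this patch preserves on x86_64. The names my_idle_notify and
my_idle_nb are hypothetical; the IDLE_START/IDLE_END constants and
idle_notifier_register() come from <asm/idle.h> in kernels of this era:

	/*
	 * Hypothetical consumer of the x86_64 idle notifiers kept by this
	 * patch; my_idle_notify/my_idle_nb are illustrative names only.
	 */
	#include <linux/notifier.h>
	#include <asm/idle.h>	/* idle_notifier_register(), IDLE_START/IDLE_END */

	static int my_idle_notify(struct notifier_block *nb,
				  unsigned long action, void *unused)
	{
		switch (action) {
		case IDLE_START:
			/* This CPU is entering idle; runs with interrupts off. */
			break;
		case IDLE_END:
			/* This CPU is leaving idle. */
			break;
		}
		return NOTIFY_OK;
	}

	static struct notifier_block my_idle_nb = {
		.notifier_call = my_idle_notify,
	};

	/* e.g. from module init code: idle_notifier_register(&my_idle_nb); */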
Diffstat (limited to 'arch/x86/kernel/process.c')
-rw-r--r--  arch/x86/kernel/process.c | 114 ++++++++++++++++++++++++++++++++++
1 file changed, 114 insertions(+), 0 deletions(-)
diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
index 14baf78d5a1f..29309c42b9e5 100644
--- a/arch/x86/kernel/process.c
+++ b/arch/x86/kernel/process.c
@@ -12,6 +12,9 @@
 #include <linux/user-return-notifier.h>
 #include <linux/dmi.h>
 #include <linux/utsname.h>
+#include <linux/stackprotector.h>
+#include <linux/tick.h>
+#include <linux/cpuidle.h>
 #include <trace/events/power.h>
 #include <linux/hw_breakpoint.h>
 #include <asm/cpu.h>
@@ -23,6 +26,24 @@
 #include <asm/i387.h>
 #include <asm/fpu-internal.h>
 #include <asm/debugreg.h>
+#include <asm/nmi.h>
+
+#ifdef CONFIG_X86_64
+static DEFINE_PER_CPU(unsigned char, is_idle);
+static ATOMIC_NOTIFIER_HEAD(idle_notifier);
+
+void idle_notifier_register(struct notifier_block *n)
+{
+	atomic_notifier_chain_register(&idle_notifier, n);
+}
+EXPORT_SYMBOL_GPL(idle_notifier_register);
+
+void idle_notifier_unregister(struct notifier_block *n)
+{
+	atomic_notifier_chain_unregister(&idle_notifier, n);
+}
+EXPORT_SYMBOL_GPL(idle_notifier_unregister);
+#endif
 
 struct kmem_cache *task_xstate_cachep;
 EXPORT_SYMBOL_GPL(task_xstate_cachep);
@@ -371,6 +392,99 @@ static inline int hlt_use_halt(void)
 }
 #endif
 
+#ifndef CONFIG_SMP
+static inline void play_dead(void)
+{
+	BUG();
+}
+#endif
+
+#ifdef CONFIG_X86_64
+void enter_idle(void)
+{
+	percpu_write(is_idle, 1);
+	atomic_notifier_call_chain(&idle_notifier, IDLE_START, NULL);
+}
+
+static void __exit_idle(void)
+{
+	if (x86_test_and_clear_bit_percpu(0, is_idle) == 0)
+		return;
+	atomic_notifier_call_chain(&idle_notifier, IDLE_END, NULL);
+}
+
+/* Called from interrupts to signify idle end */
+void exit_idle(void)
+{
+	/* idle loop has pid 0 */
+	if (current->pid)
+		return;
+	__exit_idle();
+}
+#endif
+
+/*
+ * The idle thread. There's no useful work to be
+ * done, so just try to conserve power and have a
+ * low exit latency (ie sit in a loop waiting for
+ * somebody to say that they'd like to reschedule)
+ */
+void cpu_idle(void)
+{
+	/*
+	 * If we're the non-boot CPU, nothing set the stack canary up
+	 * for us. CPU0 already has it initialized but no harm in
+	 * doing it again. This is a good place for updating it, as
+	 * we wont ever return from this function (so the invalid
+	 * canaries already on the stack wont ever trigger).
+	 */
+	boot_init_stack_canary();
+	current_thread_info()->status |= TS_POLLING;
+
+	while (1) {
+		tick_nohz_idle_enter();
+
+		while (!need_resched()) {
+			rmb();
+
+			if (cpu_is_offline(smp_processor_id()))
+				play_dead();
+
+			/*
+			 * Idle routines should keep interrupts disabled
+			 * from here on, until they go to idle.
+			 * Otherwise, idle callbacks can misfire.
+			 */
+			local_touch_nmi();
+			local_irq_disable();
+
+			enter_idle();
+
+			/* Don't trace irqs off for idle */
+			stop_critical_timings();
+
+			/* enter_idle() needs rcu for notifiers */
+			rcu_idle_enter();
+
+			if (cpuidle_idle_call())
+				pm_idle();
+
+			rcu_idle_exit();
+			start_critical_timings();
+
+			/* In many cases the interrupt that ended idle
+			   has already called exit_idle. But some idle
+			   loops can be woken up without interrupt. */
+			__exit_idle();
+		}
+
+		tick_nohz_idle_exit();
+		preempt_enable_no_resched();
+		schedule();
+		preempt_disable();
+	}
+}
+
 /*
  * We use this if we don't have any better
  * idle routine..
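
The "Called from interrupts to signify idle end" comment above deserves a
word of grounding: on x86_64 the interrupt entry path is what normally
terminates an idle period, so most wakeups have already run exit_idle()
before __exit_idle() is reached in the loop. A trimmed sketch of do_IRQ(),
based on arch/x86/kernel/irq.c of this kernel era and not part of this
patch:

	/*
	 * Not part of this patch: simplified x86 interrupt entry, showing
	 * where exit_idle() pairs with enter_idle() in the idle loop above.
	 * Vector dispatch is elided for clarity.
	 */
	unsigned int __irq_entry do_IRQ(struct pt_regs *regs)
	{
		struct pt_regs *old_regs = set_irq_regs(regs);

		irq_enter();
		exit_idle();	/* fires the IDLE_END notifiers if pid 0 was idle */

		/* ... look up and run the handler for this vector ... */

		irq_exit();
		set_irq_regs(old_regs);
		return 1;
	}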