author		Richard Weinberger <richard@nod.at>	2012-03-25 17:00:04 -0400
committer	Ingo Molnar <mingo@kernel.org>		2012-03-25 21:16:07 -0400
commit		90e240142bd31ff10aeda5a280a53153f4eff004 (patch)
tree		ae612bacc2cfd2fedc4c5c22c29dd326f8b93ae7 /arch/x86/kernel/process_64.c
parent		f5243d6de7ae232e1d81e44ae9756bbd8c988fcd (diff)
x86: Merge the x86_32 and x86_64 cpu_idle() functions
Both functions are mostly identical. The differences are:

- x86_32's cpu_idle() makes use of check_pgt_cache(), which is a nop on
  both x86_32 and x86_64.

- x86_64's cpu_idle() uses enter_idle()/__exit_idle(); on x86_32 these
  functions are nops.

- In contrast to x86_32, x86_64 calls rcu_idle_enter()/rcu_idle_exit() in
  the innermost loop because idle notifications need RCU. Calling these
  functions in the innermost loop on x86_32 as well does not hurt.

So we can merge both functions.

Signed-off-by: Richard Weinberger <richard@nod.at>
Acked-by: Frederic Weisbecker <fweisbec@gmail.com>
Cc: paulmck@linux.vnet.ibm.com
Cc: josh@joshtriplett.org
Cc: tj@kernel.org
Link: http://lkml.kernel.org/r/1332709204-22496-1-git-send-email-richard@nod.at
Signed-off-by: Ingo Molnar <mingo@kernel.org>
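For reference, a sketch of the merged loop this patch produces in the shared
arch/x86/kernel/process.c, trimmed to the control flow the message describes.
It is based on the x86_64 loop removed below and is an illustration, not the
verbatim result:

/*
 * Sketch of the merged cpu_idle(). enter_idle()/__exit_idle() compile
 * away to nops on x86_32, and rcu_idle_enter()/rcu_idle_exit() sit in
 * the innermost loop so the idle notifiers still run with RCU watching.
 */
void cpu_idle(void)
{
	current_thread_info()->status |= TS_POLLING;
	boot_init_stack_canary();	/* re-init on CPU0 is harmless */

	/* endless idle loop with no priority at all */
	while (1) {
		tick_nohz_idle_enter();
		while (!need_resched()) {
			rmb();
			if (cpu_is_offline(smp_processor_id()))
				play_dead();

			/* idle routines run with interrupts disabled */
			local_touch_nmi();
			local_irq_disable();
			enter_idle();			/* nop on x86_32 */
			stop_critical_timings();

			rcu_idle_enter();
			if (cpuidle_idle_call())
				pm_idle();
			rcu_idle_exit();

			start_critical_timings();
			__exit_idle();			/* nop on x86_32 */
		}
		tick_nohz_idle_exit();
		schedule_preempt_disabled();
	}
}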
Diffstat (limited to 'arch/x86/kernel/process_64.c')
-rw-r--r--	arch/x86/kernel/process_64.c	107
1 file changed, 0 insertions(+), 107 deletions(-)
diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
index 292da13fc5aa..ce5e34f2beca 100644
--- a/arch/x86/kernel/process_64.c
+++ b/arch/x86/kernel/process_64.c
@@ -14,7 +14,6 @@
  * This file handles the architecture-dependent parts of process handling..
  */
 
-#include <linux/stackprotector.h>
 #include <linux/cpu.h>
 #include <linux/errno.h>
 #include <linux/sched.h>
@@ -32,12 +31,10 @@
 #include <linux/notifier.h>
 #include <linux/kprobes.h>
 #include <linux/kdebug.h>
-#include <linux/tick.h>
 #include <linux/prctl.h>
 #include <linux/uaccess.h>
 #include <linux/io.h>
 #include <linux/ftrace.h>
-#include <linux/cpuidle.h>
 
 #include <asm/pgtable.h>
 #include <asm/system.h>
@@ -52,114 +49,10 @@
 #include <asm/idle.h>
 #include <asm/syscalls.h>
 #include <asm/debugreg.h>
-#include <asm/nmi.h>
 
 asmlinkage extern void ret_from_fork(void);
 
 DEFINE_PER_CPU(unsigned long, old_rsp);
-static DEFINE_PER_CPU(unsigned char, is_idle);
-
-static ATOMIC_NOTIFIER_HEAD(idle_notifier);
-
-void idle_notifier_register(struct notifier_block *n)
-{
-	atomic_notifier_chain_register(&idle_notifier, n);
-}
-EXPORT_SYMBOL_GPL(idle_notifier_register);
-
-void idle_notifier_unregister(struct notifier_block *n)
-{
-	atomic_notifier_chain_unregister(&idle_notifier, n);
-}
-EXPORT_SYMBOL_GPL(idle_notifier_unregister);
-
-void enter_idle(void)
-{
-	percpu_write(is_idle, 1);
-	atomic_notifier_call_chain(&idle_notifier, IDLE_START, NULL);
-}
-
-static void __exit_idle(void)
-{
-	if (x86_test_and_clear_bit_percpu(0, is_idle) == 0)
-		return;
-	atomic_notifier_call_chain(&idle_notifier, IDLE_END, NULL);
-}
-
-/* Called from interrupts to signify idle end */
-void exit_idle(void)
-{
-	/* idle loop has pid 0 */
-	if (current->pid)
-		return;
-	__exit_idle();
-}
-
-#ifndef CONFIG_SMP
-static inline void play_dead(void)
-{
-	BUG();
-}
-#endif
-
-/*
- * The idle thread. There's no useful work to be
- * done, so just try to conserve power and have a
- * low exit latency (ie sit in a loop waiting for
- * somebody to say that they'd like to reschedule)
- */
-void cpu_idle(void)
-{
-	current_thread_info()->status |= TS_POLLING;
-
-	/*
-	 * If we're the non-boot CPU, nothing set the stack canary up
-	 * for us. CPU0 already has it initialized but no harm in
-	 * doing it again. This is a good place for updating it, as
-	 * we wont ever return from this function (so the invalid
-	 * canaries already on the stack wont ever trigger).
-	 */
-	boot_init_stack_canary();
-
-	/* endless idle loop with no priority at all */
-	while (1) {
-		tick_nohz_idle_enter();
-		while (!need_resched()) {
-
-			rmb();
-
-			if (cpu_is_offline(smp_processor_id()))
-				play_dead();
-			/*
-			 * Idle routines should keep interrupts disabled
-			 * from here on, until they go to idle.
-			 * Otherwise, idle callbacks can misfire.
-			 */
-			local_touch_nmi();
-			local_irq_disable();
-			enter_idle();
-			/* Don't trace irqs off for idle */
-			stop_critical_timings();
-
-			/* enter_idle() needs rcu for notifiers */
-			rcu_idle_enter();
-
-			if (cpuidle_idle_call())
-				pm_idle();
-
-			rcu_idle_exit();
-			start_critical_timings();
-
-			/* In many cases the interrupt that ended idle
-			   has already called exit_idle. But some idle
-			   loops can be woken up without interrupt. */
-			__exit_idle();
-		}
-
-		tick_nohz_idle_exit();
-		schedule_preempt_disabled();
-	}
-}
 
 /* Prints also some state that isn't saved in the pt_regs */
 void __show_regs(struct pt_regs *regs, int all)
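As an aside, the idle notifier machinery deleted from this file is an
ordinary atomic notifier chain, so a consumer subscribes to the
IDLE_START/IDLE_END events in the usual way. A hypothetical sketch
(my_idle_event and my_idle_nb are invented names for illustration):

#include <linux/notifier.h>
#include <asm/idle.h>		/* IDLE_START, IDLE_END */

/* Hypothetical consumer: reacts to each CPU entering/leaving idle. */
static int my_idle_event(struct notifier_block *nb,
			 unsigned long action, void *unused)
{
	if (action == IDLE_START) {
		/* CPU is about to idle: e.g. defer non-urgent work. */
	} else if (action == IDLE_END) {
		/* CPU woke up: resume normal operation. */
	}
	return NOTIFY_OK;
}

static struct notifier_block my_idle_nb = {
	.notifier_call = my_idle_event,
};

/* somewhere in driver init: */
idle_notifier_register(&my_idle_nb);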