author    Richard Weinberger <richard@nod.at>   2012-03-25 17:00:04 -0400
committer Ingo Molnar <mingo@kernel.org>        2012-03-25 21:16:07 -0400
commit    90e240142bd31ff10aeda5a280a53153f4eff004
tree      ae612bacc2cfd2fedc4c5c22c29dd326f8b93ae7
parent    f5243d6de7ae232e1d81e44ae9756bbd8c988fcd
x86: Merge the x86_32 and x86_64 cpu_idle() functions
Both functions are mostly identical. The differences are:

- x86_32's cpu_idle() makes use of check_pgt_cache(), which is a
  no-op on both x86_32 and x86_64.

- x86_64's cpu_idle() uses enter_idle()/__exit_idle(); on x86_32
  these functions are no-ops.

- In contrast to x86_32, x86_64 calls rcu_idle_enter()/rcu_idle_exit()
  in the innermost loop, because idle notifications need RCU.
  Calling these functions in the innermost loop on x86_32 as well
  does not hurt.

So we can merge both functions.

Signed-off-by: Richard Weinberger <richard@nod.at>
Acked-by: Frederic Weisbecker <fweisbec@gmail.com>
Cc: paulmck@linux.vnet.ibm.com
Cc: josh@joshtriplett.org
Cc: tj@kernel.org
Link: http://lkml.kernel.org/r/1332709204-22496-1-git-send-email-richard@nod.at
Signed-off-by: Ingo Molnar <mingo@kernel.org>
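For reference, the first bullet point relies on check_pgt_cache() compiling
away entirely on x86, which is why the merged loop below simply drops the
call. A sketch of the definition in question (recalled from the era's x86
headers, e.g. arch/x86/include/asm/pgtable_32.h, not quoted from this patch):

/*
 * check_pgt_cache() is a no-op macro on x86 (sketch, from memory):
 * keeping the call would cost nothing, dropping it loses nothing.
 */
#define check_pgt_cache() do { } while (0)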
Diffstat (limited to 'arch/x86')
-rw-r--r--  arch/x86/include/asm/idle.h  |   1
-rw-r--r--  arch/x86/kernel/process.c    | 114
-rw-r--r--  arch/x86/kernel/process_32.c |  58
-rw-r--r--  arch/x86/kernel/process_64.c | 107

4 files changed, 115 insertions(+), 165 deletions(-)
diff --git a/arch/x86/include/asm/idle.h b/arch/x86/include/asm/idle.h
index f49253d75710..c5d1785373ed 100644
--- a/arch/x86/include/asm/idle.h
+++ b/arch/x86/include/asm/idle.h
@@ -14,6 +14,7 @@ void exit_idle(void);
 #else /* !CONFIG_X86_64 */
 static inline void enter_idle(void) { }
 static inline void exit_idle(void) { }
+static inline void __exit_idle(void) { }
 #endif /* CONFIG_X86_64 */
 
 void amd_e400_remove_cpu(int cpu);
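The new x86_32 stub exists because the merged cpu_idle() in process.c calls
__exit_idle() unconditionally: on x86_64 that resolves to the static helper
added below, while on x86_32 this empty inline compiles away. A sketch of
the header's resulting shape (the CONFIG_X86_64 half is paraphrased from the
hunk context above, not part of this diff):

#ifdef CONFIG_X86_64
void enter_idle(void);
void exit_idle(void);   /* __exit_idle() stays static inside process.c */
#else /* !CONFIG_X86_64 */
static inline void enter_idle(void) { }
static inline void exit_idle(void) { }
static inline void __exit_idle(void) { }  /* new: shared loop can call it */
#endif /* CONFIG_X86_64 */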
diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
index 14baf78d5a1f..29309c42b9e5 100644
--- a/arch/x86/kernel/process.c
+++ b/arch/x86/kernel/process.c
@@ -12,6 +12,9 @@
 #include <linux/user-return-notifier.h>
 #include <linux/dmi.h>
 #include <linux/utsname.h>
+#include <linux/stackprotector.h>
+#include <linux/tick.h>
+#include <linux/cpuidle.h>
 #include <trace/events/power.h>
 #include <linux/hw_breakpoint.h>
 #include <asm/cpu.h>
@@ -23,6 +26,24 @@
 #include <asm/i387.h>
 #include <asm/fpu-internal.h>
 #include <asm/debugreg.h>
+#include <asm/nmi.h>
+
+#ifdef CONFIG_X86_64
+static DEFINE_PER_CPU(unsigned char, is_idle);
+static ATOMIC_NOTIFIER_HEAD(idle_notifier);
+
+void idle_notifier_register(struct notifier_block *n)
+{
+        atomic_notifier_chain_register(&idle_notifier, n);
+}
+EXPORT_SYMBOL_GPL(idle_notifier_register);
+
+void idle_notifier_unregister(struct notifier_block *n)
+{
+        atomic_notifier_chain_unregister(&idle_notifier, n);
+}
+EXPORT_SYMBOL_GPL(idle_notifier_unregister);
+#endif
 
 struct kmem_cache *task_xstate_cachep;
 EXPORT_SYMBOL_GPL(task_xstate_cachep);
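The block moved in above is the x86_64 idle-notification machinery. For
illustration, a hypothetical consumer would hook the chain roughly as follows
(my_idle_notify and my_idle_nb are invented names; notifier_block, NOTIFY_OK
and the IDLE_START/IDLE_END constants from asm/idle.h are the real API):

#include <linux/notifier.h>
#include <asm/idle.h>

/* Hypothetical consumer of the idle notifier chain (sketch only). */
static int my_idle_notify(struct notifier_block *nb,
                          unsigned long action, void *unused)
{
        switch (action) {
        case IDLE_START:        /* CPU is about to go idle */
                /* quiesce per-CPU activity here */
                break;
        case IDLE_END:          /* CPU left idle, usually via an interrupt */
                /* resume per-CPU activity here */
                break;
        }
        return NOTIFY_OK;
}

static struct notifier_block my_idle_nb = {
        .notifier_call = my_idle_notify,
};

/* In some init path: idle_notifier_register(&my_idle_nb); */

atomic_notifier_call_chain() takes an RCU read-side critical section
internally, which is why the merged loop below keeps both notifier calls
outside the rcu_idle_enter()/rcu_idle_exit() window.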
@@ -371,6 +392,99 @@ static inline int hlt_use_halt(void)
 }
 #endif
 
+#ifndef CONFIG_SMP
+static inline void play_dead(void)
+{
+        BUG();
+}
+#endif
+
+#ifdef CONFIG_X86_64
+void enter_idle(void)
+{
+        percpu_write(is_idle, 1);
+        atomic_notifier_call_chain(&idle_notifier, IDLE_START, NULL);
+}
+
+static void __exit_idle(void)
+{
+        if (x86_test_and_clear_bit_percpu(0, is_idle) == 0)
+                return;
+        atomic_notifier_call_chain(&idle_notifier, IDLE_END, NULL);
+}
+
+/* Called from interrupts to signify idle end */
+void exit_idle(void)
+{
+        /* idle loop has pid 0 */
+        if (current->pid)
+                return;
+        __exit_idle();
+}
+#endif
+
+/*
+ * The idle thread. There's no useful work to be
+ * done, so just try to conserve power and have a
+ * low exit latency (ie sit in a loop waiting for
+ * somebody to say that they'd like to reschedule)
+ */
+void cpu_idle(void)
+{
+        /*
+         * If we're the non-boot CPU, nothing set the stack canary up
+         * for us. CPU0 already has it initialized but no harm in
+         * doing it again. This is a good place for updating it, as
+         * we wont ever return from this function (so the invalid
+         * canaries already on the stack wont ever trigger).
+         */
+        boot_init_stack_canary();
+        current_thread_info()->status |= TS_POLLING;
+
+        while (1) {
+                tick_nohz_idle_enter();
+
+                while (!need_resched()) {
+                        rmb();
+
+                        if (cpu_is_offline(smp_processor_id()))
+                                play_dead();
+
+                        /*
+                         * Idle routines should keep interrupts disabled
+                         * from here on, until they go to idle.
+                         * Otherwise, idle callbacks can misfire.
+                         */
+                        local_touch_nmi();
+                        local_irq_disable();
+
+                        enter_idle();
+
+                        /* Don't trace irqs off for idle */
+                        stop_critical_timings();
+
+                        /* enter_idle() needs rcu for notifiers */
+                        rcu_idle_enter();
+
+                        if (cpuidle_idle_call())
+                                pm_idle();
+
+                        rcu_idle_exit();
+                        start_critical_timings();
+
+                        /* In many cases the interrupt that ended idle
+                           has already called exit_idle. But some idle
+                           loops can be woken up without interrupt. */
+                        __exit_idle();
+                }
+
+                tick_nohz_idle_exit();
+                preempt_enable_no_resched();
+                schedule();
+                preempt_disable();
+        }
+}
+
 /*
  * We use this if we don't have any better
  * idle routine..
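The cpuidle_idle_call()/pm_idle() pair in the merged loop is the actual
power-down step: if the cpuidle framework declines (returns nonzero), the
loop falls back to the pm_idle function pointer, whose default is the
default_idle() routine the trailing context above introduces. A simplified
sketch of that fallback's shape (modeled from memory on the era's
default_idle(), not a verbatim copy):

/*
 * Sketch of a default_idle()-style fallback.  The loop above disables
 * interrupts before calling it; safe_halt() re-enables them and halts
 * in one step ("sti; hlt"), so a wakeup interrupt arriving in between
 * cannot be lost.
 */
static void sketch_default_idle(void)
{
        if (hlt_use_halt())             /* guard visible in the hunk header */
                safe_halt();            /* sti; hlt */
        else
                local_irq_enable();     /* no HLT available: spin, irqs on */
}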
diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c
index 9d7d4842bfaf..ea207c245aa4 100644
--- a/arch/x86/kernel/process_32.c
+++ b/arch/x86/kernel/process_32.c
@@ -9,7 +9,6 @@
  * This file handles the architecture-dependent parts of process handling..
  */
 
-#include <linux/stackprotector.h>
 #include <linux/cpu.h>
 #include <linux/errno.h>
 #include <linux/sched.h>
@@ -31,14 +30,12 @@
 #include <linux/kallsyms.h>
 #include <linux/ptrace.h>
 #include <linux/personality.h>
-#include <linux/tick.h>
 #include <linux/percpu.h>
 #include <linux/prctl.h>
 #include <linux/ftrace.h>
 #include <linux/uaccess.h>
 #include <linux/io.h>
 #include <linux/kdebug.h>
-#include <linux/cpuidle.h>
 
 #include <asm/pgtable.h>
 #include <asm/system.h>
@@ -58,7 +55,6 @@
 #include <asm/idle.h>
 #include <asm/syscalls.h>
 #include <asm/debugreg.h>
-#include <asm/nmi.h>
 
 asmlinkage void ret_from_fork(void) __asm__("ret_from_fork");
 
@@ -70,60 +66,6 @@ unsigned long thread_saved_pc(struct task_struct *tsk)
         return ((unsigned long *)tsk->thread.sp)[3];
 }
 
-#ifndef CONFIG_SMP
-static inline void play_dead(void)
-{
-        BUG();
-}
-#endif
-
-/*
- * The idle thread. There's no useful work to be
- * done, so just try to conserve power and have a
- * low exit latency (ie sit in a loop waiting for
- * somebody to say that they'd like to reschedule)
- */
-void cpu_idle(void)
-{
-        int cpu = smp_processor_id();
-
-        /*
-         * If we're the non-boot CPU, nothing set the stack canary up
-         * for us. CPU0 already has it initialized but no harm in
-         * doing it again. This is a good place for updating it, as
-         * we wont ever return from this function (so the invalid
-         * canaries already on the stack wont ever trigger).
-         */
-        boot_init_stack_canary();
-
-        current_thread_info()->status |= TS_POLLING;
-
-        /* endless idle loop with no priority at all */
-        while (1) {
-                tick_nohz_idle_enter();
-                rcu_idle_enter();
-                while (!need_resched()) {
-
-                        check_pgt_cache();
-                        rmb();
-
-                        if (cpu_is_offline(cpu))
-                                play_dead();
-
-                        local_touch_nmi();
-                        local_irq_disable();
-                        /* Don't trace irqs off for idle */
-                        stop_critical_timings();
-                        if (cpuidle_idle_call())
-                                pm_idle();
-                        start_critical_timings();
-                }
-                rcu_idle_exit();
-                tick_nohz_idle_exit();
-                schedule_preempt_disabled();
-        }
-}
-
 void __show_regs(struct pt_regs *regs, int all)
 {
         unsigned long cr0 = 0L, cr2 = 0L, cr3 = 0L, cr4 = 0L;
diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
index 292da13fc5aa..ce5e34f2beca 100644
--- a/arch/x86/kernel/process_64.c
+++ b/arch/x86/kernel/process_64.c
@@ -14,7 +14,6 @@
  * This file handles the architecture-dependent parts of process handling..
  */
 
-#include <linux/stackprotector.h>
 #include <linux/cpu.h>
 #include <linux/errno.h>
 #include <linux/sched.h>
@@ -32,12 +31,10 @@
 #include <linux/notifier.h>
 #include <linux/kprobes.h>
 #include <linux/kdebug.h>
-#include <linux/tick.h>
 #include <linux/prctl.h>
 #include <linux/uaccess.h>
 #include <linux/io.h>
 #include <linux/ftrace.h>
-#include <linux/cpuidle.h>
 
 #include <asm/pgtable.h>
 #include <asm/system.h>
@@ -52,114 +49,10 @@
 #include <asm/idle.h>
 #include <asm/syscalls.h>
 #include <asm/debugreg.h>
-#include <asm/nmi.h>
 
 asmlinkage extern void ret_from_fork(void);
 
 DEFINE_PER_CPU(unsigned long, old_rsp);
-static DEFINE_PER_CPU(unsigned char, is_idle);
-
-static ATOMIC_NOTIFIER_HEAD(idle_notifier);
-
-void idle_notifier_register(struct notifier_block *n)
-{
-        atomic_notifier_chain_register(&idle_notifier, n);
-}
-EXPORT_SYMBOL_GPL(idle_notifier_register);
-
-void idle_notifier_unregister(struct notifier_block *n)
-{
-        atomic_notifier_chain_unregister(&idle_notifier, n);
-}
-EXPORT_SYMBOL_GPL(idle_notifier_unregister);
-
-void enter_idle(void)
-{
-        percpu_write(is_idle, 1);
-        atomic_notifier_call_chain(&idle_notifier, IDLE_START, NULL);
-}
-
-static void __exit_idle(void)
-{
-        if (x86_test_and_clear_bit_percpu(0, is_idle) == 0)
-                return;
-        atomic_notifier_call_chain(&idle_notifier, IDLE_END, NULL);
-}
-
-/* Called from interrupts to signify idle end */
-void exit_idle(void)
-{
-        /* idle loop has pid 0 */
-        if (current->pid)
-                return;
-        __exit_idle();
-}
-
-#ifndef CONFIG_SMP
-static inline void play_dead(void)
-{
-        BUG();
-}
-#endif
-
-/*
- * The idle thread. There's no useful work to be
- * done, so just try to conserve power and have a
- * low exit latency (ie sit in a loop waiting for
- * somebody to say that they'd like to reschedule)
- */
-void cpu_idle(void)
-{
-        current_thread_info()->status |= TS_POLLING;
-
-        /*
-         * If we're the non-boot CPU, nothing set the stack canary up
-         * for us. CPU0 already has it initialized but no harm in
-         * doing it again. This is a good place for updating it, as
-         * we wont ever return from this function (so the invalid
-         * canaries already on the stack wont ever trigger).
-         */
-        boot_init_stack_canary();
-
-        /* endless idle loop with no priority at all */
-        while (1) {
-                tick_nohz_idle_enter();
-                while (!need_resched()) {
-
-                        rmb();
-
-                        if (cpu_is_offline(smp_processor_id()))
-                                play_dead();
-                        /*
-                         * Idle routines should keep interrupts disabled
-                         * from here on, until they go to idle.
-                         * Otherwise, idle callbacks can misfire.
-                         */
-                        local_touch_nmi();
-                        local_irq_disable();
-                        enter_idle();
-                        /* Don't trace irqs off for idle */
-                        stop_critical_timings();
-
-                        /* enter_idle() needs rcu for notifiers */
-                        rcu_idle_enter();
-
-                        if (cpuidle_idle_call())
-                                pm_idle();
-
-                        rcu_idle_exit();
-                        start_critical_timings();
-
-                        /* In many cases the interrupt that ended idle
-                           has already called exit_idle. But some idle
-                           loops can be woken up without interrupt. */
-                        __exit_idle();
-                }
-
-                tick_nohz_idle_exit();
-                schedule_preempt_disabled();
-        }
-}
-
 
 /* Prints also some state that isn't saved in the pt_regs */
 void __show_regs(struct pt_regs *regs, int all)