path: root/arch/x86/kernel/process_32.c
author	Richard Weinberger <richard@nod.at>	2012-03-25 17:00:04 -0400
committer	Ingo Molnar <mingo@kernel.org>	2012-03-25 21:16:07 -0400
commit	90e240142bd31ff10aeda5a280a53153f4eff004 (patch)
tree	ae612bacc2cfd2fedc4c5c22c29dd326f8b93ae7 /arch/x86/kernel/process_32.c
parent	f5243d6de7ae232e1d81e44ae9756bbd8c988fcd (diff)
x86: Merge the x86_32 and x86_64 cpu_idle() functions
Both functions are mostly identical. The differences are:

- x86_32's cpu_idle() makes use of check_pgt_cache(), which is a nop on
  both x86_32 and x86_64.

- x86_64's cpu_idle() uses enter_idle()/__exit_idle(); on x86_32 these
  functions are a nop.

- In contrast to x86_32, x86_64 calls rcu_idle_enter()/rcu_idle_exit() in
  the innermost loop, because idle notifications need RCU. Calling these
  functions in the innermost loop on x86_32 as well does not hurt.

So we can merge both functions.

Signed-off-by: Richard Weinberger <richard@nod.at>
Acked-by: Frederic Weisbecker <fweisbec@gmail.com>
Cc: paulmck@linux.vnet.ibm.com
Cc: josh@joshtriplett.org
Cc: tj@kernel.org
Link: http://lkml.kernel.org/r/1332709204-22496-1-git-send-email-richard@nod.at
Signed-off-by: Ingo Molnar <mingo@kernel.org>
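The merged loop itself lands in arch/x86/kernel/process.c, which this page does not show. As a rough, non-authoritative sketch of the shape implied by the points above (illustrative only, not the verbatim merged function), the combined cpu_idle() keeps the x86_64-only enter_idle()/__exit_idle() calls (nops on x86_32), can drop check_pgt_cache() outright (a nop on both), and brackets the actual idle call with rcu_idle_enter()/rcu_idle_exit() in the innermost loop:

/* Illustrative sketch only -- not the verbatim merged function. */
void cpu_idle(void)
{
	/* stack-canary and TS_POLLING setup as in the removed x86_32 version */
	boot_init_stack_canary();
	current_thread_info()->status |= TS_POLLING;

	while (1) {
		tick_nohz_idle_enter();
		while (!need_resched()) {
			rmb();

			if (cpu_is_offline(smp_processor_id()))
				play_dead();

			local_touch_nmi();
			local_irq_disable();
			enter_idle();			/* nop on x86_32 */

			/* Don't trace irqs off for idle */
			stop_critical_timings();
			rcu_idle_enter();		/* idle notifiers need RCU */
			if (cpuidle_idle_call())
				pm_idle();
			rcu_idle_exit();
			start_critical_timings();

			__exit_idle();			/* nop on x86_32 */
		}
		tick_nohz_idle_exit();
		schedule_preempt_disabled();
	}
}

Compare this with the x86_32-only version removed in the diff below, where rcu_idle_enter()/rcu_idle_exit() wrap the whole inner while (!need_resched()) loop rather than the individual idle call.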
Diffstat (limited to 'arch/x86/kernel/process_32.c')
-rw-r--r--	arch/x86/kernel/process_32.c	58
1 file changed, 0 insertions(+), 58 deletions(-)
diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c
index 9d7d4842bfaf..ea207c245aa4 100644
--- a/arch/x86/kernel/process_32.c
+++ b/arch/x86/kernel/process_32.c
@@ -9,7 +9,6 @@
  * This file handles the architecture-dependent parts of process handling..
  */
 
-#include <linux/stackprotector.h>
 #include <linux/cpu.h>
 #include <linux/errno.h>
 #include <linux/sched.h>
@@ -31,14 +30,12 @@
 #include <linux/kallsyms.h>
 #include <linux/ptrace.h>
 #include <linux/personality.h>
-#include <linux/tick.h>
 #include <linux/percpu.h>
 #include <linux/prctl.h>
 #include <linux/ftrace.h>
 #include <linux/uaccess.h>
 #include <linux/io.h>
 #include <linux/kdebug.h>
-#include <linux/cpuidle.h>
 
 #include <asm/pgtable.h>
 #include <asm/system.h>
@@ -58,7 +55,6 @@
 #include <asm/idle.h>
 #include <asm/syscalls.h>
 #include <asm/debugreg.h>
-#include <asm/nmi.h>
 
 asmlinkage void ret_from_fork(void) __asm__("ret_from_fork");
 
@@ -70,60 +66,6 @@ unsigned long thread_saved_pc(struct task_struct *tsk)
 	return ((unsigned long *)tsk->thread.sp)[3];
 }
 
-#ifndef CONFIG_SMP
-static inline void play_dead(void)
-{
-	BUG();
-}
-#endif
-
-/*
- * The idle thread. There's no useful work to be
- * done, so just try to conserve power and have a
- * low exit latency (ie sit in a loop waiting for
- * somebody to say that they'd like to reschedule)
- */
-void cpu_idle(void)
-{
-	int cpu = smp_processor_id();
-
-	/*
-	 * If we're the non-boot CPU, nothing set the stack canary up
-	 * for us. CPU0 already has it initialized but no harm in
-	 * doing it again. This is a good place for updating it, as
-	 * we wont ever return from this function (so the invalid
-	 * canaries already on the stack wont ever trigger).
-	 */
-	boot_init_stack_canary();
-
-	current_thread_info()->status |= TS_POLLING;
-
-	/* endless idle loop with no priority at all */
-	while (1) {
-		tick_nohz_idle_enter();
-		rcu_idle_enter();
-		while (!need_resched()) {
-
-			check_pgt_cache();
-			rmb();
-
-			if (cpu_is_offline(cpu))
-				play_dead();
-
-			local_touch_nmi();
-			local_irq_disable();
-			/* Don't trace irqs off for idle */
-			stop_critical_timings();
-			if (cpuidle_idle_call())
-				pm_idle();
-			start_critical_timings();
-		}
-		rcu_idle_exit();
-		tick_nohz_idle_exit();
-		schedule_preempt_disabled();
-	}
-}
-
 void __show_regs(struct pt_regs *regs, int all)
 {
 	unsigned long cr0 = 0L, cr2 = 0L, cr3 = 0L, cr4 = 0L;