author		Thomas Gleixner <tglx@linutronix.de>	2013-03-21 17:49:59 -0400
committer	Thomas Gleixner <tglx@linutronix.de>	2013-04-08 11:39:28 -0400
commit		dc775dd886618a1ea6f092bfb3ddc78660aa1a19
tree		601656205d801b13ee0d4bca634244fdb0882e28 /arch/sh/kernel
parent		99444202b924e916aad95073d23f9995ae8eb558
sh: Use generic idle loop
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Rusty Russell <rusty@rustcorp.com.au>
Cc: Paul McKenney <paulmck@linux.vnet.ibm.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Reviewed-by: Srivatsa S. Bhat <srivatsa.bhat@linux.vnet.ibm.com>
Cc: Magnus Damm <magnus.damm@gmail.com>
Cc: Paul Mundt <lethal@linux-sh.org>
Link: http://lkml.kernel.org/r/20130321215235.216323644@linutronix.de
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Diffstat (limited to 'arch/sh/kernel')
-rw-r--r--	arch/sh/kernel/idle.c	101
-rw-r--r--	arch/sh/kernel/smp.c	2
2 files changed, 12 insertions, 91 deletions
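
For orientation, the generic idle loop this patch converts sh to is the common code added earlier in this series (kernel/cpu/idle.c); it owns the outer loop and calls back into the architecture through a small set of hooks. The sketch below is a simplified, from-memory approximation of that loop, not the verbatim implementation: only cpu_startup_entry(), arch_cpu_idle() and arch_cpu_idle_dead() appear in this patch, and the remaining control flow (polling, RCU, tracing) is abbreviated and may differ in detail.

/*
 * Simplified sketch of the generic idle loop (kernel/cpu/idle.c) -- for
 * orientation only; polling, RCU and tracing details are omitted.
 */
void cpu_startup_entry(enum cpuhp_state state)
{
	arch_cpu_idle_prepare();			/* optional per-arch setup */

	while (1) {
		tick_nohz_idle_enter();

		while (!need_resched()) {
			if (cpu_is_offline(smp_processor_id()))
				arch_cpu_idle_dead();	/* sh: play_dead() */

			local_irq_disable();

			if (cpu_idle_force_poll)	/* "nohlt" handled generically */
				cpu_idle_poll();	/* replaces sh's poll_idle() */
			else
				arch_cpu_idle();	/* sh: cpuidle or sh_idle() */
		}

		tick_nohz_idle_exit();
		schedule_preempt_disabled();
	}
}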
diff --git a/arch/sh/kernel/idle.c b/arch/sh/kernel/idle.c
index 3d5a1b387cc0..2ea4483fd722 100644
--- a/arch/sh/kernel/idle.c
+++ b/arch/sh/kernel/idle.c
@@ -24,98 +24,24 @@
 
 static void (*sh_idle)(void);
 
-static int hlt_counter;
-
-static int __init nohlt_setup(char *__unused)
-{
-	hlt_counter = 1;
-	return 1;
-}
-__setup("nohlt", nohlt_setup);
-
-static int __init hlt_setup(char *__unused)
-{
-	hlt_counter = 0;
-	return 1;
-}
-__setup("hlt", hlt_setup);
-
-static inline int hlt_works(void)
-{
-	return !hlt_counter;
-}
-
-/*
- * On SMP it's slightly faster (but much more power-consuming!)
- * to poll the ->work.need_resched flag instead of waiting for the
- * cross-CPU IPI to arrive. Use this option with caution.
- */
-static void poll_idle(void)
+void default_idle(void)
 {
+	set_bl_bit();
 	local_irq_enable();
-	while (!need_resched())
-		cpu_relax();
+	/* Isn't this racy ? */
+	cpu_sleep();
+	clear_bl_bit();
 }
 
-void default_idle(void)
+void arch_cpu_idle_dead(void)
 {
-	if (hlt_works()) {
-		clear_thread_flag(TIF_POLLING_NRFLAG);
-		smp_mb__after_clear_bit();
-
-		set_bl_bit();
-		if (!need_resched()) {
-			local_irq_enable();
-			cpu_sleep();
-		} else
-			local_irq_enable();
-
-		set_thread_flag(TIF_POLLING_NRFLAG);
-		clear_bl_bit();
-	} else
-		poll_idle();
+	play_dead();
 }
 
-/*
- * The idle thread. There's no useful work to be done, so just try to conserve
- * power and have a low exit latency (ie sit in a loop waiting for somebody to
- * say that they'd like to reschedule)
- */
-void cpu_idle(void)
+void arch_cpu_idle(void)
 {
-	unsigned int cpu = smp_processor_id();
-
-	set_thread_flag(TIF_POLLING_NRFLAG);
-
-	/* endless idle loop with no priority at all */
-	while (1) {
-		tick_nohz_idle_enter();
-		rcu_idle_enter();
-
-		while (!need_resched()) {
-			check_pgt_cache();
-			rmb();
-
-			if (cpu_is_offline(cpu))
-				play_dead();
-
-			local_irq_disable();
-			/* Don't trace irqs off for idle */
-			stop_critical_timings();
-			if (cpuidle_idle_call())
-				sh_idle();
-			/*
-			 * Sanity check to ensure that sh_idle() returns
-			 * with IRQs enabled
-			 */
-			WARN_ON(irqs_disabled());
-			start_critical_timings();
-		}
-
-		rcu_idle_exit();
-		tick_nohz_idle_exit();
-		schedule_preempt_disabled();
-	}
+	if (cpuidle_idle_call())
+		sh_idle();
 }
 
 void __init select_idle_routine(void)
@@ -123,13 +49,8 @@ void __init select_idle_routine(void)
 	/*
 	 * If a platform has set its own idle routine, leave it alone.
 	 */
-	if (sh_idle)
-		return;
-
-	if (hlt_works())
+	if (!sh_idle)
 		sh_idle = default_idle;
-	else
-		sh_idle = poll_idle;
 }
 
 void stop_this_cpu(void *unused)
diff --git a/arch/sh/kernel/smp.c b/arch/sh/kernel/smp.c
index 2062aa88af41..45696451f0ea 100644
--- a/arch/sh/kernel/smp.c
+++ b/arch/sh/kernel/smp.c
@@ -203,7 +203,7 @@ asmlinkage void __cpuinit start_secondary(void)
 	set_cpu_online(cpu, true);
 	per_cpu(cpu_state, cpu) = CPU_ONLINE;
 
-	cpu_idle();
+	cpu_startup_entry(CPUHP_ONLINE);
 }
 
 extern struct {
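
With the conversion in place, secondary CPUs enter the shared loop through cpu_startup_entry(CPUHP_ONLINE) in start_secondary() rather than the arch-private cpu_idle(). The policy that used to live in idle.c moves with it: the "hlt"/"nohlt" boot options and the polling fallback deleted above have generic equivalents in the common idle code (its force-poll handling), which is why select_idle_routine() now only needs to install default_idle() when a platform has not provided its own sh_idle routine.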