author    Thomas Gleixner <tglx@linutronix.de>    2013-03-21 17:49:59 -0400
committer Thomas Gleixner <tglx@linutronix.de>    2013-04-08 11:39:28 -0400
commit    dc775dd886618a1ea6f092bfb3ddc78660aa1a19 (patch)
tree      601656205d801b13ee0d4bca634244fdb0882e28
parent    99444202b924e916aad95073d23f9995ae8eb558 (diff)
sh: Use generic idle loop
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Rusty Russell <rusty@rustcorp.com.au>
Cc: Paul McKenney <paulmck@linux.vnet.ibm.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Reviewed-by: Cc: Srivatsa S. Bhat <srivatsa.bhat@linux.vnet.ibm.com>
Cc: Magnus Damm <magnus.damm@gmail.com>
Cc: Paul Mundt <lethal@linux-sh.org>
Link: http://lkml.kernel.org/r/20130321215235.216323644@linutronix.de
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
-rw-r--r--	arch/sh/Kconfig		  2
-rw-r--r--	arch/sh/kernel/idle.c	101
-rw-r--r--	arch/sh/kernel/smp.c	  2
3 files changed, 14 insertions, 91 deletions
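
This patch drops sh's private cpu_idle() loop and plugs the architecture into the common idle infrastructure instead: the shared loop (enabled by GENERIC_IDLE_LOOP) does the need_resched()/nohz/RCU bookkeeping and calls back into the architecture through arch_cpu_idle() and arch_cpu_idle_dead(), and CPUs enter it via cpu_startup_entry(). The sketch below is only an approximation of that control flow, assembled from the calls visible in the diff; the real generic implementation differs in detail.

/*
 * Simplified sketch of the generic idle loop (an approximation, not the
 * exact generic kernel code).  It shows where the sh hooks added by this
 * patch are called from.
 */
void cpu_startup_entry(enum cpuhp_state state)
{
	while (1) {
		tick_nohz_idle_enter();

		while (!need_resched()) {
			if (cpu_is_offline(smp_processor_id()))
				arch_cpu_idle_dead();	/* sh: play_dead() */

			local_irq_disable();
			rcu_idle_enter();
			arch_cpu_idle();	/* sh: cpuidle or sh_idle() */
			rcu_idle_exit();
		}

		tick_nohz_idle_exit();
		schedule_preempt_disabled();
	}
}
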
diff --git a/arch/sh/Kconfig b/arch/sh/Kconfig
index 5e859633ce69..853cbc971cdf 100644
--- a/arch/sh/Kconfig
+++ b/arch/sh/Kconfig
@@ -33,6 +33,8 @@ config SUPERH
 	select GENERIC_ATOMIC64
 	select GENERIC_IRQ_SHOW
 	select GENERIC_SMP_IDLE_THREAD
+	select GENERIC_IDLE_LOOP
+	select GENERIC_IDLE_POLL_SETUP
 	select GENERIC_CLOCKEVENTS
 	select GENERIC_CMOS_UPDATE if SH_SH03 || SH_DREAMCAST
 	select GENERIC_STRNCPY_FROM_USER
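
The two new selects wire sh into that common code. GENERIC_IDLE_LOOP enables the shared loop itself; GENERIC_IDLE_POLL_SETUP supplies generic handling of the "hlt"/"nohlt" boot parameters, which is why the hlt_counter, nohlt_setup() and hlt_setup() code can simply be deleted from arch/sh/kernel/idle.c below. Roughly, and with names that are an approximation rather than a quote of the generic source, that replacement looks like:

/* Approximate sketch of the generic "hlt"/"nohlt" handling selected by
 * GENERIC_IDLE_POLL_SETUP: it forces the idle loop into polling mode
 * instead of using the architecture sleep routine. */
static int __init cpu_idle_poll_setup(char *__unused)
{
	cpu_idle_force_poll = 1;	/* "nohlt": never call the arch sleep */
	return 1;
}
__setup("nohlt", cpu_idle_poll_setup);

static int __init cpu_idle_nopoll_setup(char *__unused)
{
	cpu_idle_force_poll = 0;	/* "hlt": allow the arch sleep routine */
	return 1;
}
__setup("hlt", cpu_idle_nopoll_setup);
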
diff --git a/arch/sh/kernel/idle.c b/arch/sh/kernel/idle.c
index 3d5a1b387cc0..2ea4483fd722 100644
--- a/arch/sh/kernel/idle.c
+++ b/arch/sh/kernel/idle.c
@@ -24,98 +24,24 @@
 
 static void (*sh_idle)(void);
 
-static int hlt_counter;
-
-static int __init nohlt_setup(char *__unused)
-{
-	hlt_counter = 1;
-	return 1;
-}
-__setup("nohlt", nohlt_setup);
-
-static int __init hlt_setup(char *__unused)
-{
-	hlt_counter = 0;
-	return 1;
-}
-__setup("hlt", hlt_setup);
-
-static inline int hlt_works(void)
-{
-	return !hlt_counter;
-}
-
-/*
- * On SMP it's slightly faster (but much more power-consuming!)
- * to poll the ->work.need_resched flag instead of waiting for the
- * cross-CPU IPI to arrive. Use this option with caution.
- */
-static void poll_idle(void)
+void default_idle(void)
 {
+	set_bl_bit();
 	local_irq_enable();
-	while (!need_resched())
-		cpu_relax();
+	/* Isn't this racy ? */
+	cpu_sleep();
+	clear_bl_bit();
 }
 
-void default_idle(void)
+void arch_cpu_idle_dead(void)
 {
-	if (hlt_works()) {
-		clear_thread_flag(TIF_POLLING_NRFLAG);
-		smp_mb__after_clear_bit();
-
-		set_bl_bit();
-		if (!need_resched()) {
-			local_irq_enable();
-			cpu_sleep();
-		} else
-			local_irq_enable();
-
-		set_thread_flag(TIF_POLLING_NRFLAG);
-		clear_bl_bit();
-	} else
-		poll_idle();
+	play_dead();
 }
 
-/*
- * The idle thread. There's no useful work to be done, so just try to conserve
- * power and have a low exit latency (ie sit in a loop waiting for somebody to
- * say that they'd like to reschedule)
- */
-void cpu_idle(void)
+void arch_cpu_idle(void)
 {
-	unsigned int cpu = smp_processor_id();
-
-	set_thread_flag(TIF_POLLING_NRFLAG);
-
-	/* endless idle loop with no priority at all */
-	while (1) {
-		tick_nohz_idle_enter();
-		rcu_idle_enter();
-
-		while (!need_resched()) {
-			check_pgt_cache();
-			rmb();
-
-			if (cpu_is_offline(cpu))
-				play_dead();
-
-			local_irq_disable();
-			/* Don't trace irqs off for idle */
-			stop_critical_timings();
-			if (cpuidle_idle_call())
-				sh_idle();
-			/*
-			 * Sanity check to ensure that sh_idle() returns
-			 * with IRQs enabled
-			 */
-			WARN_ON(irqs_disabled());
-			start_critical_timings();
-		}
-
-		rcu_idle_exit();
-		tick_nohz_idle_exit();
-		schedule_preempt_disabled();
-	}
+	if (cpuidle_idle_call())
+		sh_idle();
 }
 
 void __init select_idle_routine(void)
@@ -123,13 +49,8 @@ void __init select_idle_routine(void)
 	/*
 	 * If a platform has set its own idle routine, leave it alone.
 	 */
-	if (sh_idle)
-		return;
-
-	if (hlt_works())
+	if (!sh_idle)
 		sh_idle = default_idle;
-	else
-		sh_idle = poll_idle;
 }
 
 void stop_this_cpu(void *unused)
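
Read off the + side of the two hunks, the arch file is left with three small callbacks; the comments below are annotations added here, not part of the patch (set_bl_bit()/cpu_sleep() are taken to set the SuperH SR.BL bit and execute the sleep instruction, respectively):

void default_idle(void)
{
	set_bl_bit();		/* block exceptions/interrupts via SR.BL */
	local_irq_enable();
	/* Isn't this racy ? */
	cpu_sleep();		/* sleep until the next interrupt */
	clear_bl_bit();
}

void arch_cpu_idle_dead(void)
{
	play_dead();		/* offline path, formerly inside cpu_idle() */
}

void arch_cpu_idle(void)
{
	if (cpuidle_idle_call())	/* fall back if cpuidle is unavailable */
		sh_idle();		/* platform idle routine or default_idle */
}
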
diff --git a/arch/sh/kernel/smp.c b/arch/sh/kernel/smp.c
index 2062aa88af41..45696451f0ea 100644
--- a/arch/sh/kernel/smp.c
+++ b/arch/sh/kernel/smp.c
@@ -203,7 +203,7 @@ asmlinkage void __cpuinit start_secondary(void)
 	set_cpu_online(cpu, true);
 	per_cpu(cpu_state, cpu) = CPU_ONLINE;
 
-	cpu_idle();
+	cpu_startup_entry(CPUHP_ONLINE);
 }
 
 extern struct {
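
With the private loop gone, secondary CPU bring-up ends by entering the same generic loop; the tail of start_secondary() now reads (from the + line above, comment added here):

	set_cpu_online(cpu, true);
	per_cpu(cpu_state, cpu) = CPU_ONLINE;

	/* hand the CPU to the generic idle loop; does not return */
	cpu_startup_entry(CPUHP_ONLINE);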