path: root/arch/sh/kernel/idle.c
author    Paul Mundt <lethal@linux-sh.org> 2009-10-16 04:20:58 -0400
committer Paul Mundt <lethal@linux-sh.org> 2009-10-16 04:20:58 -0400
commit    f533c3d340536198a4889a42a68d6c0d79a504e7 (patch)
tree      1e45cd7687b4b0eb9eca5339d92c79abae5db0b5 /arch/sh/kernel/idle.c
parent    94eab0bb206443dd7480349804f64e2bba8dc6e1 (diff)
sh: Idle loop chainsawing for SMP-based light sleep.
This does a bit of chainsawing of the idle loop code to get light sleep working on SMP. Previously this was forcing secondary CPUs into sleep mode, with them not coming back if they didn't have their own local timers.

Given that we use clockevents broadcasting by default, the CPU managing the clockevents can't have IRQs disabled before entering its sleep state.

This unfortunately leaves us with the age-old need_resched() race in between local_irq_enable() and cpu_sleep(), but at present this is unavoidable. After some more experimentation it may be possible to layer SR.BL bit manipulation on top of this scheme to inhibit the race condition, but given the current potential for missing wakeups, this is left as a future exercise.

Signed-off-by: Paul Mundt <lethal@linux-sh.org>
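[Editor's note] For illustration only, and not part of the commit: a minimal C sketch of the race window the message describes. With clockevents broadcasting, the sleeping CPU has to take the broadcast tick as a real interrupt, so IRQs must be enabled before the sleep instruction; a wakeup that lands between the need_resched() check and cpu_sleep() can then be slept through. The function name racy_light_sleep is hypothetical; need_resched(), local_irq_enable(), and cpu_sleep() are the primitives used in the diff below, with header placement for cpu_sleep() assumed.

#include <linux/sched.h>        /* need_resched() */
#include <linux/irqflags.h>     /* local_irq_enable() */
#include <asm/system.h>         /* cpu_sleep() on sh (assumed location) */

/* Hypothetical illustration, not code from this commit. */
static void racy_light_sleep(void)
{
        if (!need_resched()) {          /* 1: no work pending yet */
                local_irq_enable();     /* 2: IRQs must be on so the
                                         *    broadcast tick can arrive */
                /*
                 * A wakeup IPI landing exactly here runs its handler,
                 * sets TIF_NEED_RESCHED, and returns; the sleep below
                 * is then entered with work already pending.
                 */
                cpu_sleep();            /* 3: sleeps through the wakeup */
        }
}

The set_bl_bit()/clear_bl_bit() pair removed by this patch toggles SR.BL, which holds interrupt acceptance off while leaving requests pending; that is the mechanism the message suggests could later be layered back on to close the window between steps 1 and 3.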
Diffstat (limited to 'arch/sh/kernel/idle.c')
-rw-r--r--  arch/sh/kernel/idle.c | 73
1 file changed, 57 insertions(+), 16 deletions(-)
diff --git a/arch/sh/kernel/idle.c b/arch/sh/kernel/idle.c
index 27ff2dc093c7..8e61241230cb 100644
--- a/arch/sh/kernel/idle.c
+++ b/arch/sh/kernel/idle.c
@@ -21,7 +21,7 @@
 #include <asm/atomic.h>
 
 static int hlt_counter;
-void (*pm_idle)(void);
+void (*pm_idle)(void) = NULL;
 void (*pm_power_off)(void);
 EXPORT_SYMBOL(pm_power_off);
 
@@ -39,41 +39,68 @@ static int __init hlt_setup(char *__unused)
 }
 __setup("hlt", hlt_setup);
 
+static inline int hlt_works(void)
+{
+        return !hlt_counter;
+}
+
+/*
+ * On SMP it's slightly faster (but much more power-consuming!)
+ * to poll the ->work.need_resched flag instead of waiting for the
+ * cross-CPU IPI to arrive. Use this option with caution.
+ */
+static void poll_idle(void)
+{
+        local_irq_enable();
+        while (!need_resched())
+                cpu_relax();
+}
+
 void default_idle(void)
 {
-        if (!hlt_counter) {
+        if (hlt_works()) {
                 clear_thread_flag(TIF_POLLING_NRFLAG);
                 smp_mb__after_clear_bit();
-                set_bl_bit();
-                stop_critical_timings();
 
-                while (!need_resched())
+                if (!need_resched()) {
+                        local_irq_enable();
                         cpu_sleep();
+                }
 
-                start_critical_timings();
-                clear_bl_bit();
                 set_thread_flag(TIF_POLLING_NRFLAG);
         } else
-                while (!need_resched())
-                        cpu_relax();
+                poll_idle();
 }
 
+/*
+ * The idle thread. There's no useful work to be done, so just try to conserve
+ * power and have a low exit latency (ie sit in a loop waiting for somebody to
+ * say that they'd like to reschedule)
+ */
 void cpu_idle(void)
 {
+        unsigned int cpu = smp_processor_id();
+
         set_thread_flag(TIF_POLLING_NRFLAG);
 
         /* endless idle loop with no priority at all */
         while (1) {
-                void (*idle)(void) = pm_idle;
+                tick_nohz_stop_sched_tick(1);
 
-                if (!idle)
-                        idle = default_idle;
+                while (!need_resched() && cpu_online(cpu)) {
+                        local_irq_disable();
+                        /* Don't trace irqs off for idle */
+                        stop_critical_timings();
+                        pm_idle();
+                        /*
+                         * Sanity check to ensure that pm_idle() returns
+                         * with IRQs enabled
+                         */
+                        WARN_ON(irqs_disabled());
+                        start_critical_timings();
+                }
 
-                tick_nohz_stop_sched_tick(1);
-                while (!need_resched())
-                        idle();
                 tick_nohz_restart_sched_tick();
-
                 preempt_enable_no_resched();
                 schedule();
                 preempt_disable();
@@ -81,6 +108,20 @@ void cpu_idle(void)
         }
 }
 
+void __cpuinit select_idle_routine(void)
+{
+        /*
+         * If a platform has set its own idle routine, leave it alone.
+         */
+        if (pm_idle)
+                return;
+
+        if (hlt_works())
+                pm_idle = default_idle;
+        else
+                pm_idle = poll_idle;
+}
+
 static void do_nothing(void *unused)
 {
 }