Diffstat (limited to 'arch/sh/kernel')
-rw-r--r--	arch/sh/kernel/idle.c	| 73
1 file changed, 57 insertions(+), 16 deletions(-)
diff --git a/arch/sh/kernel/idle.c b/arch/sh/kernel/idle.c
index 27ff2dc093c7..8e61241230cb 100644
--- a/arch/sh/kernel/idle.c
+++ b/arch/sh/kernel/idle.c
@@ -21,7 +21,7 @@
 #include <asm/atomic.h>
 
 static int hlt_counter;
-void (*pm_idle)(void);
+void (*pm_idle)(void) = NULL;
 void (*pm_power_off)(void);
 EXPORT_SYMBOL(pm_power_off);
 
@@ -39,41 +39,68 @@ static int __init hlt_setup(char *__unused)
 }
 __setup("hlt", hlt_setup);
 
+static inline int hlt_works(void)
+{
+	return !hlt_counter;
+}
+
+/*
+ * On SMP it's slightly faster (but much more power-consuming!)
+ * to poll the ->work.need_resched flag instead of waiting for the
+ * cross-CPU IPI to arrive. Use this option with caution.
+ */
+static void poll_idle(void)
+{
+	local_irq_enable();
+	while (!need_resched())
+		cpu_relax();
+}
+
 void default_idle(void)
 {
-	if (!hlt_counter) {
+	if (hlt_works()) {
 		clear_thread_flag(TIF_POLLING_NRFLAG);
 		smp_mb__after_clear_bit();
-		set_bl_bit();
-		stop_critical_timings();
 
-		while (!need_resched())
+		if (!need_resched()) {
+			local_irq_enable();
 			cpu_sleep();
+		}
 
-		start_critical_timings();
-		clear_bl_bit();
 		set_thread_flag(TIF_POLLING_NRFLAG);
 	} else
-		while (!need_resched())
-			cpu_relax();
+		poll_idle();
 }
 
+/*
+ * The idle thread. There's no useful work to be done, so just try to conserve
+ * power and have a low exit latency (ie sit in a loop waiting for somebody to
+ * say that they'd like to reschedule)
+ */
 void cpu_idle(void)
 {
+	unsigned int cpu = smp_processor_id();
+
 	set_thread_flag(TIF_POLLING_NRFLAG);
 
 	/* endless idle loop with no priority at all */
 	while (1) {
-		void (*idle)(void) = pm_idle;
+		tick_nohz_stop_sched_tick(1);
 
-		if (!idle)
-			idle = default_idle;
+		while (!need_resched() && cpu_online(cpu)) {
+			local_irq_disable();
+			/* Don't trace irqs off for idle */
+			stop_critical_timings();
+			pm_idle();
+			/*
+			 * Sanity check to ensure that pm_idle() returns
+			 * with IRQs enabled
+			 */
+			WARN_ON(irqs_disabled());
+			start_critical_timings();
+		}
 
-		tick_nohz_stop_sched_tick(1);
-		while (!need_resched())
-			idle();
 		tick_nohz_restart_sched_tick();
-
 		preempt_enable_no_resched();
 		schedule();
 		preempt_disable();
@@ -81,6 +108,20 @@ void cpu_idle(void)
 	}
 }
 
+void __cpuinit select_idle_routine(void)
+{
+	/*
+	 * If a platform has set its own idle routine, leave it alone.
+	 */
+	if (pm_idle)
+		return;
+
+	if (hlt_works())
+		pm_idle = default_idle;
+	else
+		pm_idle = poll_idle;
+}
+
 static void do_nothing(void *unused)
 {
 }
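
For readers skimming the patch, the pattern it introduces is pm_idle function-pointer dispatch: select_idle_routine() leaves a platform-installed pm_idle alone, otherwise it picks default_idle() (sleep until an interrupt) when "hlt" is allowed and poll_idle() when it is not, and cpu_idle() simply calls through the pointer. The standalone sketch below models only that selection and dispatch logic in userspace C; it is an illustration, not kernel code, and fake_need_resched() plus the puts() stand-ins for cpu_sleep()/cpu_relax() are invented for the example.

/*
 * Userspace model of the pm_idle selection introduced by this patch.
 * All "fake" helpers are stand-ins for illustration only; the real
 * kernel need_resched()/cpu_sleep()/cpu_relax() behave differently.
 */
#include <stdio.h>

static int hlt_counter;		/* bumped by the "hlt" command-line option */
static void (*pm_idle)(void);	/* NULL until a platform or the selector sets it */

static int fake_need_resched(void)
{
	static int calls;
	return ++calls > 3;	/* pretend work shows up after a few iterations */
}

static int hlt_works(void)
{
	return !hlt_counter;
}

/* Busy-poll variant: lower wakeup latency, higher power draw. */
static void poll_idle(void)
{
	while (!fake_need_resched())
		puts("polling (cpu_relax stand-in)");
}

/* Sleep variant: models default_idle()'s "sleep until an interrupt". */
static void default_idle(void)
{
	if (!fake_need_resched())
		puts("sleeping (cpu_sleep stand-in)");
}

/* Mirrors select_idle_routine(): respect a platform override, otherwise
 * choose between sleeping and polling based on whether "hlt" is allowed. */
static void select_idle_routine(void)
{
	if (pm_idle)
		return;
	pm_idle = hlt_works() ? default_idle : poll_idle;
}

int main(void)
{
	select_idle_routine();

	/* cpu_idle()'s inner loop reduced to its essence: call through the
	 * chosen routine until there is work to do. */
	while (!fake_need_resched())
		pm_idle();

	puts("need_resched set, leaving idle");
	return 0;
}

The NULL check in select_idle_routine() is what keeps the door open for board code to install its own idle routine before the generic selector runs, per the "leave it alone" comment in the patch.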