Diffstat (limited to 'arch/sh/kernel/idle.c')
 -rw-r--r--  arch/sh/kernel/idle.c  92
 1 file changed, 73 insertions(+), 19 deletions(-)
diff --git a/arch/sh/kernel/idle.c b/arch/sh/kernel/idle.c
index 27ff2dc093c7..273f890b17ae 100644
--- a/arch/sh/kernel/idle.c
+++ b/arch/sh/kernel/idle.c
@@ -20,10 +20,9 @@
 #include <asm/system.h>
 #include <asm/atomic.h>
 
+void (*pm_idle)(void) = NULL;
+
 static int hlt_counter;
-void (*pm_idle)(void);
-void (*pm_power_off)(void);
-EXPORT_SYMBOL(pm_power_off);
 
 static int __init nohlt_setup(char *__unused)
 {
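
A note on the first hunk: the pm_idle definition now lives in idle.c and is NULL-initialized, so platform code that wants its own idle routine can install one before select_idle_routine() (added further down) picks a fallback. A minimal sketch of such an override, assuming hypothetical my_board_idle()/my_board_init() helpers and an init path that runs before select_idle_routine():

#include <linux/init.h>
#include <linux/irqflags.h>

extern void (*pm_idle)(void);

/* Hypothetical board idle routine; not part of this patch. */
static void my_board_idle(void)
{
        /*
         * cpu_idle() invokes pm_idle() with IRQs disabled and WARNs if
         * it returns with them still off, so enable before returning.
         */
        local_irq_enable();
}

static int __init my_board_init(void)
{
        /* A non-NULL pm_idle makes select_idle_routine() return early. */
        pm_idle = my_board_idle;
        return 0;
}
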
@@ -39,52 +38,107 @@ static int __init hlt_setup(char *__unused)
 }
 __setup("hlt", hlt_setup);
 
+static inline int hlt_works(void)
+{
+        return !hlt_counter;
+}
+
+/*
+ * On SMP it's slightly faster (but much more power-consuming!)
+ * to poll the ->work.need_resched flag instead of waiting for the
+ * cross-CPU IPI to arrive. Use this option with caution.
+ */
+static void poll_idle(void)
+{
+        local_irq_enable();
+        while (!need_resched())
+                cpu_relax();
+}
+
 void default_idle(void)
 {
-        if (!hlt_counter) {
+        if (hlt_works()) {
                 clear_thread_flag(TIF_POLLING_NRFLAG);
                 smp_mb__after_clear_bit();
-                set_bl_bit();
-                stop_critical_timings();
 
-                while (!need_resched())
+                set_bl_bit();
+                if (!need_resched()) {
+                        local_irq_enable();
                         cpu_sleep();
+                } else
+                        local_irq_enable();
 
-                start_critical_timings();
-                clear_bl_bit();
                 set_thread_flag(TIF_POLLING_NRFLAG);
+                clear_bl_bit();
         } else
-                while (!need_resched())
-                        cpu_relax();
+                poll_idle();
 }
 
+/*
+ * The idle thread. There's no useful work to be done, so just try to conserve
+ * power and have a low exit latency (ie sit in a loop waiting for somebody to
+ * say that they'd like to reschedule)
+ */
 void cpu_idle(void)
 {
+        unsigned int cpu = smp_processor_id();
+
         set_thread_flag(TIF_POLLING_NRFLAG);
 
         /* endless idle loop with no priority at all */
         while (1) {
-                void (*idle)(void) = pm_idle;
+                tick_nohz_stop_sched_tick(1);
 
-                if (!idle)
-                        idle = default_idle;
+                while (!need_resched() && cpu_online(cpu)) {
+                        check_pgt_cache();
+                        rmb();
 
-                tick_nohz_stop_sched_tick(1);
-                while (!need_resched())
-                        idle();
-                tick_nohz_restart_sched_tick();
+                        local_irq_disable();
+                        /* Don't trace irqs off for idle */
+                        stop_critical_timings();
+                        pm_idle();
+                        /*
+                         * Sanity check to ensure that pm_idle() returns
+                         * with IRQs enabled
+                         */
+                        WARN_ON(irqs_disabled());
+                        start_critical_timings();
+                }
 
+                tick_nohz_restart_sched_tick();
                 preempt_enable_no_resched();
                 schedule();
                 preempt_disable();
-                check_pgt_cache();
         }
 }
 
+void __init select_idle_routine(void)
+{
+        /*
+         * If a platform has set its own idle routine, leave it alone.
+         */
+        if (pm_idle)
+                return;
+
+        if (hlt_works())
+                pm_idle = default_idle;
+        else
+                pm_idle = poll_idle;
+}
+
 static void do_nothing(void *unused)
 {
 }
 
+void stop_this_cpu(void *unused)
+{
+        local_irq_disable();
+        cpu_clear(smp_processor_id(), cpu_online_map);
+
+        for (;;)
+                cpu_sleep();
+}
+
 /*
  * cpu_idle_wait - Used to ensure that all the CPUs discard old value of
  * pm_idle and update to new pm_idle value. Required while changing pm_idle
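
The heart of the rework is the sleep entry in default_idle(): need_resched() is now tested while set_bl_bit() blocks interrupt delivery, and cpu_sleep() is entered only when no wakeup is pending, so a cross-CPU reschedule IPI cannot slip in between the test and the sleep. A sketch of the idiom, with hypothetical arch_* hooks standing in for the SH-specific set_bl_bit()/local_irq_enable()/cpu_sleep() sequence:

/*
 * Sketch only; the arch_* hooks are invented stand-ins. The wakeup
 * test runs while interrupts are blocked, so an IPI that arrives
 * between the test and the halt wakes the CPU instead of being
 * consumed before it sleeps.
 */
static void idle_sleep(void)
{
        arch_block_irqs();                 /* e.g. set_bl_bit() on SH */
        if (!need_resched())
                arch_unblock_and_sleep();  /* enable IRQs, then halt */
        else
                arch_unblock_irqs();       /* wakeup pending: skip the halt */
}

With that settled, select_idle_routine() only has to pick a sane default: booting with "nohlt" bumps hlt_counter, hlt_works() then returns 0, and poll_idle() is used in place of default_idle().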