diff options
Diffstat (limited to 'arch/x86/kernel/process_32.c')
-rw-r--r-- | arch/x86/kernel/process_32.c | 118 |
1 files changed, 4 insertions, 114 deletions
diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c index 77de848bd1fb..f8476dfbb60d 100644 --- a/arch/x86/kernel/process_32.c +++ b/arch/x86/kernel/process_32.c | |||
@@ -111,12 +111,10 @@ void default_idle(void) | |||
111 | */ | 111 | */ |
112 | smp_mb(); | 112 | smp_mb(); |
113 | 113 | ||
114 | local_irq_disable(); | 114 | if (!need_resched()) |
115 | if (!need_resched()) { | ||
116 | safe_halt(); /* enables interrupts racelessly */ | 115 | safe_halt(); /* enables interrupts racelessly */ |
117 | local_irq_disable(); | 116 | else |
118 | } | 117 | local_irq_enable(); |
119 | local_irq_enable(); | ||
120 | current_thread_info()->status |= TS_POLLING; | 118 | current_thread_info()->status |= TS_POLLING; |
121 | } else { | 119 | } else { |
122 | local_irq_enable(); | 120 | local_irq_enable(); |
@@ -128,17 +126,6 @@ void default_idle(void) | |||
128 | EXPORT_SYMBOL(default_idle); | 126 | EXPORT_SYMBOL(default_idle); |
129 | #endif | 127 | #endif |
130 | 128 | ||
131 | /* | ||
132 | * On SMP it's slightly faster (but much more power-consuming!) | ||
133 | * to poll the ->work.need_resched flag instead of waiting for the | ||
134 | * cross-CPU IPI to arrive. Use this option with caution. | ||
135 | */ | ||
136 | static void poll_idle(void) | ||
137 | { | ||
138 | local_irq_enable(); | ||
139 | cpu_relax(); | ||
140 | } | ||
141 | |||
142 | #ifdef CONFIG_HOTPLUG_CPU | 129 | #ifdef CONFIG_HOTPLUG_CPU |
143 | #include <asm/nmi.h> | 130 | #include <asm/nmi.h> |
144 | /* We don't actually take CPU down, just spin without interrupts. */ | 131 | /* We don't actually take CPU down, just spin without interrupts. */ |
@@ -196,6 +183,7 @@ void cpu_idle(void) | |||
196 | if (cpu_is_offline(cpu)) | 183 | if (cpu_is_offline(cpu)) |
197 | play_dead(); | 184 | play_dead(); |
198 | 185 | ||
186 | local_irq_disable(); | ||
199 | __get_cpu_var(irq_stat).idle_timestamp = jiffies; | 187 | __get_cpu_var(irq_stat).idle_timestamp = jiffies; |
200 | idle(); | 188 | idle(); |
201 | } | 189 | } |
@@ -206,104 +194,6 @@ void cpu_idle(void) | |||
206 | } | 194 | } |
207 | } | 195 | } |
208 | 196 | ||
209 | static void do_nothing(void *unused) | ||
210 | { | ||
211 | } | ||
212 | |||
213 | /* | ||
214 | * cpu_idle_wait - Used to ensure that all the CPUs discard old value of | ||
215 | * pm_idle and update to new pm_idle value. Required while changing pm_idle | ||
216 | * handler on SMP systems. | ||
217 | * | ||
218 | * Caller must have changed pm_idle to the new value before the call. Old | ||
219 | * pm_idle value will not be used by any CPU after the return of this function. | ||
220 | */ | ||
221 | void cpu_idle_wait(void) | ||
222 | { | ||
223 | smp_mb(); | ||
224 | /* kick all the CPUs so that they exit out of pm_idle */ | ||
225 | smp_call_function(do_nothing, NULL, 0, 1); | ||
226 | } | ||
227 | EXPORT_SYMBOL_GPL(cpu_idle_wait); | ||
228 | |||
229 | /* | ||
230 | * This uses new MONITOR/MWAIT instructions on P4 processors with PNI, | ||
231 | * which can obviate IPI to trigger checking of need_resched. | ||
232 | * We execute MONITOR against need_resched and enter optimized wait state | ||
233 | * through MWAIT. Whenever someone changes need_resched, we would be woken | ||
234 | * up from MWAIT (without an IPI). | ||
235 | * | ||
236 | * New with Core Duo processors, MWAIT can take some hints based on CPU | ||
237 | * capability. | ||
238 | */ | ||
239 | void mwait_idle_with_hints(unsigned long ax, unsigned long cx) | ||
240 | { | ||
241 | if (!need_resched()) { | ||
242 | __monitor((void *)&current_thread_info()->flags, 0, 0); | ||
243 | smp_mb(); | ||
244 | if (!need_resched()) | ||
245 | __sti_mwait(ax, cx); | ||
246 | else | ||
247 | local_irq_enable(); | ||
248 | } else | ||
249 | local_irq_enable(); | ||
250 | } | ||
251 | |||
252 | /* Default MONITOR/MWAIT with no hints, used for default C1 state */ | ||
253 | static void mwait_idle(void) | ||
254 | { | ||
255 | local_irq_enable(); | ||
256 | mwait_idle_with_hints(0, 0); | ||
257 | } | ||
258 | |||
259 | static int __cpuinit mwait_usable(const struct cpuinfo_x86 *c) | ||
260 | { | ||
261 | if (force_mwait) | ||
262 | return 1; | ||
263 | /* Any C1 states supported? */ | ||
264 | return c->cpuid_level >= 5 && ((cpuid_edx(5) >> 4) & 0xf) > 0; | ||
265 | } | ||
266 | |||
267 | void __cpuinit select_idle_routine(const struct cpuinfo_x86 *c) | ||
268 | { | ||
269 | static int selected; | ||
270 | |||
271 | if (selected) | ||
272 | return; | ||
273 | #ifdef CONFIG_X86_SMP | ||
274 | if (pm_idle == poll_idle && smp_num_siblings > 1) { | ||
275 | printk(KERN_WARNING "WARNING: polling idle and HT enabled," | ||
276 | " performance may degrade.\n"); | ||
277 | } | ||
278 | #endif | ||
279 | if (cpu_has(c, X86_FEATURE_MWAIT) && mwait_usable(c)) { | ||
280 | /* | ||
281 | * Skip, if setup has overridden idle. | ||
282 | * One CPU supports mwait => All CPUs supports mwait | ||
283 | */ | ||
284 | if (!pm_idle) { | ||
285 | printk(KERN_INFO "using mwait in idle threads.\n"); | ||
286 | pm_idle = mwait_idle; | ||
287 | } | ||
288 | } | ||
289 | selected = 1; | ||
290 | } | ||
291 | |||
292 | static int __init idle_setup(char *str) | ||
293 | { | ||
294 | if (!strcmp(str, "poll")) { | ||
295 | printk("using polling idle threads.\n"); | ||
296 | pm_idle = poll_idle; | ||
297 | } else if (!strcmp(str, "mwait")) | ||
298 | force_mwait = 1; | ||
299 | else | ||
300 | return -1; | ||
301 | |||
302 | boot_option_idle_override = 1; | ||
303 | return 0; | ||
304 | } | ||
305 | early_param("idle", idle_setup); | ||
306 | |||
307 | void __show_registers(struct pt_regs *regs, int all) | 197 | void __show_registers(struct pt_regs *regs, int all) |
308 | { | 198 | { |
309 | unsigned long cr0 = 0L, cr2 = 0L, cr3 = 0L, cr4 = 0L; | 199 | unsigned long cr0 = 0L, cr2 = 0L, cr3 = 0L, cr4 = 0L; |