Diffstat (limited to 'arch/x86/kernel/process.c')
-rw-r--r--	arch/x86/kernel/process.c	105
1 file changed, 27 insertions(+), 78 deletions(-)
diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
index 14ae10031ff0..6833bffaadb7 100644
--- a/arch/x86/kernel/process.c
+++ b/arch/x86/kernel/process.c
@@ -301,13 +301,7 @@ void exit_idle(void)
 }
 #endif
 
-/*
- * The idle thread. There's no useful work to be
- * done, so just try to conserve power and have a
- * low exit latency (ie sit in a loop waiting for
- * somebody to say that they'd like to reschedule)
- */
-void cpu_idle(void)
+void arch_cpu_idle_prepare(void)
 {
 	/*
 	 * If we're the non-boot CPU, nothing set the stack canary up
@@ -317,71 +311,40 @@ void cpu_idle(void)
 	 * canaries already on the stack wont ever trigger).
 	 */
 	boot_init_stack_canary();
-	current_thread_info()->status |= TS_POLLING;
-
-	while (1) {
-		tick_nohz_idle_enter();
-
-		while (!need_resched()) {
-			rmb();
-
-			if (cpu_is_offline(smp_processor_id()))
-				play_dead();
-
-			/*
-			 * Idle routines should keep interrupts disabled
-			 * from here on, until they go to idle.
-			 * Otherwise, idle callbacks can misfire.
-			 */
-			local_touch_nmi();
-			local_irq_disable();
-
-			enter_idle();
-
-			/* Don't trace irqs off for idle */
-			stop_critical_timings();
-
-			/* enter_idle() needs rcu for notifiers */
-			rcu_idle_enter();
+}
 
-			if (cpuidle_idle_call())
-				x86_idle();
+void arch_cpu_idle_enter(void)
+{
+	local_touch_nmi();
+	enter_idle();
+}
 
-			rcu_idle_exit();
-			start_critical_timings();
+void arch_cpu_idle_exit(void)
+{
+	__exit_idle();
+}
 
-			/* In many cases the interrupt that ended idle
-			   has already called exit_idle. But some idle
-			   loops can be woken up without interrupt. */
-			__exit_idle();
-		}
+void arch_cpu_idle_dead(void)
+{
+	play_dead();
+}
 
-		tick_nohz_idle_exit();
-		preempt_enable_no_resched();
-		schedule();
-		preempt_disable();
-	}
+/*
+ * Called from the generic idle code.
+ */
+void arch_cpu_idle(void)
+{
+	if (cpuidle_idle_call())
+		x86_idle();
 }
 
 /*
- * We use this if we don't have any better
- * idle routine..
+ * We use this if we don't have any better idle routine..
  */
 void default_idle(void)
 {
 	trace_cpu_idle_rcuidle(1, smp_processor_id());
-	current_thread_info()->status &= ~TS_POLLING;
-	/*
-	 * TS_POLLING-cleared state must be visible before we
-	 * test NEED_RESCHED:
-	 */
-	smp_mb();
-
-	if (!need_resched())
-		safe_halt();	/* enables interrupts racelessly */
-	else
-		local_irq_enable();
-	current_thread_info()->status |= TS_POLLING;
+	safe_halt();
 	trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, smp_processor_id());
 }
 #ifdef CONFIG_APM_MODULE
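
Note: the loop logic removed in this hunk does not disappear; it moves into the generic idle code introduced alongside this patch (kernel/cpu/idle.c), which calls back into the new arch_cpu_idle_*() hooks. Below is a simplified sketch, reconstructed from the x86 loop deleted here, of how that generic loop is assumed to drive the hooks; the names cpu_idle_loop() and cpu_startup_entry() are taken from the companion generic-idle patches, and the real common code adds forced-polling and other corner cases not shown.

/*
 * Simplified sketch (not the literal upstream code) of the generic
 * idle loop that replaces the per-arch cpu_idle() removed above.
 * Reconstructed from the deleted x86 loop.
 */
static void cpu_idle_loop(void)
{
	while (1) {
		tick_nohz_idle_enter();

		while (!need_resched()) {
			rmb();

			if (cpu_is_offline(smp_processor_id()))
				arch_cpu_idle_dead();	/* x86: play_dead() */

			local_irq_disable();
			arch_cpu_idle_enter();		/* x86: local_touch_nmi() + enter_idle() */

			stop_critical_timings();
			rcu_idle_enter();
			arch_cpu_idle();		/* x86: cpuidle_idle_call() / x86_idle() */
			rcu_idle_exit();
			start_critical_timings();

			arch_cpu_idle_exit();		/* x86: __exit_idle() */
		}

		tick_nohz_idle_exit();
		preempt_enable_no_resched();
		schedule();
		preempt_disable();
	}
}

void cpu_startup_entry(enum cpuhp_state state)
{
	arch_cpu_idle_prepare();	/* x86: boot_init_stack_canary() */
	cpu_idle_loop();
}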
@@ -411,20 +374,6 @@ void stop_this_cpu(void *dummy)
 	halt();
 }
 
-/*
- * On SMP it's slightly faster (but much more power-consuming!)
- * to poll the ->work.need_resched flag instead of waiting for the
- * cross-CPU IPI to arrive. Use this option with caution.
- */
-static void poll_idle(void)
-{
-	trace_cpu_idle_rcuidle(0, smp_processor_id());
-	local_irq_enable();
-	while (!need_resched())
-		cpu_relax();
-	trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, smp_processor_id());
-}
-
 bool amd_e400_c1e_detected;
 EXPORT_SYMBOL(amd_e400_c1e_detected);
 
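
The removed poll_idle() likewise gets a generic replacement: the common idle code provides a polling path that spins on need_resched() instead of invoking arch_cpu_idle(), used when polling is forced (see the idle=poll change in the last hunk). A rough sketch of that path, based on the function deleted here; the exact generic implementation may order the tracing/RCU calls differently.

/*
 * Rough sketch of the generic polling idle path standing in for the
 * removed x86 poll_idle(): burn cycles with interrupts enabled so
 * wakeup latency is minimal, at the cost of power.
 */
static int cpu_idle_poll(void)
{
	rcu_idle_enter();
	trace_cpu_idle_rcuidle(0, smp_processor_id());
	local_irq_enable();

	while (!need_resched())
		cpu_relax();

	trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, smp_processor_id());
	rcu_idle_exit();
	return 1;
}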
@@ -489,10 +438,10 @@ static void amd_e400_idle(void)
 void __cpuinit select_idle_routine(const struct cpuinfo_x86 *c)
 {
 #ifdef CONFIG_SMP
-	if (x86_idle == poll_idle && smp_num_siblings > 1)
+	if (boot_option_idle_override == IDLE_POLL && smp_num_siblings > 1)
 		pr_warn_once("WARNING: polling idle and HT enabled, performance may degrade\n");
 #endif
-	if (x86_idle)
+	if (x86_idle || boot_option_idle_override == IDLE_POLL)
 		return;
 
 	if (cpu_has_amd_erratum(amd_erratum_400)) {
@@ -517,8 +466,8 @@ static int __init idle_setup(char *str)
 
 	if (!strcmp(str, "poll")) {
 		pr_info("using polling idle threads\n");
-		x86_idle = poll_idle;
 		boot_option_idle_override = IDLE_POLL;
+		cpu_idle_poll_ctrl(true);
 	} else if (!strcmp(str, "halt")) {
 		/*
 		 * When the boot option of idle=halt is added, halt is
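
With poll_idle() gone from this file, "idle=poll" now only records the override and calls cpu_idle_poll_ctrl(true), telling the generic idle loop to take its polling path instead of calling arch_cpu_idle(). A minimal sketch of what such a control helper is assumed to look like; the nesting-counter shape is an assumption, and the real helper may differ.

/*
 * Assumed shape of the generic poll control: a nesting counter that
 * the idle loop checks before choosing between polling and calling
 * arch_cpu_idle().
 */
static int cpu_idle_force_poll;

void cpu_idle_poll_ctrl(bool enable)
{
	if (enable) {
		cpu_idle_force_poll++;
	} else {
		cpu_idle_force_poll--;
		WARN_ON_ONCE(cpu_idle_force_poll < 0);
	}
}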