Diffstat (limited to 'arch/x86/kernel/process.c')
-rw-r--r--	arch/x86/kernel/process.c | 116
1 file changed, 17 insertions(+), 99 deletions(-)
diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
index dcfc1f410dc4..14ae10031ff0 100644
--- a/arch/x86/kernel/process.c
+++ b/arch/x86/kernel/process.c
@@ -268,13 +268,7 @@ void __switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p,
 unsigned long boot_option_idle_override = IDLE_NO_OVERRIDE;
 EXPORT_SYMBOL(boot_option_idle_override);
 
-/*
- * Powermanagement idle function, if any..
- */
-void (*pm_idle)(void);
-#ifdef CONFIG_APM_MODULE
-EXPORT_SYMBOL(pm_idle);
-#endif
+static void (*x86_idle)(void);
 
 #ifndef CONFIG_SMP
 static inline void play_dead(void)
@@ -351,7 +345,7 @@ void cpu_idle(void)
 			rcu_idle_enter();
 
 			if (cpuidle_idle_call())
-				pm_idle();
+				x86_idle();
 
 			rcu_idle_exit();
 			start_critical_timings();
@@ -394,14 +388,16 @@ void default_idle(void)
 EXPORT_SYMBOL(default_idle);
 #endif
 
-bool set_pm_idle_to_default(void)
+#ifdef CONFIG_XEN
+bool xen_set_default_idle(void)
 {
-	bool ret = !!pm_idle;
+	bool ret = !!x86_idle;
 
-	pm_idle = default_idle;
+	x86_idle = default_idle;
 
 	return ret;
 }
+#endif
 void stop_this_cpu(void *dummy)
 {
 	local_irq_disable();
@@ -411,29 +407,8 @@ void stop_this_cpu(void *dummy)
 	set_cpu_online(smp_processor_id(), false);
 	disable_local_APIC();
 
-	for (;;) {
-		if (hlt_works(smp_processor_id()))
-			halt();
-	}
-}
-
-/* Default MONITOR/MWAIT with no hints, used for default C1 state */
-static void mwait_idle(void)
-{
-	if (!need_resched()) {
-		trace_cpu_idle_rcuidle(1, smp_processor_id());
-		if (this_cpu_has(X86_FEATURE_CLFLUSH_MONITOR))
-			clflush((void *)&current_thread_info()->flags);
-
-		__monitor((void *)&current_thread_info()->flags, 0, 0);
-		smp_mb();
-		if (!need_resched())
-			__sti_mwait(0, 0);
-		else
-			local_irq_enable();
-		trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, smp_processor_id());
-	} else
-		local_irq_enable();
-}
+	for (;;)
+		halt();
+}
 
 /*
@@ -450,53 +425,6 @@ static void poll_idle(void)
 	trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, smp_processor_id());
 }
 
-/*
- * mwait selection logic:
- *
- * It depends on the CPU. For AMD CPUs that support MWAIT this is
- * wrong. Family 0x10 and 0x11 CPUs will enter C1 on HLT. Powersavings
- * then depend on a clock divisor and current Pstate of the core. If
- * all cores of a processor are in halt state (C1) the processor can
- * enter the C1E (C1 enhanced) state. If mwait is used this will never
- * happen.
- *
- * idle=mwait overrides this decision and forces the usage of mwait.
- */
-
-#define MWAIT_INFO			0x05
-#define MWAIT_ECX_EXTENDED_INFO		0x01
-#define MWAIT_EDX_C1			0xf0
-
-int mwait_usable(const struct cpuinfo_x86 *c)
-{
-	u32 eax, ebx, ecx, edx;
-
-	/* Use mwait if idle=mwait boot option is given */
-	if (boot_option_idle_override == IDLE_FORCE_MWAIT)
-		return 1;
-
-	/*
-	 * Any idle= boot option other than idle=mwait means that we must not
-	 * use mwait. Eg: idle=halt or idle=poll or idle=nomwait
-	 */
-	if (boot_option_idle_override != IDLE_NO_OVERRIDE)
-		return 0;
-
-	if (c->cpuid_level < MWAIT_INFO)
-		return 0;
-
-	cpuid(MWAIT_INFO, &eax, &ebx, &ecx, &edx);
-	/* Check, whether EDX has extended info about MWAIT */
-	if (!(ecx & MWAIT_ECX_EXTENDED_INFO))
-		return 1;
-
-	/*
-	 * edx enumeratios MONITOR/MWAIT extensions. Check, whether
-	 * C1 supports MWAIT
-	 */
-	return (edx & MWAIT_EDX_C1);
-}
-
 bool amd_e400_c1e_detected;
 EXPORT_SYMBOL(amd_e400_c1e_detected);
 
@@ -561,31 +489,24 @@ static void amd_e400_idle(void)
 void __cpuinit select_idle_routine(const struct cpuinfo_x86 *c)
 {
 #ifdef CONFIG_SMP
-	if (pm_idle == poll_idle && smp_num_siblings > 1) {
+	if (x86_idle == poll_idle && smp_num_siblings > 1)
 		pr_warn_once("WARNING: polling idle and HT enabled, performance may degrade\n");
-	}
 #endif
-	if (pm_idle)
+	if (x86_idle)
 		return;
 
-	if (cpu_has(c, X86_FEATURE_MWAIT) && mwait_usable(c)) {
-		/*
-		 * One CPU supports mwait => All CPUs supports mwait
-		 */
-		pr_info("using mwait in idle threads\n");
-		pm_idle = mwait_idle;
-	} else if (cpu_has_amd_erratum(amd_erratum_400)) {
+	if (cpu_has_amd_erratum(amd_erratum_400)) {
 		/* E400: APIC timer interrupt does not wake up CPU from C1e */
 		pr_info("using AMD E400 aware idle routine\n");
-		pm_idle = amd_e400_idle;
+		x86_idle = amd_e400_idle;
 	} else
-		pm_idle = default_idle;
+		x86_idle = default_idle;
 }
 
 void __init init_amd_e400_c1e_mask(void)
 {
 	/* If we're using amd_e400_idle, we need to allocate amd_e400_c1e_mask. */
-	if (pm_idle == amd_e400_idle)
+	if (x86_idle == amd_e400_idle)
 		zalloc_cpumask_var(&amd_e400_c1e_mask, GFP_KERNEL);
 }
 
@@ -596,11 +517,8 @@ static int __init idle_setup(char *str)
 
 	if (!strcmp(str, "poll")) {
 		pr_info("using polling idle threads\n");
-		pm_idle = poll_idle;
+		x86_idle = poll_idle;
 		boot_option_idle_override = IDLE_POLL;
-	} else if (!strcmp(str, "mwait")) {
-		boot_option_idle_override = IDLE_FORCE_MWAIT;
-		WARN_ONCE(1, "\"idle=mwait\" will be removed in 2012\n");
 	} else if (!strcmp(str, "halt")) {
 		/*
 		 * When the boot option of idle=halt is added, halt is
@@ -609,7 +527,7 @@ static int __init idle_setup(char *str)
 		 * To continue to load the CPU idle driver, don't touch
 		 * the boot_option_idle_override.
 		 */
-		pm_idle = default_idle;
+		x86_idle = default_idle;
 		boot_option_idle_override = IDLE_HALT;
 	} else if (!strcmp(str, "nomwait")) {
 		/*
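
Note on consumers of the remaining hook (not part of the patch above): with pm_idle removed and x86_idle made static to process.c, the only externally visible override left in this file is xen_set_default_idle(), built only under CONFIG_XEN, which forces the HLT-based default_idle() and reports whether an idle routine had already been chosen. The sketch below is illustrative only; example_platform_setup() is a hypothetical caller (the intended in-tree user is the Xen setup path), and the header carrying the prototype is an assumption.

#ifdef CONFIG_XEN
#include <linux/printk.h>
#include <asm/processor.h>	/* assumed location of the xen_set_default_idle() prototype */

/* Hypothetical caller -- illustrative sketch, not part of this patch. */
static void example_platform_setup(void)
{
	/*
	 * Force the HLT-based default_idle(); the return value says whether
	 * an idle routine had already been selected before the override.
	 */
	if (xen_set_default_idle())
		pr_info("idle routine was already set; now using default_idle\n");
}
#endif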