Diffstat (limited to 'arch/x86/kernel/process.c')
-rw-r--r--  arch/x86/kernel/process.c  33  ++++++++++++++-------------------
1 file changed, 14 insertions(+), 19 deletions(-)
diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
index 09c08a1c706f..e764fc05d700 100644
--- a/arch/x86/kernel/process.c
+++ b/arch/x86/kernel/process.c
@@ -14,6 +14,7 @@
 #include <linux/utsname.h>
 #include <trace/events/power.h>
 #include <linux/hw_breakpoint.h>
+#include <asm/cpu.h>
 #include <asm/system.h>
 #include <asm/apic.h>
 #include <asm/syscalls.h>
@@ -22,11 +23,6 @@
 #include <asm/i387.h>
 #include <asm/debugreg.h>
 
-unsigned long idle_halt;
-EXPORT_SYMBOL(idle_halt);
-unsigned long idle_nomwait;
-EXPORT_SYMBOL(idle_nomwait);
-
 struct kmem_cache *task_xstate_cachep;
 EXPORT_SYMBOL_GPL(task_xstate_cachep);
 
@@ -327,7 +323,7 @@ long sys_execve(const char __user *name,
 /*
  * Idle related variables and functions
  */
-unsigned long boot_option_idle_override = 0;
+unsigned long boot_option_idle_override = IDLE_NO_OVERRIDE;
 EXPORT_SYMBOL(boot_option_idle_override);
 
 /*
@@ -386,6 +382,8 @@ void default_idle(void)
 		else
 			local_irq_enable();
 		current_thread_info()->status |= TS_POLLING;
+		trace_power_end(smp_processor_id());
+		trace_cpu_idle(PWR_EVENT_EXIT, smp_processor_id());
 	} else {
 		local_irq_enable();
 		/* loop is done by the caller */
@@ -443,8 +441,6 @@ EXPORT_SYMBOL_GPL(cpu_idle_wait);
  */
 void mwait_idle_with_hints(unsigned long ax, unsigned long cx)
 {
-	trace_power_start(POWER_CSTATE, (ax>>4)+1, smp_processor_id());
-	trace_cpu_idle((ax>>4)+1, smp_processor_id());
 	if (!need_resched()) {
 		if (cpu_has(__this_cpu_ptr(&cpu_info), X86_FEATURE_CLFLUSH_MONITOR))
 			clflush((void *)&current_thread_info()->flags);
@@ -471,6 +467,8 @@ static void mwait_idle(void)
 			__sti_mwait(0, 0);
 		else
 			local_irq_enable();
+		trace_power_end(smp_processor_id());
+		trace_cpu_idle(PWR_EVENT_EXIT, smp_processor_id());
 	} else
 		local_irq_enable();
 }
@@ -503,17 +501,16 @@ static void poll_idle(void)
  *
  * idle=mwait overrides this decision and forces the usage of mwait.
  */
-static int __cpuinitdata force_mwait;
 
 #define MWAIT_INFO			0x05
 #define MWAIT_ECX_EXTENDED_INFO		0x01
 #define MWAIT_EDX_C1			0xf0
 
-static int __cpuinit mwait_usable(const struct cpuinfo_x86 *c)
+int __cpuinit mwait_usable(const struct cpuinfo_x86 *c)
 {
 	u32 eax, ebx, ecx, edx;
 
-	if (force_mwait)
+	if (boot_option_idle_override == IDLE_FORCE_MWAIT)
 		return 1;
 
 	if (c->cpuid_level < MWAIT_INFO)
@@ -633,9 +630,10 @@ static int __init idle_setup(char *str)
 	if (!strcmp(str, "poll")) {
 		printk("using polling idle threads.\n");
 		pm_idle = poll_idle;
-	} else if (!strcmp(str, "mwait"))
-		force_mwait = 1;
-	else if (!strcmp(str, "halt")) {
+		boot_option_idle_override = IDLE_POLL;
+	} else if (!strcmp(str, "mwait")) {
+		boot_option_idle_override = IDLE_FORCE_MWAIT;
+	} else if (!strcmp(str, "halt")) {
 		/*
 		 * When the boot option of idle=halt is added, halt is
 		 * forced to be used for CPU idle. In such case CPU C2/C3
@@ -644,8 +642,7 @@ static int __init idle_setup(char *str)
 		 * the boot_option_idle_override.
 		 */
 		pm_idle = default_idle;
-		idle_halt = 1;
-		return 0;
+		boot_option_idle_override = IDLE_HALT;
 	} else if (!strcmp(str, "nomwait")) {
 		/*
 		 * If the boot option of "idle=nomwait" is added,
@@ -653,12 +650,10 @@ static int __init idle_setup(char *str)
 		 * states. In such case it won't touch the variable
 		 * of boot_option_idle_override.
 		 */
-		idle_nomwait = 1;
-		return 0;
+		boot_option_idle_override = IDLE_NOMWAIT;
 	} else
 		return -1;
 
-	boot_option_idle_override = 1;
 	return 0;
 }
 early_param("idle", idle_setup);
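
The IDLE_* values used above (IDLE_NO_OVERRIDE, IDLE_POLL, IDLE_FORCE_MWAIT, IDLE_HALT, IDLE_NOMWAIT) are not defined in this file; the patch assumes a companion header change, presumably in arch/x86/include/asm/processor.h, that introduces them and declares the now non-static mwait_usable(). A minimal sketch of what those assumed declarations would look like:

/* Sketch only: assumed companion declarations, not part of this hunk. */
enum idle_boot_override { IDLE_NO_OVERRIDE = 0, IDLE_HALT, IDLE_NOMWAIT,
			  IDLE_POLL, IDLE_FORCE_MWAIT };

extern unsigned long boot_option_idle_override;	/* set by the idle= parsing above */
extern int mwait_usable(const struct cpuinfo_x86 *c);	/* was static before this patch */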