 arch/arm/mach-omap2/cpuidle34xx.c     |  2 ++
 arch/ia64/include/asm/processor.h     |  5 +++--
 arch/ia64/kernel/process.c            |  6 +-----
 arch/sh/kernel/cpu/shmobile/cpuidle.c |  1 -
 arch/x86/include/asm/processor.h      |  5 +++--
 arch/x86/kernel/process.c             | 30 ++++++------------
 arch/x86/kernel/process_32.c          |  4 ----
 arch/x86/kernel/process_64.c          |  6 ------
 drivers/acpi/processor_core.c         |  4 ++--
 drivers/acpi/processor_idle.c         | 28 +++++++----------
 drivers/cpuidle/cpuidle.c             | 92 ++++++++++++++--------------
 drivers/idle/intel_idle.c             | 69 +++++++++++++++++++++--
 include/linux/cpuidle.h               |  6 +-----
 13 files changed, 145 insertions(+), 113 deletions(-)
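
Taken together, these changes replace the three scattered idle-override globals (`idle_halt`, `idle_nomwait`, and x86's `force_mwait`) with a single enum-valued `boot_option_idle_override`, drop the write-only cpuidle state flags, hoist the power tracepoints out of individual drivers and arch idle loops into the cpuidle core, initialize the poll state at device-enable time (renaming it from "C0" to "POLL"), and teach intel_idle to manage the broadcast clockevent on CPUs without an always-running APIC timer. A minimal sketch of the consumer-side change (names come from the patch; the before/after framing and the helper below are illustrative only):

    /* Before: each idle= option had its own global flag, tested ad hoc. */
    extern unsigned long idle_halt, idle_nomwait;	/* removed by this patch */

    /* After: one global, compared against the enum idle_boot_override values. */
    extern unsigned long boot_option_idle_override;

    static int mwait_disabled_by_boot_option(void)	/* hypothetical helper */
    {
    	return boot_option_idle_override == IDLE_NOMWAIT;
    }
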
diff --git a/arch/arm/mach-omap2/cpuidle34xx.c b/arch/arm/mach-omap2/cpuidle34xx.c
index 11b89e9687f3..f7b22a16f385 100644
--- a/arch/arm/mach-omap2/cpuidle34xx.c
+++ b/arch/arm/mach-omap2/cpuidle34xx.c
@@ -47,6 +47,8 @@
 
 #define OMAP3_STATE_MAX OMAP3_STATE_C7
 
+#define CPUIDLE_FLAG_CHECK_BM	0x10000	/* use omap3_enter_idle_bm() */
+
 struct omap3_processor_cx {
 	u8 valid;
 	u8 type;
diff --git a/arch/ia64/include/asm/processor.h b/arch/ia64/include/asm/processor.h
index 348e44d08ce3..03afe7970748 100644
--- a/arch/ia64/include/asm/processor.h
+++ b/arch/ia64/include/asm/processor.h
@@ -717,8 +717,9 @@ prefetchw (const void *x)
 #define spin_lock_prefetch(x)	prefetchw(x)
 
 extern unsigned long boot_option_idle_override;
-extern unsigned long idle_halt;
-extern unsigned long idle_nomwait;
+
+enum idle_boot_override {IDLE_NO_OVERRIDE=0, IDLE_HALT, IDLE_FORCE_MWAIT,
+			 IDLE_NOMWAIT, IDLE_POLL};
 
 #endif /* !__ASSEMBLY__ */
 
diff --git a/arch/ia64/kernel/process.c b/arch/ia64/kernel/process.c
index 16f1c7b04c69..6d33c5cc94f0 100644
--- a/arch/ia64/kernel/process.c
+++ b/arch/ia64/kernel/process.c
@@ -53,12 +53,8 @@
 
 void (*ia64_mark_idle)(int);
 
-unsigned long boot_option_idle_override = 0;
+unsigned long boot_option_idle_override = IDLE_NO_OVERRIDE;
 EXPORT_SYMBOL(boot_option_idle_override);
-unsigned long idle_halt;
-EXPORT_SYMBOL(idle_halt);
-unsigned long idle_nomwait;
-EXPORT_SYMBOL(idle_nomwait);
 void (*pm_idle) (void);
 EXPORT_SYMBOL(pm_idle);
 void (*pm_power_off) (void);
diff --git a/arch/sh/kernel/cpu/shmobile/cpuidle.c b/arch/sh/kernel/cpu/shmobile/cpuidle.c
index 83972aa319c2..c19e2a940e3f 100644
--- a/arch/sh/kernel/cpu/shmobile/cpuidle.c
+++ b/arch/sh/kernel/cpu/shmobile/cpuidle.c
@@ -81,7 +81,6 @@ void sh_mobile_setup_cpuidle(void)
 	state->target_residency = 1 * 2;
 	state->power_usage = 3;
 	state->flags = 0;
-	state->flags |= CPUIDLE_FLAG_SHALLOW;
 	state->flags |= CPUIDLE_FLAG_TIME_VALID;
 	state->enter = cpuidle_sleep_enter;
 
diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
index 53fd1d5a1fe0..45636cefa186 100644
--- a/arch/x86/include/asm/processor.h
+++ b/arch/x86/include/asm/processor.h
@@ -761,10 +761,11 @@ extern void select_idle_routine(const struct cpuinfo_x86 *c);
 extern void init_c1e_mask(void);
 
 extern unsigned long boot_option_idle_override;
-extern unsigned long idle_halt;
-extern unsigned long idle_nomwait;
 extern bool c1e_detected;
 
+enum idle_boot_override {IDLE_NO_OVERRIDE=0, IDLE_HALT, IDLE_NOMWAIT,
+			 IDLE_POLL, IDLE_FORCE_MWAIT};
+
 extern void enable_sep_cpu(void);
 extern int sysenter_setup(void);
 
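
One thing to note while reviewing: the ia64 and x86 headers declare the enumerators in a different order, so the numeric values diverge between the two architectures. That is harmless, because the value never crosses an architecture boundary; shared code such as drivers/acpi compares it only symbolically. The values implied by the two hunks above (illustrative, derived from the declarations, not stated in the patch):

    /*   value   ia64                x86
     *     0     IDLE_NO_OVERRIDE    IDLE_NO_OVERRIDE
     *     1     IDLE_HALT           IDLE_HALT
     *     2     IDLE_FORCE_MWAIT    IDLE_NOMWAIT
     *     3     IDLE_NOMWAIT        IDLE_POLL
     *     4     IDLE_POLL           IDLE_FORCE_MWAIT
     */
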
diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
index 09c08a1c706f..d8286ed54ffa 100644
--- a/arch/x86/kernel/process.c
+++ b/arch/x86/kernel/process.c
@@ -22,11 +22,6 @@
 #include <asm/i387.h>
 #include <asm/debugreg.h>
 
-unsigned long idle_halt;
-EXPORT_SYMBOL(idle_halt);
-unsigned long idle_nomwait;
-EXPORT_SYMBOL(idle_nomwait);
-
 struct kmem_cache *task_xstate_cachep;
 EXPORT_SYMBOL_GPL(task_xstate_cachep);
 
@@ -327,7 +322,7 @@ long sys_execve(const char __user *name,
 /*
  * Idle related variables and functions
  */
-unsigned long boot_option_idle_override = 0;
+unsigned long boot_option_idle_override = IDLE_NO_OVERRIDE;
 EXPORT_SYMBOL(boot_option_idle_override);
 
 /*
@@ -386,6 +381,8 @@ void default_idle(void)
 		else
 			local_irq_enable();
 		current_thread_info()->status |= TS_POLLING;
+		trace_power_end(smp_processor_id());
+		trace_cpu_idle(PWR_EVENT_EXIT, smp_processor_id());
 	} else {
 		local_irq_enable();
 		/* loop is done by the caller */
@@ -443,8 +440,6 @@ EXPORT_SYMBOL_GPL(cpu_idle_wait);
 */
 void mwait_idle_with_hints(unsigned long ax, unsigned long cx)
 {
-	trace_power_start(POWER_CSTATE, (ax>>4)+1, smp_processor_id());
-	trace_cpu_idle((ax>>4)+1, smp_processor_id());
 	if (!need_resched()) {
 		if (cpu_has(__this_cpu_ptr(&cpu_info), X86_FEATURE_CLFLUSH_MONITOR))
 			clflush((void *)&current_thread_info()->flags);
@@ -471,6 +466,8 @@ static void mwait_idle(void)
 			__sti_mwait(0, 0);
 		else
 			local_irq_enable();
+		trace_power_end(smp_processor_id());
+		trace_cpu_idle(PWR_EVENT_EXIT, smp_processor_id());
 	} else
 		local_irq_enable();
 }
@@ -503,7 +500,6 @@ static void poll_idle(void)
 *
 * idle=mwait overrides this decision and forces the usage of mwait.
 */
-static int __cpuinitdata force_mwait;
 
 #define MWAIT_INFO			0x05
 #define MWAIT_ECX_EXTENDED_INFO		0x01
@@ -513,7 +509,7 @@ static int __cpuinit mwait_usable(const struct cpuinfo_x86 *c)
 {
 	u32 eax, ebx, ecx, edx;
 
-	if (force_mwait)
+	if (boot_option_idle_override == IDLE_FORCE_MWAIT)
 		return 1;
 
 	if (c->cpuid_level < MWAIT_INFO)
@@ -633,9 +629,10 @@ static int __init idle_setup(char *str)
 	if (!strcmp(str, "poll")) {
 		printk("using polling idle threads.\n");
 		pm_idle = poll_idle;
-	} else if (!strcmp(str, "mwait"))
-		force_mwait = 1;
-	else if (!strcmp(str, "halt")) {
+		boot_option_idle_override = IDLE_POLL;
+	} else if (!strcmp(str, "mwait")) {
+		boot_option_idle_override = IDLE_FORCE_MWAIT;
+	} else if (!strcmp(str, "halt")) {
 		/*
 		 * When the boot option of idle=halt is added, halt is
 		 * forced to be used for CPU idle. In such case CPU C2/C3
@@ -644,8 +641,7 @@ static int __init idle_setup(char *str)
 		 * the boot_option_idle_override.
 		 */
 		pm_idle = default_idle;
-		idle_halt = 1;
-		return 0;
+		boot_option_idle_override = IDLE_HALT;
 	} else if (!strcmp(str, "nomwait")) {
 		/*
 		 * If the boot option of "idle=nomwait" is added,
@@ -653,12 +649,10 @@ static int __init idle_setup(char *str)
 		 * states. In such case it won't touch the variable
 		 * of boot_option_idle_override.
 		 */
-		idle_nomwait = 1;
-		return 0;
+		boot_option_idle_override = IDLE_NOMWAIT;
 	} else
 		return -1;
 
-	boot_option_idle_override = 1;
 	return 0;
 }
 early_param("idle", idle_setup);
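
Read together, the idle_setup() hunks leave the parser in the shape below (a reconstruction from the hunks above, with unchanged comments abbreviated; the NULL-string guard is assumed from surrounding code not shown in the diff). The early `return 0` statements are gone: every recognized option now falls through to the common tail, and each branch records exactly one enum value.

    static int __init idle_setup(char *str)
    {
    	if (!str)
    		return -EINVAL;	/* assumed context, not in the hunks */

    	if (!strcmp(str, "poll")) {
    		printk("using polling idle threads.\n");
    		pm_idle = poll_idle;
    		boot_option_idle_override = IDLE_POLL;
    	} else if (!strcmp(str, "mwait")) {
    		boot_option_idle_override = IDLE_FORCE_MWAIT;
    	} else if (!strcmp(str, "halt")) {
    		/* ... */
    		pm_idle = default_idle;
    		boot_option_idle_override = IDLE_HALT;
    	} else if (!strcmp(str, "nomwait")) {
    		/* ... */
    		boot_option_idle_override = IDLE_NOMWAIT;
    	} else
    		return -1;

    	return 0;
    }
    early_param("idle", idle_setup);
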
diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c
index 4b9befa0e347..8d128783af47 100644
--- a/arch/x86/kernel/process_32.c
+++ b/arch/x86/kernel/process_32.c
@@ -57,8 +57,6 @@
 #include <asm/syscalls.h>
 #include <asm/debugreg.h>
 
-#include <trace/events/power.h>
-
 asmlinkage void ret_from_fork(void) __asm__("ret_from_fork");
 
 /*
@@ -113,8 +111,6 @@ void cpu_idle(void)
 			stop_critical_timings();
 			pm_idle();
 			start_critical_timings();
-			trace_power_end(smp_processor_id());
-			trace_cpu_idle(PWR_EVENT_EXIT, smp_processor_id());
 		}
 		tick_nohz_restart_sched_tick();
 		preempt_enable_no_resched();
diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
index 4c818a738396..bd387e8f73b4 100644
--- a/arch/x86/kernel/process_64.c
+++ b/arch/x86/kernel/process_64.c
@@ -51,8 +51,6 @@
 #include <asm/syscalls.h>
 #include <asm/debugreg.h>
 
-#include <trace/events/power.h>
-
 asmlinkage extern void ret_from_fork(void);
 
 DEFINE_PER_CPU(unsigned long, old_rsp);
@@ -141,10 +139,6 @@ void cpu_idle(void)
 			pm_idle();
 			start_critical_timings();
 
-			trace_power_end(smp_processor_id());
-			trace_cpu_idle(PWR_EVENT_EXIT,
-				       smp_processor_id());
-
 			/* In many cases the interrupt that ended idle
 			   has already called exit_idle. But some idle
 			   loops can be woken up without interrupt. */
diff --git a/drivers/acpi/processor_core.c b/drivers/acpi/processor_core.c
index bec561c14beb..3c1a2fec8cda 100644
--- a/drivers/acpi/processor_core.c
+++ b/drivers/acpi/processor_core.c
@@ -23,7 +23,7 @@ static int set_no_mwait(const struct dmi_system_id *id)
 {
 	printk(KERN_NOTICE PREFIX "%s detected - "
 		"disabling mwait for CPU C-states\n", id->ident);
-	idle_nomwait = 1;
+	boot_option_idle_override = IDLE_NOMWAIT;
 	return 0;
 }
 
@@ -283,7 +283,7 @@ acpi_processor_eval_pdc(acpi_handle handle, struct acpi_object_list *pdc_in)
 {
 	acpi_status status = AE_OK;
 
-	if (idle_nomwait) {
+	if (boot_option_idle_override == IDLE_NOMWAIT) {
 		/*
 		 * If mwait is disabled for CPU C-states, the C2C3_FFH access
 		 * mode will be disabled in the parameter of _PDC object.
diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c
index a765b823aa9e..d615b7d69bca 100644
--- a/drivers/acpi/processor_idle.c
+++ b/drivers/acpi/processor_idle.c
@@ -79,6 +79,13 @@ module_param(bm_check_disable, uint, 0000);
 static unsigned int latency_factor __read_mostly = 2;
 module_param(latency_factor, uint, 0644);
 
+static int disabled_by_idle_boot_param(void)
+{
+	return boot_option_idle_override == IDLE_POLL ||
+		boot_option_idle_override == IDLE_FORCE_MWAIT ||
+		boot_option_idle_override == IDLE_HALT;
+}
+
 /*
  * IBM ThinkPad R40e crashes mysteriously when going into C2 or C3.
  * For now disable this. Probably a bug somewhere else.
@@ -455,7 +462,7 @@ static int acpi_processor_get_power_info_cst(struct acpi_processor *pr)
 				continue;
 			}
 			if (cx.type == ACPI_STATE_C1 &&
-			    (idle_halt || idle_nomwait)) {
+			    (boot_option_idle_override == IDLE_NOMWAIT)) {
 				/*
 				 * In most cases the C1 space_id obtained from
 				 * _CST object is FIXED_HARDWARE access mode.
@@ -1016,7 +1023,6 @@ static int acpi_processor_setup_cpuidle(struct acpi_processor *pr)
 		state->flags = 0;
 		switch (cx->type) {
 			case ACPI_STATE_C1:
-			state->flags |= CPUIDLE_FLAG_SHALLOW;
 			if (cx->entry_method == ACPI_CSTATE_FFH)
 				state->flags |= CPUIDLE_FLAG_TIME_VALID;
 
@@ -1025,16 +1031,13 @@ static int acpi_processor_setup_cpuidle(struct acpi_processor *pr)
 			break;
 
 			case ACPI_STATE_C2:
-			state->flags |= CPUIDLE_FLAG_BALANCED;
 			state->flags |= CPUIDLE_FLAG_TIME_VALID;
 			state->enter = acpi_idle_enter_simple;
 			dev->safe_state = state;
 			break;
 
 			case ACPI_STATE_C3:
-			state->flags |= CPUIDLE_FLAG_DEEP;
 			state->flags |= CPUIDLE_FLAG_TIME_VALID;
-			state->flags |= CPUIDLE_FLAG_CHECK_BM;
 			state->enter = pr->flags.bm_check ?
 					acpi_idle_enter_bm :
 					acpi_idle_enter_simple;
@@ -1058,7 +1061,7 @@ int acpi_processor_cst_has_changed(struct acpi_processor *pr)
 {
 	int ret = 0;
 
-	if (boot_option_idle_override)
+	if (disabled_by_idle_boot_param())
 		return 0;
 
 	if (!pr)
@@ -1089,19 +1092,10 @@ int __cpuinit acpi_processor_power_init(struct acpi_processor *pr,
 	acpi_status status = 0;
 	static int first_run;
 
-	if (boot_option_idle_override)
+	if (disabled_by_idle_boot_param())
 		return 0;
 
 	if (!first_run) {
-		if (idle_halt) {
-			/*
-			 * When the boot option of "idle=halt" is added, halt
-			 * is used for CPU IDLE.
-			 * In such case C2/C3 is meaningless. So the max_cstate
-			 * is set to one.
-			 */
-			max_cstate = 1;
-		}
 		dmi_check_system(processor_power_dmi_table);
 		max_cstate = acpi_processor_cstate_check(max_cstate);
 		if (max_cstate < ACPI_C_STATES_MAX)
@@ -1142,7 +1136,7 @@ int __cpuinit acpi_processor_power_init(struct acpi_processor *pr,
 int acpi_processor_power_exit(struct acpi_processor *pr,
 			      struct acpi_device *device)
 {
-	if (boot_option_idle_override)
+	if (disabled_by_idle_boot_param())
 		return 0;
 
 	cpuidle_unregister_device(&pr->power.dev);
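
Note the asymmetry in disabled_by_idle_boot_param(): IDLE_NOMWAIT is deliberately not listed. `idle=nomwait` only forbids the MWAIT (FFH) entry method, so the ACPI idle driver still registers and keeps entering C-states through I/O port reads; only `idle=poll`, `idle=mwait`, and `idle=halt` suppress the driver, where the old code bailed out on any nonzero `boot_option_idle_override`. The resulting guard pattern, sketched with abbreviated context:

    /* Sketch of the early exit now shared by the ACPI idle entry
     * points (init, exit, and the _CST-change notifier): */
    int acpi_processor_power_init(struct acpi_processor *pr,
    			      struct acpi_device *device)
    {
    	if (disabled_by_idle_boot_param())	/* poll, mwait, halt */
    		return 0;
    	/* ... normal setup; idle=nomwait still reaches this point ... */
    	return 0;
    }
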
diff --git a/drivers/cpuidle/cpuidle.c b/drivers/cpuidle/cpuidle.c
index 386888f10df0..bf5092455a8f 100644
--- a/drivers/cpuidle/cpuidle.c
+++ b/drivers/cpuidle/cpuidle.c
@@ -96,7 +96,15 @@ static void cpuidle_idle_call(void)
 
 	/* enter the state and update stats */
 	dev->last_state = target_state;
+
+	trace_power_start(POWER_CSTATE, next_state, dev->cpu);
+	trace_cpu_idle(next_state, dev->cpu);
+
 	dev->last_residency = target_state->enter(dev, target_state);
+
+	trace_power_end(dev->cpu);
+	trace_cpu_idle(PWR_EVENT_EXIT, dev->cpu);
+
 	if (dev->last_state)
 		target_state = dev->last_state;
 
@@ -106,8 +114,6 @@ static void cpuidle_idle_call(void)
 	/* give the governor an opportunity to reflect on the outcome */
 	if (cpuidle_curr_governor->reflect)
 		cpuidle_curr_governor->reflect(dev);
-	trace_power_end(smp_processor_id());
-	trace_cpu_idle(PWR_EVENT_EXIT, smp_processor_id());
 }
 
 /**
@@ -155,6 +161,45 @@ void cpuidle_resume_and_unlock(void)
 
 EXPORT_SYMBOL_GPL(cpuidle_resume_and_unlock);
 
+#ifdef CONFIG_ARCH_HAS_CPU_RELAX
+static int poll_idle(struct cpuidle_device *dev, struct cpuidle_state *st)
+{
+	ktime_t	t1, t2;
+	s64 diff;
+	int ret;
+
+	t1 = ktime_get();
+	local_irq_enable();
+	while (!need_resched())
+		cpu_relax();
+
+	t2 = ktime_get();
+	diff = ktime_to_us(ktime_sub(t2, t1));
+	if (diff > INT_MAX)
+		diff = INT_MAX;
+
+	ret = (int) diff;
+	return ret;
+}
+
+static void poll_idle_init(struct cpuidle_device *dev)
+{
+	struct cpuidle_state *state = &dev->states[0];
+
+	cpuidle_set_statedata(state, NULL);
+
+	snprintf(state->name, CPUIDLE_NAME_LEN, "POLL");
+	snprintf(state->desc, CPUIDLE_DESC_LEN, "CPUIDLE CORE POLL IDLE");
+	state->exit_latency = 0;
+	state->target_residency = 0;
+	state->power_usage = -1;
+	state->flags = 0;
+	state->enter = poll_idle;
+}
+#else
+static void poll_idle_init(struct cpuidle_device *dev) {}
+#endif /* CONFIG_ARCH_HAS_CPU_RELAX */
+
 /**
 * cpuidle_enable_device - enables idle PM for a CPU
 * @dev: the CPU
@@ -179,6 +224,8 @@ int cpuidle_enable_device(struct cpuidle_device *dev)
 			return ret;
 	}
 
+	poll_idle_init(dev);
+
 	if ((ret = cpuidle_add_state_sysfs(dev)))
 		return ret;
 
@@ -233,45 +280,6 @@ void cpuidle_disable_device(struct cpuidle_device *dev)
 
 EXPORT_SYMBOL_GPL(cpuidle_disable_device);
 
-#ifdef CONFIG_ARCH_HAS_CPU_RELAX
-static int poll_idle(struct cpuidle_device *dev, struct cpuidle_state *st)
-{
-	ktime_t	t1, t2;
-	s64 diff;
-	int ret;
-
-	t1 = ktime_get();
-	local_irq_enable();
-	while (!need_resched())
-		cpu_relax();
-
-	t2 = ktime_get();
-	diff = ktime_to_us(ktime_sub(t2, t1));
-	if (diff > INT_MAX)
-		diff = INT_MAX;
-
-	ret = (int) diff;
-	return ret;
-}
-
-static void poll_idle_init(struct cpuidle_device *dev)
-{
-	struct cpuidle_state *state = &dev->states[0];
-
-	cpuidle_set_statedata(state, NULL);
-
-	snprintf(state->name, CPUIDLE_NAME_LEN, "C0");
-	snprintf(state->desc, CPUIDLE_DESC_LEN, "CPUIDLE CORE POLL IDLE");
-	state->exit_latency = 0;
-	state->target_residency = 0;
-	state->power_usage = -1;
-	state->flags = CPUIDLE_FLAG_POLL;
-	state->enter = poll_idle;
-}
-#else
-static void poll_idle_init(struct cpuidle_device *dev) {}
-#endif /* CONFIG_ARCH_HAS_CPU_RELAX */
-
 /**
 * __cpuidle_register_device - internal register function called before register
 *			       and enable routines
@@ -292,8 +300,6 @@ static int __cpuidle_register_device(struct cpuidle_device *dev)
 
 	init_completion(&dev->kobj_unregister);
 
-	poll_idle_init(dev);
-
 	/*
 	 * cpuidle driver should set the dev->power_specified bit
 	 * before registering the device if the driver provides
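
With the tracepoints hoisted into cpuidle_idle_call(), every cpuidle driver gets consistent power tracing for free, bracketing the ->enter() callback; the corresponding calls are deleted from intel_idle and the x86 idle loops elsewhere in this patch. A hypothetical driver's ->enter() therefore shrinks to timing plus the state entry itself (sketch; `my_driver_idle` and the use of safe_halt() are illustrative, not from the patch):

    /* Illustrative cpuidle ->enter() after this change: no tracepoints. */
    static int my_driver_idle(struct cpuidle_device *dev,
    			  struct cpuidle_state *state)
    {
    	ktime_t before = ktime_get_real();

    	safe_halt();	/* enter the low-power state; the core traces around us */

    	/* drivers still report residency in microseconds, as an int */
    	return ktime_to_us(ktime_sub(ktime_get_real(), before));
    }

Moving poll_idle_init() from __cpuidle_register_device() to cpuidle_enable_device() also means state[0] is rebuilt on every enable, and it now appears in sysfs as "POLL" rather than the misleading "C0", with the private CPUIDLE_FLAG_POLL no longer needed.
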
diff --git a/drivers/idle/intel_idle.c b/drivers/idle/intel_idle.c
index 56ac09d6c930..7acb32e7f817 100644
--- a/drivers/idle/intel_idle.c
+++ b/drivers/idle/intel_idle.c
@@ -59,6 +59,8 @@
 #include <linux/hrtimer.h>	/* ktime_get_real() */
 #include <trace/events/power.h>
 #include <linux/sched.h>
+#include <linux/notifier.h>
+#include <linux/cpu.h>
 #include <asm/mwait.h>
 
 #define INTEL_IDLE_VERSION "0.4"
@@ -73,6 +75,7 @@ static int max_cstate = MWAIT_MAX_NUM_CSTATES - 1;
 
 static unsigned int mwait_substates;
 
+#define LAPIC_TIMER_ALWAYS_RELIABLE 0xFFFFFFFF
 /* Reliable LAPIC Timer States, bit 1 for C1 etc.  */
 static unsigned int lapic_timer_reliable_states = (1 << 1); /* Default to only C1 */
 
@@ -82,6 +85,14 @@ static int intel_idle(struct cpuidle_device *dev, struct cpuidle_state *state);
 static struct cpuidle_state *cpuidle_state_table;
 
 /*
+ * Set this flag for states where the HW flushes the TLB for us
+ * and so we don't need cross-calls to keep it consistent.
+ * If this flag is set, SW flushes the TLB, so even if the
+ * HW doesn't do the flushing, this flag is safe to use.
+ */
+#define CPUIDLE_FLAG_TLB_FLUSHED 0x10000
+
+/*
 * States are indexed by the cstate number,
 * which is also the index into the MWAIT hint array.
 * Thus C0 is a dummy.
@@ -122,7 +133,7 @@ static struct cpuidle_state snb_cstates[MWAIT_MAX_NUM_CSTATES] = {
 		.driver_data = (void *) 0x00,
 		.flags = CPUIDLE_FLAG_TIME_VALID,
 		.exit_latency = 1,
-		.target_residency = 4,
+		.target_residency = 1,
 		.enter = &intel_idle },
 	{ /* MWAIT C2 */
 		.name = "SNB-C3",
@@ -130,7 +141,7 @@ static struct cpuidle_state snb_cstates[MWAIT_MAX_NUM_CSTATES] = {
 		.driver_data = (void *) 0x10,
 		.flags = CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED,
 		.exit_latency = 80,
-		.target_residency = 160,
+		.target_residency = 211,
 		.enter = &intel_idle },
 	{ /* MWAIT C3 */
 		.name = "SNB-C6",
@@ -138,7 +149,7 @@ static struct cpuidle_state snb_cstates[MWAIT_MAX_NUM_CSTATES] = {
 		.driver_data = (void *) 0x20,
 		.flags = CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED,
 		.exit_latency = 104,
-		.target_residency = 208,
+		.target_residency = 345,
 		.enter = &intel_idle },
 	{ /* MWAIT C4 */
 		.name = "SNB-C7",
@@ -146,7 +157,7 @@ static struct cpuidle_state snb_cstates[MWAIT_MAX_NUM_CSTATES] = {
 		.driver_data = (void *) 0x30,
 		.flags = CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED,
 		.exit_latency = 109,
-		.target_residency = 300,
+		.target_residency = 345,
 		.enter = &intel_idle },
 };
 
@@ -220,8 +231,6 @@ static int intel_idle(struct cpuidle_device *dev, struct cpuidle_state *state)
 	kt_before = ktime_get_real();
 
 	stop_critical_timings();
-	trace_power_start(POWER_CSTATE, (eax >> 4) + 1, cpu);
-	trace_cpu_idle((eax >> 4) + 1, cpu);
 	if (!need_resched()) {
 
 		__monitor((void *)&current_thread_info()->flags, 0, 0);
@@ -243,6 +252,39 @@ static int intel_idle(struct cpuidle_device *dev, struct cpuidle_state *state)
 	return usec_delta;
 }
 
+static void __setup_broadcast_timer(void *arg)
+{
+	unsigned long reason = (unsigned long)arg;
+	int cpu = smp_processor_id();
+
+	reason = reason ?
+		CLOCK_EVT_NOTIFY_BROADCAST_ON : CLOCK_EVT_NOTIFY_BROADCAST_OFF;
+
+	clockevents_notify(reason, &cpu);
+}
+
+static int __cpuinit setup_broadcast_cpuhp_notify(struct notifier_block *n,
+		unsigned long action, void *hcpu)
+{
+	int hotcpu = (unsigned long)hcpu;
+
+	switch (action & 0xf) {
+	case CPU_ONLINE:
+		smp_call_function_single(hotcpu, __setup_broadcast_timer,
+			(void *)true, 1);
+		break;
+	case CPU_DOWN_PREPARE:
+		smp_call_function_single(hotcpu, __setup_broadcast_timer,
+			(void *)false, 1);
+		break;
+	}
+	return NOTIFY_OK;
+}
+
+static struct notifier_block __cpuinitdata setup_broadcast_notifier = {
+	.notifier_call = setup_broadcast_cpuhp_notify,
+};
+
 /*
 * intel_idle_probe()
 */
@@ -305,7 +347,11 @@ static int intel_idle_probe(void)
 	}
 
 	if (boot_cpu_has(X86_FEATURE_ARAT))	/* Always Reliable APIC Timer */
-		lapic_timer_reliable_states = 0xFFFFFFFF;
+		lapic_timer_reliable_states = LAPIC_TIMER_ALWAYS_RELIABLE;
+	else {
+		smp_call_function(__setup_broadcast_timer, (void *)true, 1);
+		register_cpu_notifier(&setup_broadcast_notifier);
+	}
 
 	pr_debug(PREFIX "v" INTEL_IDLE_VERSION
 		" model 0x%X\n", boot_cpu_data.x86_model);
@@ -403,6 +449,10 @@ static int __init intel_idle_init(void)
 {
 	int retval;
 
+	/* Do not load intel_idle at all for now if idle= is passed */
+	if (boot_option_idle_override != IDLE_NO_OVERRIDE)
+		return -ENODEV;
+
 	retval = intel_idle_probe();
 	if (retval)
 		return retval;
@@ -428,6 +478,11 @@ static void __exit intel_idle_exit(void)
 	intel_idle_cpuidle_devices_uninit();
 	cpuidle_unregister_driver(&intel_idle_driver);
 
+	if (lapic_timer_reliable_states != LAPIC_TIMER_ALWAYS_RELIABLE) {
+		smp_call_function(__setup_broadcast_timer, (void *)false, 1);
+		unregister_cpu_notifier(&setup_broadcast_notifier);
+	}
+
 	return;
 }
 
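
The new notifier covers CPUs whose LAPIC timer stops in deep C-states (no ARAT): at probe time every online CPU is switched to the broadcast clockevent, and CPUs that come and go afterwards are kept in sync through the hotplug notifier, with `(void *)true`/`(void *)false` decoded back to on/off in __setup_broadcast_timer(). The `action & 0xf` mask strips the CPU_TASKS_FROZEN modifier so the suspend/resume variants of the hotplug events take the same path (illustrative check below; `is_online_action` is not part of the patch):

    #include <linux/cpu.h>

    /* CPU_ONLINE_FROZEN is CPU_ONLINE | CPU_TASKS_FROZEN (0x10), so
     * masking off the modifier folds both into one case label: */
    static bool is_online_action(unsigned long action)
    {
    	return (action & ~CPU_TASKS_FROZEN) == CPU_ONLINE;
    }
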
diff --git a/include/linux/cpuidle.h b/include/linux/cpuidle.h
index 1be416bbbb82..36719ead50e8 100644
--- a/include/linux/cpuidle.h
+++ b/include/linux/cpuidle.h
@@ -47,13 +47,7 @@ struct cpuidle_state {
 
 /* Idle State Flags */
 #define CPUIDLE_FLAG_TIME_VALID	(0x01) /* is residency time measurable? */
-#define CPUIDLE_FLAG_CHECK_BM	(0x02) /* BM activity will exit state */
-#define CPUIDLE_FLAG_POLL	(0x10) /* no latency, no savings */
-#define CPUIDLE_FLAG_SHALLOW	(0x20) /* low latency, minimal savings */
-#define CPUIDLE_FLAG_BALANCED	(0x40) /* medium latency, moderate savings */
-#define CPUIDLE_FLAG_DEEP	(0x80) /* high latency, large savings */
 #define CPUIDLE_FLAG_IGNORE	(0x100) /* ignore during this idle period */
-#define CPUIDLE_FLAG_TLB_FLUSHED (0x200) /* tlb will be flushed */
 
 #define CPUIDLE_DRIVER_FLAGS_MASK	(0xFFFF0000)
 
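
The deleted flags were set by drivers but never read by the cpuidle core or any governor, which is why the rest of the series can delete every setter (sh, ACPI) without changing behavior. The two flags a driver still wants move into that driver at value 0x10000, inside CPUIDLE_DRIVER_FLAGS_MASK (0xFFFF0000), the range reserved for driver-private use. A sketch of the resulting invariant (the compile-time check is illustrative, not in the patch):

    #define CPUIDLE_DRIVER_FLAGS_MASK	(0xFFFF0000)

    /* now private to their drivers; same value, different drivers: */
    #define CPUIDLE_FLAG_TLB_FLUSHED	0x10000	/* drivers/idle/intel_idle.c */
    #define CPUIDLE_FLAG_CHECK_BM	0x10000	/* arch/arm/mach-omap2/cpuidle34xx.c */

    /* compile-time check that a private flag stays out of the core's
     * flag space (negative array size on violation): */
    typedef char tlb_flag_is_driver_private
    	[(CPUIDLE_FLAG_TLB_FLUSHED & ~CPUIDLE_DRIVER_FLAGS_MASK) ? -1 : 1];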
