 Documentation/kernel-parameters.txt | 11
 arch/x86/include/asm/processor.h    | 18
 arch/x86/kernel/cpu/bugs.c          | 27
 arch/x86/kernel/cpu/proc.c          |  2
 arch/x86/kernel/process.c           | 89
 arch/x86/kernel/smpboot.c           |  2
 arch/x86/xen/setup.c                |  5
 drivers/acpi/processor_idle.c       |  1
 8 files changed, 15 insertions(+), 140 deletions(-)
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
index 363e348bff9b..109ee45cf20d 100644
--- a/Documentation/kernel-parameters.txt
+++ b/Documentation/kernel-parameters.txt
@@ -1039,16 +1039,11 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
 			Claim all unknown PCI IDE storage controllers.
 
 	idle=		[X86]
-			Format: idle=poll, idle=mwait, idle=halt, idle=nomwait
+			Format: idle=poll, idle=halt, idle=nomwait
 			Poll forces a polling idle loop that can slightly
 			improve the performance of waking up a idle CPU, but
 			will use a lot of power and make the system run hot.
 			Not recommended.
-			idle=mwait: On systems which support MONITOR/MWAIT but
-			the kernel chose to not use it because it doesn't save
-			as much power as a normal idle loop, use the
-			MONITOR/MWAIT idle loop anyways. Performance should be
-			the same as idle=poll.
 			idle=halt: Halt is forced to be used for CPU idle.
 			In such case C2/C3 won't be used again.
 			idle=nomwait: Disable mwait for CPU C-states
@@ -1886,10 +1881,6 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
 			wfi(ARM) instruction doesn't work correctly and not to
 			use it. This is also useful when using JTAG debugger.
 
-	no-hlt		[BUGS=X86-32] Tells the kernel that the hlt
-			instruction doesn't work correctly and not to
-			use it.
-
 	no_file_caps	Tells the kernel not to honor file capabilities. The
 			only way then for a file to be executed with privilege
 			is to be setuid root or executed by root.
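
With idle=mwait removed, idle=poll, idle=halt and idle=nomwait are the only overrides left on the kernel command line. Purely as an illustration (not part of this patch), a bootloader entry selecting one of the surviving options could look like:

	linux /boot/vmlinuz root=/dev/sda1 ro idle=halt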
diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
index 888184b2fc85..b9e7d279f8ef 100644
--- a/arch/x86/include/asm/processor.h
+++ b/arch/x86/include/asm/processor.h
@@ -89,7 +89,6 @@ struct cpuinfo_x86 {
 	char			wp_works_ok;	/* It doesn't on 386's */
 
 	/* Problems on some 486Dx4's and old 386's: */
-	char			hlt_works_ok;
 	char			hard_math;
 	char			rfu;
 	char			fdiv_bug;
@@ -165,15 +164,6 @@ DECLARE_PER_CPU_SHARED_ALIGNED(struct cpuinfo_x86, cpu_info);
 
 extern const struct seq_operations cpuinfo_op;
 
-static inline int hlt_works(int cpu)
-{
-#ifdef CONFIG_X86_32
-	return cpu_data(cpu).hlt_works_ok;
-#else
-	return 1;
-#endif
-}
-
 #define cache_line_size()	(boot_cpu_data.x86_cache_alignment)
 
 extern void cpu_detect(struct cpuinfo_x86 *c);
@@ -725,7 +715,7 @@ extern unsigned long boot_option_idle_override;
 extern bool amd_e400_c1e_detected;
 
 enum idle_boot_override {IDLE_NO_OVERRIDE=0, IDLE_HALT, IDLE_NOMWAIT,
-			 IDLE_POLL, IDLE_FORCE_MWAIT};
+			 IDLE_POLL};
 
 extern void enable_sep_cpu(void);
 extern int sysenter_setup(void);
@@ -998,7 +988,11 @@ extern unsigned long arch_align_stack(unsigned long sp);
 extern void free_init_pages(char *what, unsigned long begin, unsigned long end);
 
 void default_idle(void);
-bool set_pm_idle_to_default(void);
+#ifdef CONFIG_XEN
+bool xen_set_default_idle(void);
+#else
+#define xen_set_default_idle 0
+#endif
 
 void stop_this_cpu(void *dummy);
 
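
The declaration block added above pairs a real prototype under CONFIG_XEN with a fallback define of 0, so a translation unit can mention xen_set_default_idle without growing a hard Xen dependency; the caller changed later in this patch, xen_arch_setup() in arch/x86/xen/setup.c, is itself built only when CONFIG_XEN is set. A minimal stand-alone sketch of that configuration-stub pattern (a function-like macro variant; the names are illustrative, not taken from the kernel):

	#include <stdio.h>

	#define CONFIG_FEATURE			/* comment out to exercise the stub path */

	#ifdef CONFIG_FEATURE
	static int feature_init(void)
	{
		puts("feature initialized");
		return 0;			/* 0 = nothing to warn about */
	}
	#else
	#define feature_init()	0		/* stub: expands to constant 0, no code emitted */
	#endif

	int main(void)
	{
		if (feature_init())		/* analogous to WARN_ON(xen_set_default_idle()) */
			puts("warning: feature reported a problem");
		return 0;
	}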
diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c
index 92dfec986a48..af6455e3fcc9 100644
--- a/arch/x86/kernel/cpu/bugs.c
+++ b/arch/x86/kernel/cpu/bugs.c
@@ -17,15 +17,6 @@
 #include <asm/paravirt.h>
 #include <asm/alternative.h>
 
-static int __init no_halt(char *s)
-{
-	WARN_ONCE(1, "\"no-hlt\" is deprecated, please use \"idle=poll\"\n");
-	boot_cpu_data.hlt_works_ok = 0;
-	return 1;
-}
-
-__setup("no-hlt", no_halt);
-
 static int __init no_387(char *s)
 {
 	boot_cpu_data.hard_math = 0;
@@ -89,23 +80,6 @@ static void __init check_fpu(void)
 	pr_warn("Hmm, FPU with FDIV bug\n");
 }
 
-static void __init check_hlt(void)
-{
-	if (boot_cpu_data.x86 >= 5 || paravirt_enabled())
-		return;
-
-	pr_info("Checking 'hlt' instruction... ");
-	if (!boot_cpu_data.hlt_works_ok) {
-		pr_cont("disabled\n");
-		return;
-	}
-	halt();
-	halt();
-	halt();
-	halt();
-	pr_cont("OK\n");
-}
-
 /*
  * Check whether we are able to run this kernel safely on SMP.
  *
@@ -129,7 +103,6 @@ void __init check_bugs(void)
 	print_cpu_info(&boot_cpu_data);
 #endif
 	check_config();
-	check_hlt();
 	init_utsname()->machine[1] =
 		'0' + (boot_cpu_data.x86 > 6 ? 6 : boot_cpu_data.x86);
 	alternative_instructions();
diff --git a/arch/x86/kernel/cpu/proc.c b/arch/x86/kernel/cpu/proc.c
index 3286a92e662a..e280253f6f94 100644
--- a/arch/x86/kernel/cpu/proc.c
+++ b/arch/x86/kernel/cpu/proc.c
@@ -28,7 +28,6 @@ static void show_cpuinfo_misc(struct seq_file *m, struct cpuinfo_x86 *c)
 {
 	seq_printf(m,
 		   "fdiv_bug\t: %s\n"
-		   "hlt_bug\t\t: %s\n"
 		   "f00f_bug\t: %s\n"
 		   "coma_bug\t: %s\n"
 		   "fpu\t\t: %s\n"
@@ -36,7 +35,6 @@ static void show_cpuinfo_misc(struct seq_file *m, struct cpuinfo_x86 *c)
 		   "cpuid level\t: %d\n"
 		   "wp\t\t: %s\n",
 		   c->fdiv_bug ? "yes" : "no",
-		   c->hlt_works_ok ? "no" : "yes",
 		   c->f00f_bug ? "yes" : "no",
 		   c->coma_bug ? "yes" : "no",
 		   c->hard_math ? "yes" : "no",
diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
index ceb05db59be1..b11719ea2f7b 100644
--- a/arch/x86/kernel/process.c
+++ b/arch/x86/kernel/process.c
@@ -390,7 +390,8 @@ void default_idle(void)
 EXPORT_SYMBOL(default_idle);
 #endif
 
-bool set_pm_idle_to_default(void)
+#ifdef CONFIG_XEN
+bool xen_set_default_idle(void)
 {
 	bool ret = !!x86_idle;
 
@@ -398,6 +399,7 @@ bool set_pm_idle_to_default(void)
 
 	return ret;
 }
+#endif
 void stop_this_cpu(void *dummy)
 {
 	local_irq_disable();
@@ -407,31 +409,8 @@ void stop_this_cpu(void *dummy)
 	set_cpu_online(smp_processor_id(), false);
 	disable_local_APIC();
 
-	for (;;) {
-		if (hlt_works(smp_processor_id()))
-			halt();
-	}
-}
-
-/* Default MONITOR/MWAIT with no hints, used for default C1 state */
-static void mwait_idle(void)
-{
-	if (!need_resched()) {
-		trace_power_start_rcuidle(POWER_CSTATE, 1, smp_processor_id());
-		trace_cpu_idle_rcuidle(1, smp_processor_id());
-		if (this_cpu_has(X86_FEATURE_CLFLUSH_MONITOR))
-			clflush((void *)&current_thread_info()->flags);
-
-		__monitor((void *)&current_thread_info()->flags, 0, 0);
-		smp_mb();
-		if (!need_resched())
-			__sti_mwait(0, 0);
-		else
-			local_irq_enable();
-		trace_power_end_rcuidle(smp_processor_id());
-		trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, smp_processor_id());
-	} else
-		local_irq_enable();
+	for (;;)
+		halt();
 }
 
 /*
@@ -450,53 +429,6 @@ static void poll_idle(void)
 	trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, smp_processor_id());
 }
 
-/*
- * mwait selection logic:
- *
- * It depends on the CPU. For AMD CPUs that support MWAIT this is
- * wrong. Family 0x10 and 0x11 CPUs will enter C1 on HLT. Powersavings
- * then depend on a clock divisor and current Pstate of the core. If
- * all cores of a processor are in halt state (C1) the processor can
- * enter the C1E (C1 enhanced) state. If mwait is used this will never
- * happen.
- *
- * idle=mwait overrides this decision and forces the usage of mwait.
- */
-
-#define MWAIT_INFO			0x05
-#define MWAIT_ECX_EXTENDED_INFO		0x01
-#define MWAIT_EDX_C1			0xf0
-
-int mwait_usable(const struct cpuinfo_x86 *c)
-{
-	u32 eax, ebx, ecx, edx;
-
-	/* Use mwait if idle=mwait boot option is given */
-	if (boot_option_idle_override == IDLE_FORCE_MWAIT)
-		return 1;
-
-	/*
-	 * Any idle= boot option other than idle=mwait means that we must not
-	 * use mwait. Eg: idle=halt or idle=poll or idle=nomwait
-	 */
-	if (boot_option_idle_override != IDLE_NO_OVERRIDE)
-		return 0;
-
-	if (c->cpuid_level < MWAIT_INFO)
-		return 0;
-
-	cpuid(MWAIT_INFO, &eax, &ebx, &ecx, &edx);
-	/* Check, whether EDX has extended info about MWAIT */
-	if (!(ecx & MWAIT_ECX_EXTENDED_INFO))
-		return 1;
-
-	/*
-	 * edx enumeratios MONITOR/MWAIT extensions. Check, whether
-	 * C1 supports MWAIT
-	 */
-	return (edx & MWAIT_EDX_C1);
-}
-
 bool amd_e400_c1e_detected;
 EXPORT_SYMBOL(amd_e400_c1e_detected);
 
@@ -567,13 +499,7 @@ void __cpuinit select_idle_routine(const struct cpuinfo_x86 *c)
 	if (x86_idle)
 		return;
 
-	if (cpu_has(c, X86_FEATURE_MWAIT) && mwait_usable(c)) {
-		/*
-		 * One CPU supports mwait => All CPUs supports mwait
-		 */
-		pr_info("using mwait in idle threads\n");
-		x86_idle = mwait_idle;
-	} else if (cpu_has_amd_erratum(amd_erratum_400)) {
+	if (cpu_has_amd_erratum(amd_erratum_400)) {
 		/* E400: APIC timer interrupt does not wake up CPU from C1e */
 		pr_info("using AMD E400 aware idle routine\n");
 		x86_idle = amd_e400_idle;
@@ -597,9 +523,6 @@ static int __init idle_setup(char *str)
 		pr_info("using polling idle threads\n");
 		x86_idle = poll_idle;
 		boot_option_idle_override = IDLE_POLL;
-	} else if (!strcmp(str, "mwait")) {
-		boot_option_idle_override = IDLE_FORCE_MWAIT;
-		WARN_ONCE(1, "\"idle=mwait\" will be removed in 2012\n");
 	} else if (!strcmp(str, "halt")) {
 		/*
 		 * When the boot option of idle=halt is added, halt is
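
For reference, the mwait_usable() logic deleted above keyed off CPUID leaf 0x05 (MONITOR/MWAIT): ECX bit 0 reports whether the extended enumeration is valid, and EDX bits 7:4 give the number of C1 sub-states reachable through MWAIT, which is what the 0xf0 mask tested. A small user-space probe of the same leaf, offered only as a sketch (it uses __get_cpuid() from GCC/Clang's cpuid.h and is not kernel code):

	#include <cpuid.h>
	#include <stdio.h>

	int main(void)
	{
		unsigned int eax, ebx, ecx, edx;

		/* Leaf 0x05: MONITOR/MWAIT enumeration (MWAIT_INFO in the removed code) */
		if (!__get_cpuid(0x05, &eax, &ebx, &ecx, &edx)) {
			puts("CPUID leaf 0x05 not available");
			return 1;
		}
		if (!(ecx & 0x01)) {		/* MWAIT_ECX_EXTENDED_INFO */
			puts("no extended MWAIT enumeration");
			return 0;
		}
		/* MWAIT_EDX_C1 (0xf0): C1 sub-states reachable via MWAIT */
		printf("C1 MWAIT sub-states: %u\n", (edx & 0xf0) >> 4);
		return 0;
	}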
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index ed0fe385289d..a6ceaedc396a 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -1369,7 +1369,7 @@ static inline void mwait_play_dead(void)
 	void *mwait_ptr;
 	struct cpuinfo_x86 *c = __this_cpu_ptr(&cpu_info);
 
-	if (!(this_cpu_has(X86_FEATURE_MWAIT) && mwait_usable(c)))
+	if (!this_cpu_has(X86_FEATURE_MWAIT))
 		return;
 	if (!this_cpu_has(X86_FEATURE_CLFLSH))
 		return;
diff --git a/arch/x86/xen/setup.c b/arch/x86/xen/setup.c
index 8971a26d21ab..94eac5c85cdc 100644
--- a/arch/x86/xen/setup.c
+++ b/arch/x86/xen/setup.c
@@ -556,12 +556,9 @@ void __init xen_arch_setup(void)
 	       COMMAND_LINE_SIZE : MAX_GUEST_CMDLINE);
 
 	/* Set up idle, making sure it calls safe_halt() pvop */
-#ifdef CONFIG_X86_32
-	boot_cpu_data.hlt_works_ok = 1;
-#endif
 	disable_cpuidle();
 	disable_cpufreq();
-	WARN_ON(set_pm_idle_to_default());
+	WARN_ON(xen_set_default_idle());
 	fiddle_vdso();
 #ifdef CONFIG_NUMA
 	numa_off = 1;
diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c
index 8b433cb08a33..fc95308e9a11 100644
--- a/drivers/acpi/processor_idle.c
+++ b/drivers/acpi/processor_idle.c
@@ -71,7 +71,6 @@ static struct acpi_processor_cx *acpi_cstate[CPUIDLE_STATE_MAX];
 static int disabled_by_idle_boot_param(void)
 {
 	return boot_option_idle_override == IDLE_POLL ||
-		boot_option_idle_override == IDLE_FORCE_MWAIT ||
 		boot_option_idle_override == IDLE_HALT;
 }
 