Diffstat (limited to 'arch/x86/kernel')

-rw-r--r--  arch/x86/kernel/apm_32.c    |  57
-rw-r--r--  arch/x86/kernel/cpu/bugs.c  |  27
-rw-r--r--  arch/x86/kernel/cpu/proc.c  |   2
-rw-r--r--  arch/x86/kernel/process.c   | 116
-rw-r--r--  arch/x86/kernel/smpboot.c   |   2

5 files changed, 52 insertions, 152 deletions
diff --git a/arch/x86/kernel/apm_32.c b/arch/x86/kernel/apm_32.c
index d65464e43503..9f4bc6a1164d 100644
--- a/arch/x86/kernel/apm_32.c
+++ b/arch/x86/kernel/apm_32.c
@@ -232,6 +232,7 @@
 #include <linux/acpi.h>
 #include <linux/syscore_ops.h>
 #include <linux/i8253.h>
+#include <linux/cpuidle.h>
 
 #include <asm/uaccess.h>
 #include <asm/desc.h>
@@ -360,13 +361,35 @@ struct apm_user {
  * idle percentage above which bios idle calls are done
  */
 #ifdef CONFIG_APM_CPU_IDLE
-#warning deprecated CONFIG_APM_CPU_IDLE will be deleted in 2012
 #define DEFAULT_IDLE_THRESHOLD 95
 #else
 #define DEFAULT_IDLE_THRESHOLD 100
 #endif
 #define DEFAULT_IDLE_PERIOD (100 / 3)
 
+static int apm_cpu_idle(struct cpuidle_device *dev,
+			struct cpuidle_driver *drv, int index);
+
+static struct cpuidle_driver apm_idle_driver = {
+	.name = "apm_idle",
+	.owner = THIS_MODULE,
+	.en_core_tk_irqen = 1,
+	.states = {
+		{ /* entry 0 is for polling */ },
+		{ /* entry 1 is for APM idle */
+			.name = "APM",
+			.desc = "APM idle",
+			.flags = CPUIDLE_FLAG_TIME_VALID,
+			.exit_latency = 250,	/* WAG */
+			.target_residency = 500,	/* WAG */
+			.enter = &apm_cpu_idle
+		},
+	},
+	.state_count = 2,
+};
+
+static struct cpuidle_device apm_cpuidle_device;
+
 /*
  * Local variables
  */
@@ -377,7 +400,6 @@ static struct {
 static int clock_slowed;
 static int idle_threshold __read_mostly = DEFAULT_IDLE_THRESHOLD;
 static int idle_period __read_mostly = DEFAULT_IDLE_PERIOD;
-static int set_pm_idle;
 static int suspends_pending;
 static int standbys_pending;
 static int ignore_sys_suspend;
@@ -884,8 +906,6 @@ static void apm_do_busy(void)
 #define IDLE_CALC_LIMIT (HZ * 100)
 #define IDLE_LEAKY_MAX 16
 
-static void (*original_pm_idle)(void) __read_mostly;
-
 /**
  * apm_cpu_idle - cpu idling for APM capable Linux
  *
@@ -894,7 +914,8 @@ static void (*original_pm_idle)(void) __read_mostly;
  * Furthermore it calls the system default idle routine.
  */
 
-static void apm_cpu_idle(void)
+static int apm_cpu_idle(struct cpuidle_device *dev,
+			struct cpuidle_driver *drv, int index)
 {
 	static int use_apm_idle; /* = 0 */
 	static unsigned int last_jiffies; /* = 0 */
@@ -904,7 +925,6 @@ static void apm_cpu_idle(void)
 	unsigned int jiffies_since_last_check = jiffies - last_jiffies;
 	unsigned int bucket;
 
-	WARN_ONCE(1, "deprecated apm_cpu_idle will be deleted in 2012");
 recalc:
 	if (jiffies_since_last_check > IDLE_CALC_LIMIT) {
 		use_apm_idle = 0;
@@ -950,10 +970,7 @@ recalc:
 			break;
 		}
 	}
-	if (original_pm_idle)
-		original_pm_idle();
-	else
-		default_idle();
+	default_idle();
 	local_irq_disable();
 	jiffies_since_last_check = jiffies - last_jiffies;
 	if (jiffies_since_last_check > idle_period)
@@ -963,7 +980,7 @@ recalc:
 	if (apm_idle_done)
 		apm_do_busy();
 
-	local_irq_enable();
+	return index;
 }
 
 /**
@@ -2381,9 +2398,9 @@ static int __init apm_init(void)
 	if (HZ != 100)
 		idle_period = (idle_period * HZ) / 100;
 	if (idle_threshold < 100) {
-		original_pm_idle = pm_idle;
-		pm_idle = apm_cpu_idle;
-		set_pm_idle = 1;
+		if (!cpuidle_register_driver(&apm_idle_driver))
+			if (cpuidle_register_device(&apm_cpuidle_device))
+				cpuidle_unregister_driver(&apm_idle_driver);
 	}
 
 	return 0;
@@ -2393,15 +2410,9 @@ static void __exit apm_exit(void)
 {
 	int error;
 
-	if (set_pm_idle) {
-		pm_idle = original_pm_idle;
-		/*
-		 * We are about to unload the current idle thread pm callback
-		 * (pm_idle), Wait for all processors to update cached/local
-		 * copies of pm_idle before proceeding.
-		 */
-		kick_all_cpus_sync();
-	}
+	cpuidle_unregister_device(&apm_cpuidle_device);
+	cpuidle_unregister_driver(&apm_idle_driver);
+
 	if (((apm_info.bios.flags & APM_BIOS_DISENGAGED) == 0)
 	    && (apm_info.connection_version > 0x0100)) {
 		error = apm_engage_power_management(APM_DEVICE_ALL, 0);
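
The apm_init()/apm_exit() hunks above follow the usual cpuidle register-then-unwind sequence: register the driver first, then the per-CPU device, and drop the driver again if the device cannot be registered. A minimal sketch of that shape, with an illustrative helper name that is not part of the patch:

	static int example_register_apm_idle(void)
	{
		/* The driver must exist before any device that refers to it. */
		if (cpuidle_register_driver(&apm_idle_driver))
			return -ENODEV;

		/* If the per-CPU device fails to register, unwind the driver. */
		if (cpuidle_register_device(&apm_cpuidle_device)) {
			cpuidle_unregister_driver(&apm_idle_driver);
			return -ENODEV;
		}

		return 0;
	}
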
diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c
index 92dfec986a48..af6455e3fcc9 100644
--- a/arch/x86/kernel/cpu/bugs.c
+++ b/arch/x86/kernel/cpu/bugs.c
@@ -17,15 +17,6 @@
 #include <asm/paravirt.h>
 #include <asm/alternative.h>
 
-static int __init no_halt(char *s)
-{
-	WARN_ONCE(1, "\"no-hlt\" is deprecated, please use \"idle=poll\"\n");
-	boot_cpu_data.hlt_works_ok = 0;
-	return 1;
-}
-
-__setup("no-hlt", no_halt);
-
 static int __init no_387(char *s)
 {
 	boot_cpu_data.hard_math = 0;
@@ -89,23 +80,6 @@ static void __init check_fpu(void)
 	pr_warn("Hmm, FPU with FDIV bug\n");
 }
 
-static void __init check_hlt(void)
-{
-	if (boot_cpu_data.x86 >= 5 || paravirt_enabled())
-		return;
-
-	pr_info("Checking 'hlt' instruction... ");
-	if (!boot_cpu_data.hlt_works_ok) {
-		pr_cont("disabled\n");
-		return;
-	}
-	halt();
-	halt();
-	halt();
-	halt();
-	pr_cont("OK\n");
-}
-
 /*
  * Check whether we are able to run this kernel safely on SMP.
  *
@@ -129,7 +103,6 @@ void __init check_bugs(void)
 	print_cpu_info(&boot_cpu_data);
 #endif
 	check_config();
-	check_hlt();
 	init_utsname()->machine[1] =
 		'0' + (boot_cpu_data.x86 > 6 ? 6 : boot_cpu_data.x86);
 	alternative_instructions();
diff --git a/arch/x86/kernel/cpu/proc.c b/arch/x86/kernel/cpu/proc.c
index 3286a92e662a..e280253f6f94 100644
--- a/arch/x86/kernel/cpu/proc.c
+++ b/arch/x86/kernel/cpu/proc.c
@@ -28,7 +28,6 @@ static void show_cpuinfo_misc(struct seq_file *m, struct cpuinfo_x86 *c)
 {
 	seq_printf(m,
 		   "fdiv_bug\t: %s\n"
-		   "hlt_bug\t\t: %s\n"
 		   "f00f_bug\t: %s\n"
 		   "coma_bug\t: %s\n"
 		   "fpu\t\t: %s\n"
@@ -36,7 +35,6 @@ static void show_cpuinfo_misc(struct seq_file *m, struct cpuinfo_x86 *c)
36 "cpuid level\t: %d\n" 35 "cpuid level\t: %d\n"
37 "wp\t\t: %s\n", 36 "wp\t\t: %s\n",
38 c->fdiv_bug ? "yes" : "no", 37 c->fdiv_bug ? "yes" : "no",
39 c->hlt_works_ok ? "no" : "yes",
40 c->f00f_bug ? "yes" : "no", 38 c->f00f_bug ? "yes" : "no",
41 c->coma_bug ? "yes" : "no", 39 c->coma_bug ? "yes" : "no",
42 c->hard_math ? "yes" : "no", 40 c->hard_math ? "yes" : "no",
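
With hlt_works_ok gone, show_cpuinfo_misc() no longer emits an hlt_bug line. Given the remaining format string above, the start of the misc block in 32-bit /proc/cpuinfo now reads along these lines (field values are illustrative, not taken from the patch):

	fdiv_bug	: no
	f00f_bug	: no
	coma_bug	: no
	fpu		: yes
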
diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
index dcfc1f410dc4..14ae10031ff0 100644
--- a/arch/x86/kernel/process.c
+++ b/arch/x86/kernel/process.c
@@ -268,13 +268,7 @@ void __switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p,
 unsigned long boot_option_idle_override = IDLE_NO_OVERRIDE;
 EXPORT_SYMBOL(boot_option_idle_override);
 
-/*
- * Powermanagement idle function, if any..
- */
-void (*pm_idle)(void);
-#ifdef CONFIG_APM_MODULE
-EXPORT_SYMBOL(pm_idle);
-#endif
+static void (*x86_idle)(void);
 
 #ifndef CONFIG_SMP
 static inline void play_dead(void)
@@ -351,7 +345,7 @@ void cpu_idle(void)
 		rcu_idle_enter();
 
 		if (cpuidle_idle_call())
-			pm_idle();
+			x86_idle();
 
 		rcu_idle_exit();
 		start_critical_timings();
@@ -394,14 +388,16 @@ void default_idle(void)
 EXPORT_SYMBOL(default_idle);
 #endif
 
-bool set_pm_idle_to_default(void)
+#ifdef CONFIG_XEN
+bool xen_set_default_idle(void)
 {
-	bool ret = !!pm_idle;
+	bool ret = !!x86_idle;
 
-	pm_idle = default_idle;
+	x86_idle = default_idle;
 
 	return ret;
 }
+#endif
 void stop_this_cpu(void *dummy)
 {
 	local_irq_disable();
@@ -411,29 +407,8 @@ void stop_this_cpu(void *dummy)
 	set_cpu_online(smp_processor_id(), false);
 	disable_local_APIC();
 
-	for (;;) {
-		if (hlt_works(smp_processor_id()))
-			halt();
-	}
-}
-
-/* Default MONITOR/MWAIT with no hints, used for default C1 state */
-static void mwait_idle(void)
-{
-	if (!need_resched()) {
-		trace_cpu_idle_rcuidle(1, smp_processor_id());
-		if (this_cpu_has(X86_FEATURE_CLFLUSH_MONITOR))
-			clflush((void *)&current_thread_info()->flags);
-
-		__monitor((void *)&current_thread_info()->flags, 0, 0);
-		smp_mb();
-		if (!need_resched())
-			__sti_mwait(0, 0);
-		else
-			local_irq_enable();
-		trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, smp_processor_id());
-	} else
-		local_irq_enable();
+	for (;;)
+		halt();
 }
 
 /*
@@ -450,53 +425,6 @@ static void poll_idle(void)
 	trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, smp_processor_id());
 }
 
-/*
- * mwait selection logic:
- *
- * It depends on the CPU. For AMD CPUs that support MWAIT this is
- * wrong. Family 0x10 and 0x11 CPUs will enter C1 on HLT. Powersavings
- * then depend on a clock divisor and current Pstate of the core. If
- * all cores of a processor are in halt state (C1) the processor can
- * enter the C1E (C1 enhanced) state. If mwait is used this will never
- * happen.
- *
- * idle=mwait overrides this decision and forces the usage of mwait.
- */
-
-#define MWAIT_INFO 0x05
-#define MWAIT_ECX_EXTENDED_INFO 0x01
-#define MWAIT_EDX_C1 0xf0
-
-int mwait_usable(const struct cpuinfo_x86 *c)
-{
-	u32 eax, ebx, ecx, edx;
-
-	/* Use mwait if idle=mwait boot option is given */
-	if (boot_option_idle_override == IDLE_FORCE_MWAIT)
-		return 1;
-
-	/*
-	 * Any idle= boot option other than idle=mwait means that we must not
-	 * use mwait. Eg: idle=halt or idle=poll or idle=nomwait
-	 */
-	if (boot_option_idle_override != IDLE_NO_OVERRIDE)
-		return 0;
-
-	if (c->cpuid_level < MWAIT_INFO)
-		return 0;
-
-	cpuid(MWAIT_INFO, &eax, &ebx, &ecx, &edx);
-	/* Check, whether EDX has extended info about MWAIT */
-	if (!(ecx & MWAIT_ECX_EXTENDED_INFO))
-		return 1;
-
-	/*
-	 * edx enumeratios MONITOR/MWAIT extensions. Check, whether
-	 * C1 supports MWAIT
-	 */
-	return (edx & MWAIT_EDX_C1);
-}
-
 bool amd_e400_c1e_detected;
 EXPORT_SYMBOL(amd_e400_c1e_detected);
 
@@ -561,31 +489,24 @@ static void amd_e400_idle(void)
 void __cpuinit select_idle_routine(const struct cpuinfo_x86 *c)
 {
 #ifdef CONFIG_SMP
-	if (pm_idle == poll_idle && smp_num_siblings > 1) {
+	if (x86_idle == poll_idle && smp_num_siblings > 1)
 		pr_warn_once("WARNING: polling idle and HT enabled, performance may degrade\n");
-	}
 #endif
-	if (pm_idle)
+	if (x86_idle)
 		return;
 
-	if (cpu_has(c, X86_FEATURE_MWAIT) && mwait_usable(c)) {
-		/*
-		 * One CPU supports mwait => All CPUs supports mwait
-		 */
-		pr_info("using mwait in idle threads\n");
-		pm_idle = mwait_idle;
-	} else if (cpu_has_amd_erratum(amd_erratum_400)) {
+	if (cpu_has_amd_erratum(amd_erratum_400)) {
 		/* E400: APIC timer interrupt does not wake up CPU from C1e */
 		pr_info("using AMD E400 aware idle routine\n");
-		pm_idle = amd_e400_idle;
+		x86_idle = amd_e400_idle;
 	} else
-		pm_idle = default_idle;
+		x86_idle = default_idle;
 }
 
 void __init init_amd_e400_c1e_mask(void)
 {
 	/* If we're using amd_e400_idle, we need to allocate amd_e400_c1e_mask. */
-	if (pm_idle == amd_e400_idle)
+	if (x86_idle == amd_e400_idle)
 		zalloc_cpumask_var(&amd_e400_c1e_mask, GFP_KERNEL);
 }
 
@@ -596,11 +517,8 @@ static int __init idle_setup(char *str)
 
 	if (!strcmp(str, "poll")) {
 		pr_info("using polling idle threads\n");
-		pm_idle = poll_idle;
+		x86_idle = poll_idle;
 		boot_option_idle_override = IDLE_POLL;
-	} else if (!strcmp(str, "mwait")) {
-		boot_option_idle_override = IDLE_FORCE_MWAIT;
-		WARN_ONCE(1, "\"idle=mwait\" will be removed in 2012\n");
 	} else if (!strcmp(str, "halt")) {
 		/*
 		 * When the boot option of idle=halt is added, halt is
@@ -609,7 +527,7 @@ static int __init idle_setup(char *str)
 		 * To continue to load the CPU idle driver, don't touch
 		 * the boot_option_idle_override.
 		 */
-		pm_idle = default_idle;
+		x86_idle = default_idle;
 		boot_option_idle_override = IDLE_HALT;
 	} else if (!strcmp(str, "nomwait")) {
 		/*
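
After these changes the idle entry point is layered: cpuidle_idle_call() gets the first chance (for example via the apm_idle driver registered above), and the static x86_idle pointer, filled in by select_idle_routine() or the idle= boot option, is only the fallback. A rough sketch of the inner loop's shape, not the literal cpu_idle() code (the real loop also handles RCU, tracing and hotplug):

	static void idle_loop_sketch(void)
	{
		while (!need_resched()) {
			if (cpuidle_idle_call())	/* nonzero: no cpuidle driver took over */
				x86_idle();		/* default_idle, poll_idle or amd_e400_idle */
		}
	}
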
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index ed0fe385289d..a6ceaedc396a 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -1369,7 +1369,7 @@ static inline void mwait_play_dead(void)
 	void *mwait_ptr;
 	struct cpuinfo_x86 *c = __this_cpu_ptr(&cpu_info);
 
-	if (!(this_cpu_has(X86_FEATURE_MWAIT) && mwait_usable(c)))
+	if (!this_cpu_has(X86_FEATURE_MWAIT))
 		return;
 	if (!this_cpu_has(X86_FEATURE_CLFLSH))
 		return;