Diffstat (limited to 'arch/x86/kernel/process.c')
-rw-r--r--	arch/x86/kernel/process.c | 105
1 file changed, 57 insertions(+), 48 deletions(-)
diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
index 57d1868a86aa..e1ba8cb24e4e 100644
--- a/arch/x86/kernel/process.c
+++ b/arch/x86/kernel/process.c
@@ -14,6 +14,7 @@
 #include <linux/utsname.h>
 #include <trace/events/power.h>
 #include <linux/hw_breakpoint.h>
+#include <asm/cpu.h>
 #include <asm/system.h>
 #include <asm/apic.h>
 #include <asm/syscalls.h>
@@ -22,11 +23,6 @@
 #include <asm/i387.h>
 #include <asm/debugreg.h>
 
-unsigned long idle_halt;
-EXPORT_SYMBOL(idle_halt);
-unsigned long idle_nomwait;
-EXPORT_SYMBOL(idle_nomwait);
-
 struct kmem_cache *task_xstate_cachep;
 EXPORT_SYMBOL_GPL(task_xstate_cachep);
 
@@ -91,27 +87,33 @@ void exit_thread(void)
 void show_regs(struct pt_regs *regs)
 {
 	show_registers(regs);
-	show_trace(NULL, regs, (unsigned long *)kernel_stack_pointer(regs),
-		   regs->bp);
+	show_trace(NULL, regs, (unsigned long *)kernel_stack_pointer(regs), 0);
 }
 
 void show_regs_common(void)
 {
-	const char *board, *product;
+	const char *vendor, *product, *board;
 
-	board = dmi_get_system_info(DMI_BOARD_NAME);
-	if (!board)
-		board = "";
+	vendor = dmi_get_system_info(DMI_SYS_VENDOR);
+	if (!vendor)
+		vendor = "";
 	product = dmi_get_system_info(DMI_PRODUCT_NAME);
 	if (!product)
 		product = "";
 
+	/* Board Name is optional */
+	board = dmi_get_system_info(DMI_BOARD_NAME);
+
 	printk(KERN_CONT "\n");
-	printk(KERN_DEFAULT "Pid: %d, comm: %.20s %s %s %.*s %s/%s\n",
+	printk(KERN_DEFAULT "Pid: %d, comm: %.20s %s %s %.*s",
 		current->pid, current->comm, print_tainted(),
 		init_utsname()->release,
 		(int)strcspn(init_utsname()->version, " "),
-		init_utsname()->version, board, product);
+		init_utsname()->version);
+	printk(KERN_CONT " %s %s", vendor, product);
+	if (board)
+		printk(KERN_CONT "/%s", board);
+	printk(KERN_CONT "\n");
 }
 
 void flush_thread(void)
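Note: the banner is now assembled piecewise with KERN_CONT so the optional DMI board name can be appended only when present. As an illustrative sketch (the DMI strings and kernel version below are hypothetical, not taken from this patch), a machine reporting vendor "Dell Inc.", product "Latitude E6410" and board "0667CC" would log:

	Pid: 1, comm: swapper Not tainted 2.6.37-rc1 #1 Dell Inc. Latitude E6410/0667CC

whereas a system without a DMI board name simply omits the trailing "/<board>" part.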
@@ -328,14 +330,16 @@ long sys_execve(const char __user *name,
 /*
  * Idle related variables and functions
  */
-unsigned long boot_option_idle_override = 0;
+unsigned long boot_option_idle_override = IDLE_NO_OVERRIDE;
 EXPORT_SYMBOL(boot_option_idle_override);
 
 /*
  * Powermanagement idle function, if any..
  */
 void (*pm_idle)(void);
+#ifdef CONFIG_APM_MODULE
 EXPORT_SYMBOL(pm_idle);
+#endif
 
 #ifdef CONFIG_X86_32
 /*
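The IDLE_* values used from here on are not defined in process.c itself; this series introduces them as an enum in arch/x86/include/asm/processor.h. A sketch of that definition, for reference while reading the rest of the diff:

	/* sketch of the enum assumed by this patch, not part of this file */
	enum idle_boot_override { IDLE_NO_OVERRIDE = 0, IDLE_HALT, IDLE_NOMWAIT,
				  IDLE_POLL, IDLE_FORCE_MWAIT };

boot_option_idle_override itself remains an unsigned long; only the set of meaningful values changes.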
@@ -374,6 +378,7 @@ void default_idle(void)
 {
 	if (hlt_use_halt()) {
 		trace_power_start(POWER_CSTATE, 1, smp_processor_id());
+		trace_cpu_idle(1, smp_processor_id());
 		current_thread_info()->status &= ~TS_POLLING;
 		/*
 		 * TS_POLLING-cleared state must be visible before we
@@ -386,6 +391,8 @@ void default_idle(void)
 		else
 			local_irq_enable();
 		current_thread_info()->status |= TS_POLLING;
+		trace_power_end(smp_processor_id());
+		trace_cpu_idle(PWR_EVENT_EXIT, smp_processor_id());
 	} else {
 		local_irq_enable();
 		/* loop is done by the caller */
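The cpu_idle tracepoint added here (declared in <trace/events/power.h>, already included above) is always emitted in pairs: once on entry with the target C-state, once on exit with PWR_EVENT_EXIT. A minimal sketch of the pattern this patch applies to each idle routine, assuming the HLT-based C1 path:

	trace_cpu_idle(1, smp_processor_id());			/* entering C1 */
	safe_halt();						/* idle until the next interrupt */
	trace_power_end(smp_processor_id());
	trace_cpu_idle(PWR_EVENT_EXIT, smp_processor_id());	/* back to C0 */

The new events run alongside the existing power_start/power_end pair rather than replacing it.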
@@ -443,9 +450,8 @@ EXPORT_SYMBOL_GPL(cpu_idle_wait);
  */
 void mwait_idle_with_hints(unsigned long ax, unsigned long cx)
 {
-	trace_power_start(POWER_CSTATE, (ax>>4)+1, smp_processor_id());
 	if (!need_resched()) {
-		if (cpu_has(&current_cpu_data, X86_FEATURE_CLFLUSH_MONITOR))
+		if (this_cpu_has(X86_FEATURE_CLFLUSH_MONITOR))
 			clflush((void *)&current_thread_info()->flags);
 
 		__monitor((void *)&current_thread_info()->flags, 0, 0);
@@ -460,7 +466,8 @@ static void mwait_idle(void)
 {
 	if (!need_resched()) {
 		trace_power_start(POWER_CSTATE, 1, smp_processor_id());
-		if (cpu_has(&current_cpu_data, X86_FEATURE_CLFLUSH_MONITOR))
+		trace_cpu_idle(1, smp_processor_id());
+		if (this_cpu_has(X86_FEATURE_CLFLUSH_MONITOR))
 			clflush((void *)&current_thread_info()->flags);
 
 		__monitor((void *)&current_thread_info()->flags, 0, 0);
@@ -469,6 +476,8 @@ static void mwait_idle(void)
 			__sti_mwait(0, 0);
 		else
 			local_irq_enable();
+		trace_power_end(smp_processor_id());
+		trace_cpu_idle(PWR_EVENT_EXIT, smp_processor_id());
 	} else
 		local_irq_enable();
 }
@@ -481,10 +490,12 @@ static void mwait_idle(void)
 static void poll_idle(void)
 {
 	trace_power_start(POWER_CSTATE, 0, smp_processor_id());
+	trace_cpu_idle(0, smp_processor_id());
 	local_irq_enable();
 	while (!need_resched())
 		cpu_relax();
-	trace_power_end(0);
+	trace_power_end(smp_processor_id());
+	trace_cpu_idle(PWR_EVENT_EXIT, smp_processor_id());
 }
 
 /*
@@ -499,17 +510,16 @@ static void poll_idle(void)
  *
  * idle=mwait overrides this decision and forces the usage of mwait.
  */
-static int __cpuinitdata force_mwait;
 
 #define MWAIT_INFO			0x05
 #define MWAIT_ECX_EXTENDED_INFO		0x01
 #define MWAIT_EDX_C1			0xf0
 
-static int __cpuinit mwait_usable(const struct cpuinfo_x86 *c)
+int mwait_usable(const struct cpuinfo_x86 *c)
 {
 	u32 eax, ebx, ecx, edx;
 
-	if (force_mwait)
+	if (boot_option_idle_override == IDLE_FORCE_MWAIT)
 		return 1;
 
 	if (c->cpuid_level < MWAIT_INFO)
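For context on the defines used above: MWAIT_INFO is CPUID leaf 5 (MONITOR/MWAIT enumeration), MWAIT_ECX_EXTENDED_INFO is the ECX bit saying sub-state enumeration is valid, and MWAIT_EDX_C1 masks EDX bits 7:4, the number of C1 sub-states MWAIT supports. The unchanged middle of the function is not shown in this hunk; as a sketch of what it amounts to (assuming the conventional CPUID.05H layout, reconstructed rather than quoted from this diff):

	cpuid(MWAIT_INFO, &eax, &ebx, &ecx, &edx);
	if (!(ecx & MWAIT_ECX_EXTENDED_INFO))	/* no sub-state info: assume MWAIT is fine */
		return 1;
	return (edx & MWAIT_EDX_C1);		/* require at least one C1 sub-state */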
@@ -527,45 +537,45 @@ static int __cpuinit mwait_usable(const struct cpuinfo_x86 *c)
 	return (edx & MWAIT_EDX_C1);
 }
 
-bool c1e_detected;
-EXPORT_SYMBOL(c1e_detected);
+bool amd_e400_c1e_detected;
+EXPORT_SYMBOL(amd_e400_c1e_detected);
 
-static cpumask_var_t c1e_mask;
+static cpumask_var_t amd_e400_c1e_mask;
 
-void c1e_remove_cpu(int cpu)
+void amd_e400_remove_cpu(int cpu)
 {
-	if (c1e_mask != NULL)
-		cpumask_clear_cpu(cpu, c1e_mask);
+	if (amd_e400_c1e_mask != NULL)
+		cpumask_clear_cpu(cpu, amd_e400_c1e_mask);
 }
 
 /*
- * C1E aware idle routine. We check for C1E active in the interrupt
+ * AMD Erratum 400 aware idle routine. We check for C1E active in the interrupt
  * pending message MSR. If we detect C1E, then we handle it the same
  * way as C3 power states (local apic timer and TSC stop)
  */
-static void c1e_idle(void)
+static void amd_e400_idle(void)
 {
 	if (need_resched())
 		return;
 
-	if (!c1e_detected) {
+	if (!amd_e400_c1e_detected) {
 		u32 lo, hi;
 
 		rdmsr(MSR_K8_INT_PENDING_MSG, lo, hi);
 
 		if (lo & K8_INTP_C1E_ACTIVE_MASK) {
-			c1e_detected = true;
+			amd_e400_c1e_detected = true;
 			if (!boot_cpu_has(X86_FEATURE_NONSTOP_TSC))
 				mark_tsc_unstable("TSC halt in AMD C1E");
 			printk(KERN_INFO "System has AMD C1E enabled\n");
 		}
 	}
 
-	if (c1e_detected) {
+	if (amd_e400_c1e_detected) {
 		int cpu = smp_processor_id();
 
-		if (!cpumask_test_cpu(cpu, c1e_mask)) {
-			cpumask_set_cpu(cpu, c1e_mask);
+		if (!cpumask_test_cpu(cpu, amd_e400_c1e_mask)) {
+			cpumask_set_cpu(cpu, amd_e400_c1e_mask);
 			/*
 			 * Force broadcast so ACPI can not interfere.
 			 */
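The hunk ends inside the first-detection branch; the remainder of the function is unchanged and therefore not shown. For orientation, what follows in the tree is the switch of the affected CPU to the broadcast clockevent, since in C1E the local APIC timer stops. A sketch of that tail (reconstructed from the surrounding kernel, not part of this diff):

	clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_FORCE, &cpu);
	printk(KERN_INFO "Switch to broadcast mode on CPU%d\n", cpu);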
@@ -608,17 +618,17 @@ void __cpuinit select_idle_routine(const struct cpuinfo_x86 *c)
 		pm_idle = mwait_idle;
 	} else if (cpu_has_amd_erratum(amd_erratum_400)) {
 		/* E400: APIC timer interrupt does not wake up CPU from C1e */
-		printk(KERN_INFO "using C1E aware idle routine\n");
-		pm_idle = c1e_idle;
+		printk(KERN_INFO "using AMD E400 aware idle routine\n");
+		pm_idle = amd_e400_idle;
 	} else
 		pm_idle = default_idle;
 }
 
-void __init init_c1e_mask(void)
+void __init init_amd_e400_c1e_mask(void)
 {
-	/* If we're using c1e_idle, we need to allocate c1e_mask. */
-	if (pm_idle == c1e_idle)
-		zalloc_cpumask_var(&c1e_mask, GFP_KERNEL);
+	/* If we're using amd_e400_idle, we need to allocate amd_e400_c1e_mask. */
+	if (pm_idle == amd_e400_idle)
+		zalloc_cpumask_var(&amd_e400_c1e_mask, GFP_KERNEL);
 }
 
 static int __init idle_setup(char *str)
@@ -629,9 +639,11 @@ static int __init idle_setup(char *str)
 	if (!strcmp(str, "poll")) {
 		printk("using polling idle threads.\n");
 		pm_idle = poll_idle;
-	} else if (!strcmp(str, "mwait"))
-		force_mwait = 1;
-	else if (!strcmp(str, "halt")) {
+		boot_option_idle_override = IDLE_POLL;
+	} else if (!strcmp(str, "mwait")) {
+		boot_option_idle_override = IDLE_FORCE_MWAIT;
+		WARN_ONCE(1, "\"idle=mwait\" will be removed in 2012\n");
+	} else if (!strcmp(str, "halt")) {
 		/*
 		 * When the boot option of idle=halt is added, halt is
 		 * forced to be used for CPU idle. In such case CPU C2/C3
@@ -640,8 +652,7 @@ static int __init idle_setup(char *str)
 		 * the boot_option_idle_override.
 		 */
 		pm_idle = default_idle;
-		idle_halt = 1;
-		return 0;
+		boot_option_idle_override = IDLE_HALT;
 	} else if (!strcmp(str, "nomwait")) {
 		/*
 		 * If the boot option of "idle=nomwait" is added,
@@ -649,12 +660,10 @@ static int __init idle_setup(char *str)
 		 * states. In such case it won't touch the variable
 		 * of boot_option_idle_override.
 		 */
-		idle_nomwait = 1;
-		return 0;
+		boot_option_idle_override = IDLE_NOMWAIT;
 	} else
 		return -1;
 
-	boot_option_idle_override = 1;
 	return 0;
 }
 early_param("idle", idle_setup);
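Since idle_setup() is registered with early_param, the strings above come straight from the kernel command line (e.g. booting with idle=poll). With this patch every recognized option records its choice in boot_option_idle_override instead of the removed idle_halt/idle_nomwait flags; as a summary sketch of the resulting behavior:

	idle=poll	pm_idle = poll_idle;	boot_option_idle_override = IDLE_POLL
	idle=mwait	(deprecated, warns)	boot_option_idle_override = IDLE_FORCE_MWAIT
	idle=halt	pm_idle = default_idle;	boot_option_idle_override = IDLE_HALT
	idle=nomwait				boot_option_idle_override = IDLE_NOMWAIT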