author     Len Brown <len.brown@intel.com>    2011-04-01 16:59:53 -0400
committer  Len Brown <len.brown@intel.com>    2011-05-29 03:38:57 -0400
commit     02c68a02018669d1817c43c42de800975cbec467 (patch)
tree       9e02b7913f650492fcdbb78cd6e8d3ff3160e43c /arch
parent     333c5ae9948194428fe6c5ef5c088304fc98263b (diff)
x86 idle: clarify AMD erratum 400 workaround
The workaround for AMD erratum 400 uses the term "c1e", falsely suggesting that:
1. Intel C1E is somehow involved
2. All AMD processors with C1E are involved
Use the string "amd_c1e" instead of simply "c1e" to clarify that
this workaround is specific to AMD's version of C1E.
Use the string "e400" to clarify that the workaround is specific
to AMD processors with Erratum 400.
This patch is text-substitution only, with no functional change.
cc: x86@kernel.org
Acked-by: Borislav Petkov <borislav.petkov@amd.com>
Signed-off-by: Len Brown <len.brown@intel.com>
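For orientation, the rename maps old symbols one-to-one onto new ones: c1e_detected -> amd_e400_c1e_detected, c1e_mask -> amd_e400_c1e_mask, c1e_idle() -> amd_e400_idle(), c1e_remove_cpu() -> amd_e400_remove_cpu(), and init_c1e_mask() -> init_amd_e400_c1e_mask(). The sketch below condenses the detection path those symbols implement, mirroring the arch/x86/kernel/process.c hunk further down; the helper name amd_e400_check_c1e() is illustrative only and does not exist in the tree.

/*
 * Illustrative sketch only: condenses the erratum-400 / C1E detection
 * that this patch renames (see the arch/x86/kernel/process.c hunk).
 * Not a buildable standalone module; amd_e400_check_c1e() is a
 * hypothetical helper, the real logic lives inside amd_e400_idle().
 */
#include <linux/kernel.h>
#include <linux/cpumask.h>
#include <asm/msr.h>
#include <asm/tsc.h>
#include <asm/processor.h>

bool amd_e400_c1e_detected;             /* was: c1e_detected */
static cpumask_var_t amd_e400_c1e_mask; /* was: c1e_mask */

static void amd_e400_check_c1e(void)
{
        u32 lo, hi;

        /* The interrupt-pending message MSR carries the C1E-active bits */
        rdmsr(MSR_K8_INT_PENDING_MSG, lo, hi);

        if (lo & K8_INTP_C1E_ACTIVE_MASK) {
                amd_e400_c1e_detected = true;
                /* In C1E the local APIC timer and TSC stop, as in C3 */
                if (!boot_cpu_has(X86_FEATURE_NONSTOP_TSC))
                        mark_tsc_unstable("TSC halt in AMD C1E");
                printk(KERN_INFO "System has AMD C1E enabled\n");
        }
}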
Diffstat (limited to 'arch')
-rw-r--r--  arch/x86/include/asm/acpi.h        2
-rw-r--r--  arch/x86/include/asm/idle.h        2
-rw-r--r--  arch/x86/include/asm/processor.h   4
-rw-r--r--  arch/x86/kernel/cpu/common.c       2
-rw-r--r--  arch/x86/kernel/process.c         38
-rw-r--r--  arch/x86/kernel/smpboot.c          2
6 files changed, 25 insertions, 25 deletions
diff --git a/arch/x86/include/asm/acpi.h b/arch/x86/include/asm/acpi.h
index 4ea15ca89b2b..52fd57f95c50 100644
--- a/arch/x86/include/asm/acpi.h
+++ b/arch/x86/include/asm/acpi.h
@@ -138,7 +138,7 @@ static inline unsigned int acpi_processor_cstate_check(unsigned int max_cstate)
 	    boot_cpu_data.x86_model <= 0x05 &&
 	    boot_cpu_data.x86_mask < 0x0A)
 		return 1;
-	else if (c1e_detected)
+	else if (amd_e400_c1e_detected)
 		return 1;
 	else
 		return max_cstate;
diff --git a/arch/x86/include/asm/idle.h b/arch/x86/include/asm/idle.h
index 38d87379e270..f49253d75710 100644
--- a/arch/x86/include/asm/idle.h
+++ b/arch/x86/include/asm/idle.h
@@ -16,6 +16,6 @@ static inline void enter_idle(void) { }
 static inline void exit_idle(void) { }
 #endif /* CONFIG_X86_64 */
 
-void c1e_remove_cpu(int cpu);
+void amd_e400_remove_cpu(int cpu);
 
 #endif /* _ASM_X86_IDLE_H */
diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
index 45636cefa186..b9c03fb3369a 100644
--- a/arch/x86/include/asm/processor.h
+++ b/arch/x86/include/asm/processor.h
@@ -758,10 +758,10 @@ static inline void __sti_mwait(unsigned long eax, unsigned long ecx)
 extern void mwait_idle_with_hints(unsigned long eax, unsigned long ecx);
 
 extern void select_idle_routine(const struct cpuinfo_x86 *c);
-extern void init_c1e_mask(void);
+extern void init_amd_e400_c1e_mask(void);
 
 extern unsigned long boot_option_idle_override;
-extern bool c1e_detected;
+extern bool amd_e400_c1e_detected;
 
 enum idle_boot_override {IDLE_NO_OVERRIDE=0, IDLE_HALT, IDLE_NOMWAIT,
 			 IDLE_POLL, IDLE_FORCE_MWAIT};
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index 1d59834396bd..745a602f204f 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -887,7 +887,7 @@ static void vgetcpu_set_mode(void)
 void __init identify_boot_cpu(void)
 {
 	identify_cpu(&boot_cpu_data);
-	init_c1e_mask();
+	init_amd_e400_c1e_mask();
 #ifdef CONFIG_X86_32
 	sysenter_setup();
 	enable_sep_cpu();
diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
index ff4554198981..2efbfb712fb7 100644
--- a/arch/x86/kernel/process.c
+++ b/arch/x86/kernel/process.c
@@ -538,45 +538,45 @@ int mwait_usable(const struct cpuinfo_x86 *c)
 	return (edx & MWAIT_EDX_C1);
 }
 
-bool c1e_detected;
-EXPORT_SYMBOL(c1e_detected);
+bool amd_e400_c1e_detected;
+EXPORT_SYMBOL(amd_e400_c1e_detected);
 
-static cpumask_var_t c1e_mask;
+static cpumask_var_t amd_e400_c1e_mask;
 
-void c1e_remove_cpu(int cpu)
+void amd_e400_remove_cpu(int cpu)
 {
-	if (c1e_mask != NULL)
-		cpumask_clear_cpu(cpu, c1e_mask);
+	if (amd_e400_c1e_mask != NULL)
+		cpumask_clear_cpu(cpu, amd_e400_c1e_mask);
 }
 
 /*
- * C1E aware idle routine. We check for C1E active in the interrupt
+ * AMD Erratum 400 aware idle routine. We check for C1E active in the interrupt
  * pending message MSR. If we detect C1E, then we handle it the same
  * way as C3 power states (local apic timer and TSC stop)
  */
-static void c1e_idle(void)
+static void amd_e400_idle(void)
 {
 	if (need_resched())
 		return;
 
-	if (!c1e_detected) {
+	if (!amd_e400_c1e_detected) {
 		u32 lo, hi;
 
 		rdmsr(MSR_K8_INT_PENDING_MSG, lo, hi);
 
 		if (lo & K8_INTP_C1E_ACTIVE_MASK) {
-			c1e_detected = true;
+			amd_e400_c1e_detected = true;
 			if (!boot_cpu_has(X86_FEATURE_NONSTOP_TSC))
 				mark_tsc_unstable("TSC halt in AMD C1E");
 			printk(KERN_INFO "System has AMD C1E enabled\n");
 		}
 	}
 
-	if (c1e_detected) {
+	if (amd_e400_c1e_detected) {
 		int cpu = smp_processor_id();
 
-		if (!cpumask_test_cpu(cpu, c1e_mask)) {
-			cpumask_set_cpu(cpu, c1e_mask);
+		if (!cpumask_test_cpu(cpu, amd_e400_c1e_mask)) {
+			cpumask_set_cpu(cpu, amd_e400_c1e_mask);
 			/*
 			 * Force broadcast so ACPI can not interfere.
 			 */
@@ -619,17 +619,17 @@ void __cpuinit select_idle_routine(const struct cpuinfo_x86 *c)
 		pm_idle = mwait_idle;
 	} else if (cpu_has_amd_erratum(amd_erratum_400)) {
 		/* E400: APIC timer interrupt does not wake up CPU from C1e */
-		printk(KERN_INFO "using C1E aware idle routine\n");
-		pm_idle = c1e_idle;
+		printk(KERN_INFO "using AMD E400 aware idle routine\n");
+		pm_idle = amd_e400_idle;
 	} else
 		pm_idle = default_idle;
 }
 
-void __init init_c1e_mask(void)
+void __init init_amd_e400_c1e_mask(void)
 {
-	/* If we're using c1e_idle, we need to allocate c1e_mask. */
-	if (pm_idle == c1e_idle)
-		zalloc_cpumask_var(&c1e_mask, GFP_KERNEL);
+	/* If we're using amd_e400_idle, we need to allocate amd_e400_c1e_mask. */
+	if (pm_idle == amd_e400_idle)
+		zalloc_cpumask_var(&amd_e400_c1e_mask, GFP_KERNEL);
 }
 
 static int __init idle_setup(char *str)
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index 08776a953487..2c33633595cc 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -1379,7 +1379,7 @@ void play_dead_common(void)
 {
 	idle_task_exit();
 	reset_lazy_tlbstate();
-	c1e_remove_cpu(raw_smp_processor_id());
+	amd_e400_remove_cpu(raw_smp_processor_id());
 
 	mb();
 	/* Ack it */