author    Len Brown <len.brown@intel.com>    2011-04-01 16:59:53 -0400
committer Len Brown <len.brown@intel.com>    2011-05-29 03:38:57 -0400
commit    02c68a02018669d1817c43c42de800975cbec467 (patch)
tree      9e02b7913f650492fcdbb78cd6e8d3ff3160e43c /arch/x86/kernel
parent    333c5ae9948194428fe6c5ef5c088304fc98263b (diff)
x86 idle: clarify AMD erratum 400 workaround
The workaround for AMD erratum 400 uses the term "c1e" falsely suggesting:

1. Intel C1E is somehow involved
2. All AMD processors with C1E are involved

Use the string "amd_c1e" instead of simply "c1e" to clarify that this
workaround is specific to AMD's version of C1E. Use the string "e400"
to clarify that the workaround is specific to AMD processors with
Erratum 400.

This patch is text-substitution only, with no functional change.

cc: x86@kernel.org
Acked-by: Borislav Petkov <borislav.petkov@amd.com>
Signed-off-by: Len Brown <len.brown@intel.com>
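For quick orientation, the detection that this rename is about boils down to a single MSR bit test: the idle path reads MSR_K8_INT_PENDING_MSG and, if the C1E-active bits are set, treats the core like it is entering a C3-class state (local APIC timer and TSC may stop). Below is a minimal, self-contained sketch of just that bit test; the mask value and the sample input are illustrative assumptions rather than values taken from this patch, and the authoritative logic remains amd_e400_idle() in the process.c hunk further down.

/* Stand-alone illustration of the E400/C1E "active" bit test.
 * The mask value is an assumption for demonstration purposes and the
 * sample MSR word is made up; see amd_e400_idle() below for the real code. */
#include <stdint.h>
#include <stdio.h>

#define K8_INTP_C1E_ACTIVE_MASK 0x18000000u	/* assumed: bits 27 and 28 of the low word */

static int amd_c1e_active(uint32_t msr_lo)
{
	/* Non-zero when the interrupt pending message MSR reports C1E activity. */
	return (msr_lo & K8_INTP_C1E_ACTIVE_MASK) != 0;
}

int main(void)
{
	uint32_t sample_lo = 0x18000000u;	/* hypothetical low word of MSR_K8_INT_PENDING_MSG */

	printf("AMD C1E active: %s\n", amd_c1e_active(sample_lo) ? "yes" : "no");
	return 0;
}

Once the bit is seen, the kernel marks the TSC unstable (unless X86_FEATURE_NONSTOP_TSC is present) and forces broadcast timer mode, which is exactly what the renamed amd_e400_idle() does in the diff below.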
Diffstat (limited to 'arch/x86/kernel')
-rw-r--r--  arch/x86/kernel/cpu/common.c   2
-rw-r--r--  arch/x86/kernel/process.c     38
-rw-r--r--  arch/x86/kernel/smpboot.c      2
3 files changed, 21 insertions(+), 21 deletions(-)
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index 1d59834396bd..745a602f204f 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -887,7 +887,7 @@ static void vgetcpu_set_mode(void)
 void __init identify_boot_cpu(void)
 {
         identify_cpu(&boot_cpu_data);
-        init_c1e_mask();
+        init_amd_e400_c1e_mask();
 #ifdef CONFIG_X86_32
         sysenter_setup();
         enable_sep_cpu();
diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
index ff4554198981..2efbfb712fb7 100644
--- a/arch/x86/kernel/process.c
+++ b/arch/x86/kernel/process.c
@@ -538,45 +538,45 @@ int mwait_usable(const struct cpuinfo_x86 *c)
         return (edx & MWAIT_EDX_C1);
 }
 
-bool c1e_detected;
-EXPORT_SYMBOL(c1e_detected);
+bool amd_e400_c1e_detected;
+EXPORT_SYMBOL(amd_e400_c1e_detected);
 
-static cpumask_var_t c1e_mask;
+static cpumask_var_t amd_e400_c1e_mask;
 
-void c1e_remove_cpu(int cpu)
+void amd_e400_remove_cpu(int cpu)
 {
-        if (c1e_mask != NULL)
-                cpumask_clear_cpu(cpu, c1e_mask);
+        if (amd_e400_c1e_mask != NULL)
+                cpumask_clear_cpu(cpu, amd_e400_c1e_mask);
 }
 
 /*
- * C1E aware idle routine. We check for C1E active in the interrupt
+ * AMD Erratum 400 aware idle routine. We check for C1E active in the interrupt
  * pending message MSR. If we detect C1E, then we handle it the same
  * way as C3 power states (local apic timer and TSC stop)
  */
-static void c1e_idle(void)
+static void amd_e400_idle(void)
 {
         if (need_resched())
                 return;
 
-        if (!c1e_detected) {
+        if (!amd_e400_c1e_detected) {
                 u32 lo, hi;
 
                 rdmsr(MSR_K8_INT_PENDING_MSG, lo, hi);
 
                 if (lo & K8_INTP_C1E_ACTIVE_MASK) {
-                        c1e_detected = true;
+                        amd_e400_c1e_detected = true;
                         if (!boot_cpu_has(X86_FEATURE_NONSTOP_TSC))
                                 mark_tsc_unstable("TSC halt in AMD C1E");
                         printk(KERN_INFO "System has AMD C1E enabled\n");
                 }
         }
 
-        if (c1e_detected) {
+        if (amd_e400_c1e_detected) {
                 int cpu = smp_processor_id();
 
-                if (!cpumask_test_cpu(cpu, c1e_mask)) {
-                        cpumask_set_cpu(cpu, c1e_mask);
+                if (!cpumask_test_cpu(cpu, amd_e400_c1e_mask)) {
+                        cpumask_set_cpu(cpu, amd_e400_c1e_mask);
                         /*
                          * Force broadcast so ACPI can not interfere.
                          */
@@ -619,17 +619,17 @@ void __cpuinit select_idle_routine(const struct cpuinfo_x86 *c)
                 pm_idle = mwait_idle;
         } else if (cpu_has_amd_erratum(amd_erratum_400)) {
                 /* E400: APIC timer interrupt does not wake up CPU from C1e */
-                printk(KERN_INFO "using C1E aware idle routine\n");
-                pm_idle = c1e_idle;
+                printk(KERN_INFO "using AMD E400 aware idle routine\n");
+                pm_idle = amd_e400_idle;
         } else
                 pm_idle = default_idle;
 }
 
-void __init init_c1e_mask(void)
+void __init init_amd_e400_c1e_mask(void)
 {
-        /* If we're using c1e_idle, we need to allocate c1e_mask. */
-        if (pm_idle == c1e_idle)
-                zalloc_cpumask_var(&c1e_mask, GFP_KERNEL);
+        /* If we're using amd_e400_idle, we need to allocate amd_e400_c1e_mask. */
+        if (pm_idle == amd_e400_idle)
+                zalloc_cpumask_var(&amd_e400_c1e_mask, GFP_KERNEL);
 }
 
 static int __init idle_setup(char *str)
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index 08776a953487..2c33633595cc 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -1379,7 +1379,7 @@ void play_dead_common(void)
 {
         idle_task_exit();
         reset_lazy_tlbstate();
-        c1e_remove_cpu(raw_smp_processor_id());
+        amd_e400_remove_cpu(raw_smp_processor_id());
 
         mb();
         /* Ack it */