aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorBorislav Petkov <bp@amd64.org>2010-08-25 12:28:23 -0400
committerH. Peter Anvin <hpa@linux.intel.com>2010-08-25 16:32:52 -0400
commitacf01734b1747b1ec4be6f159aff579ea5f7f8e2 (patch)
tree19763f3e002cd2f005009b7eebb1e31c30d1c9f1
parent76be97c1fc945db08aae1f1b746012662d643e97 (diff)
x86, tsc: Remove CPU frequency calibration on AMD
6b37f5a20c0e5c334c010a587058354215433e92 introduced the CPU frequency calibration code for AMD CPUs whose TSCs didn't increment with the core's P0 frequency. From F10h, revB onward, however, the TSC increment rate is denoted by MSRC001_0015[24] and when this bit is set (which should be done by the BIOS) the TSC increments with the P0 frequency so the calibration is not needed and booting can be a couple of msecs faster on those machines. Besides, there should be virtually no machines out there which don't have this bit set, therefore this calibration can be safely removed. It is a shaky hack anyway since it assumes implicitly that the core is in P0 when BIOS hands off to the OS, which might not always be the case. Signed-off-by: Borislav Petkov <borislav.petkov@amd.com> LKML-Reference: <20100825162823.GE26438@aftab> Signed-off-by: H. Peter Anvin <hpa@linux.intel.com>
-rw-r--r--arch/x86/kernel/cpu/amd.c17
-rw-r--r--arch/x86/kernel/tsc.c58
2 files changed, 17 insertions, 58 deletions
diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
index ba5f62f45f01..fc563fabde67 100644
--- a/arch/x86/kernel/cpu/amd.c
+++ b/arch/x86/kernel/cpu/amd.c
@@ -412,6 +412,23 @@ static void __cpuinit early_init_amd(struct cpuinfo_x86 *c)
412 set_cpu_cap(c, X86_FEATURE_EXTD_APICID); 412 set_cpu_cap(c, X86_FEATURE_EXTD_APICID);
413 } 413 }
414#endif 414#endif
415
416 /* We need to do the following only once */
417 if (c != &boot_cpu_data)
418 return;
419
420 if (cpu_has(c, X86_FEATURE_CONSTANT_TSC)) {
421
422 if (c->x86 > 0x10 ||
423 (c->x86 == 0x10 && c->x86_model >= 0x2)) {
424 u64 val;
425
426 rdmsrl(MSR_K7_HWCR, val);
427 if (!(val & BIT(24)))
428 printk(KERN_WARNING FW_BUG "TSC doesn't count "
429 "with P0 frequency!\n");
430 }
431 }
415} 432}
416 433
417static void __cpuinit init_amd(struct cpuinfo_x86 *c) 434static void __cpuinit init_amd(struct cpuinfo_x86 *c)
diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c
index ce8e50239332..13b6a6cc77f2 100644
--- a/arch/x86/kernel/tsc.c
+++ b/arch/x86/kernel/tsc.c
@@ -854,60 +854,6 @@ static void __init init_tsc_clocksource(void)
854 clocksource_register_khz(&clocksource_tsc, tsc_khz); 854 clocksource_register_khz(&clocksource_tsc, tsc_khz);
855} 855}
856 856
857#ifdef CONFIG_X86_64
858/*
859 * calibrate_cpu is used on systems with fixed rate TSCs to determine
860 * processor frequency
861 */
862#define TICK_COUNT 100000000
863static unsigned long __init calibrate_cpu(void)
864{
865 int tsc_start, tsc_now;
866 int i, no_ctr_free;
867 unsigned long evntsel3 = 0, pmc3 = 0, pmc_now = 0;
868 unsigned long flags;
869
870 for (i = 0; i < 4; i++)
871 if (avail_to_resrv_perfctr_nmi_bit(i))
872 break;
873 no_ctr_free = (i == 4);
874 if (no_ctr_free) {
875 WARN(1, KERN_WARNING "Warning: AMD perfctrs busy ... "
876 "cpu_khz value may be incorrect.\n");
877 i = 3;
878 rdmsrl(MSR_K7_EVNTSEL3, evntsel3);
879 wrmsrl(MSR_K7_EVNTSEL3, 0);
880 rdmsrl(MSR_K7_PERFCTR3, pmc3);
881 } else {
882 reserve_perfctr_nmi(MSR_K7_PERFCTR0 + i);
883 reserve_evntsel_nmi(MSR_K7_EVNTSEL0 + i);
884 }
885 local_irq_save(flags);
886 /* start measuring cycles, incrementing from 0 */
887 wrmsrl(MSR_K7_PERFCTR0 + i, 0);
888 wrmsrl(MSR_K7_EVNTSEL0 + i, 1 << 22 | 3 << 16 | 0x76);
889 rdtscl(tsc_start);
890 do {
891 rdmsrl(MSR_K7_PERFCTR0 + i, pmc_now);
892 tsc_now = get_cycles();
893 } while ((tsc_now - tsc_start) < TICK_COUNT);
894
895 local_irq_restore(flags);
896 if (no_ctr_free) {
897 wrmsrl(MSR_K7_EVNTSEL3, 0);
898 wrmsrl(MSR_K7_PERFCTR3, pmc3);
899 wrmsrl(MSR_K7_EVNTSEL3, evntsel3);
900 } else {
901 release_perfctr_nmi(MSR_K7_PERFCTR0 + i);
902 release_evntsel_nmi(MSR_K7_EVNTSEL0 + i);
903 }
904
905 return pmc_now * tsc_khz / (tsc_now - tsc_start);
906}
907#else
908static inline unsigned long calibrate_cpu(void) { return cpu_khz; }
909#endif
910
911void __init tsc_init(void) 857void __init tsc_init(void)
912{ 858{
913 u64 lpj; 859 u64 lpj;
@@ -926,10 +872,6 @@ void __init tsc_init(void)
926 return; 872 return;
927 } 873 }
928 874
929 if (cpu_has(&boot_cpu_data, X86_FEATURE_CONSTANT_TSC) &&
930 (boot_cpu_data.x86_vendor == X86_VENDOR_AMD))
931 cpu_khz = calibrate_cpu();
932
933 printk("Detected %lu.%03lu MHz processor.\n", 875 printk("Detected %lu.%03lu MHz processor.\n",
934 (unsigned long)cpu_khz / 1000, 876 (unsigned long)cpu_khz / 1000,
935 (unsigned long)cpu_khz % 1000); 877 (unsigned long)cpu_khz % 1000);