aboutsummaryrefslogtreecommitdiffstats
path: root/arch/x86/kernel/tsc.c
diff options
context:
space:
mode:
Diffstat (limited to 'arch/x86/kernel/tsc.c')
-rw-r--r--  arch/x86/kernel/tsc.c | 58 ----------------------------------------
 1 file changed, 0 insertions(+), 58 deletions(-)
diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c
index ce8e50239332..13b6a6cc77f2 100644
--- a/arch/x86/kernel/tsc.c
+++ b/arch/x86/kernel/tsc.c
@@ -854,60 +854,6 @@ static void __init init_tsc_clocksource(void)
 	clocksource_register_khz(&clocksource_tsc, tsc_khz);
 }
 
-#ifdef CONFIG_X86_64
-/*
- * calibrate_cpu is used on systems with fixed rate TSCs to determine
- * processor frequency
- */
-#define TICK_COUNT 100000000
-static unsigned long __init calibrate_cpu(void)
-{
-	int tsc_start, tsc_now;
-	int i, no_ctr_free;
-	unsigned long evntsel3 = 0, pmc3 = 0, pmc_now = 0;
-	unsigned long flags;
-
-	for (i = 0; i < 4; i++)
-		if (avail_to_resrv_perfctr_nmi_bit(i))
-			break;
-	no_ctr_free = (i == 4);
-	if (no_ctr_free) {
-		WARN(1, KERN_WARNING "Warning: AMD perfctrs busy ... "
-		     "cpu_khz value may be incorrect.\n");
-		i = 3;
-		rdmsrl(MSR_K7_EVNTSEL3, evntsel3);
-		wrmsrl(MSR_K7_EVNTSEL3, 0);
-		rdmsrl(MSR_K7_PERFCTR3, pmc3);
-	} else {
-		reserve_perfctr_nmi(MSR_K7_PERFCTR0 + i);
-		reserve_evntsel_nmi(MSR_K7_EVNTSEL0 + i);
-	}
-	local_irq_save(flags);
-	/* start measuring cycles, incrementing from 0 */
-	wrmsrl(MSR_K7_PERFCTR0 + i, 0);
-	wrmsrl(MSR_K7_EVNTSEL0 + i, 1 << 22 | 3 << 16 | 0x76);
-	rdtscl(tsc_start);
-	do {
-		rdmsrl(MSR_K7_PERFCTR0 + i, pmc_now);
-		tsc_now = get_cycles();
-	} while ((tsc_now - tsc_start) < TICK_COUNT);
-
-	local_irq_restore(flags);
-	if (no_ctr_free) {
-		wrmsrl(MSR_K7_EVNTSEL3, 0);
-		wrmsrl(MSR_K7_PERFCTR3, pmc3);
-		wrmsrl(MSR_K7_EVNTSEL3, evntsel3);
-	} else {
-		release_perfctr_nmi(MSR_K7_PERFCTR0 + i);
-		release_evntsel_nmi(MSR_K7_EVNTSEL0 + i);
-	}
-
-	return pmc_now * tsc_khz / (tsc_now - tsc_start);
-}
-#else
-static inline unsigned long calibrate_cpu(void) { return cpu_khz; }
-#endif
-
 void __init tsc_init(void)
 {
 	u64 lpj;
@@ -926,10 +872,6 @@ void __init tsc_init(void)
 		return;
 	}
 
-	if (cpu_has(&boot_cpu_data, X86_FEATURE_CONSTANT_TSC) &&
-	    (boot_cpu_data.x86_vendor == X86_VENDOR_AMD))
-		cpu_khz = calibrate_cpu();
-
 	printk("Detected %lu.%03lu MHz processor.\n",
 		(unsigned long)cpu_khz / 1000,
 		(unsigned long)cpu_khz % 1000);