aboutsummaryrefslogtreecommitdiffstats
path: root/arch/x86/kernel/tsc.c
diff options
context:
space:
mode:
Diffstat (limited to 'arch/x86/kernel/tsc.c')
-rw-r--r--arch/x86/kernel/tsc.c58
1 files changed, 0 insertions, 58 deletions
diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c
index a1c2cd768538..0c40d8b72416 100644
--- a/arch/x86/kernel/tsc.c
+++ b/arch/x86/kernel/tsc.c
@@ -897,60 +897,6 @@ static void __init init_tsc_clocksource(void)
897 897 	clocksource_register_khz(&clocksource_tsc, tsc_khz);
898 898 }
899 899 
900#ifdef CONFIG_X86_64
901/*
902 * calibrate_cpu is used on systems with fixed rate TSCs to determine
903 * processor frequency
904 */
905#define TICK_COUNT 100000000
906static unsigned long __init calibrate_cpu(void)
907{
908 int tsc_start, tsc_now;
909 int i, no_ctr_free;
910 unsigned long evntsel3 = 0, pmc3 = 0, pmc_now = 0;
911 unsigned long flags;
912
913 for (i = 0; i < 4; i++)
914 if (avail_to_resrv_perfctr_nmi_bit(i))
915 break;
916 no_ctr_free = (i == 4);
917 if (no_ctr_free) {
918 WARN(1, KERN_WARNING "Warning: AMD perfctrs busy ... "
919 "cpu_khz value may be incorrect.\n");
920 i = 3;
921 rdmsrl(MSR_K7_EVNTSEL3, evntsel3);
922 wrmsrl(MSR_K7_EVNTSEL3, 0);
923 rdmsrl(MSR_K7_PERFCTR3, pmc3);
924 } else {
925 reserve_perfctr_nmi(MSR_K7_PERFCTR0 + i);
926 reserve_evntsel_nmi(MSR_K7_EVNTSEL0 + i);
927 }
928 local_irq_save(flags);
929 /* start measuring cycles, incrementing from 0 */
930 wrmsrl(MSR_K7_PERFCTR0 + i, 0);
931 wrmsrl(MSR_K7_EVNTSEL0 + i, 1 << 22 | 3 << 16 | 0x76);
932 rdtscl(tsc_start);
933 do {
934 rdmsrl(MSR_K7_PERFCTR0 + i, pmc_now);
935 tsc_now = get_cycles();
936 } while ((tsc_now - tsc_start) < TICK_COUNT);
937
938 local_irq_restore(flags);
939 if (no_ctr_free) {
940 wrmsrl(MSR_K7_EVNTSEL3, 0);
941 wrmsrl(MSR_K7_PERFCTR3, pmc3);
942 wrmsrl(MSR_K7_EVNTSEL3, evntsel3);
943 } else {
944 release_perfctr_nmi(MSR_K7_PERFCTR0 + i);
945 release_evntsel_nmi(MSR_K7_EVNTSEL0 + i);
946 }
947
948 return pmc_now * tsc_khz / (tsc_now - tsc_start);
949}
950#else
951static inline unsigned long calibrate_cpu(void) { return cpu_khz; }
952#endif
953
954 900 void __init tsc_init(void)
955 901 {
956 902 	u64 lpj;
@@ -969,10 +915,6 @@ void __init tsc_init(void)
969 915 		return;
970 916 	}
971 917 
972 if (cpu_has(&boot_cpu_data, X86_FEATURE_CONSTANT_TSC) &&
973 (boot_cpu_data.x86_vendor == X86_VENDOR_AMD))
974 cpu_khz = calibrate_cpu();
975
976 918 	printk("Detected %lu.%03lu MHz processor.\n",
977 919 		(unsigned long)cpu_khz / 1000,
978 920 		(unsigned long)cpu_khz % 1000);