author     Linus Torvalds <torvalds@linux-foundation.org>   2010-10-21 16:01:08 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>   2010-10-21 16:01:08 -0400
commit     2f0384e5fc4766ad909597547d0e2b716c036755 (patch)
tree       bf965a4bee85fa09edec91772647fbc5aafa0fc4 /arch/x86/kernel/tsc.c
parent     bc4016f48161454a9a8e5eb209b0693c6cde9f62 (diff)
parent     5c80cc78de46aef6cd5e714208da05c3f7f548f8 (diff)
Merge branch 'x86-amd-nb-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip
* 'x86-amd-nb-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip:
  x86, amd_nb: Enable GART support for AMD family 0x15 CPUs
  x86, amd: Use compute unit information to determine thread siblings
  x86, amd: Extract compute unit information for AMD CPUs
  x86, amd: Add support for CPUID topology extension of AMD CPUs
  x86, nmi: Support NMI watchdog on newer AMD CPU families
  x86, mtrr: Assume SYS_CFG[Tom2ForceMemTypeWB] exists on all future AMD CPUs
  x86, k8: Rename k8.[ch] to amd_nb.[ch] and CONFIG_K8_NB to CONFIG_AMD_NB
  x86, k8-gart: Decouple handling of garts and northbridges
  x86, cacheinfo: Fix dependency of AMD L3 CID
  x86, kvm: add new AMD SVM feature bits
  x86, cpu: Fix allowed CPUID bits for KVM guests
  x86, cpu: Update AMD CPUID feature bits
  x86, cpu: Fix renamed, not-yet-shipping AMD CPUID feature bit
  x86, AMD: Remove needless CPU family check (for L3 cache info)
  x86, tsc: Remove CPU frequency calibration on AMD
Diffstat (limited to 'arch/x86/kernel/tsc.c')
-rw-r--r--  arch/x86/kernel/tsc.c  |  58 ----
1 file changed, 0 insertions(+), 58 deletions(-)
diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c
index a1c2cd768538..0c40d8b72416 100644
--- a/arch/x86/kernel/tsc.c
+++ b/arch/x86/kernel/tsc.c
@@ -897,60 +897,6 @@ static void __init init_tsc_clocksource(void)
 	clocksource_register_khz(&clocksource_tsc, tsc_khz);
 }
 
-#ifdef CONFIG_X86_64
-/*
- * calibrate_cpu is used on systems with fixed rate TSCs to determine
- * processor frequency
- */
-#define TICK_COUNT 100000000
-static unsigned long __init calibrate_cpu(void)
-{
-	int tsc_start, tsc_now;
-	int i, no_ctr_free;
-	unsigned long evntsel3 = 0, pmc3 = 0, pmc_now = 0;
-	unsigned long flags;
-
-	for (i = 0; i < 4; i++)
-		if (avail_to_resrv_perfctr_nmi_bit(i))
-			break;
-	no_ctr_free = (i == 4);
-	if (no_ctr_free) {
-		WARN(1, KERN_WARNING "Warning: AMD perfctrs busy ... "
-		     "cpu_khz value may be incorrect.\n");
-		i = 3;
-		rdmsrl(MSR_K7_EVNTSEL3, evntsel3);
-		wrmsrl(MSR_K7_EVNTSEL3, 0);
-		rdmsrl(MSR_K7_PERFCTR3, pmc3);
-	} else {
-		reserve_perfctr_nmi(MSR_K7_PERFCTR0 + i);
-		reserve_evntsel_nmi(MSR_K7_EVNTSEL0 + i);
-	}
-	local_irq_save(flags);
-	/* start measuring cycles, incrementing from 0 */
-	wrmsrl(MSR_K7_PERFCTR0 + i, 0);
-	wrmsrl(MSR_K7_EVNTSEL0 + i, 1 << 22 | 3 << 16 | 0x76);
-	rdtscl(tsc_start);
-	do {
-		rdmsrl(MSR_K7_PERFCTR0 + i, pmc_now);
-		tsc_now = get_cycles();
-	} while ((tsc_now - tsc_start) < TICK_COUNT);
-
-	local_irq_restore(flags);
-	if (no_ctr_free) {
-		wrmsrl(MSR_K7_EVNTSEL3, 0);
-		wrmsrl(MSR_K7_PERFCTR3, pmc3);
-		wrmsrl(MSR_K7_EVNTSEL3, evntsel3);
-	} else {
-		release_perfctr_nmi(MSR_K7_PERFCTR0 + i);
-		release_evntsel_nmi(MSR_K7_EVNTSEL0 + i);
-	}
-
-	return pmc_now * tsc_khz / (tsc_now - tsc_start);
-}
-#else
-static inline unsigned long calibrate_cpu(void) { return cpu_khz; }
-#endif
-
 void __init tsc_init(void)
 {
 	u64 lpj;
@@ -969,10 +915,6 @@ void __init tsc_init(void)
 		return;
 	}
 
-	if (cpu_has(&boot_cpu_data, X86_FEATURE_CONSTANT_TSC) &&
-	    (boot_cpu_data.x86_vendor == X86_VENDOR_AMD))
-		cpu_khz = calibrate_cpu();
-
 	printk("Detected %lu.%03lu MHz processor.\n",
 		(unsigned long)cpu_khz / 1000,
 		(unsigned long)cpu_khz % 1000);