Diffstat (limited to 'arch/x86/kernel/tsc.c')
 arch/x86/kernel/tsc.c | 58 --------------------------------------------------
 1 file changed, 0 insertions(+), 58 deletions(-)
diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c
index 26a863a9c2a8..4496315eb224 100644
--- a/arch/x86/kernel/tsc.c
+++ b/arch/x86/kernel/tsc.c
@@ -892,60 +892,6 @@ static void __init init_tsc_clocksource(void)
 	clocksource_register_khz(&clocksource_tsc, tsc_khz);
 }
 
-#ifdef CONFIG_X86_64
-/*
- * calibrate_cpu is used on systems with fixed rate TSCs to determine
- * processor frequency
- */
-#define TICK_COUNT 100000000
-static unsigned long __init calibrate_cpu(void)
-{
-	int tsc_start, tsc_now;
-	int i, no_ctr_free;
-	unsigned long evntsel3 = 0, pmc3 = 0, pmc_now = 0;
-	unsigned long flags;
-
-	for (i = 0; i < 4; i++)
-		if (avail_to_resrv_perfctr_nmi_bit(i))
-			break;
-	no_ctr_free = (i == 4);
-	if (no_ctr_free) {
-		WARN(1, KERN_WARNING "Warning: AMD perfctrs busy ... "
-		     "cpu_khz value may be incorrect.\n");
-		i = 3;
-		rdmsrl(MSR_K7_EVNTSEL3, evntsel3);
-		wrmsrl(MSR_K7_EVNTSEL3, 0);
-		rdmsrl(MSR_K7_PERFCTR3, pmc3);
-	} else {
-		reserve_perfctr_nmi(MSR_K7_PERFCTR0 + i);
-		reserve_evntsel_nmi(MSR_K7_EVNTSEL0 + i);
-	}
-	local_irq_save(flags);
-	/* start measuring cycles, incrementing from 0 */
-	wrmsrl(MSR_K7_PERFCTR0 + i, 0);
-	wrmsrl(MSR_K7_EVNTSEL0 + i, 1 << 22 | 3 << 16 | 0x76);
-	rdtscl(tsc_start);
-	do {
-		rdmsrl(MSR_K7_PERFCTR0 + i, pmc_now);
-		tsc_now = get_cycles();
-	} while ((tsc_now - tsc_start) < TICK_COUNT);
-
-	local_irq_restore(flags);
-	if (no_ctr_free) {
-		wrmsrl(MSR_K7_EVNTSEL3, 0);
-		wrmsrl(MSR_K7_PERFCTR3, pmc3);
-		wrmsrl(MSR_K7_EVNTSEL3, evntsel3);
-	} else {
-		release_perfctr_nmi(MSR_K7_PERFCTR0 + i);
-		release_evntsel_nmi(MSR_K7_EVNTSEL0 + i);
-	}
-
-	return pmc_now * tsc_khz / (tsc_now - tsc_start);
-}
-#else
-static inline unsigned long calibrate_cpu(void) { return cpu_khz; }
-#endif
-
 void __init tsc_init(void)
 {
 	u64 lpj;
@@ -964,10 +910,6 @@ void __init tsc_init(void)
 		return;
 	}
 
-	if (cpu_has(&boot_cpu_data, X86_FEATURE_CONSTANT_TSC) &&
-	    (boot_cpu_data.x86_vendor == X86_VENDOR_AMD))
-		cpu_khz = calibrate_cpu();
-
 	printk("Detected %lu.%03lu MHz processor.\n",
 		(unsigned long)cpu_khz / 1000,
 		(unsigned long)cpu_khz % 1000);
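For reference, the removed calibrate_cpu() derived the core clock as a ratio: it programmed a free AMD performance counter with event 0x76 (CPU clocks not halted), let it run alongside the TSC for TICK_COUNT TSC ticks with interrupts off, then scaled the already-known tsc_khz by counted cycles over elapsed TSC ticks, per the removed return statement pmc_now * tsc_khz / (tsc_now - tsc_start). Below is a minimal user-space sketch of just that arithmetic; the sample readings are hypothetical stand-ins for the PMC and TSC values the kernel actually measured, and no MSRs are touched.

#include <stdio.h>

int main(void)
{
	/* Hypothetical readings; the kernel obtained these from an AMD
	 * performance counter (event 0x76) and the TSC. */
	unsigned long long tsc_khz   = 2200000;   /* known TSC rate, 2.2 GHz */
	unsigned long long tsc_delta = 100000000; /* TSC ticks elapsed (TICK_COUNT) */
	unsigned long long pmc_delta = 100000000; /* unhalted core cycles counted */

	/* Same ratio as the removed code's return value:
	 * cpu_khz = pmc_now * tsc_khz / (tsc_now - tsc_start) */
	unsigned long long cpu_khz = pmc_delta * tsc_khz / tsc_delta;

	printf("cpu_khz = %llu\n", cpu_khz);
	return 0;
}

When the core runs at the same rate as the TSC, the two counts advance 1:1, so the ratio is 1 and the computed cpu_khz simply reproduces tsc_khz (2200000 above); the result only differs when the core clock and the TSC tick at different rates.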
