Diffstat (limited to 'arch/x86/kernel/tsc.c')
 -rw-r--r--  arch/x86/kernel/tsc.c | 86
 1 file changed, 72 insertions(+), 14 deletions(-)
diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c
index 71f4368b357e..17409e8d1097 100644
--- a/arch/x86/kernel/tsc.c
+++ b/arch/x86/kernel/tsc.c
@@ -17,6 +17,8 @@
 #include <asm/time.h>
 #include <asm/delay.h>
 #include <asm/hypervisor.h>
+#include <asm/nmi.h>
+#include <asm/x86_init.h>
 
 unsigned int __read_mostly cpu_khz;	/* TSC clocks / usec, not used here */
 EXPORT_SYMBOL(cpu_khz);
@@ -400,15 +402,9 @@ unsigned long native_calibrate_tsc(void)
 {
 	u64 tsc1, tsc2, delta, ref1, ref2;
 	unsigned long tsc_pit_min = ULONG_MAX, tsc_ref_min = ULONG_MAX;
-	unsigned long flags, latch, ms, fast_calibrate, hv_tsc_khz;
+	unsigned long flags, latch, ms, fast_calibrate;
 	int hpet = is_hpet_enabled(), i, loopmin;
 
-	hv_tsc_khz = get_hypervisor_tsc_freq();
-	if (hv_tsc_khz) {
-		printk(KERN_INFO "TSC: Frequency read from the hypervisor\n");
-		return hv_tsc_khz;
-	}
-
 	local_irq_save(flags);
 	fast_calibrate = quick_pit_calibrate();
 	local_irq_restore(flags);
@@ -566,7 +562,7 @@ int recalibrate_cpu_khz(void)
 	unsigned long cpu_khz_old = cpu_khz;
 
 	if (cpu_has_tsc) {
-		tsc_khz = calibrate_tsc();
+		tsc_khz = x86_platform.calibrate_tsc();
 		cpu_khz = tsc_khz;
 		cpu_data(0).loops_per_jiffy =
 			cpufreq_scale(cpu_data(0).loops_per_jiffy,
@@ -744,10 +740,16 @@ static cycle_t __vsyscall_fn vread_tsc(void)
 }
 #endif
 
+static void resume_tsc(void)
+{
+	clocksource_tsc.cycle_last = 0;
+}
+
 static struct clocksource clocksource_tsc = {
 	.name                   = "tsc",
 	.rating                 = 300,
 	.read                   = read_tsc,
+	.resume                 = resume_tsc,
 	.mask                   = CLOCKSOURCE_MASK(64),
 	.shift                  = 22,
 	.flags                  = CLOCK_SOURCE_IS_CONTINUOUS |
@@ -761,12 +763,14 @@ void mark_tsc_unstable(char *reason)
 {
 	if (!tsc_unstable) {
 		tsc_unstable = 1;
-		printk("Marking TSC unstable due to %s\n", reason);
+		printk(KERN_INFO "Marking TSC unstable due to %s\n", reason);
 		/* Change only the rating, when not registered */
 		if (clocksource_tsc.mult)
-			clocksource_change_rating(&clocksource_tsc, 0);
-		else
+			clocksource_mark_unstable(&clocksource_tsc);
+		else {
+			clocksource_tsc.flags |= CLOCK_SOURCE_UNSTABLE;
 			clocksource_tsc.rating = 0;
+		}
 	}
 }
 
@@ -852,15 +856,71 @@ static void __init init_tsc_clocksource(void)
 	clocksource_register(&clocksource_tsc);
 }
 
+#ifdef CONFIG_X86_64
+/*
+ * calibrate_cpu is used on systems with fixed rate TSCs to determine
+ * processor frequency
+ */
+#define TICK_COUNT 100000000
+static unsigned long __init calibrate_cpu(void)
+{
+	int tsc_start, tsc_now;
+	int i, no_ctr_free;
+	unsigned long evntsel3 = 0, pmc3 = 0, pmc_now = 0;
+	unsigned long flags;
+
+	for (i = 0; i < 4; i++)
+		if (avail_to_resrv_perfctr_nmi_bit(i))
+			break;
+	no_ctr_free = (i == 4);
+	if (no_ctr_free) {
+		WARN(1, KERN_WARNING "Warning: AMD perfctrs busy ... "
+		     "cpu_khz value may be incorrect.\n");
+		i = 3;
+		rdmsrl(MSR_K7_EVNTSEL3, evntsel3);
+		wrmsrl(MSR_K7_EVNTSEL3, 0);
+		rdmsrl(MSR_K7_PERFCTR3, pmc3);
+	} else {
+		reserve_perfctr_nmi(MSR_K7_PERFCTR0 + i);
+		reserve_evntsel_nmi(MSR_K7_EVNTSEL0 + i);
+	}
+	local_irq_save(flags);
+	/* start measuring cycles, incrementing from 0 */
+	wrmsrl(MSR_K7_PERFCTR0 + i, 0);
+	wrmsrl(MSR_K7_EVNTSEL0 + i, 1 << 22 | 3 << 16 | 0x76);
+	rdtscl(tsc_start);
+	do {
+		rdmsrl(MSR_K7_PERFCTR0 + i, pmc_now);
+		tsc_now = get_cycles();
+	} while ((tsc_now - tsc_start) < TICK_COUNT);
+
+	local_irq_restore(flags);
+	if (no_ctr_free) {
+		wrmsrl(MSR_K7_EVNTSEL3, 0);
+		wrmsrl(MSR_K7_PERFCTR3, pmc3);
+		wrmsrl(MSR_K7_EVNTSEL3, evntsel3);
+	} else {
+		release_perfctr_nmi(MSR_K7_PERFCTR0 + i);
+		release_evntsel_nmi(MSR_K7_EVNTSEL0 + i);
+	}
+
+	return pmc_now * tsc_khz / (tsc_now - tsc_start);
+}
+#else
+static inline unsigned long calibrate_cpu(void) { return cpu_khz; }
+#endif
+
 void __init tsc_init(void)
 {
 	u64 lpj;
 	int cpu;
 
+	x86_init.timers.tsc_pre_init();
+
 	if (!cpu_has_tsc)
 		return;
 
-	tsc_khz = calibrate_tsc();
+	tsc_khz = x86_platform.calibrate_tsc();
 	cpu_khz = tsc_khz;
 
 	if (!tsc_khz) {
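
An aside on the arithmetic in the calibrate_cpu() hunk above: the routine programs an AMD performance counter with event 0x76 (cycles not in halt), lets it race the TSC for TICK_COUNT reference ticks, then scales the known TSC rate by the ratio of the two deltas to get the core clock. Below is a standalone, compilable sketch of just that final scaling step; all the sample values are made up for illustration and do not appear in the patch.

#include <stdio.h>

int main(void)
{
	/* Assumed sample values -- not from the patch above. */
	unsigned long long tsc_khz   = 2600000ULL;	/* TSC reference clock: 2.6 GHz	*/
	unsigned long long pmc_delta = 90000000ULL;	/* unhalted core cycles counted	*/
	unsigned long long tsc_delta = 100000000ULL;	/* TSC cycles elapsed meanwhile	*/

	/*
	 * Same formula as the patch's "pmc_now * tsc_khz / (tsc_now - tsc_start)":
	 * 90e6 / 100e6 of 2600000 kHz = 2340000 kHz, i.e. a 2.34 GHz core clock.
	 * The 64-bit product (2.34e14) is why the wide type matters here.
	 */
	unsigned long long cpu_khz = pmc_delta * tsc_khz / tsc_delta;

	printf("cpu_khz = %llu\n", cpu_khz);
	return 0;
}
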
@@ -868,11 +928,9 @@ void __init tsc_init(void)
 		return;
 	}
 
-#ifdef CONFIG_X86_64
 	if (cpu_has(&boot_cpu_data, X86_FEATURE_CONSTANT_TSC) &&
 	    (boot_cpu_data.x86_vendor == X86_VENDOR_AMD))
 		cpu_khz = calibrate_cpu();
-#endif
 
 	printk("Detected %lu.%03lu MHz processor.\n",
 		(unsigned long)cpu_khz / 1000,
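
For context on the indirection this patch introduces: tsc.c now reaches the calibration routine through the x86_platform ops table instead of calling it directly, which is why the get_hypervisor_tsc_freq() special case could be dropped from native_calibrate_tsc(). A minimal sketch of that wiring is below; the struct layout and file locations (arch/x86/include/asm/x86_init.h, arch/x86/kernel/x86_init.c) are assumptions based on the rest of the series, since those files are not part of this diff.

/* Sketch only -- layout assumed, not shown in this diff. */
extern unsigned long native_calibrate_tsc(void);	/* kept in tsc.c */

struct x86_platform_ops {
	unsigned long (*calibrate_tsc)(void);	/* returns TSC freq in kHz */
};

/* Default ops: bare metal keeps the native PIT/HPET calibration path. */
struct x86_platform_ops x86_platform = {
	.calibrate_tsc	= native_calibrate_tsc,
};

A paravirtualized platform can then override the hook during early setup, along the lines of "x86_platform.calibrate_tsc = vmware_get_tsc_khz;" (the VMware platform code is one such user, if I recall the series correctly), so tsc_init() and recalibrate_cpu_khz() pick up the hypervisor-supplied frequency without tsc.c knowing who provides it.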