about summary refs log tree commit diff stats
diff options
context:
space:
mode:
authorThomas Gleixner <tglx@linutronix.de>2009-08-20 10:27:41 -0400
committerThomas Gleixner <tglx@linutronix.de>2009-08-31 03:35:46 -0400
commit08047c4f1740c7cee75d58e2919d48c09f951649 (patch)
treeffaf378e4be6dec0ef572f30385c1c545c7df494
parent454ede7eebf91b92ab1eafe10c6b6ed04de29bf8 (diff)
x86: Move calibrate_cpu to tsc.c
Move the code where it's only user is. Also we need to look whether this hardwired hackery might interfere with perfcounters. Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
-rw-r--r--arch/x86/include/asm/time.h2
-rw-r--r--arch/x86/kernel/time_32.c1
-rw-r--r--arch/x86/kernel/time_64.c51
-rw-r--r--arch/x86/kernel/tsc.c57
4 files changed, 55 insertions(+), 56 deletions(-)
diff --git a/arch/x86/include/asm/time.h b/arch/x86/include/asm/time.h
index 91bb162b5a31..9c5608b21c27 100644
--- a/arch/x86/include/asm/time.h
+++ b/arch/x86/include/asm/time.h
@@ -57,6 +57,4 @@ extern void time_init(void);
57 57
58#endif /* CONFIG_PARAVIRT */ 58#endif /* CONFIG_PARAVIRT */
59 59
60extern unsigned long __init calibrate_cpu(void);
61
62#endif /* _ASM_X86_TIME_H */ 60#endif /* _ASM_X86_TIME_H */
diff --git a/arch/x86/kernel/time_32.c b/arch/x86/kernel/time_32.c
index 186abc577b2b..fd876cc77487 100644
--- a/arch/x86/kernel/time_32.c
+++ b/arch/x86/kernel/time_32.c
@@ -21,7 +21,6 @@
21#include <asm/timer.h> 21#include <asm/timer.h>
22#include <asm/hpet.h> 22#include <asm/hpet.h>
23#include <asm/time.h> 23#include <asm/time.h>
24#include <asm/nmi.h>
25 24
26#if defined(CONFIG_X86_32) && defined(CONFIG_X86_IO_APIC) 25#if defined(CONFIG_X86_32) && defined(CONFIG_X86_IO_APIC)
27int timer_ack; 26int timer_ack;
diff --git a/arch/x86/kernel/time_64.c b/arch/x86/kernel/time_64.c
index 78cbdf5c006b..e59a40ebff14 100644
--- a/arch/x86/kernel/time_64.c
+++ b/arch/x86/kernel/time_64.c
@@ -21,7 +21,6 @@
21#include <asm/timer.h> 21#include <asm/timer.h>
22#include <asm/hpet.h> 22#include <asm/hpet.h>
23#include <asm/time.h> 23#include <asm/time.h>
24#include <asm/nmi.h>
25 24
26#if defined(CONFIG_X86_32) && defined(CONFIG_X86_IO_APIC) 25#if defined(CONFIG_X86_32) && defined(CONFIG_X86_IO_APIC)
27int timer_ack; 26int timer_ack;
@@ -84,56 +83,6 @@ static irqreturn_t timer_interrupt(int irq, void *dev_id)
84 return IRQ_HANDLED; 83 return IRQ_HANDLED;
85} 84}
86 85
87/*
88 * calibrate_cpu is used on systems with fixed rate TSCs to determine
89 * processor frequency
90 */
91#define TICK_COUNT 100000000
92unsigned long __init calibrate_cpu(void)
93{
94 int tsc_start, tsc_now;
95 int i, no_ctr_free;
96 unsigned long evntsel3 = 0, pmc3 = 0, pmc_now = 0;
97 unsigned long flags;
98
99 for (i = 0; i < 4; i++)
100 if (avail_to_resrv_perfctr_nmi_bit(i))
101 break;
102 no_ctr_free = (i == 4);
103 if (no_ctr_free) {
104 WARN(1, KERN_WARNING "Warning: AMD perfctrs busy ... "
105 "cpu_khz value may be incorrect.\n");
106 i = 3;
107 rdmsrl(MSR_K7_EVNTSEL3, evntsel3);
108 wrmsrl(MSR_K7_EVNTSEL3, 0);
109 rdmsrl(MSR_K7_PERFCTR3, pmc3);
110 } else {
111 reserve_perfctr_nmi(MSR_K7_PERFCTR0 + i);
112 reserve_evntsel_nmi(MSR_K7_EVNTSEL0 + i);
113 }
114 local_irq_save(flags);
115 /* start measuring cycles, incrementing from 0 */
116 wrmsrl(MSR_K7_PERFCTR0 + i, 0);
117 wrmsrl(MSR_K7_EVNTSEL0 + i, 1 << 22 | 3 << 16 | 0x76);
118 rdtscl(tsc_start);
119 do {
120 rdmsrl(MSR_K7_PERFCTR0 + i, pmc_now);
121 tsc_now = get_cycles();
122 } while ((tsc_now - tsc_start) < TICK_COUNT);
123
124 local_irq_restore(flags);
125 if (no_ctr_free) {
126 wrmsrl(MSR_K7_EVNTSEL3, 0);
127 wrmsrl(MSR_K7_PERFCTR3, pmc3);
128 wrmsrl(MSR_K7_EVNTSEL3, evntsel3);
129 } else {
130 release_perfctr_nmi(MSR_K7_PERFCTR0 + i);
131 release_evntsel_nmi(MSR_K7_EVNTSEL0 + i);
132 }
133
134 return pmc_now * tsc_khz / (tsc_now - tsc_start);
135}
136
137static struct irqaction irq0 = { 86static struct irqaction irq0 = {
138 .handler = timer_interrupt, 87 .handler = timer_interrupt,
139 .flags = IRQF_DISABLED | IRQF_NOBALANCING | IRQF_IRQPOLL | IRQF_TIMER, 88 .flags = IRQF_DISABLED | IRQF_NOBALANCING | IRQF_IRQPOLL | IRQF_TIMER,
diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c
index 652bc214eebf..97a0bcbad100 100644
--- a/arch/x86/kernel/tsc.c
+++ b/arch/x86/kernel/tsc.c
@@ -17,6 +17,7 @@
17#include <asm/time.h> 17#include <asm/time.h>
18#include <asm/delay.h> 18#include <asm/delay.h>
19#include <asm/hypervisor.h> 19#include <asm/hypervisor.h>
20#include <asm/nmi.h>
20 21
21unsigned int __read_mostly cpu_khz; /* TSC clocks / usec, not used here */ 22unsigned int __read_mostly cpu_khz; /* TSC clocks / usec, not used here */
22EXPORT_SYMBOL(cpu_khz); 23EXPORT_SYMBOL(cpu_khz);
@@ -852,6 +853,60 @@ static void __init init_tsc_clocksource(void)
852 clocksource_register(&clocksource_tsc); 853 clocksource_register(&clocksource_tsc);
853} 854}
854 855
856#ifdef CONFIG_X86_64
857/*
858 * calibrate_cpu is used on systems with fixed rate TSCs to determine
859 * processor frequency
860 */
861#define TICK_COUNT 100000000
862static unsigned long __init calibrate_cpu(void)
863{
864 int tsc_start, tsc_now;
865 int i, no_ctr_free;
866 unsigned long evntsel3 = 0, pmc3 = 0, pmc_now = 0;
867 unsigned long flags;
868
869 for (i = 0; i < 4; i++)
870 if (avail_to_resrv_perfctr_nmi_bit(i))
871 break;
872 no_ctr_free = (i == 4);
873 if (no_ctr_free) {
874 WARN(1, KERN_WARNING "Warning: AMD perfctrs busy ... "
875 "cpu_khz value may be incorrect.\n");
876 i = 3;
877 rdmsrl(MSR_K7_EVNTSEL3, evntsel3);
878 wrmsrl(MSR_K7_EVNTSEL3, 0);
879 rdmsrl(MSR_K7_PERFCTR3, pmc3);
880 } else {
881 reserve_perfctr_nmi(MSR_K7_PERFCTR0 + i);
882 reserve_evntsel_nmi(MSR_K7_EVNTSEL0 + i);
883 }
884 local_irq_save(flags);
885 /* start measuring cycles, incrementing from 0 */
886 wrmsrl(MSR_K7_PERFCTR0 + i, 0);
887 wrmsrl(MSR_K7_EVNTSEL0 + i, 1 << 22 | 3 << 16 | 0x76);
888 rdtscl(tsc_start);
889 do {
890 rdmsrl(MSR_K7_PERFCTR0 + i, pmc_now);
891 tsc_now = get_cycles();
892 } while ((tsc_now - tsc_start) < TICK_COUNT);
893
894 local_irq_restore(flags);
895 if (no_ctr_free) {
896 wrmsrl(MSR_K7_EVNTSEL3, 0);
897 wrmsrl(MSR_K7_PERFCTR3, pmc3);
898 wrmsrl(MSR_K7_EVNTSEL3, evntsel3);
899 } else {
900 release_perfctr_nmi(MSR_K7_PERFCTR0 + i);
901 release_evntsel_nmi(MSR_K7_EVNTSEL0 + i);
902 }
903
904 return pmc_now * tsc_khz / (tsc_now - tsc_start);
905}
906#else
907static inline unsigned long calibrate_cpu(void) { return cpu_khz; }
908#endif
909
855void __init tsc_init(void) 910void __init tsc_init(void)
856{ 911{
857 u64 lpj; 912 u64 lpj;
@@ -870,11 +925,9 @@ void __init tsc_init(void)
870 return; 925 return;
871 } 926 }
872 927
873#ifdef CONFIG_X86_64
874 if (cpu_has(&boot_cpu_data, X86_FEATURE_CONSTANT_TSC) && 928 if (cpu_has(&boot_cpu_data, X86_FEATURE_CONSTANT_TSC) &&
875 (boot_cpu_data.x86_vendor == X86_VENDOR_AMD)) 929 (boot_cpu_data.x86_vendor == X86_VENDOR_AMD))
876 cpu_khz = calibrate_cpu(); 930 cpu_khz = calibrate_cpu();
877#endif
878 931
879 printk("Detected %lu.%03lu MHz processor.\n", 932 printk("Detected %lu.%03lu MHz processor.\n",
880 (unsigned long)cpu_khz / 1000, 933 (unsigned long)cpu_khz / 1000,