author     Thomas Gleixner <tglx@linutronix.de>   2009-08-20 10:27:41 -0400
committer  Thomas Gleixner <tglx@linutronix.de>   2009-08-31 03:35:46 -0400
commit     08047c4f1740c7cee75d58e2919d48c09f951649 (patch)
tree       ffaf378e4be6dec0ef572f30385c1c545c7df494 /arch/x86/kernel/time_64.c
parent     454ede7eebf91b92ab1eafe10c6b6ed04de29bf8 (diff)
x86: Move calibrate_cpu to tsc.c
Move the code to where its only user is. We also need to look into
whether this hardwired hackery might interfere with perfcounters.
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
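
For reference, the calibrate_cpu() routine being moved derives the CPU frequency by letting an AMD K7 performance counter count core clock cycles while the TSC advances by TICK_COUNT ticks, then scaling the counted cycles by tsc_khz. The following is a minimal standalone sketch of that arithmetic only; the counter and TSC deltas below are hypothetical sample values, whereas the kernel code reads them from the MSR_K7_PERFCTR* registers and the TSC.

#include <stdio.h>

/*
 * Illustration only: in the kernel, pmc_delta comes from an MSR_K7_PERFCTR*
 * register and tsc_delta from the TSC; the values here are made up.
 */
int main(void)
{
	unsigned long long tsc_khz   = 2200000ULL;   /* assumed TSC rate: 2.2 GHz */
	unsigned long long pmc_delta = 99950000ULL;  /* core cycles counted by the perfctr */
	unsigned long long tsc_delta = 100000000ULL; /* TSC ticks elapsed (TICK_COUNT) */

	/* cpu_khz = counted cycles * tsc_khz / TSC ticks, as in calibrate_cpu() */
	unsigned long long cpu_khz = pmc_delta * tsc_khz / tsc_delta;

	printf("calibrated cpu_khz = %llu\n", cpu_khz);
	return 0;
}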
Diffstat (limited to 'arch/x86/kernel/time_64.c')
-rw-r--r--  arch/x86/kernel/time_64.c | 51
1 file changed, 0 insertions(+), 51 deletions(-)
diff --git a/arch/x86/kernel/time_64.c b/arch/x86/kernel/time_64.c
index 78cbdf5c006b..e59a40ebff14 100644
--- a/arch/x86/kernel/time_64.c
+++ b/arch/x86/kernel/time_64.c
@@ -21,7 +21,6 @@
 #include <asm/timer.h>
 #include <asm/hpet.h>
 #include <asm/time.h>
-#include <asm/nmi.h>
 
 #if defined(CONFIG_X86_32) && defined(CONFIG_X86_IO_APIC)
 int timer_ack;
@@ -84,56 +83,6 @@ static irqreturn_t timer_interrupt(int irq, void *dev_id)
 	return IRQ_HANDLED;
 }
 
-/*
- * calibrate_cpu is used on systems with fixed rate TSCs to determine
- * processor frequency
- */
-#define TICK_COUNT 100000000
-unsigned long __init calibrate_cpu(void)
-{
-	int tsc_start, tsc_now;
-	int i, no_ctr_free;
-	unsigned long evntsel3 = 0, pmc3 = 0, pmc_now = 0;
-	unsigned long flags;
-
-	for (i = 0; i < 4; i++)
-		if (avail_to_resrv_perfctr_nmi_bit(i))
-			break;
-	no_ctr_free = (i == 4);
-	if (no_ctr_free) {
-		WARN(1, KERN_WARNING "Warning: AMD perfctrs busy ... "
-			"cpu_khz value may be incorrect.\n");
-		i = 3;
-		rdmsrl(MSR_K7_EVNTSEL3, evntsel3);
-		wrmsrl(MSR_K7_EVNTSEL3, 0);
-		rdmsrl(MSR_K7_PERFCTR3, pmc3);
-	} else {
-		reserve_perfctr_nmi(MSR_K7_PERFCTR0 + i);
-		reserve_evntsel_nmi(MSR_K7_EVNTSEL0 + i);
-	}
-	local_irq_save(flags);
-	/* start measuring cycles, incrementing from 0 */
-	wrmsrl(MSR_K7_PERFCTR0 + i, 0);
-	wrmsrl(MSR_K7_EVNTSEL0 + i, 1 << 22 | 3 << 16 | 0x76);
-	rdtscl(tsc_start);
-	do {
-		rdmsrl(MSR_K7_PERFCTR0 + i, pmc_now);
-		tsc_now = get_cycles();
-	} while ((tsc_now - tsc_start) < TICK_COUNT);
-
-	local_irq_restore(flags);
-	if (no_ctr_free) {
-		wrmsrl(MSR_K7_EVNTSEL3, 0);
-		wrmsrl(MSR_K7_PERFCTR3, pmc3);
-		wrmsrl(MSR_K7_EVNTSEL3, evntsel3);
-	} else {
-		release_perfctr_nmi(MSR_K7_PERFCTR0 + i);
-		release_evntsel_nmi(MSR_K7_EVNTSEL0 + i);
-	}
-
-	return pmc_now * tsc_khz / (tsc_now - tsc_start);
-}
-
 static struct irqaction irq0 = {
 	.handler = timer_interrupt,
 	.flags = IRQF_DISABLED | IRQF_NOBALANCING | IRQF_IRQPOLL | IRQF_TIMER,