diff options
author | Alok Kataria <akataria@vmware.com> | 2008-07-01 14:43:24 -0400 |
---|---|---|
committer | Ingo Molnar <mingo@elte.hu> | 2008-07-09 01:43:25 -0400 |
commit | bfc0f5947afa5e3a13e55867f4478c8a92c11dca (patch) | |
tree | bb642adee69c7804eaafbb7fad66af61b4f9f56f /arch/x86/kernel | |
parent | 0ef95533326a7b37d16025af9edc0c18e644b346 (diff) |
x86: merge tsc calibration
Merge the tsc calibration code for the 32bit and 64bit kernel.
The paravirtualized calculate_cpu_khz for the 64-bit kernel now points to the
same tsc_calibrate code as on 32-bit.
The original native_calculate_cpu_khz for 64-bit is now called calibrate_cpu.
Also moved the recalibrate_cpu_khz function into the common file.
Note that this function is called only from the Powernow K7 cpufreq driver.
Signed-off-by: Alok N Kataria <akataria@vmware.com>
Signed-off-by: Dan Hecht <dhecht@vmware.com>
Cc: Dan Hecht <dhecht@vmware.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'arch/x86/kernel')
-rw-r--r-- | arch/x86/kernel/time_64.c | 26 | ||||
-rw-r--r-- | arch/x86/kernel/tsc.c | 131 | ||||
-rw-r--r-- | arch/x86/kernel/tsc_32.c | 74 | ||||
-rw-r--r-- | arch/x86/kernel/tsc_64.c | 94 |
4 files changed, 153 insertions, 172 deletions
diff --git a/arch/x86/kernel/time_64.c b/arch/x86/kernel/time_64.c index 39ae8511a137..c6ac4dad41f6 100644 --- a/arch/x86/kernel/time_64.c +++ b/arch/x86/kernel/time_64.c | |||
@@ -56,7 +56,7 @@ static irqreturn_t timer_event_interrupt(int irq, void *dev_id) | |||
56 | /* calibrate_cpu is used on systems with fixed rate TSCs to determine | 56 | /* calibrate_cpu is used on systems with fixed rate TSCs to determine |
57 | * processor frequency */ | 57 | * processor frequency */ |
58 | #define TICK_COUNT 100000000 | 58 | #define TICK_COUNT 100000000 |
59 | unsigned long __init native_calculate_cpu_khz(void) | 59 | static unsigned long __init calibrate_cpu(void) |
60 | { | 60 | { |
61 | int tsc_start, tsc_now; | 61 | int tsc_start, tsc_now; |
62 | int i, no_ctr_free; | 62 | int i, no_ctr_free; |
@@ -114,14 +114,18 @@ void __init hpet_time_init(void) | |||
114 | setup_irq(0, &irq0); | 114 | setup_irq(0, &irq0); |
115 | } | 115 | } |
116 | 116 | ||
117 | extern void set_cyc2ns_scale(unsigned long cpu_khz, int cpu); | ||
118 | |||
117 | void __init time_init(void) | 119 | void __init time_init(void) |
118 | { | 120 | { |
119 | tsc_calibrate(); | 121 | int cpu; |
122 | |||
123 | cpu_khz = calculate_cpu_khz(); | ||
124 | tsc_khz = cpu_khz; | ||
120 | 125 | ||
121 | cpu_khz = tsc_khz; | ||
122 | if (cpu_has(&boot_cpu_data, X86_FEATURE_CONSTANT_TSC) && | 126 | if (cpu_has(&boot_cpu_data, X86_FEATURE_CONSTANT_TSC) && |
123 | (boot_cpu_data.x86_vendor == X86_VENDOR_AMD)) | 127 | (boot_cpu_data.x86_vendor == X86_VENDOR_AMD)) |
124 | cpu_khz = calculate_cpu_khz(); | 128 | cpu_khz = calibrate_cpu(); |
125 | 129 | ||
126 | lpj_fine = ((unsigned long)tsc_khz * 1000)/HZ; | 130 | lpj_fine = ((unsigned long)tsc_khz * 1000)/HZ; |
127 | 131 | ||
@@ -134,7 +138,17 @@ void __init time_init(void) | |||
134 | vgetcpu_mode = VGETCPU_LSL; | 138 | vgetcpu_mode = VGETCPU_LSL; |
135 | 139 | ||
136 | printk(KERN_INFO "time.c: Detected %d.%03d MHz processor.\n", | 140 | printk(KERN_INFO "time.c: Detected %d.%03d MHz processor.\n", |
137 | cpu_khz / 1000, cpu_khz % 1000); | 141 | cpu_khz / 1000, cpu_khz % 1000); |
142 | |||
143 | /* | ||
144 | * Secondary CPUs do not run through tsc_init(), so set up | ||
145 | * all the scale factors for all CPUs, assuming the same | ||
146 | * speed as the bootup CPU. (cpufreq notifiers will fix this | ||
147 | * up if their speed diverges) | ||
148 | */ | ||
149 | for_each_possible_cpu(cpu) | ||
150 | set_cyc2ns_scale(cpu_khz, cpu); | ||
151 | |||
138 | init_tsc_clocksource(); | 152 | init_tsc_clocksource(); |
139 | late_time_init = choose_time_init(); | 153 | late_time_init = choose_time_init(); |
140 | } | 154 | } |
diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c index 5d0be778fadd..e6ee14533c75 100644 --- a/arch/x86/kernel/tsc.c +++ b/arch/x86/kernel/tsc.c | |||
@@ -1,7 +1,11 @@ | |||
1 | #include <linux/kernel.h> | ||
1 | #include <linux/sched.h> | 2 | #include <linux/sched.h> |
2 | #include <linux/init.h> | 3 | #include <linux/init.h> |
3 | #include <linux/module.h> | 4 | #include <linux/module.h> |
4 | #include <linux/timer.h> | 5 | #include <linux/timer.h> |
6 | #include <linux/acpi_pmtmr.h> | ||
7 | |||
8 | #include <asm/hpet.h> | ||
5 | 9 | ||
6 | unsigned int cpu_khz; /* TSC clocks / usec, not used here */ | 10 | unsigned int cpu_khz; /* TSC clocks / usec, not used here */ |
7 | EXPORT_SYMBOL(cpu_khz); | 11 | EXPORT_SYMBOL(cpu_khz); |
@@ -84,3 +88,130 @@ int __init notsc_setup(char *str) | |||
84 | #endif | 88 | #endif |
85 | 89 | ||
86 | __setup("notsc", notsc_setup); | 90 | __setup("notsc", notsc_setup); |
91 | |||
92 | #define MAX_RETRIES 5 | ||
93 | #define SMI_TRESHOLD 50000 | ||
94 | |||
95 | /* | ||
96 | * Read TSC and the reference counters. Take care of SMI disturbance | ||
97 | */ | ||
98 | static u64 __init tsc_read_refs(u64 *pm, u64 *hpet) | ||
99 | { | ||
100 | u64 t1, t2; | ||
101 | int i; | ||
102 | |||
103 | for (i = 0; i < MAX_RETRIES; i++) { | ||
104 | t1 = get_cycles(); | ||
105 | if (hpet) | ||
106 | *hpet = hpet_readl(HPET_COUNTER) & 0xFFFFFFFF; | ||
107 | else | ||
108 | *pm = acpi_pm_read_early(); | ||
109 | t2 = get_cycles(); | ||
110 | if ((t2 - t1) < SMI_TRESHOLD) | ||
111 | return t2; | ||
112 | } | ||
113 | return ULLONG_MAX; | ||
114 | } | ||
115 | |||
116 | /** | ||
117 | * tsc_calibrate - calibrate the tsc on boot | ||
118 | */ | ||
119 | static unsigned int __init tsc_calibrate(void) | ||
120 | { | ||
121 | unsigned long flags; | ||
122 | u64 tsc1, tsc2, tr1, tr2, delta, pm1, pm2, hpet1, hpet2; | ||
123 | int hpet = is_hpet_enabled(); | ||
124 | unsigned int tsc_khz_val = 0; | ||
125 | |||
126 | local_irq_save(flags); | ||
127 | |||
128 | tsc1 = tsc_read_refs(&pm1, hpet ? &hpet1 : NULL); | ||
129 | |||
130 | outb((inb(0x61) & ~0x02) | 0x01, 0x61); | ||
131 | |||
132 | outb(0xb0, 0x43); | ||
133 | outb((CLOCK_TICK_RATE / (1000 / 50)) & 0xff, 0x42); | ||
134 | outb((CLOCK_TICK_RATE / (1000 / 50)) >> 8, 0x42); | ||
135 | tr1 = get_cycles(); | ||
136 | while ((inb(0x61) & 0x20) == 0); | ||
137 | tr2 = get_cycles(); | ||
138 | |||
139 | tsc2 = tsc_read_refs(&pm2, hpet ? &hpet2 : NULL); | ||
140 | |||
141 | local_irq_restore(flags); | ||
142 | |||
143 | /* | ||
144 | * Preset the result with the raw and inaccurate PIT | ||
145 | * calibration value | ||
146 | */ | ||
147 | delta = (tr2 - tr1); | ||
148 | do_div(delta, 50); | ||
149 | tsc_khz_val = delta; | ||
150 | |||
151 | /* hpet or pmtimer available ? */ | ||
152 | if (!hpet && !pm1 && !pm2) { | ||
153 | printk(KERN_INFO "TSC calibrated against PIT\n"); | ||
154 | goto out; | ||
155 | } | ||
156 | |||
157 | /* Check, whether the sampling was disturbed by an SMI */ | ||
158 | if (tsc1 == ULLONG_MAX || tsc2 == ULLONG_MAX) { | ||
159 | printk(KERN_WARNING "TSC calibration disturbed by SMI, " | ||
160 | "using PIT calibration result\n"); | ||
161 | goto out; | ||
162 | } | ||
163 | |||
164 | tsc2 = (tsc2 - tsc1) * 1000000LL; | ||
165 | |||
166 | if (hpet) { | ||
167 | printk(KERN_INFO "TSC calibrated against HPET\n"); | ||
168 | if (hpet2 < hpet1) | ||
169 | hpet2 += 0x100000000ULL; | ||
170 | hpet2 -= hpet1; | ||
171 | tsc1 = ((u64)hpet2 * hpet_readl(HPET_PERIOD)); | ||
172 | do_div(tsc1, 1000000); | ||
173 | } else { | ||
174 | printk(KERN_INFO "TSC calibrated against PM_TIMER\n"); | ||
175 | if (pm2 < pm1) | ||
176 | pm2 += (u64)ACPI_PM_OVRRUN; | ||
177 | pm2 -= pm1; | ||
178 | tsc1 = pm2 * 1000000000LL; | ||
179 | do_div(tsc1, PMTMR_TICKS_PER_SEC); | ||
180 | } | ||
181 | |||
182 | do_div(tsc2, tsc1); | ||
183 | tsc_khz_val = tsc2; | ||
184 | |||
185 | out: | ||
186 | return tsc_khz_val; | ||
187 | } | ||
188 | |||
189 | unsigned long native_calculate_cpu_khz(void) | ||
190 | { | ||
191 | return tsc_calibrate(); | ||
192 | } | ||
193 | |||
194 | #ifdef CONFIG_X86_32 | ||
195 | /* Only called from the Powernow K7 cpu freq driver */ | ||
196 | int recalibrate_cpu_khz(void) | ||
197 | { | ||
198 | #ifndef CONFIG_SMP | ||
199 | unsigned long cpu_khz_old = cpu_khz; | ||
200 | |||
201 | if (cpu_has_tsc) { | ||
202 | cpu_khz = calculate_cpu_khz(); | ||
203 | tsc_khz = cpu_khz; | ||
204 | cpu_data(0).loops_per_jiffy = | ||
205 | cpufreq_scale(cpu_data(0).loops_per_jiffy, | ||
206 | cpu_khz_old, cpu_khz); | ||
207 | return 0; | ||
208 | } else | ||
209 | return -ENODEV; | ||
210 | #else | ||
211 | return -ENODEV; | ||
212 | #endif | ||
213 | } | ||
214 | |||
215 | EXPORT_SYMBOL(recalibrate_cpu_khz); | ||
216 | |||
217 | #endif /* CONFIG_X86_32 */ | ||
diff --git a/arch/x86/kernel/tsc_32.c b/arch/x86/kernel/tsc_32.c index dc8990056d75..40c0aafb358d 100644 --- a/arch/x86/kernel/tsc_32.c +++ b/arch/x86/kernel/tsc_32.c | |||
@@ -42,7 +42,7 @@ extern int tsc_disabled; | |||
42 | 42 | ||
43 | DEFINE_PER_CPU(unsigned long, cyc2ns); | 43 | DEFINE_PER_CPU(unsigned long, cyc2ns); |
44 | 44 | ||
45 | static void set_cyc2ns_scale(unsigned long cpu_khz, int cpu) | 45 | void set_cyc2ns_scale(unsigned long cpu_khz, int cpu) |
46 | { | 46 | { |
47 | unsigned long long tsc_now, ns_now; | 47 | unsigned long long tsc_now, ns_now; |
48 | unsigned long flags, *scale; | 48 | unsigned long flags, *scale; |
@@ -65,78 +65,6 @@ static void set_cyc2ns_scale(unsigned long cpu_khz, int cpu) | |||
65 | local_irq_restore(flags); | 65 | local_irq_restore(flags); |
66 | } | 66 | } |
67 | 67 | ||
68 | unsigned long native_calculate_cpu_khz(void) | ||
69 | { | ||
70 | unsigned long long start, end; | ||
71 | unsigned long count; | ||
72 | u64 delta64 = (u64)ULLONG_MAX; | ||
73 | int i; | ||
74 | unsigned long flags; | ||
75 | |||
76 | local_irq_save(flags); | ||
77 | |||
78 | /* run 3 times to ensure the cache is warm and to get an accurate reading */ | ||
79 | for (i = 0; i < 3; i++) { | ||
80 | mach_prepare_counter(); | ||
81 | rdtscll(start); | ||
82 | mach_countup(&count); | ||
83 | rdtscll(end); | ||
84 | |||
85 | /* | ||
86 | * Error: ECTCNEVERSET | ||
87 | * The CTC wasn't reliable: we got a hit on the very first read, | ||
88 | * or the CPU was so fast/slow that the quotient wouldn't fit in | ||
89 | * 32 bits.. | ||
90 | */ | ||
91 | if (count <= 1) | ||
92 | continue; | ||
93 | |||
94 | /* cpu freq too slow: */ | ||
95 | if ((end - start) <= CALIBRATE_TIME_MSEC) | ||
96 | continue; | ||
97 | |||
98 | /* | ||
99 | * We want the minimum time of all runs in case one of them | ||
100 | * is inaccurate due to SMI or other delay | ||
101 | */ | ||
102 | delta64 = min(delta64, (end - start)); | ||
103 | } | ||
104 | |||
105 | /* cpu freq too fast (or every run was bad): */ | ||
106 | if (delta64 > (1ULL<<32)) | ||
107 | goto err; | ||
108 | |||
109 | delta64 += CALIBRATE_TIME_MSEC/2; /* round for do_div */ | ||
110 | do_div(delta64,CALIBRATE_TIME_MSEC); | ||
111 | |||
112 | local_irq_restore(flags); | ||
113 | return (unsigned long)delta64; | ||
114 | err: | ||
115 | local_irq_restore(flags); | ||
116 | return 0; | ||
117 | } | ||
118 | |||
119 | int recalibrate_cpu_khz(void) | ||
120 | { | ||
121 | #ifndef CONFIG_SMP | ||
122 | unsigned long cpu_khz_old = cpu_khz; | ||
123 | |||
124 | if (cpu_has_tsc) { | ||
125 | cpu_khz = calculate_cpu_khz(); | ||
126 | tsc_khz = cpu_khz; | ||
127 | cpu_data(0).loops_per_jiffy = | ||
128 | cpufreq_scale(cpu_data(0).loops_per_jiffy, | ||
129 | cpu_khz_old, cpu_khz); | ||
130 | return 0; | ||
131 | } else | ||
132 | return -ENODEV; | ||
133 | #else | ||
134 | return -ENODEV; | ||
135 | #endif | ||
136 | } | ||
137 | |||
138 | EXPORT_SYMBOL(recalibrate_cpu_khz); | ||
139 | |||
140 | #ifdef CONFIG_CPU_FREQ | 68 | #ifdef CONFIG_CPU_FREQ |
141 | 69 | ||
142 | /* | 70 | /* |
diff --git a/arch/x86/kernel/tsc_64.c b/arch/x86/kernel/tsc_64.c index 69cbe4c9f050..c852ff9bd5d4 100644 --- a/arch/x86/kernel/tsc_64.c +++ b/arch/x86/kernel/tsc_64.c | |||
@@ -40,7 +40,7 @@ extern int tsc_disabled; | |||
40 | 40 | ||
41 | DEFINE_PER_CPU(unsigned long, cyc2ns); | 41 | DEFINE_PER_CPU(unsigned long, cyc2ns); |
42 | 42 | ||
43 | static void set_cyc2ns_scale(unsigned long cpu_khz, int cpu) | 43 | void set_cyc2ns_scale(unsigned long cpu_khz, int cpu) |
44 | { | 44 | { |
45 | unsigned long long tsc_now, ns_now; | 45 | unsigned long long tsc_now, ns_now; |
46 | unsigned long flags, *scale; | 46 | unsigned long flags, *scale; |
@@ -130,98 +130,6 @@ core_initcall(cpufreq_tsc); | |||
130 | 130 | ||
131 | #endif | 131 | #endif |
132 | 132 | ||
133 | #define MAX_RETRIES 5 | ||
134 | #define SMI_TRESHOLD 50000 | ||
135 | |||
136 | /* | ||
137 | * Read TSC and the reference counters. Take care of SMI disturbance | ||
138 | */ | ||
139 | static unsigned long __init tsc_read_refs(unsigned long *pm, | ||
140 | unsigned long *hpet) | ||
141 | { | ||
142 | unsigned long t1, t2; | ||
143 | int i; | ||
144 | |||
145 | for (i = 0; i < MAX_RETRIES; i++) { | ||
146 | t1 = get_cycles(); | ||
147 | if (hpet) | ||
148 | *hpet = hpet_readl(HPET_COUNTER) & 0xFFFFFFFF; | ||
149 | else | ||
150 | *pm = acpi_pm_read_early(); | ||
151 | t2 = get_cycles(); | ||
152 | if ((t2 - t1) < SMI_TRESHOLD) | ||
153 | return t2; | ||
154 | } | ||
155 | return ULONG_MAX; | ||
156 | } | ||
157 | |||
158 | /** | ||
159 | * tsc_calibrate - calibrate the tsc on boot | ||
160 | */ | ||
161 | void __init tsc_calibrate(void) | ||
162 | { | ||
163 | unsigned long flags, tsc1, tsc2, tr1, tr2, pm1, pm2, hpet1, hpet2; | ||
164 | int hpet = is_hpet_enabled(), cpu; | ||
165 | |||
166 | local_irq_save(flags); | ||
167 | |||
168 | tsc1 = tsc_read_refs(&pm1, hpet ? &hpet1 : NULL); | ||
169 | |||
170 | outb((inb(0x61) & ~0x02) | 0x01, 0x61); | ||
171 | |||
172 | outb(0xb0, 0x43); | ||
173 | outb((CLOCK_TICK_RATE / (1000 / 50)) & 0xff, 0x42); | ||
174 | outb((CLOCK_TICK_RATE / (1000 / 50)) >> 8, 0x42); | ||
175 | tr1 = get_cycles(); | ||
176 | while ((inb(0x61) & 0x20) == 0); | ||
177 | tr2 = get_cycles(); | ||
178 | |||
179 | tsc2 = tsc_read_refs(&pm2, hpet ? &hpet2 : NULL); | ||
180 | |||
181 | local_irq_restore(flags); | ||
182 | |||
183 | /* | ||
184 | * Preset the result with the raw and inaccurate PIT | ||
185 | * calibration value | ||
186 | */ | ||
187 | tsc_khz = (tr2 - tr1) / 50; | ||
188 | |||
189 | /* hpet or pmtimer available ? */ | ||
190 | if (!hpet && !pm1 && !pm2) { | ||
191 | printk(KERN_INFO "TSC calibrated against PIT\n"); | ||
192 | goto out; | ||
193 | } | ||
194 | |||
195 | /* Check, whether the sampling was disturbed by an SMI */ | ||
196 | if (tsc1 == ULONG_MAX || tsc2 == ULONG_MAX) { | ||
197 | printk(KERN_WARNING "TSC calibration disturbed by SMI, " | ||
198 | "using PIT calibration result\n"); | ||
199 | goto out; | ||
200 | } | ||
201 | |||
202 | tsc2 = (tsc2 - tsc1) * 1000000L; | ||
203 | |||
204 | if (hpet) { | ||
205 | printk(KERN_INFO "TSC calibrated against HPET\n"); | ||
206 | if (hpet2 < hpet1) | ||
207 | hpet2 += 0x100000000UL; | ||
208 | hpet2 -= hpet1; | ||
209 | tsc1 = (hpet2 * hpet_readl(HPET_PERIOD)) / 1000000; | ||
210 | } else { | ||
211 | printk(KERN_INFO "TSC calibrated against PM_TIMER\n"); | ||
212 | if (pm2 < pm1) | ||
213 | pm2 += ACPI_PM_OVRRUN; | ||
214 | pm2 -= pm1; | ||
215 | tsc1 = (pm2 * 1000000000) / PMTMR_TICKS_PER_SEC; | ||
216 | } | ||
217 | |||
218 | tsc_khz = tsc2 / tsc1; | ||
219 | |||
220 | out: | ||
221 | for_each_possible_cpu(cpu) | ||
222 | set_cyc2ns_scale(tsc_khz, cpu); | ||
223 | } | ||
224 | |||
225 | /* | 133 | /* |
226 | * Make an educated guess if the TSC is trustworthy and synchronized | 134 | * Make an educated guess if the TSC is trustworthy and synchronized |
227 | * over all CPUs. | 135 | * over all CPUs. |