| author | Thomas Gleixner <tglx@linutronix.de> | 2007-10-11 05:17:01 -0400 |
|---|---|---|
| committer | Thomas Gleixner <tglx@linutronix.de> | 2007-10-11 05:17:01 -0400 |
| commit | 9a163ed8e0552fdcffe405d2ea7134819a81456e (patch) | |
| tree | b322fd2afbb812ba7ddfd22f3734aaab007c2aa5 /arch/x86/kernel/tsc_32.c | |
| parent | f7627e2513987bb5d4e8cb13c4e0a478352141ac (diff) | |
i386: move kernel
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'arch/x86/kernel/tsc_32.c')
-rw-r--r-- | arch/x86/kernel/tsc_32.c | 413
1 file changed, 413 insertions, 0 deletions
diff --git a/arch/x86/kernel/tsc_32.c b/arch/x86/kernel/tsc_32.c
new file mode 100644
index 000000000000..a39280b4dd3a
--- /dev/null
+++ b/arch/x86/kernel/tsc_32.c
@@ -0,0 +1,413 @@
1 | /* | ||
2 | * This code largely moved from arch/i386/kernel/timer/timer_tsc.c | ||
3 | * which was originally moved from arch/i386/kernel/time.c. | ||
4 | * See comments there for proper credits. | ||
5 | */ | ||
6 | |||
7 | #include <linux/sched.h> | ||
8 | #include <linux/clocksource.h> | ||
9 | #include <linux/workqueue.h> | ||
10 | #include <linux/cpufreq.h> | ||
11 | #include <linux/jiffies.h> | ||
12 | #include <linux/init.h> | ||
13 | #include <linux/dmi.h> | ||
14 | |||
15 | #include <asm/delay.h> | ||
16 | #include <asm/tsc.h> | ||
17 | #include <asm/io.h> | ||
18 | #include <asm/timer.h> | ||
19 | |||
20 | #include "mach_timer.h" | ||
21 | |||
22 | static int tsc_enabled; | ||
23 | |||
24 | /* | ||
25 | * On some systems the TSC frequency does not | ||
26 | * change with the cpu frequency. So we need | ||
27 | * an extra value to store the TSC freq | ||
28 | */ | ||
29 | unsigned int tsc_khz; | ||
30 | EXPORT_SYMBOL_GPL(tsc_khz); | ||
31 | |||
32 | int tsc_disable; | ||
33 | |||
34 | #ifdef CONFIG_X86_TSC | ||
35 | static int __init tsc_setup(char *str) | ||
36 | { | ||
37 | printk(KERN_WARNING "notsc: Kernel compiled with CONFIG_X86_TSC, " | ||
38 | "cannot disable TSC.\n"); | ||
39 | return 1; | ||
40 | } | ||
41 | #else | ||
42 | /* | ||
43 | * disable flag for tsc. Takes effect by clearing the TSC cpu flag | ||
44 | * in cpu/common.c | ||
45 | */ | ||
46 | static int __init tsc_setup(char *str) | ||
47 | { | ||
48 | tsc_disable = 1; | ||
49 | |||
50 | return 1; | ||
51 | } | ||
52 | #endif | ||
53 | |||
54 | __setup("notsc", tsc_setup); | ||
55 | |||
56 | /* | ||
57 | * code to mark and check if the TSC is unstable | ||
58 | * due to cpufreq or due to unsynced TSCs | ||
59 | */ | ||
60 | static int tsc_unstable; | ||
61 | |||
62 | int check_tsc_unstable(void) | ||
63 | { | ||
64 | return tsc_unstable; | ||
65 | } | ||
66 | EXPORT_SYMBOL_GPL(check_tsc_unstable); | ||
67 | |||
68 | /* Accelerators for sched_clock() | ||
69 | * convert from cycles(64bits) => nanoseconds (64bits) | ||
70 | * basic equation: | ||
71 | * ns = cycles / (freq / ns_per_sec) | ||
72 | * ns = cycles * (ns_per_sec / freq) | ||
73 | * ns = cycles * (10^9 / (cpu_khz * 10^3)) | ||
74 | * ns = cycles * (10^6 / cpu_khz) | ||
75 | * | ||
76 | * Then we use scaling math (suggested by george@mvista.com) to get: | ||
77 | * ns = cycles * (10^6 * SC / cpu_khz) / SC | ||
78 | * ns = cycles * cyc2ns_scale / SC | ||
79 | * | ||
80 | * And since SC is a constant power of two, we can convert the div | ||
81 | * into a shift. | ||
82 | * | ||
83 | * We can use a khz divisor instead of mhz to keep better precision, since | ||
84 | * cyc2ns_scale is limited to 10^6 * 2^10, which fits in 32 bits. | ||
85 | * (mathieu.desnoyers@polymtl.ca) | ||
86 | * | ||
87 | * -johnstul@us.ibm.com "math is hard, let's go shopping!" | ||
88 | */ | ||
89 | unsigned long cyc2ns_scale __read_mostly; | ||
90 | |||
91 | #define CYC2NS_SCALE_FACTOR 10 /* 2^10, carefully chosen */ | ||
92 | |||
93 | static inline void set_cyc2ns_scale(unsigned long cpu_khz) | ||
94 | { | ||
95 | cyc2ns_scale = (1000000 << CYC2NS_SCALE_FACTOR)/cpu_khz; | ||
96 | } | ||
97 | |||
98 | /* | ||
99 | * Scheduler clock - returns current time in nanosec units. | ||
100 | */ | ||
101 | unsigned long long native_sched_clock(void) | ||
102 | { | ||
103 | unsigned long long this_offset; | ||
104 | |||
105 | /* | ||
106 | * Fall back to jiffies if there's no TSC available: | ||
107 | * ( But note that we still use it if the TSC is marked | ||
108 | * unstable. We do this because unlike Time Of Day, | ||
109 | * the scheduler clock tolerates small errors and it's | ||
110 | * very important for it to be as fast as the platform | ||
111 | * can achieve it. ) | ||
112 | */ | ||
113 | if (unlikely(!tsc_enabled && !tsc_unstable)) | ||
114 | /* No locking but a rare wrong value is not a big deal: */ | ||
115 | return (jiffies_64 - INITIAL_JIFFIES) * (1000000000 / HZ); | ||
116 | |||
117 | /* read the Time Stamp Counter: */ | ||
118 | rdtscll(this_offset); | ||
119 | |||
120 | /* return the value in ns */ | ||
121 | return cycles_2_ns(this_offset); | ||
122 | } | ||
123 | |||
124 | /* We need to define a real function for sched_clock, to override the | ||
125 | weak default version */ | ||
126 | #ifdef CONFIG_PARAVIRT | ||
127 | unsigned long long sched_clock(void) | ||
128 | { | ||
129 | return paravirt_sched_clock(); | ||
130 | } | ||
131 | #else | ||
132 | unsigned long long sched_clock(void) | ||
133 | __attribute__((alias("native_sched_clock"))); | ||
134 | #endif | ||
135 | |||
136 | unsigned long native_calculate_cpu_khz(void) | ||
137 | { | ||
138 | unsigned long long start, end; | ||
139 | unsigned long count; | ||
140 | u64 delta64; | ||
141 | int i; | ||
142 | unsigned long flags; | ||
143 | |||
144 | local_irq_save(flags); | ||
145 | |||
146 | /* run 3 times to ensure the cache is warm */ | ||
147 | for (i = 0; i < 3; i++) { | ||
148 | mach_prepare_counter(); | ||
149 | rdtscll(start); | ||
150 | mach_countup(&count); | ||
151 | rdtscll(end); | ||
152 | } | ||
153 | /* | ||
154 | * Error: ECTCNEVERSET | ||
155 | * The CTC wasn't reliable: we got a hit on the very first read, | ||
156 | * or the CPU was so fast/slow that the quotient wouldn't fit in | ||
157 | * 32 bits. | ||
158 | */ | ||
159 | if (count <= 1) | ||
160 | goto err; | ||
161 | |||
162 | delta64 = end - start; | ||
163 | |||
164 | /* cpu freq too fast: */ | ||
165 | if (delta64 > (1ULL<<32)) | ||
166 | goto err; | ||
167 | |||
168 | /* cpu freq too slow: */ | ||
169 | if (delta64 <= CALIBRATE_TIME_MSEC) | ||
170 | goto err; | ||
171 | |||
172 | delta64 += CALIBRATE_TIME_MSEC/2; /* round for do_div */ | ||
173 | do_div(delta64, CALIBRATE_TIME_MSEC); | ||
174 | |||
175 | local_irq_restore(flags); | ||
176 | return (unsigned long)delta64; | ||
177 | err: | ||
178 | local_irq_restore(flags); | ||
179 | return 0; | ||
180 | } | ||
181 | |||
182 | int recalibrate_cpu_khz(void) | ||
183 | { | ||
184 | #ifndef CONFIG_SMP | ||
185 | unsigned long cpu_khz_old = cpu_khz; | ||
186 | |||
187 | if (cpu_has_tsc) { | ||
188 | cpu_khz = calculate_cpu_khz(); | ||
189 | tsc_khz = cpu_khz; | ||
190 | cpu_data[0].loops_per_jiffy = | ||
191 | cpufreq_scale(cpu_data[0].loops_per_jiffy, | ||
192 | cpu_khz_old, cpu_khz); | ||
193 | return 0; | ||
194 | } else | ||
195 | return -ENODEV; | ||
196 | #else | ||
197 | return -ENODEV; | ||
198 | #endif | ||
199 | } | ||
200 | |||
201 | EXPORT_SYMBOL(recalibrate_cpu_khz); | ||
202 | |||
203 | #ifdef CONFIG_CPU_FREQ | ||
204 | |||
205 | /* | ||
206 | * if the CPU frequency is scaled, TSC-based delays will need a different | ||
207 | * loops_per_jiffy value to function properly. | ||
208 | */ | ||
209 | static unsigned int ref_freq = 0; | ||
210 | static unsigned long loops_per_jiffy_ref = 0; | ||
211 | static unsigned long cpu_khz_ref = 0; | ||
212 | |||
213 | static int | ||
214 | time_cpufreq_notifier(struct notifier_block *nb, unsigned long val, void *data) | ||
215 | { | ||
216 | struct cpufreq_freqs *freq = data; | ||
217 | |||
218 | if (!ref_freq) { | ||
219 | if (!freq->old){ | ||
220 | ref_freq = freq->new; | ||
221 | return 0; | ||
222 | } | ||
223 | ref_freq = freq->old; | ||
224 | loops_per_jiffy_ref = cpu_data[freq->cpu].loops_per_jiffy; | ||
225 | cpu_khz_ref = cpu_khz; | ||
226 | } | ||
227 | |||
228 | if ((val == CPUFREQ_PRECHANGE && freq->old < freq->new) || | ||
229 | (val == CPUFREQ_POSTCHANGE && freq->old > freq->new) || | ||
230 | (val == CPUFREQ_RESUMECHANGE)) { | ||
231 | if (!(freq->flags & CPUFREQ_CONST_LOOPS)) | ||
232 | cpu_data[freq->cpu].loops_per_jiffy = | ||
233 | cpufreq_scale(loops_per_jiffy_ref, | ||
234 | ref_freq, freq->new); | ||
235 | |||
236 | if (cpu_khz) { | ||
237 | |||
238 | if (num_online_cpus() == 1) | ||
239 | cpu_khz = cpufreq_scale(cpu_khz_ref, | ||
240 | ref_freq, freq->new); | ||
241 | if (!(freq->flags & CPUFREQ_CONST_LOOPS)) { | ||
242 | tsc_khz = cpu_khz; | ||
243 | set_cyc2ns_scale(cpu_khz); | ||
244 | /* | ||
245 | * TSC based sched_clock turns | ||
246 | * to junk w/ cpufreq | ||
247 | */ | ||
248 | mark_tsc_unstable("cpufreq changes"); | ||
249 | } | ||
250 | } | ||
251 | } | ||
252 | |||
253 | return 0; | ||
254 | } | ||
255 | |||
256 | static struct notifier_block time_cpufreq_notifier_block = { | ||
257 | .notifier_call = time_cpufreq_notifier | ||
258 | }; | ||
259 | |||
260 | static int __init cpufreq_tsc(void) | ||
261 | { | ||
262 | return cpufreq_register_notifier(&time_cpufreq_notifier_block, | ||
263 | CPUFREQ_TRANSITION_NOTIFIER); | ||
264 | } | ||
265 | core_initcall(cpufreq_tsc); | ||
266 | |||
267 | #endif | ||
268 | |||
269 | /* clock source code */ | ||
270 | |||
271 | static unsigned long current_tsc_khz = 0; | ||
272 | |||
273 | static cycle_t read_tsc(void) | ||
274 | { | ||
275 | cycle_t ret; | ||
276 | |||
277 | rdtscll(ret); | ||
278 | |||
279 | return ret; | ||
280 | } | ||
281 | |||
282 | static struct clocksource clocksource_tsc = { | ||
283 | .name = "tsc", | ||
284 | .rating = 300, | ||
285 | .read = read_tsc, | ||
286 | .mask = CLOCKSOURCE_MASK(64), | ||
287 | .mult = 0, /* to be set */ | ||
288 | .shift = 22, | ||
289 | .flags = CLOCK_SOURCE_IS_CONTINUOUS | | ||
290 | CLOCK_SOURCE_MUST_VERIFY, | ||
291 | }; | ||
292 | |||
293 | void mark_tsc_unstable(char *reason) | ||
294 | { | ||
295 | if (!tsc_unstable) { | ||
296 | tsc_unstable = 1; | ||
297 | tsc_enabled = 0; | ||
298 | printk("Marking TSC unstable due to: %s.\n", reason); | ||
299 | /* Can be called before registration */ | ||
300 | if (clocksource_tsc.mult) | ||
301 | clocksource_change_rating(&clocksource_tsc, 0); | ||
302 | else | ||
303 | clocksource_tsc.rating = 0; | ||
304 | } | ||
305 | } | ||
306 | EXPORT_SYMBOL_GPL(mark_tsc_unstable); | ||
307 | |||
308 | static int __init dmi_mark_tsc_unstable(struct dmi_system_id *d) | ||
309 | { | ||
310 | printk(KERN_NOTICE "%s detected: marking TSC unstable.\n", | ||
311 | d->ident); | ||
312 | tsc_unstable = 1; | ||
313 | return 0; | ||
314 | } | ||
315 | |||
316 | /* List of systems that have known TSC problems */ | ||
317 | static struct dmi_system_id __initdata bad_tsc_dmi_table[] = { | ||
318 | { | ||
319 | .callback = dmi_mark_tsc_unstable, | ||
320 | .ident = "IBM Thinkpad 380XD", | ||
321 | .matches = { | ||
322 | DMI_MATCH(DMI_BOARD_VENDOR, "IBM"), | ||
323 | DMI_MATCH(DMI_BOARD_NAME, "2635FA0"), | ||
324 | }, | ||
325 | }, | ||
326 | {} | ||
327 | }; | ||
328 | |||
329 | /* | ||
331 | * Make an educated guess whether the TSC is trustworthy and synchronized | ||
331 | * over all CPUs. | ||
332 | */ | ||
333 | __cpuinit int unsynchronized_tsc(void) | ||
334 | { | ||
335 | if (!cpu_has_tsc || tsc_unstable) | ||
336 | return 1; | ||
337 | /* | ||
338 | * Intel systems are normally all synchronized. | ||
339 | * Exceptions must mark TSC as unstable: | ||
340 | */ | ||
341 | if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL) { | ||
342 | /* assume multi socket systems are not synchronized: */ | ||
343 | if (num_possible_cpus() > 1) | ||
344 | tsc_unstable = 1; | ||
345 | } | ||
346 | return tsc_unstable; | ||
347 | } | ||
348 | |||
349 | /* | ||
350 | * Geode_LX - the OLPC CPU has a possibly very reliable TSC | ||
351 | */ | ||
352 | #ifdef CONFIG_MGEODE_LX | ||
353 | /* RTSC counts during suspend */ | ||
354 | #define RTSC_SUSP 0x100 | ||
355 | |||
356 | static void __init check_geode_tsc_reliable(void) | ||
357 | { | ||
358 | unsigned long val; | ||
359 | |||
360 | rdmsrl(MSR_GEODE_BUSCONT_CONF0, val); | ||
361 | if ((val & RTSC_SUSP)) | ||
362 | clocksource_tsc.flags &= ~CLOCK_SOURCE_MUST_VERIFY; | ||
363 | } | ||
364 | #else | ||
365 | static inline void check_geode_tsc_reliable(void) { } | ||
366 | #endif | ||
367 | |||
368 | |||
369 | void __init tsc_init(void) | ||
370 | { | ||
371 | if (!cpu_has_tsc || tsc_disable) | ||
372 | goto out_no_tsc; | ||
373 | |||
374 | cpu_khz = calculate_cpu_khz(); | ||
375 | tsc_khz = cpu_khz; | ||
376 | |||
377 | if (!cpu_khz) | ||
378 | goto out_no_tsc; | ||
379 | |||
380 | printk("Detected %lu.%03lu MHz processor.\n", | ||
381 | (unsigned long)cpu_khz / 1000, | ||
382 | (unsigned long)cpu_khz % 1000); | ||
383 | |||
384 | set_cyc2ns_scale(cpu_khz); | ||
385 | use_tsc_delay(); | ||
386 | |||
387 | /* Check and install the TSC clocksource */ | ||
388 | dmi_check_system(bad_tsc_dmi_table); | ||
389 | |||
390 | unsynchronized_tsc(); | ||
391 | check_geode_tsc_reliable(); | ||
392 | current_tsc_khz = tsc_khz; | ||
393 | clocksource_tsc.mult = clocksource_khz2mult(current_tsc_khz, | ||
394 | clocksource_tsc.shift); | ||
395 | /* lower the rating if we already know it's unstable: */ | ||
396 | if (check_tsc_unstable()) { | ||
397 | clocksource_tsc.rating = 0; | ||
398 | clocksource_tsc.flags &= ~CLOCK_SOURCE_IS_CONTINUOUS; | ||
399 | } else | ||
400 | tsc_enabled = 1; | ||
401 | |||
402 | clocksource_register(&clocksource_tsc); | ||
403 | |||
404 | return; | ||
405 | |||
406 | out_no_tsc: | ||
407 | /* | ||
408 | * Set the tsc_disable flag if there's no TSC support, this | ||
409 | * makes it a fast flag for the kernel to see whether it | ||
410 | * should be using the TSC. | ||
411 | */ | ||
412 | tsc_disable = 1; | ||
413 | } | ||
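
For readers working through the cyc2ns comment, the clocksource mult/shift setup and the calibration loop above, the sketch below redoes the same arithmetic in plain userspace C. It is an editor's illustration, not part of the commit: the helper names and the sample frequency are invented, and khz2mult() only approximates what the kernel's clocksource_khz2mult() computes.

```c
/*
 * Standalone sketch of the fixed-point math used in tsc_32.c above.
 * Illustration only: helper names and sample numbers are made up.
 */
#include <stdio.h>
#include <stdint.h>

#define CYC2NS_SCALE_FACTOR 10	/* same 2^10 scale as in the file above */

/* ns = cycles * ((10^6 << 10) / cpu_khz) >> 10, as derived in the comment */
static uint64_t cycles_to_ns(uint64_t cycles, unsigned long cpu_khz)
{
	unsigned long cyc2ns_scale = (1000000UL << CYC2NS_SCALE_FACTOR) / cpu_khz;

	return (cycles * cyc2ns_scale) >> CYC2NS_SCALE_FACTOR;
}

/*
 * The clocksource plays the same trick with a larger shift (22 in the
 * struct above): ns = (cycles * mult) >> shift, so mult ~= (10^6 << shift)
 * / khz.  Approximation of clocksource_khz2mult(), not a copy of it.
 */
static uint32_t khz2mult(uint32_t khz, unsigned int shift)
{
	uint64_t tmp = (uint64_t)1000000 << shift;

	tmp += khz / 2;			/* round to nearest */
	return (uint32_t)(tmp / khz);
}

/*
 * native_calculate_cpu_khz() counts TSC cycles across a PIT-timed window of
 * CALIBRATE_TIME_MSEC milliseconds and divides with rounding: cycles per
 * millisecond is exactly kHz.  The 30 ms used below is illustrative only.
 */
static unsigned long tsc_delta_to_khz(uint64_t delta, unsigned int calibrate_ms)
{
	delta += calibrate_ms / 2;	/* round for the division */
	return (unsigned long)(delta / calibrate_ms);
}

int main(void)
{
	unsigned long cpu_khz = 2400000UL;	/* hypothetical 2.4 GHz CPU */
	uint64_t one_second = 2400000000ULL;	/* one second worth of cycles */

	/* Both conversions should land close to 10^9 ns for this input. */
	printf("cyc2ns:     %llu ns\n",
	       (unsigned long long)cycles_to_ns(one_second, cpu_khz));
	printf("mult/shift: %llu ns\n",
	       (unsigned long long)((one_second * khz2mult(cpu_khz, 22)) >> 22));

	/* 72,000,000 cycles counted in a 30 ms window -> 2,400,000 kHz */
	printf("calibrated: %lu kHz\n", tsc_delta_to_khz(72000000ULL, 30));
	return 0;
}
```

The visible error in the cyc2ns result (roughly 0.16% for this sample frequency) comes from truncating the scale factor to an integer; the clocksource's larger shift keeps the equivalent rounding error far smaller. That trade-off is acceptable for sched_clock() because, as the comment in native_sched_clock() notes, the scheduler clock tolerates small errors in exchange for speed.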