Diffstat (limited to 'arch/i386/kernel/timers/timer_tsc.c')
-rw-r--r--    arch/i386/kernel/timers/timer_tsc.c    178
1 file changed, 0 insertions(+), 178 deletions(-)
diff --git a/arch/i386/kernel/timers/timer_tsc.c b/arch/i386/kernel/timers/timer_tsc.c
index f1187ddb0d0f..243ec0484079 100644
--- a/arch/i386/kernel/timers/timer_tsc.c
+++ b/arch/i386/kernel/timers/timer_tsc.c
@@ -32,10 +32,6 @@ static unsigned long hpet_last;
 static struct timer_opts timer_tsc;
 #endif
 
-static inline void cpufreq_delayed_get(void);
-
-int tsc_disable __devinitdata = 0;
-
 static int use_tsc;
 /* Number of usecs that the last interrupt was delayed */
 static int delay_at_last_interrupt;
@@ -144,30 +140,6 @@ static unsigned long long monotonic_clock_tsc(void)
 	return base + cycles_2_ns(this_offset - last_offset);
 }
 
-/*
- * Scheduler clock - returns current time in nanosec units.
- */
-unsigned long long sched_clock(void)
-{
-	unsigned long long this_offset;
-
-	/*
-	 * In the NUMA case we dont use the TSC as they are not
-	 * synchronized across all CPUs.
-	 */
-#ifndef CONFIG_NUMA
-	if (!use_tsc)
-#endif
-	/* no locking but a rare wrong value is not a big deal */
-	return jiffies_64 * (1000000000 / HZ);
-
-	/* Read the Time Stamp Counter */
-	rdtscll(this_offset);
-
-	/* return the value in ns */
-	return cycles_2_ns(this_offset);
-}
-
 static void delay_tsc(unsigned long loops)
 {
 	unsigned long bclock, now;
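
The removed sched_clock() has two paths: with no usable TSC it returns jiffies_64 scaled to nanoseconds, so time only advances at HZ granularity, and otherwise it feeds the raw TSC value to cycles_2_ns(), a fixed-point multiply-and-shift that avoids a 64-bit divide on every call. A minimal standalone sketch of that conversion; the CYC2NS_SCALE_FACTOR name matches the timer code of this era, and the 2 GHz cpu_khz value is an assumed example:

#include <stdio.h>

#define CYC2NS_SCALE_FACTOR 10

static unsigned long long cyc2ns_scale;

/* precompute ns-per-cycle as a scaled integer: (10^6 << 10) / cpu_khz */
static void set_cyc2ns_scale(unsigned long cpu_khz)
{
	cyc2ns_scale = (1000000ULL << CYC2NS_SCALE_FACTOR) / cpu_khz;
}

/* fast path: one multiply and one shift per conversion */
static unsigned long long cycles_2_ns(unsigned long long cyc)
{
	return (cyc * cyc2ns_scale) >> CYC2NS_SCALE_FACTOR;
}

int main(void)
{
	set_cyc2ns_scale(2000000);	/* assumed 2 GHz TSC */
	/* 2e9 cycles at 2 GHz is one second, i.e. ~1e9 ns */
	printf("%llu ns\n", cycles_2_ns(2000000000ULL));
	return 0;
}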
@@ -231,136 +203,6 @@ static void mark_offset_tsc_hpet(void)
 }
 #endif
 
-
-#ifdef CONFIG_CPU_FREQ
-#include <linux/workqueue.h>
-
-static unsigned int cpufreq_delayed_issched = 0;
-static unsigned int cpufreq_init = 0;
-static struct work_struct cpufreq_delayed_get_work;
-
-static void handle_cpufreq_delayed_get(void *v)
-{
-	unsigned int cpu;
-	for_each_online_cpu(cpu) {
-		cpufreq_get(cpu);
-	}
-	cpufreq_delayed_issched = 0;
-}
-
-/* if we notice lost ticks, schedule a call to cpufreq_get() as it tries
- * to verify the CPU frequency the timing core thinks the CPU is running
- * at is still correct.
- */
-static inline void cpufreq_delayed_get(void)
-{
-	if (cpufreq_init && !cpufreq_delayed_issched) {
-		cpufreq_delayed_issched = 1;
-		printk(KERN_DEBUG "Losing some ticks... checking if CPU frequency changed.\n");
-		schedule_work(&cpufreq_delayed_get_work);
-	}
-}
-
-/* If the CPU frequency is scaled, TSC-based delays will need a different
- * loops_per_jiffy value to function properly.
- */
-
-static unsigned int ref_freq = 0;
-static unsigned long loops_per_jiffy_ref = 0;
-
-#ifndef CONFIG_SMP
-static unsigned long fast_gettimeoffset_ref = 0;
-static unsigned int cpu_khz_ref = 0;
-#endif
-
-static int
-time_cpufreq_notifier(struct notifier_block *nb, unsigned long val,
-		       void *data)
-{
-	struct cpufreq_freqs *freq = data;
-
-	if (val != CPUFREQ_RESUMECHANGE && val != CPUFREQ_SUSPENDCHANGE)
-		write_seqlock_irq(&xtime_lock);
-	if (!ref_freq) {
-		if (!freq->old){
-			ref_freq = freq->new;
-			goto end;
-		}
-		ref_freq = freq->old;
-		loops_per_jiffy_ref = cpu_data[freq->cpu].loops_per_jiffy;
-#ifndef CONFIG_SMP
-		fast_gettimeoffset_ref = fast_gettimeoffset_quotient;
-		cpu_khz_ref = cpu_khz;
-#endif
-	}
-
-	if ((val == CPUFREQ_PRECHANGE && freq->old < freq->new) ||
-	    (val == CPUFREQ_POSTCHANGE && freq->old > freq->new) ||
-	    (val == CPUFREQ_RESUMECHANGE)) {
-		if (!(freq->flags & CPUFREQ_CONST_LOOPS))
-			cpu_data[freq->cpu].loops_per_jiffy = cpufreq_scale(loops_per_jiffy_ref, ref_freq, freq->new);
-#ifndef CONFIG_SMP
-		if (cpu_khz)
-			cpu_khz = cpufreq_scale(cpu_khz_ref, ref_freq, freq->new);
-		if (use_tsc) {
-			if (!(freq->flags & CPUFREQ_CONST_LOOPS)) {
-				fast_gettimeoffset_quotient = cpufreq_scale(fast_gettimeoffset_ref, freq->new, ref_freq);
-				set_cyc2ns_scale(cpu_khz);
-			}
-		}
-#endif
-	}
-
-end:
-	if (val != CPUFREQ_RESUMECHANGE && val != CPUFREQ_SUSPENDCHANGE)
-		write_sequnlock_irq(&xtime_lock);
-
-	return 0;
-}
-
-static struct notifier_block time_cpufreq_notifier_block = {
-	.notifier_call = time_cpufreq_notifier
-};
-
-
-static int __init cpufreq_tsc(void)
-{
-	int ret;
-	INIT_WORK(&cpufreq_delayed_get_work, handle_cpufreq_delayed_get, NULL);
-	ret = cpufreq_register_notifier(&time_cpufreq_notifier_block,
-					CPUFREQ_TRANSITION_NOTIFIER);
-	if (!ret)
-		cpufreq_init = 1;
-	return ret;
-}
-core_initcall(cpufreq_tsc);
-
-#else /* CONFIG_CPU_FREQ */
-static inline void cpufreq_delayed_get(void) { return; }
-#endif
-
-int recalibrate_cpu_khz(void)
-{
-#ifndef CONFIG_SMP
-	unsigned int cpu_khz_old = cpu_khz;
-
-	if (cpu_has_tsc) {
-		local_irq_disable();
-		init_cpu_khz();
-		local_irq_enable();
-		cpu_data[0].loops_per_jiffy =
-			cpufreq_scale(cpu_data[0].loops_per_jiffy,
-				      cpu_khz_old,
-				      cpu_khz);
-		return 0;
-	} else
-		return -ENODEV;
-#else
-	return -ENODEV;
-#endif
-}
-EXPORT_SYMBOL(recalibrate_cpu_khz);
-
 static void mark_offset_tsc(void)
 {
 	unsigned long lost,delay;
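
Every rescaling above, in the notifier and in recalibrate_cpu_khz(), funnels through cpufreq_scale(old, div, mult), which is old * mult / div computed with a 64-bit intermediate so 32-bit quantities such as loops_per_jiffy cannot overflow. A userspace sketch of the arithmetic; the frequencies and the loops_per_jiffy value are assumed examples:

#include <stdio.h>
#include <stdint.h>

/* mirrors cpufreq_scale(): rescale a value calibrated at frequency
 * 'div' (kHz) to the new frequency 'mult', widening to 64 bits first */
static unsigned long scale(unsigned long old, unsigned int div,
			   unsigned int mult)
{
	return (unsigned long)(((uint64_t)old * mult) / div);
}

int main(void)
{
	/* assumed: loops_per_jiffy calibrated at 1000 MHz, CPU now at 600 MHz */
	unsigned long lpj_ref = 4997120;

	printf("loops_per_jiffy at 600 MHz: %lu\n",
	       scale(lpj_ref, 1000000, 600000));
	return 0;
}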
@@ -451,9 +293,6 @@ static void mark_offset_tsc(void)
 
 			clock_fallback();
 		}
-		/* ... but give the TSC a fair chance */
-		if (lost_count > 25)
-			cpufreq_delayed_get();
 	} else
 		lost_count = 0;
 	/* update the monotonic base value */
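
This hunk drops the last caller of cpufreq_delayed_get(): when ticks keep arriving late the code eventually falls back to a slower timesource, but it also re-checks the CPU frequency, because an unnoticed cpufreq transition and a genuinely broken TSC look identical from here. A toy sketch of that policy; only the threshold of 25 and the reset on a clean tick come from the code above, the rest is scaffolding with assumed names:

#include <stdio.h>

static int lost_count;

/* stand-in for the removed helper: ask the cpufreq core, from process
 * context, whether the frequency the timing code assumes is still right */
static void cpufreq_delayed_get_stub(void)
{
	printf("checking CPU frequency after %d late ticks\n", lost_count);
}

static void on_tick(int tick_was_lost)
{
	if (tick_was_lost) {
		if (++lost_count > 25)
			cpufreq_delayed_get_stub();
	} else
		lost_count = 0;		/* a clean tick resets the count */
}

int main(void)
{
	for (int i = 0; i < 30; i++)
		on_tick(1);
	return 0;
}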
@@ -578,23 +417,6 @@ static int tsc_resume(void)
 	return 0;
 }
 
-#ifndef CONFIG_X86_TSC
-/* disable flag for tsc. Takes effect by clearing the TSC cpu flag
- * in cpu/common.c */
-static int __init tsc_setup(char *str)
-{
-	tsc_disable = 1;
-	return 1;
-}
-#else
-static int __init tsc_setup(char *str)
-{
-	printk(KERN_WARNING "notsc: Kernel compiled with CONFIG_X86_TSC, "
-	       "cannot disable TSC.\n");
-	return 1;
-}
-#endif
-__setup("notsc", tsc_setup);
 
 
 
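
The removed tsc_setup() only sets tsc_disable; as its comment says, the flag takes effect later during CPU identification in arch/i386/kernel/cpu/common.c, which clears the TSC feature bit so the timer selection code never picks a TSC-based timesource. Approximately, reconstructed from the same-era common.c rather than quoted from this diff:

	if (tsc_disable && cpu_has_tsc) {
		printk(KERN_NOTICE "Disabling TSC...\n");
		clear_bit(X86_FEATURE_TSC, boot_cpu_data.x86_capability);
		set_in_cr4(X86_CR4_TSD);	/* userspace rdtsc now faults */
	}

With that bit cleared, booting with "notsc" on the command line (on a kernel built without CONFIG_X86_TSC) leaves the PIT- and HPET-based timers as the only candidates.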