path: root/arch/x86/kernel/tsc.c
author		Ingo Molnar <mingo@elte.hu>	2009-09-07 02:19:51 -0400
committer	Ingo Molnar <mingo@elte.hu>	2009-09-07 02:19:51 -0400
commit		a1922ed661ab2c1637d0b10cde933bd9cd33d965 (patch)
tree		0f1777542b385ebefd30b3586d830fd8ed6fda5b /arch/x86/kernel/tsc.c
parent		75e33751ca8bbb72dd6f1a74d2810ddc8cbe4bdf (diff)
parent		d28daf923ac5e4a0d7cecebae56f3e339189366b (diff)
Merge branch 'tracing/core' into tracing/hw-breakpoints
Conflicts:
	arch/Kconfig
	kernel/trace/trace.h

Merge reason: resolve the conflicts, plus adapt to the new ring-buffer APIs.

Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'arch/x86/kernel/tsc.c')
-rw-r--r--	arch/x86/kernel/tsc.c	45
1 file changed, 31 insertions(+), 14 deletions(-)
diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c
index ae3180c506a6..71f4368b357e 100644
--- a/arch/x86/kernel/tsc.c
+++ b/arch/x86/kernel/tsc.c
@@ -275,15 +275,20 @@ static unsigned long pit_calibrate_tsc(u32 latch, unsigned long ms, int loopmin)
  * use the TSC value at the transitions to calculate a pretty
  * good value for the TSC frequencty.
  */
+static inline int pit_verify_msb(unsigned char val)
+{
+	/* Ignore LSB */
+	inb(0x42);
+	return inb(0x42) == val;
+}
+
 static inline int pit_expect_msb(unsigned char val, u64 *tscp, unsigned long *deltap)
 {
 	int count;
 	u64 tsc = 0;
 
 	for (count = 0; count < 50000; count++) {
-		/* Ignore LSB */
-		inb(0x42);
-		if (inb(0x42) != val)
+		if (!pit_verify_msb(val))
 			break;
 		tsc = get_cycles();
 	}
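
For context, only the loop body changes here; the tail of pit_expect_msb() is untouched by this hunk. From the same vintage of tsc.c, it continues roughly as follows (quoted for orientation, not part of this diff):

	*deltap = get_cycles() - tsc;
	*tscp = tsc;

	/*
	 * We require _some_ success, but the quality control
	 * will be based on the error terms on the TSC values.
	 */
	return count > 5;

The refactor pays off twice: the LSB-discarding double read of port 0x42 now lives in one place, and the new pit_verify_msb() helper can be called on its own, which the later hunks rely on.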
@@ -336,8 +341,7 @@ static unsigned long quick_pit_calibrate(void)
 	 * to do that is to just read back the 16-bit counter
 	 * once from the PIT.
 	 */
-	inb(0x42);
-	inb(0x42);
+	pit_verify_msb(0);
 
 	if (pit_expect_msb(0xff, &tsc, &d1)) {
 		for (i = 1; i <= MAX_QUICK_PIT_ITERATIONS; i++) {
@@ -348,8 +352,19 @@ static unsigned long quick_pit_calibrate(void)
 			 * Iterate until the error is less than 500 ppm
 			 */
 			delta -= tsc;
-			if (d1+d2 < delta >> 11)
-				goto success;
+			if (d1+d2 >= delta >> 11)
+				continue;
+
+			/*
+			 * Check the PIT one more time to verify that
+			 * all TSC reads were stable wrt the PIT.
+			 *
+			 * This also guarantees serialization of the
+			 * last cycle read ('d2') in pit_expect_msb.
+			 */
+			if (!pit_verify_msb(0xfe - i))
+				break;
+			goto success;
 		}
 	}
 	printk("Fast TSC calibration failed\n");
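
The 500 ppm figure in the comment falls out of the shift: delta >> 11 is delta/2048, so calibration only succeeds when the combined read overhead d1+d2 stays below 1/2048, roughly 488 ppm, of the measured TSC interval. As a stand-alone sketch (the helper name is ours, not the kernel's):

static inline int reads_within_500ppm(u64 d1, u64 d2, u64 delta)
{
	/* delta >> 11 == delta / 2048, i.e. ~488 ppm of the interval */
	return d1 + d2 < (delta >> 11);
}

Inverting the test and using continue puts the new pit_verify_msb(0xfe - i) check on the success path: one final PIT read confirms the TSC samples were taken while the expected MSB was still live, and serializes the last get_cycles() in pit_expect_msb().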
@@ -590,22 +605,26 @@ EXPORT_SYMBOL(recalibrate_cpu_khz);
  */
 
 DEFINE_PER_CPU(unsigned long, cyc2ns);
+DEFINE_PER_CPU(unsigned long long, cyc2ns_offset);
 
 static void set_cyc2ns_scale(unsigned long cpu_khz, int cpu)
 {
-	unsigned long long tsc_now, ns_now;
+	unsigned long long tsc_now, ns_now, *offset;
 	unsigned long flags, *scale;
 
 	local_irq_save(flags);
 	sched_clock_idle_sleep_event();
 
 	scale = &per_cpu(cyc2ns, cpu);
+	offset = &per_cpu(cyc2ns_offset, cpu);
 
 	rdtscll(tsc_now);
 	ns_now = __cycles_2_ns(tsc_now);
 
-	if (cpu_khz)
+	if (cpu_khz) {
 		*scale = (NSEC_PER_MSEC << CYC2NS_SCALE_FACTOR)/cpu_khz;
+		*offset = ns_now - (tsc_now * *scale >> CYC2NS_SCALE_FACTOR);
+	}
 
 	sched_clock_idle_wakeup_event(0);
 	local_irq_restore(flags);
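
The new per-cpu cyc2ns_offset exists so that sched_clock() stays continuous across a rescale: the offset is chosen so that the freshly computed scale reproduces exactly the ns_now just read back. On the read side the pair combines roughly like this sketch (modeled on __cycles_2_ns() in arch/x86/include/asm/timer.h of this era; treat the accessor names as assumptions):

static inline unsigned long long __cycles_2_ns(unsigned long long cyc)
{
	unsigned long long ns = __get_cpu_var(cyc2ns_offset);
	ns += cyc * __get_cpu_var(cyc2ns) >> CYC2NS_SCALE_FACTOR;
	return ns;
}

Plugging tsc_now into this with the new scale and offset yields ns_now again, so a clock read immediately before and immediately after set_cyc2ns_scale() agrees.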
@@ -632,17 +651,15 @@ static int time_cpufreq_notifier(struct notifier_block *nb, unsigned long val,
 			void *data)
 {
 	struct cpufreq_freqs *freq = data;
-	unsigned long *lpj, dummy;
+	unsigned long *lpj;
 
 	if (cpu_has(&cpu_data(freq->cpu), X86_FEATURE_CONSTANT_TSC))
 		return 0;
 
-	lpj = &dummy;
-	if (!(freq->flags & CPUFREQ_CONST_LOOPS))
+	lpj = &boot_cpu_data.loops_per_jiffy;
 #ifdef CONFIG_SMP
+	if (!(freq->flags & CPUFREQ_CONST_LOOPS))
 		lpj = &cpu_data(freq->cpu).loops_per_jiffy;
-#else
-	lpj = &boot_cpu_data.loops_per_jiffy;
 #endif
 
 	if (!ref_freq) {
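
Past the end of this hunk, the !ref_freq branch snapshots the reference values, and the notifier then rescales both *lpj and tsc_khz in proportion to the frequency change via cpufreq_scale(old, div, mult), which computes old * mult / div. In effect (a sketch of the arithmetic using the reference variables the function snapshots; these are not lines from this diff):

	/* e.g. a 2 GHz -> 1 GHz step halves loops_per_jiffy */
	*lpj    = cpufreq_scale(loops_per_jiffy_ref, ref_freq, freq->new);
	tsc_khz = cpufreq_scale(tsc_khz_ref, ref_freq, freq->new);

The dummy write target is gone: lpj now always points at real storage, defaulting to the boot CPU's loops_per_jiffy and retargeted to the per-CPU value on SMP when the delay loop actually scales with frequency.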