Diffstat (limited to 'arch/x86/kernel/tsc.c')
 arch/x86/kernel/tsc.c | 51 ++++++++++++++++++++++++++++++++++++++++++++-----------
 1 file changed, 40 insertions(+), 11 deletions(-)
diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c
index 2c9cf0fd78f5..fc0a147e3727 100644
--- a/arch/x86/kernel/tsc.c
+++ b/arch/x86/kernel/tsc.c
@@ -290,14 +290,15 @@ static inline int pit_verify_msb(unsigned char val)
 static inline int pit_expect_msb(unsigned char val, u64 *tscp, unsigned long *deltap)
 {
 	int count;
-	u64 tsc = 0;
+	u64 tsc = 0, prev_tsc = 0;
 
 	for (count = 0; count < 50000; count++) {
 		if (!pit_verify_msb(val))
 			break;
+		prev_tsc = tsc;
 		tsc = get_cycles();
 	}
-	*deltap = get_cycles() - tsc;
+	*deltap = get_cycles() - prev_tsc;
 	*tscp = tsc;
 
 	/*
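Measuring the final delta against prev_tsc instead of tsc makes the reported error bar conservative: the old expression covered only the tail of the loop and could understate the real uncertainty around the MSB transition, while anchoring at prev_tsc guarantees the window spans the entire final iteration. For context (not part of this patch), the caller treats these deltas as endpoint error bars; in kernels of this vintage the acceptance test in quick_pit_calibrate() looks roughly like the sketch below, where t1/t2 and d1/d2 come from two pit_expect_msb() calls and the exact form may differ by version:

	delta = t2 - t1;
	/* reject if combined error bars exceed ~1/2048, about 500ppm */
	if (d1 + d2 >= delta >> 11)
		continue;	/* not accurate enough yet, keep counting */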
@@ -311,9 +312,9 @@ static inline int pit_expect_msb(unsigned char val, u64 *tscp, unsigned long *de
 	 * How many MSB values do we want to see? We aim for
 	 * a maximum error rate of 500ppm (in practice the
 	 * real error is much smaller), but refuse to spend
-	 * more than 25ms on it.
+	 * more than 50ms on it.
 	 */
-#define MAX_QUICK_PIT_MS 25
+#define MAX_QUICK_PIT_MS 50
 #define MAX_QUICK_PIT_ITERATIONS (MAX_QUICK_PIT_MS * PIT_TICK_RATE / 1000 / 256)
 
 static unsigned long quick_pit_calibrate(void)
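With the time budget doubled, the iteration cap doubles too; plugging in PIT_TICK_RATE = 1193182 Hz:

	MAX_QUICK_PIT_ITERATIONS = 50 * 1193182 / 1000 / 256 ≈ 233

Each iteration waits out one 256-tick PIT MSB period, 256 / 1193182 s ≈ 214.6 µs, so 233 iterations come to just about the advertised 50 ms (the old 25 ms budget allowed ~116).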
@@ -383,15 +384,12 @@ success:
 	 *
 	 * As a result, we can depend on there not being
 	 * any odd delays anywhere, and the TSC reads are
-	 * reliable (within the error). We also adjust the
-	 * delta to the middle of the error bars, just
-	 * because it looks nicer.
+	 * reliable (within the error).
 	 *
 	 * kHz = ticks / time-in-seconds / 1000;
 	 * kHz = (t2 - t1) / (I * 256 / PIT_TICK_RATE) / 1000
 	 * kHz = ((t2 - t1) * PIT_TICK_RATE) / (I * 256 * 1000)
 	 */
-	delta += (long)(d2 - d1)/2;
 	delta *= PIT_TICK_RATE;
 	do_div(delta, i*256*1000);
 	printk("Fast TSC calibration using PIT\n");
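A worked instance of the surviving formula, with hypothetical numbers: if the loop counts i = 233 MSB periods (≈ 50 ms, per the macro above) while the TSC advances by delta = t2 - t1 = 150,000,000 cycles, then

	kHz = 150000000 * 1193182 / (233 * 256 * 1000) ≈ 3000558

i.e. a TSC ticking at roughly 3.0 GHz. The deleted (d2 - d1)/2 term only centred delta within error bars already bounded to ~500ppm, so removing it changes nothing outside the accepted tolerance.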
@@ -622,7 +620,8 @@ static void set_cyc2ns_scale(unsigned long cpu_khz, int cpu)
 
 	if (cpu_khz) {
 		*scale = (NSEC_PER_MSEC << CYC2NS_SCALE_FACTOR)/cpu_khz;
-		*offset = ns_now - (tsc_now * *scale >> CYC2NS_SCALE_FACTOR);
+		*offset = ns_now - mult_frac(tsc_now, *scale,
+					     (1UL << CYC2NS_SCALE_FACTOR));
 	}
 
 	sched_clock_idle_wakeup_event(0);
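The switch to mult_frac() is an overflow fix: in the old line the intermediate product tsc_now * *scale can exceed 64 bits once the TSC has counted far enough, wrapping before the right shift and corrupting *offset. mult_frac() from include/linux/kernel.h avoids the wide intermediate by splitting the dividend; roughly (modulo exact kernel formatting):

	#define mult_frac(x, numer, denom)({				\
		typeof(x) quot = (x) / (denom);				\
		typeof(x) rem  = (x) % (denom);				\
		(quot * (numer)) + ((rem * (numer)) / (denom));		\
	})

Since rem < denom = 1UL << CYC2NS_SCALE_FACTOR, the remainder product is bounded by denom * numer, so the intermediate no longer wraps whenever the true result fits in 64 bits.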
@@ -631,7 +630,7 @@ static void set_cyc2ns_scale(unsigned long cpu_khz, int cpu)
 
 static unsigned long long cyc2ns_suspend;
 
-void save_sched_clock_state(void)
+void tsc_save_sched_clock_state(void)
 {
 	if (!sched_clock_stable)
 		return;
@@ -647,7 +646,7 @@ void save_sched_clock_state(void)
  * that sched_clock() continues from the point where it was left off during
  * suspend.
  */
-void restore_sched_clock_state(void)
+void tsc_restore_sched_clock_state(void)
 {
 	unsigned long long offset;
 	unsigned long flags;
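The tsc_ prefix presumably frees the generic names for an indirection layer: under the assumption that this hunk belongs to the kvmclock suspend/resume rework, the unprefixed entry points become x86_platform ops that default to these TSC implementations, along the lines of:

	/* hedged sketch: platform ops defaulting to the TSC variants */
	.save_sched_clock_state		= tsc_save_sched_clock_state,
	.restore_sched_clock_state	= tsc_restore_sched_clock_state,

so a paravirtual clock can override them without touching tsc.c.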
@@ -934,6 +933,16 @@ static int __init init_tsc_clocksource(void)
 		clocksource_tsc.rating = 0;
 		clocksource_tsc.flags &= ~CLOCK_SOURCE_IS_CONTINUOUS;
 	}
+
+	/*
+	 * Trust the results of the earlier calibration on systems
+	 * exporting a reliable TSC.
+	 */
+	if (boot_cpu_has(X86_FEATURE_TSC_RELIABLE)) {
+		clocksource_register_khz(&clocksource_tsc, tsc_khz);
+		return 0;
+	}
+
 	schedule_delayed_work(&tsc_irqwork, 0);
 	return 0;
 }
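X86_FEATURE_TSC_RELIABLE is a synthetic flag set by platform code, not read from CPUID; hypervisors exposing a fixed-rate TSC are the usual source. As a hedged illustration (cf. arch/x86/kernel/cpu/vmware.c, details vary by kernel version):

	static void __cpuinit vmware_set_cpu_features(struct cpuinfo_x86 *c)
	{
		set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);
		set_cpu_cap(c, X86_FEATURE_TSC_RELIABLE);
	}

On such systems the delayed tsc_irqwork refinement adds nothing, so the new branch registers the clocksource with the boot-time tsc_khz and returns before scheduling it.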
@@ -995,3 +1004,23 @@ void __init tsc_init(void)
 	check_system_tsc_reliable();
 }
 
+#ifdef CONFIG_SMP
+/*
+ * If we have a constant TSC and are using the TSC for the delay loop,
+ * we can skip clock calibration if another cpu in the same socket has already
+ * been calibrated. This assumes that CONSTANT_TSC applies to all
+ * cpus in the socket - this should be a safe assumption.
+ */
+unsigned long __cpuinit calibrate_delay_is_known(void)
+{
+	int i, cpu = smp_processor_id();
+
+	if (!tsc_disabled && !cpu_has(&cpu_data(cpu), X86_FEATURE_CONSTANT_TSC))
+		return 0;
+
+	for_each_online_cpu(i)
+		if (cpu_data(i).phys_proc_id == cpu_data(cpu).phys_proc_id)
+			return cpu_data(i).loops_per_jiffy;
+	return 0;
+}
+#endif
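calibrate_delay_is_known() is consumed by the generic delay-loop calibration in init/calibrate.c, where it overrides a weak stub; a minimal sketch of that side, assuming the generic-hook half of this series:

	/* init/calibrate.c: architectures without the hook get 0 back */
	unsigned long __attribute__((weak)) __cpuinit calibrate_delay_is_known(void)
	{
		return 0;
	}

calibrate_delay() tries this hook first and only falls back to timer-based measurement when it returns 0, which is what lets every non-boot CPU in an already-calibrated socket skip the expensive loop.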