author    Dimitri Sivanich <sivanich@sgi.com>    2006-09-26 04:52:34 -0400
committer Andi Kleen <andi@basil.nowhere.org>   2006-09-26 04:52:34 -0400
commit    cbf9b4bb76c9ce53b7fdde0dffcd000951b5f0d4 (patch)
tree      0942698b5aef01d2b89fe04111f7ba40b28865f0
parent    d28c4393a7bf558538e9def269c1caeab6ec056f (diff)
[PATCH] X86_64 monotonic_clock goes backwards
I've noticed some erratic behavior while testing the X86_64 version of
monotonic_clock().

While spinning in a loop reading monotonic clock values (pinned to a
single cpu) I noticed that the difference between subsequent values
occasionally went negative (time going backwards).

I found that in the following code:

	this_offset = get_cycles_sync();
	/* FIXME: 1000 or 1000000? */
-->	offset = (this_offset - last_offset)*1000 / cpu_khz;
	}
	return base + offset;

the offset sometimes turns out to be 0, even though
this_offset > last_offset.

Added fix From: Toyo Abe <toyoa@mvista.com>

The x86_64-mm-monotonic-clock.patch in 2.6.18-rc4-mm2 made a change to
the updating of monotonic_base.  It now uses cycles_2_ns().

I suggest that a set_cyc2ns_scale() be done prior to the setup_irq(),
because cycles_2_ns() can be called from the timer ISR right after irq0
is enabled.

Signed-off-by: Toyo Abe <toyoa@mvista.com>
Signed-off-by: Dimitri Sivanich <sivanich@sgi.com>
Signed-off-by: Andi Kleen <ak@suse.de>
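For illustration, here is a minimal user-space sketch of the two
conversions.  The NS_SCALE value, the *_demo helper names, and the
2.4 GHz cpu_khz are assumptions for this demo only; the kernel's own
set_cyc2ns_scale()/cycles_2_ns() live in time.c and may differ in
detail.

	#include <stdio.h>

	#define NS_SCALE 10	/* fixed-point fraction bits (assumed) */

	static unsigned long cyc2ns_scale;

	/* Precompute ns-per-cycle in fixed point:
	 * ns = cyc * 10^6 / cpu_khz  =>  scale = (10^6 << NS_SCALE) / cpu_khz */
	static void set_cyc2ns_scale_demo(unsigned long cpu_khz)
	{
		cyc2ns_scale = (1000000UL << NS_SCALE) / cpu_khz;
	}

	static unsigned long long cycles_2_ns_demo(unsigned long long cyc)
	{
		return (cyc * cyc2ns_scale) >> NS_SCALE;
	}

	int main(void)
	{
		unsigned long cpu_khz = 2400000;	/* 2.4 GHz, assumed */
		unsigned long long delta = 1200;	/* ~0.5 us worth of cycles */

		/* Old conversion: truncates to 0 for any delta < cpu_khz/1000. */
		printf("old: %llu\n", delta * 1000 / cpu_khz);	/* prints 0 */

		set_cyc2ns_scale_demo(cpu_khz);
		printf("new: %llu ns\n", cycles_2_ns_demo(delta));	/* ~499 ns */
		return 0;
	}

At 2.4 GHz the old expression yields 0 for any delta under 2400 cycles
(one microsecond), so a reader spinning on monotonic_clock() can see
the value stall or step backwards around a timer tick, while the
fixed-point conversion keeps nanosecond resolution for the same delta.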
-rw-r--r--  arch/x86_64/kernel/time.c | 11 ++++-------
1 file changed, 4 insertions(+), 7 deletions(-)
diff --git a/arch/x86_64/kernel/time.c b/arch/x86_64/kernel/time.c
index d66c7f750e75..97115e608ed8 100644
--- a/arch/x86_64/kernel/time.c
+++ b/arch/x86_64/kernel/time.c
@@ -276,6 +276,7 @@ static void set_rtc_mmss(unsigned long nowtime)
  * Note: This function is required to return accurate
  * time even in the absence of multiple timer ticks.
  */
+static inline unsigned long long cycles_2_ns(unsigned long long cyc);
 unsigned long long monotonic_clock(void)
 {
 	unsigned long seq;
@@ -300,8 +301,7 @@ unsigned long long monotonic_clock(void)
 			base = monotonic_base;
 		} while (read_seqretry(&xtime_lock, seq));
 		this_offset = get_cycles_sync();
-		/* FIXME: 1000 or 1000000? */
-		offset = (this_offset - last_offset)*1000 / cpu_khz;
+		offset = cycles_2_ns(this_offset - last_offset);
 	}
 	return base + offset;
 }
@@ -405,8 +405,7 @@ void main_timer_handler(struct pt_regs *regs)
 		offset %= USEC_PER_TICK;
 	}
 
-	/* FIXME: 1000 or 1000000? */
-	monotonic_base += (tsc - vxtime.last_tsc) * 1000000 / cpu_khz;
+	monotonic_base += cycles_2_ns(tsc - vxtime.last_tsc);
 
 	vxtime.last_tsc = tsc - vxtime.quot * delay / vxtime.tsc_quot;
 
@@ -929,10 +928,8 @@ void __init time_init(void)
 	vxtime.quot = (USEC_PER_SEC << US_SCALE) / vxtime_hz;
 	vxtime.tsc_quot = (USEC_PER_MSEC << US_SCALE) / cpu_khz;
 	vxtime.last_tsc = get_cycles_sync();
-	setup_irq(0, &irq0);
-
 	set_cyc2ns_scale(cpu_khz);
-
+	setup_irq(0, &irq0);
 	hotcpu_notifier(time_cpu_notifier, 0);
 	time_cpu_notifier(NULL, CPU_ONLINE, (void *)(long)smp_processor_id());
 
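As a footnote on the time_init() reordering, here is a toy user-space
model of the race the Toyo Abe part of the patch closes.  Everything
below is illustrative; the names and the 2.4 GHz value are assumptions,
and the "interrupt" is just a function call standing in for irq0.

	#include <stdio.h>

	#define NS_SCALE 10	/* fixed-point fraction bits (assumed) */

	/* Like the kernel's static scale variable, this starts out zero
	 * until set_cyc2ns_scale() runs. */
	static unsigned long cyc2ns_scale;

	static void set_cyc2ns_scale(unsigned long khz)
	{
		cyc2ns_scale = (1000000UL << NS_SCALE) / khz;
	}

	static unsigned long long cycles_2_ns(unsigned long long cyc)
	{
		return (cyc * cyc2ns_scale) >> NS_SCALE;
	}

	/* Stand-in for the monotonic_base update in main_timer_handler(). */
	static unsigned long long timer_tick(unsigned long long delta_cycles)
	{
		return cycles_2_ns(delta_cycles);
	}

	int main(void)
	{
		/* Old order: irq0 was enabled before the scale was set, so an
		 * early tick converts through a zero scale and the elapsed
		 * cycles vanish. */
		printf("tick before scale init: %llu ns\n", timer_tick(2400000));

		set_cyc2ns_scale(2400000);	/* 2.4 GHz, assumed */
		printf("tick after scale init:  %llu ns\n", timer_tick(2400000));
		return 0;
	}

With the scale still zero, the first "tick" contributes 0 ns to
monotonic_base even though a full millisecond of cycles elapsed;
calling set_cyc2ns_scale() before setup_irq() guarantees the first
real interrupt can never hit that window.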