aboutsummaryrefslogtreecommitdiffstats
path: root/arch/i386/kernel/timers
diff options
context:
space:
mode:
author:    john stultz <johnstul@us.ibm.com>    2006-02-01 06:05:19 -0500
committer: Linus Torvalds <torvalds@g5.osdl.org>    2006-02-01 11:53:14 -0500
commit:    bfaa1deeb982c985d8e0435e835baeaae63b57fd (patch)
tree:      4e7dcee0443cb0595258f9c8bd16982ba87386f4 /arch/i386/kernel/timers
parent:    2f7016d917faef8f1e016b4a7bd7f594694480b6 (diff)
[PATCH] disable lost tick compensation before TSCs are synced
Avoid lost tick compensation early in boot before the TSCs are synchronized.

Currently timekeeping is enabled before the TSCs are synchronized, thus when
the TSCs are synched (reset to zero), it appears that a number of lost ticks
have occurred.  This can cause premature expiry of timers and in extreme
cases can cause the soft lockup detection to fire.

This resolves issues reported by Andy Whitcroft as well as bug #5366
reported by Tim Mann.

Signed-off-by: John Stultz <johnstul@us.ibm.com>
Acked-by: Andy Whitcroft <apw@shadowen.org>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
Diffstat (limited to 'arch/i386/kernel/timers')
-rw-r--r--  arch/i386/kernel/timers/timer_tsc.c  |  14
1 file changed, 12 insertions(+), 2 deletions(-)
diff --git a/arch/i386/kernel/timers/timer_tsc.c b/arch/i386/kernel/timers/timer_tsc.c
index 47675bbbb31..7c86e3c5f1c 100644
--- a/arch/i386/kernel/timers/timer_tsc.c
+++ b/arch/i386/kernel/timers/timer_tsc.c
@@ -45,6 +45,15 @@ static unsigned long last_tsc_high; /* msb 32 bits of Time Stamp Counter */
 static unsigned long long monotonic_base;
 static seqlock_t monotonic_lock = SEQLOCK_UNLOCKED;
 
+/* Avoid compensating for lost ticks before TSCs are synched */
+static int detect_lost_ticks;
+static int __init start_lost_tick_compensation(void)
+{
+	detect_lost_ticks = 1;
+	return 0;
+}
+late_initcall(start_lost_tick_compensation);
+
 /* convert from cycles(64bits) => nanoseconds (64bits)
  * basic equation:
  *		ns = cycles / (freq / ns_per_sec)
@@ -196,7 +205,8 @@ static void mark_offset_tsc_hpet(void)
 
 	/* lost tick compensation */
 	offset = hpet_readl(HPET_T0_CMP) - hpet_tick;
-	if (unlikely(((offset - hpet_last) > hpet_tick) && (hpet_last != 0))) {
+	if (unlikely(((offset - hpet_last) > hpet_tick) && (hpet_last != 0))
+					&& detect_lost_ticks) {
 		int lost_ticks = (offset - hpet_last) / hpet_tick;
 		jiffies_64 += lost_ticks;
 	}
@@ -421,7 +431,7 @@ static void mark_offset_tsc(void)
 		delta += delay_at_last_interrupt;
 		lost = delta/(1000000/HZ);
 		delay = delta%(1000000/HZ);
-		if (lost >= 2) {
+		if (lost >= 2 && detect_lost_ticks) {
 			jiffies_64 += lost-1;
 
 			/* sanity check to ensure we're not always losing ticks */