diff options
author | john stultz <johnstul@us.ibm.com> | 2006-07-14 03:24:17 -0400 |
---|---|---|
committer | Linus Torvalds <torvalds@g5.osdl.org> | 2006-07-15 00:53:54 -0400 |
commit | 3e143475c22036847f898d7e76ba337c1d7dbf6f (patch) | |
tree | 4bbd8f322f6a1cd43be409393742967234d23151 /kernel/timer.c | |
parent | 635adb6cd25c8f816c9017a0a0349cd389eafcd3 (diff) |
[PATCH] improve timekeeping resume robustness
Resolve problems seen w/ APM suspend.
Due to resume initialization ordering, it's possible we could get a timer
interrupt before the timekeeping resume() function is called. This patch
ensures we don't do any timekeeping accounting before we're fully resumed.
(akpm: fixes the machine-freezes-on-APM-resume bug)
Signed-off-by: John Stultz <johnstul@us.ibm.com>
Cc: Roman Zippel <zippel@linux-m68k.org>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
Diffstat (limited to 'kernel/timer.c')
-rw-r--r-- | kernel/timer.c | 19 |
1 files changed, 18 insertions, 1 deletions
diff --git a/kernel/timer.c b/kernel/timer.c index acfa557e685b..05809c2e2fd6 100644 --- a/kernel/timer.c +++ b/kernel/timer.c | |||
@@ -969,6 +969,7 @@ void __init timekeeping_init(void) | |||
969 | } | 969 | } |
970 | 970 | ||
971 | 971 | ||
972 | static int timekeeping_suspended; | ||
972 | /* | 973 | /* |
973 | * timekeeping_resume - Resumes the generic timekeeping subsystem. | 974 | * timekeeping_resume - Resumes the generic timekeeping subsystem. |
974 | * @dev: unused | 975 | * @dev: unused |
@@ -984,6 +985,18 @@ static int timekeeping_resume(struct sys_device *dev) | |||
984 | write_seqlock_irqsave(&xtime_lock, flags); | 985 | write_seqlock_irqsave(&xtime_lock, flags); |
985 | /* restart the last cycle value */ | 986 | /* restart the last cycle value */ |
986 | clock->cycle_last = clocksource_read(clock); | 987 | clock->cycle_last = clocksource_read(clock); |
988 | clock->error = 0; | ||
989 | timekeeping_suspended = 0; | ||
990 | write_sequnlock_irqrestore(&xtime_lock, flags); | ||
991 | return 0; | ||
992 | } | ||
993 | |||
994 | static int timekeeping_suspend(struct sys_device *dev, pm_message_t state) | ||
995 | { | ||
996 | unsigned long flags; | ||
997 | |||
998 | write_seqlock_irqsave(&xtime_lock, flags); | ||
999 | timekeeping_suspended = 1; | ||
987 | write_sequnlock_irqrestore(&xtime_lock, flags); | 1000 | write_sequnlock_irqrestore(&xtime_lock, flags); |
988 | return 0; | 1001 | return 0; |
989 | } | 1002 | } |
@@ -991,6 +1004,7 @@ static int timekeeping_resume(struct sys_device *dev) | |||
991 | /* sysfs resume/suspend bits for timekeeping */ | 1004 | /* sysfs resume/suspend bits for timekeeping */ |
992 | static struct sysdev_class timekeeping_sysclass = { | 1005 | static struct sysdev_class timekeeping_sysclass = { |
993 | .resume = timekeeping_resume, | 1006 | .resume = timekeeping_resume, |
1007 | .suspend = timekeeping_suspend, | ||
994 | set_kset_name("timekeeping"), | 1008 | set_kset_name("timekeeping"), |
995 | }; | 1009 | }; |
996 | 1010 | ||
@@ -1101,13 +1115,16 @@ static void update_wall_time(void) | |||
1101 | { | 1115 | { |
1102 | cycle_t offset; | 1116 | cycle_t offset; |
1103 | 1117 | ||
1104 | clock->xtime_nsec += (s64)xtime.tv_nsec << clock->shift; | 1118 | /* Make sure we're fully resumed: */ |
1119 | if (unlikely(timekeeping_suspended)) | ||
1120 | return; | ||
1105 | 1121 | ||
1106 | #ifdef CONFIG_GENERIC_TIME | 1122 | #ifdef CONFIG_GENERIC_TIME |
1107 | offset = (clocksource_read(clock) - clock->cycle_last) & clock->mask; | 1123 | offset = (clocksource_read(clock) - clock->cycle_last) & clock->mask; |
1108 | #else | 1124 | #else |
1109 | offset = clock->cycle_interval; | 1125 | offset = clock->cycle_interval; |
1110 | #endif | 1126 | #endif |
1127 | clock->xtime_nsec += (s64)xtime.tv_nsec << clock->shift; | ||
1111 | 1128 | ||
1112 | /* normally this loop will run just once, however in the | 1129 | /* normally this loop will run just once, however in the |
1113 | * case of lost or late ticks, it will accumulate correctly. | 1130 | * case of lost or late ticks, it will accumulate correctly. |