author      Thomas Gleixner <tglx@linutronix.de>    2009-08-28 14:25:24 -0400
committer   Thomas Gleixner <tglx@linutronix.de>    2009-08-28 14:25:24 -0400
commit      7285dd7fd375763bfb8ab1ac9cf3f1206f503c16 (patch)
tree        42f809ab3616cc3d93d655acccfc2d54e9f6d0e4 /kernel
parent      90cba64a5f672a239f43ec5cb9a11b806887331e (diff)
clocksource: Resolve cpu hotplug deadlock with TSC unstable
Martin Schwidefsky analyzed it:
To register a clocksource, the clocksource_mutex is acquired and, if
necessary, timekeeping_notify is called to install the clocksource as
the timekeeper clock. timekeeping_notify uses stop_machine, which needs
to take the cpu_add_remove_lock mutex.
Starting a new cpu is done with the cpu_add_remove_lock mutex held.
native_cpu_up checks the TSC of the new cpu and, if the TSC is no good,
calls clocksource_change_rating, which needs the clocksource_mutex;
the deadlock is complete.
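This is a classic ABBA lock-order inversion: one path takes
clocksource_mutex and then needs cpu_add_remove_lock, the other takes
them in the opposite order. The following user-space sketch only
illustrates that ordering; it is not kernel code. The pthread mutexes
and the register_path/hotplug_path functions are stand-ins for
clocksource_mutex, cpu_add_remove_lock and the two call chains
described above.

/*
 * Minimal user-space illustration of the lock inversion (assumption:
 * pthread mutexes and the register_path/hotplug_path threads stand in
 * for clocksource_mutex, cpu_add_remove_lock and the kernel call chains).
 */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t clocksource_mutex   = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t cpu_add_remove_lock = PTHREAD_MUTEX_INITIALIZER;

/* Registration path: clocksource_mutex -> cpu_add_remove_lock */
static void *register_path(void *arg)
{
	pthread_mutex_lock(&clocksource_mutex);   /* clocksource registration */
	pthread_mutex_lock(&cpu_add_remove_lock); /* timekeeping_notify -> stop_machine */
	pthread_mutex_unlock(&cpu_add_remove_lock);
	pthread_mutex_unlock(&clocksource_mutex);
	return NULL;
}

/* Hotplug path: cpu_add_remove_lock -> clocksource_mutex */
static void *hotplug_path(void *arg)
{
	pthread_mutex_lock(&cpu_add_remove_lock); /* cpu_up */
	pthread_mutex_lock(&clocksource_mutex);   /* clocksource_change_rating */
	pthread_mutex_unlock(&clocksource_mutex);
	pthread_mutex_unlock(&cpu_add_remove_lock);
	return NULL;
}

int main(void)
{
	pthread_t a, b;

	/* With unlucky timing each thread ends up holding one mutex while
	 * waiting forever for the other: the deadlock is complete. */
	pthread_create(&a, NULL, register_path, NULL);
	pthread_create(&b, NULL, hotplug_path, NULL);
	pthread_join(a, NULL);
	pthread_join(b, NULL);
	puts("no deadlock this time");
	return 0;
}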
The solution is to replace the TSC via the clocksource watchdog
mechanism. Mark the TSC as unstable and schedule the watchdog work so
it gets removed in the watchdog thread context.
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
LKML-Reference: <new-submission>
Cc: Martin Schwidefsky <schwidefsky@de.ibm.com>
Cc: John Stultz <johnstul@us.ibm.com>
Diffstat (limited to 'kernel')
-rw-r--r--   kernel/time/clocksource.c   33
1 file changed, 30 insertions(+), 3 deletions(-)
diff --git a/kernel/time/clocksource.c b/kernel/time/clocksource.c
index e0c86ad6e9fb..a0af4ffcb6e5 100644
--- a/kernel/time/clocksource.c
+++ b/kernel/time/clocksource.c
@@ -149,15 +149,42 @@ static void clocksource_watchdog_work(struct work_struct *work)
 	kthread_run(clocksource_watchdog_kthread, NULL, "kwatchdog");
 }
 
-static void clocksource_unstable(struct clocksource *cs, int64_t delta)
+static void __clocksource_unstable(struct clocksource *cs)
 {
-	printk(KERN_WARNING "Clocksource %s unstable (delta = %Ld ns)\n",
-	       cs->name, delta);
 	cs->flags &= ~(CLOCK_SOURCE_VALID_FOR_HRES | CLOCK_SOURCE_WATCHDOG);
 	cs->flags |= CLOCK_SOURCE_UNSTABLE;
 	schedule_work(&watchdog_work);
 }
 
+static void clocksource_unstable(struct clocksource *cs, int64_t delta)
+{
+	printk(KERN_WARNING "Clocksource %s unstable (delta = %Ld ns)\n",
+	       cs->name, delta);
+	__clocksource_unstable(cs);
+}
+
+/**
+ * clocksource_mark_unstable - mark clocksource unstable via watchdog
+ * @cs:		clocksource to be marked unstable
+ *
+ * This function is called instead of clocksource_change_rating from
+ * cpu hotplug code to avoid a deadlock between the clocksource mutex
+ * and the cpu hotplug mutex. It defers the update of the clocksource
+ * to the watchdog thread.
+ */
+void clocksource_mark_unstable(struct clocksource *cs)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&watchdog_lock, flags);
+	if (!(cs->flags & CLOCK_SOURCE_UNSTABLE)) {
+		if (list_empty(&cs->wd_list))
+			list_add(&cs->wd_list, &watchdog_list);
+		__clocksource_unstable(cs);
+	}
+	spin_unlock_irqrestore(&watchdog_lock, flags);
+}
+
 static void clocksource_watchdog(unsigned long data)
 {
 	struct clocksource *cs;
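Per the kerneldoc above, a caller on the cpu hotplug path now uses
clocksource_mark_unstable() instead of clocksource_change_rating().
The sketch below shows that calling pattern only;
mark_unstable_from_hotplug() and tsc_clocksource are hypothetical
placeholders, since the arch-side change is outside the kernel/
diffstat shown here.

/*
 * Sketch of a caller on the cpu hotplug path (kernel context assumed).
 * mark_unstable_from_hotplug() and tsc_clocksource are placeholders,
 * not the real arch code.
 */
#include <linux/clocksource.h>

static struct clocksource tsc_clocksource;	/* placeholder clocksource */

static void mark_unstable_from_hotplug(void)
{
	/*
	 * Previously this path would call
	 * clocksource_change_rating(&tsc_clocksource, 0), taking
	 * clocksource_mutex while cpu_add_remove_lock is already held.
	 * Now it only flags the clocksource; the watchdog work, running
	 * later in kthread context without cpu_add_remove_lock held,
	 * performs the actual downgrade.
	 */
	clocksource_mark_unstable(&tsc_clocksource);
}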