Diffstat (limited to 'kernel/stop_machine.c')

-rw-r--r--  kernel/stop_machine.c | 41
1 file changed, 23 insertions, 18 deletions
diff --git a/kernel/stop_machine.c b/kernel/stop_machine.c
index 69eb76daed34..067cb83f37ea 100644
--- a/kernel/stop_machine.c
+++ b/kernel/stop_machine.c
@@ -238,13 +238,24 @@ static int cpu_stop_queue_two_works(int cpu1, struct cpu_stop_work *work1,
 	struct cpu_stopper *stopper2 = per_cpu_ptr(&cpu_stopper, cpu2);
 	DEFINE_WAKE_Q(wakeq);
 	int err;
+
 retry:
+	/*
+	 * The waking up of stopper threads has to happen in the same
+	 * scheduling context as the queueing. Otherwise, there is a
+	 * possibility of one of the above stoppers being woken up by another
+	 * CPU, and preempting us. This will cause us to not wake up the other
+	 * stopper forever.
+	 */
+	preempt_disable();
 	raw_spin_lock_irq(&stopper1->lock);
 	raw_spin_lock_nested(&stopper2->lock, SINGLE_DEPTH_NESTING);
 
-	err = -ENOENT;
-	if (!stopper1->enabled || !stopper2->enabled)
+	if (!stopper1->enabled || !stopper2->enabled) {
+		err = -ENOENT;
 		goto unlock;
+	}
+
 	/*
 	 * Ensure that if we race with __stop_cpus() the stoppers won't get
 	 * queued up in reverse order leading to system deadlock.
@@ -255,36 +266,30 @@ retry:
 	 * It can be falsely true but it is safe to spin until it is cleared,
 	 * queue_stop_cpus_work() does everything under preempt_disable().
 	 */
-	err = -EDEADLK;
-	if (unlikely(stop_cpus_in_progress))
+	if (unlikely(stop_cpus_in_progress)) {
+		err = -EDEADLK;
 		goto unlock;
+	}
 
 	err = 0;
 	__cpu_stop_queue_work(stopper1, work1, &wakeq);
 	__cpu_stop_queue_work(stopper2, work2, &wakeq);
-	/*
-	 * The waking up of stopper threads has to happen
-	 * in the same scheduling context as the queueing.
-	 * Otherwise, there is a possibility of one of the
-	 * above stoppers being woken up by another CPU,
-	 * and preempting us. This will cause us to n ot
-	 * wake up the other stopper forever.
-	 */
-	preempt_disable();
+
 unlock:
 	raw_spin_unlock(&stopper2->lock);
 	raw_spin_unlock_irq(&stopper1->lock);
 
 	if (unlikely(err == -EDEADLK)) {
+		preempt_enable();
+
 		while (stop_cpus_in_progress)
 			cpu_relax();
+
 		goto retry;
 	}
 
-	if (!err) {
-		wake_up_q(&wakeq);
-		preempt_enable();
-	}
+	wake_up_q(&wakeq);
+	preempt_enable();
 
 	return err;
 }
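
For readability, here is a sketch of cpu_stop_queue_two_works() as it reads with both hunks applied. It is reconstructed from the diff above as a reading aid, not copied from the tree: the second line of the function signature and the stopper1 initialisation are not visible in the hunks and are filled in from kernel/stop_machine.c, and the in-code comments are condensed.

static int cpu_stop_queue_two_works(int cpu1, struct cpu_stop_work *work1,
				    int cpu2, struct cpu_stop_work *work2)
{
	struct cpu_stopper *stopper1 = per_cpu_ptr(&cpu_stopper, cpu1);
	struct cpu_stopper *stopper2 = per_cpu_ptr(&cpu_stopper, cpu2);
	DEFINE_WAKE_Q(wakeq);
	int err;

retry:
	/*
	 * Stay non-preemptible from queueing until wake-up: if another CPU
	 * woke one of the stoppers first, it could preempt us here and the
	 * second stopper would never be woken.
	 */
	preempt_disable();
	raw_spin_lock_irq(&stopper1->lock);
	raw_spin_lock_nested(&stopper2->lock, SINGLE_DEPTH_NESTING);

	if (!stopper1->enabled || !stopper2->enabled) {
		err = -ENOENT;
		goto unlock;
	}

	/* Avoid queueing in reverse order against __stop_cpus(), see above. */
	if (unlikely(stop_cpus_in_progress)) {
		err = -EDEADLK;
		goto unlock;
	}

	err = 0;
	__cpu_stop_queue_work(stopper1, work1, &wakeq);
	__cpu_stop_queue_work(stopper2, work2, &wakeq);

unlock:
	raw_spin_unlock(&stopper2->lock);
	raw_spin_unlock_irq(&stopper1->lock);

	if (unlikely(err == -EDEADLK)) {
		/* Allow preemption again while spinning, then start over. */
		preempt_enable();

		while (stop_cpus_in_progress)
			cpu_relax();

		goto retry;
	}

	/* Wake both stoppers in the same scheduling context as the queueing. */
	wake_up_q(&wakeq);
	preempt_enable();

	return err;
}

The key change is that preempt_disable() now brackets the whole queue-and-wake sequence instead of being taken only after the works are queued, and it is dropped and re-taken around the -EDEADLK spin-and-retry path; calling wake_up_q() on the empty wake queue in the -ENOENT case is a harmless no-op.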