-rw-r--r--  kernel/stop_machine.c | 10
1 file changed, 9 insertions(+), 1 deletion(-)
diff --git a/kernel/stop_machine.c b/kernel/stop_machine.c
index 1ff523dae6e2..e190d1ef3a23 100644
--- a/kernel/stop_machine.c
+++ b/kernel/stop_machine.c
@@ -260,6 +260,15 @@ retry:
 	err = 0;
 	__cpu_stop_queue_work(stopper1, work1, &wakeq);
 	__cpu_stop_queue_work(stopper2, work2, &wakeq);
+	/*
+	 * The waking up of stopper threads has to happen
+	 * in the same scheduling context as the queueing.
+	 * Otherwise, there is a possibility of one of the
+	 * above stoppers being woken up by another CPU,
+	 * and preempting us. This will cause us to not
+	 * wake up the other stopper forever.
+	 */
+	preempt_disable();
 unlock:
 	raw_spin_unlock(&stopper2->lock);
 	raw_spin_unlock_irq(&stopper1->lock);
@@ -271,7 +280,6 @@ unlock:
 	}
 
 	if (!err) {
-		preempt_disable();
 		wake_up_q(&wakeq);
 		preempt_enable();
 	}
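
For readability, here is the queue-and-wake path of the function containing these hunks (cpu_stop_queue_two_works() in kernel/stop_machine.c) pieced together from the two hunks above; everything not visible in this diff is elided as "...", and the inline comments are paraphrases of the rationale added by the patch, so treat this as a condensed sketch rather than the complete function body:

	/* both stopper works are queued while holding both stopper locks */
	err = 0;
	__cpu_stop_queue_work(stopper1, work1, &wakeq);
	__cpu_stop_queue_work(stopper2, work2, &wakeq);
	/*
	 * Keep preemption disabled across the unlock: a freshly queued
	 * stopper woken by another CPU must not preempt this task before
	 * wake_up_q() below has woken both of them.
	 */
	preempt_disable();
unlock:
	raw_spin_unlock(&stopper2->lock);
	raw_spin_unlock_irq(&stopper1->lock);

	/* ... error/retry handling not shown in this diff ... */

	if (!err) {
		wake_up_q(&wakeq);	/* wake both stoppers in this scheduling context */
		preempt_enable();
	}

Note that preempt_disable() sits before the unlock: label, so it only runs on the success path that set err = 0, matching the preempt_enable() inside the if (!err) branch.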
