author     Peter Zijlstra <peterz@infradead.org>    2018-07-30 07:21:40 -0400
committer  Thomas Gleixner <tglx@linutronix.de>     2018-08-02 09:25:20 -0400
commit     b80a2bfce85e1051056d98d04ecb2d0b55cbbc1c
tree       14df7b9e65a904bdd3e5126cdd494e9ec16affa9
parent     b6a60cf36d497e7fbde9dd5b86fabd96850249f6
stop_machine: Reflow cpu_stop_queue_two_works()
The code flow in cpu_stop_queue_two_works() is a little arcane; fix this by
lifting the preempt_disable() to the top to create more natural nesting wrt
the spinlocks and make the wake_up_q() and preempt_enable() unconditional
at the end.
Furthermore, enable preemption in the -EDEADLK case, such that we spin-wait
with preemption enabled.
Suggested-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Cc: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Cc: isaacm@codeaurora.org
Cc: matt@codeblueprint.co.uk
Cc: psodagud@codeaurora.org
Cc: gregkh@linuxfoundation.org
Cc: pkondeti@codeaurora.org
Cc: stable@vger.kernel.org
Link: https://lkml.kernel.org/r/20180730112140.GH2494@hirez.programming.kicks-ass.net
 kernel/stop_machine.c | 41 +++++++++++++++++++++++------------------
 1 file changed, 23 insertions(+), 18 deletions(-)
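Both hunks below funnel work through `__cpu_stop_queue_work()`, which is what populates `wakeq`. The helper is not part of this patch; as a reading aid, it looks roughly like this in the same file around this kernel version (sketched from memory, not quoted from the diff):

```c
/*
 * Reading aid, not part of this patch: __cpu_stop_queue_work() (also in
 * kernel/stop_machine.c) queues the work item under the stopper lock and
 * defers the wakeup of the stopper thread onto the wake_q, to be issued
 * later by wake_up_q() once the locks have been dropped.
 */
static void __cpu_stop_queue_work(struct cpu_stopper *stopper,
                                  struct cpu_stop_work *work,
                                  struct wake_q_head *wakeq)
{
        list_add_tail(&work->list, &stopper->works);
        wake_q_add(wakeq, stopper->thread);
}
```

Because the wakeup is deferred onto the wake_q rather than issued under the lock, the question the patch answers is purely *where* the `preempt_disable()`/`wake_up_q()`/`preempt_enable()` sequence should sit relative to the locking.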
```diff
diff --git a/kernel/stop_machine.c b/kernel/stop_machine.c
index e190d1ef3a23..34b6652e8677 100644
--- a/kernel/stop_machine.c
+++ b/kernel/stop_machine.c
@@ -236,13 +236,24 @@ static int cpu_stop_queue_two_works(int cpu1, struct cpu_stop_work *work1,
 	struct cpu_stopper *stopper2 = per_cpu_ptr(&cpu_stopper, cpu2);
 	DEFINE_WAKE_Q(wakeq);
 	int err;
+
 retry:
+	/*
+	 * The waking up of stopper threads has to happen in the same
+	 * scheduling context as the queueing. Otherwise, there is a
+	 * possibility of one of the above stoppers being woken up by another
+	 * CPU, and preempting us. This will cause us to not wake up the other
+	 * stopper forever.
+	 */
+	preempt_disable();
 	raw_spin_lock_irq(&stopper1->lock);
 	raw_spin_lock_nested(&stopper2->lock, SINGLE_DEPTH_NESTING);
 
-	err = -ENOENT;
-	if (!stopper1->enabled || !stopper2->enabled)
+	if (!stopper1->enabled || !stopper2->enabled) {
+		err = -ENOENT;
 		goto unlock;
+	}
+
 	/*
 	 * Ensure that if we race with __stop_cpus() the stoppers won't get
 	 * queued up in reverse order leading to system deadlock.
@@ -253,36 +264,30 @@ retry:
 	 * It can be falsely true but it is safe to spin until it is cleared,
 	 * queue_stop_cpus_work() does everything under preempt_disable().
 	 */
-	err = -EDEADLK;
-	if (unlikely(stop_cpus_in_progress))
+	if (unlikely(stop_cpus_in_progress)) {
+		err = -EDEADLK;
 		goto unlock;
+	}
 
 	err = 0;
 	__cpu_stop_queue_work(stopper1, work1, &wakeq);
 	__cpu_stop_queue_work(stopper2, work2, &wakeq);
-	/*
-	 * The waking up of stopper threads has to happen
-	 * in the same scheduling context as the queueing.
-	 * Otherwise, there is a possibility of one of the
-	 * above stoppers being woken up by another CPU,
-	 * and preempting us. This will cause us to n ot
-	 * wake up the other stopper forever.
-	 */
-	preempt_disable();
+
 unlock:
 	raw_spin_unlock(&stopper2->lock);
 	raw_spin_unlock_irq(&stopper1->lock);
 
 	if (unlikely(err == -EDEADLK)) {
+		preempt_enable();
+
 		while (stop_cpus_in_progress)
 			cpu_relax();
+
 		goto retry;
 	}
 
-	if (!err) {
-		wake_up_q(&wakeq);
-		preempt_enable();
-	}
+	wake_up_q(&wakeq);
+	preempt_enable();
 
 	return err;
 }
```
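Putting the two hunks together, the function after this patch reads as follows. This is a condensed reading aid reconstructed from the diff above, not the authoritative source: the second line of the signature is completed from the hunk header and the comment bodies are abbreviated.

```c
static int cpu_stop_queue_two_works(int cpu1, struct cpu_stop_work *work1,
                                    int cpu2, struct cpu_stop_work *work2)
{
        struct cpu_stopper *stopper1 = per_cpu_ptr(&cpu_stopper, cpu1);
        struct cpu_stopper *stopper2 = per_cpu_ptr(&cpu_stopper, cpu2);
        DEFINE_WAKE_Q(wakeq);
        int err;

retry:
        /*
         * The wakeup must happen in the scheduling context that queued
         * the work, so hold preemption across queueing and wake_up_q().
         */
        preempt_disable();
        raw_spin_lock_irq(&stopper1->lock);
        raw_spin_lock_nested(&stopper2->lock, SINGLE_DEPTH_NESTING);

        if (!stopper1->enabled || !stopper2->enabled) {
                err = -ENOENT;
                goto unlock;
        }

        if (unlikely(stop_cpus_in_progress)) {
                err = -EDEADLK;
                goto unlock;
        }

        err = 0;
        __cpu_stop_queue_work(stopper1, work1, &wakeq);
        __cpu_stop_queue_work(stopper2, work2, &wakeq);

unlock:
        raw_spin_unlock(&stopper2->lock);
        raw_spin_unlock_irq(&stopper1->lock);

        if (unlikely(err == -EDEADLK)) {
                /* Spin-wait with preemption enabled, then start over. */
                preempt_enable();

                while (stop_cpus_in_progress)
                        cpu_relax();

                goto retry;
        }

        /* Unconditional: wakeq is simply empty if we bailed with -ENOENT. */
        wake_up_q(&wakeq);
        preempt_enable();

        return err;
}
```

This shape makes the nesting the changelog describes visible: `preempt_disable()` brackets the whole locked region, the unlock/wake/enable sequence unwinds in reverse order of acquisition, and the only early exit (`-EDEADLK`) re-enables preemption before spinning so the busy-wait cannot block the stopper it is waiting on.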