summaryrefslogtreecommitdiffstats
path: root/kernel/stop_machine.c
diff options
context:
space:
mode:
Diffstat (limited to 'kernel/stop_machine.c')
-rw-r--r--  kernel/stop_machine.c | 19 +++++++++++++------
1 file changed, 14 insertions(+), 5 deletions(-)
diff --git a/kernel/stop_machine.c b/kernel/stop_machine.c
index b7591261652d..64c0291b579c 100644
--- a/kernel/stop_machine.c
+++ b/kernel/stop_machine.c
@@ -21,6 +21,7 @@
 #include <linux/smpboot.h>
 #include <linux/atomic.h>
 #include <linux/nmi.h>
+#include <linux/sched/wake_q.h>
 
 /*
  * Structure to determine completion condition and record errors. May
@@ -65,27 +66,31 @@ static void cpu_stop_signal_done(struct cpu_stop_done *done)
 }
 
 static void __cpu_stop_queue_work(struct cpu_stopper *stopper,
-					struct cpu_stop_work *work)
+					struct cpu_stop_work *work,
+					struct wake_q_head *wakeq)
 {
 	list_add_tail(&work->list, &stopper->works);
-	wake_up_process(stopper->thread);
+	wake_q_add(wakeq, stopper->thread);
 }
 
 /* queue @work to @stopper. if offline, @work is completed immediately */
 static bool cpu_stop_queue_work(unsigned int cpu, struct cpu_stop_work *work)
 {
 	struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu);
+	DEFINE_WAKE_Q(wakeq);
 	unsigned long flags;
 	bool enabled;
 
 	spin_lock_irqsave(&stopper->lock, flags);
 	enabled = stopper->enabled;
 	if (enabled)
-		__cpu_stop_queue_work(stopper, work);
+		__cpu_stop_queue_work(stopper, work, &wakeq);
 	else if (work->done)
 		cpu_stop_signal_done(work->done);
 	spin_unlock_irqrestore(&stopper->lock, flags);
 
+	wake_up_q(&wakeq);
+
 	return enabled;
 }
 
91 96
@@ -229,6 +234,7 @@ static int cpu_stop_queue_two_works(int cpu1, struct cpu_stop_work *work1,
 {
 	struct cpu_stopper *stopper1 = per_cpu_ptr(&cpu_stopper, cpu1);
 	struct cpu_stopper *stopper2 = per_cpu_ptr(&cpu_stopper, cpu2);
+	DEFINE_WAKE_Q(wakeq);
 	int err;
 retry:
 	spin_lock_irq(&stopper1->lock);
@@ -252,8 +258,8 @@ retry:
 		goto unlock;
 
 	err = 0;
-	__cpu_stop_queue_work(stopper1, work1);
-	__cpu_stop_queue_work(stopper2, work2);
+	__cpu_stop_queue_work(stopper1, work1, &wakeq);
+	__cpu_stop_queue_work(stopper2, work2, &wakeq);
 unlock:
 	spin_unlock(&stopper2->lock);
 	spin_unlock_irq(&stopper1->lock);
@@ -263,6 +269,9 @@ unlock:
 		cpu_relax();
 		goto retry;
 	}
+
+	wake_up_q(&wakeq);
+
 	return err;
 }
 /**