Diffstat (limited to 'kernel/stop_machine.c')
-rw-r--r--  kernel/stop_machine.c | 288
1 file changed, 190 insertions(+), 98 deletions(-)
diff --git a/kernel/stop_machine.c b/kernel/stop_machine.c
index c09f2955ae30..c530bc5be7cf 100644
--- a/kernel/stop_machine.c
+++ b/kernel/stop_machine.c
@@ -115,6 +115,182 @@ int stop_one_cpu(unsigned int cpu, cpu_stop_fn_t fn, void *arg)
 	return done.executed ? done.ret : -ENOENT;
 }
 
+/* This controls the threads on each CPU. */
+enum multi_stop_state {
+	/* Dummy starting state for thread. */
+	MULTI_STOP_NONE,
+	/* Awaiting everyone to be scheduled. */
+	MULTI_STOP_PREPARE,
+	/* Disable interrupts. */
+	MULTI_STOP_DISABLE_IRQ,
+	/* Run the function */
+	MULTI_STOP_RUN,
+	/* Exit */
+	MULTI_STOP_EXIT,
+};
+
+struct multi_stop_data {
+	int (*fn)(void *);
+	void *data;
+	/* Like num_online_cpus(), but hotplug cpu uses us, so we need this. */
+	unsigned int num_threads;
+	const struct cpumask *active_cpus;
+
+	enum multi_stop_state state;
+	atomic_t thread_ack;
+};
+
+static void set_state(struct multi_stop_data *msdata,
+		      enum multi_stop_state newstate)
+{
+	/* Reset ack counter. */
+	atomic_set(&msdata->thread_ack, msdata->num_threads);
+	smp_wmb();
+	msdata->state = newstate;
+}
+
+/* Last one to ack a state moves to the next state. */
+static void ack_state(struct multi_stop_data *msdata)
+{
+	if (atomic_dec_and_test(&msdata->thread_ack))
+		set_state(msdata, msdata->state + 1);
+}
+
+/* This is the cpu_stop function which stops the CPU. */
+static int multi_cpu_stop(void *data)
+{
+	struct multi_stop_data *msdata = data;
+	enum multi_stop_state curstate = MULTI_STOP_NONE;
+	int cpu = smp_processor_id(), err = 0;
+	unsigned long flags;
+	bool is_active;
+
+	/*
+	 * When called from stop_machine_from_inactive_cpu(), irq might
+	 * already be disabled. Save the state and restore it on exit.
+	 */
+	local_save_flags(flags);
+
+	if (!msdata->active_cpus)
+		is_active = cpu == cpumask_first(cpu_online_mask);
+	else
+		is_active = cpumask_test_cpu(cpu, msdata->active_cpus);
+
+	/* Simple state machine */
+	do {
+		/* Chill out and ensure we re-read multi_stop_state. */
+		cpu_relax();
+		if (msdata->state != curstate) {
+			curstate = msdata->state;
+			switch (curstate) {
+			case MULTI_STOP_DISABLE_IRQ:
+				local_irq_disable();
+				hard_irq_disable();
+				break;
+			case MULTI_STOP_RUN:
+				if (is_active)
+					err = msdata->fn(msdata->data);
+				break;
+			default:
+				break;
+			}
+			ack_state(msdata);
+		}
+	} while (curstate != MULTI_STOP_EXIT);
+
+	local_irq_restore(flags);
+	return err;
+}
+
+struct irq_cpu_stop_queue_work_info {
+	int cpu1;
+	int cpu2;
+	struct cpu_stop_work *work1;
+	struct cpu_stop_work *work2;
+};
+
+/*
+ * This function is always run with irqs and preemption disabled.
+ * This guarantees that both work1 and work2 get queued, before
+ * our local migrate thread gets the chance to preempt us.
+ */
+static void irq_cpu_stop_queue_work(void *arg)
+{
+	struct irq_cpu_stop_queue_work_info *info = arg;
+	cpu_stop_queue_work(info->cpu1, info->work1);
+	cpu_stop_queue_work(info->cpu2, info->work2);
+}
+
+/**
+ * stop_two_cpus - stops two cpus
+ * @cpu1: the cpu to stop
+ * @cpu2: the other cpu to stop
+ * @fn: function to execute
+ * @arg: argument to @fn
+ *
+ * Stops both the current and specified CPU and runs @fn on one of them.
+ *
+ * returns when both are completed.
+ */
+int stop_two_cpus(unsigned int cpu1, unsigned int cpu2, cpu_stop_fn_t fn, void *arg)
+{
+	struct cpu_stop_done done;
+	struct cpu_stop_work work1, work2;
+	struct irq_cpu_stop_queue_work_info call_args;
+	struct multi_stop_data msdata;
+
+	preempt_disable();
+	msdata = (struct multi_stop_data){
+		.fn = fn,
+		.data = arg,
+		.num_threads = 2,
+		.active_cpus = cpumask_of(cpu1),
+	};
+
+	work1 = work2 = (struct cpu_stop_work){
+		.fn = multi_cpu_stop,
+		.arg = &msdata,
+		.done = &done
+	};
+
+	call_args = (struct irq_cpu_stop_queue_work_info){
+		.cpu1 = cpu1,
+		.cpu2 = cpu2,
+		.work1 = &work1,
+		.work2 = &work2,
+	};
+
+	cpu_stop_init_done(&done, 2);
+	set_state(&msdata, MULTI_STOP_PREPARE);
+
+	/*
+	 * If we observe both CPUs active we know _cpu_down() cannot yet have
+	 * queued its stop_machine works and therefore ours will get executed
+	 * first. Or it's not either one of our CPUs that's getting unplugged,
+	 * in which case we don't care.
+	 *
+	 * This relies on the stopper workqueues to be FIFO.
+	 */
+	if (!cpu_active(cpu1) || !cpu_active(cpu2)) {
+		preempt_enable();
+		return -ENOENT;
+	}
+
+	/*
+	 * Queuing needs to be done by the lowest numbered CPU, to ensure
+	 * that works are always queued in the same order on every CPU.
+	 * This prevents deadlocks.
+	 */
+	smp_call_function_single(min(cpu1, cpu2),
+				 &irq_cpu_stop_queue_work,
+				 &call_args, 0);
+	preempt_enable();
+
+	wait_for_completion(&done.completion);
+
+	return done.executed ? done.ret : -ENOENT;
+}
+
 /**
  * stop_one_cpu_nowait - stop a cpu but don't wait for completion
  * @cpu: cpu to stop
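
The hunk above is built around a lockstep state machine: set_state() reloads thread_ack with num_threads before publishing a new state, and the last stopper to call ack_state() advances everyone, so all participating CPUs walk PREPARE -> DISABLE_IRQ -> RUN -> EXIT together. Below is a minimal userspace sketch of that protocol, using pthreads and C11 atomics in place of the kernel's atomic_t and smp_wmb(); the thread count and printf payload are illustrative only, not part of the patch.

/* Userspace sketch of the multi_cpu_stop() lockstep protocol. */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

#define NTHREADS 4

enum ms_state { MS_NONE, MS_PREPARE, MS_DISABLE_IRQ, MS_RUN, MS_EXIT };

static _Atomic int ms_state = MS_NONE;
static atomic_int thread_ack;

static void set_state(int newstate)
{
	/* Reset the ack counter before publishing the new state. */
	atomic_store(&thread_ack, NTHREADS);
	atomic_store(&ms_state, newstate);
}

/* Last one to ack a state moves everyone to the next state. */
static void ack_state(void)
{
	if (atomic_fetch_sub(&thread_ack, 1) == 1)
		set_state(atomic_load(&ms_state) + 1);
}

static void *stopper(void *arg)
{
	long id = (long)arg;
	int curstate = MS_NONE;

	do {
		int seen = atomic_load(&ms_state);

		if (seen != curstate) {
			curstate = seen;
			/* Only the "active" thread runs the payload. */
			if (curstate == MS_RUN && id == 0)
				printf("thread %ld: running fn()\n", id);
			ack_state();
		}
	} while (curstate != MS_EXIT);
	return NULL;
}

int main(void)
{
	pthread_t tid[NTHREADS];
	long i;

	for (i = 0; i < NTHREADS; i++)
		pthread_create(&tid[i], NULL, stopper, (void *)i);
	set_state(MS_PREPARE);	/* kick off the lockstep walk */
	for (i = 0; i < NTHREADS; i++)
		pthread_join(tid[i], NULL);
	return 0;
}

As in the kernel code, the final ack of the EXIT state harmlessly advances the shared state one past the last value; by then every thread has already left the loop.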
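
stop_two_cpus() gives callers a way to run @fn with exactly two CPUs stopped, without the cost of a full stop_machine(); @fn runs on @cpu1, since active_cpus is set to cpumask_of(cpu1). A hypothetical caller sketch against the signature added above; the swap_ctx structure and the do_swap()/swap_between() names are invented for illustration.

/* Hypothetical caller of the new stop_two_cpus() API. */
#include <linux/stop_machine.h>

struct swap_ctx {		/* made-up payload, for illustration only */
	int src_cpu;
	int dst_cpu;
};

static int do_swap(void *arg)
{
	struct swap_ctx *ctx = arg;

	/*
	 * Runs on ctx->src_cpu (the first CPU passed below) while both
	 * CPUs sit in multi_cpu_stop() with interrupts disabled.
	 */
	(void)ctx;
	return 0;
}

static int swap_between(int src_cpu, int dst_cpu)
{
	struct swap_ctx ctx = { .src_cpu = src_cpu, .dst_cpu = dst_cpu };

	/* Returns -ENOENT if either CPU is on its way offline. */
	return stop_two_cpus(src_cpu, dst_cpu, do_swap, &ctx);
}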
@@ -359,98 +535,14 @@ early_initcall(cpu_stop_init);
 
 #ifdef CONFIG_STOP_MACHINE
 
-/* This controls the threads on each CPU. */
-enum stopmachine_state {
-	/* Dummy starting state for thread. */
-	STOPMACHINE_NONE,
-	/* Awaiting everyone to be scheduled. */
-	STOPMACHINE_PREPARE,
-	/* Disable interrupts. */
-	STOPMACHINE_DISABLE_IRQ,
-	/* Run the function */
-	STOPMACHINE_RUN,
-	/* Exit */
-	STOPMACHINE_EXIT,
-};
-
-struct stop_machine_data {
-	int (*fn)(void *);
-	void *data;
-	/* Like num_online_cpus(), but hotplug cpu uses us, so we need this. */
-	unsigned int num_threads;
-	const struct cpumask *active_cpus;
-
-	enum stopmachine_state state;
-	atomic_t thread_ack;
-};
-
-static void set_state(struct stop_machine_data *smdata,
-		      enum stopmachine_state newstate)
-{
-	/* Reset ack counter. */
-	atomic_set(&smdata->thread_ack, smdata->num_threads);
-	smp_wmb();
-	smdata->state = newstate;
-}
-
-/* Last one to ack a state moves to the next state. */
-static void ack_state(struct stop_machine_data *smdata)
-{
-	if (atomic_dec_and_test(&smdata->thread_ack))
-		set_state(smdata, smdata->state + 1);
-}
-
-/* This is the cpu_stop function which stops the CPU. */
-static int stop_machine_cpu_stop(void *data)
-{
-	struct stop_machine_data *smdata = data;
-	enum stopmachine_state curstate = STOPMACHINE_NONE;
-	int cpu = smp_processor_id(), err = 0;
-	unsigned long flags;
-	bool is_active;
-
-	/*
-	 * When called from stop_machine_from_inactive_cpu(), irq might
-	 * already be disabled. Save the state and restore it on exit.
-	 */
-	local_save_flags(flags);
-
-	if (!smdata->active_cpus)
-		is_active = cpu == cpumask_first(cpu_online_mask);
-	else
-		is_active = cpumask_test_cpu(cpu, smdata->active_cpus);
-
-	/* Simple state machine */
-	do {
-		/* Chill out and ensure we re-read stopmachine_state. */
-		cpu_relax();
-		if (smdata->state != curstate) {
-			curstate = smdata->state;
-			switch (curstate) {
-			case STOPMACHINE_DISABLE_IRQ:
-				local_irq_disable();
-				hard_irq_disable();
-				break;
-			case STOPMACHINE_RUN:
-				if (is_active)
-					err = smdata->fn(smdata->data);
-				break;
-			default:
-				break;
-			}
-			ack_state(smdata);
-		}
-	} while (curstate != STOPMACHINE_EXIT);
-
-	local_irq_restore(flags);
-	return err;
-}
-
 int __stop_machine(int (*fn)(void *), void *data, const struct cpumask *cpus)
 {
-	struct stop_machine_data smdata = { .fn = fn, .data = data,
-					    .num_threads = num_online_cpus(),
-					    .active_cpus = cpus };
+	struct multi_stop_data msdata = {
+		.fn = fn,
+		.data = data,
+		.num_threads = num_online_cpus(),
+		.active_cpus = cpus,
+	};
 
 	if (!stop_machine_initialized) {
 		/*
@@ -461,7 +553,7 @@ int __stop_machine(int (*fn)(void *), void *data, const struct cpumask *cpus)
 		unsigned long flags;
 		int ret;
 
-		WARN_ON_ONCE(smdata.num_threads != 1);
+		WARN_ON_ONCE(msdata.num_threads != 1);
 
 		local_irq_save(flags);
 		hard_irq_disable();
@@ -472,8 +564,8 @@ int __stop_machine(int (*fn)(void *), void *data, const struct cpumask *cpus)
 	}
 
 	/* Set the initial state and stop all online cpus. */
-	set_state(&smdata, STOPMACHINE_PREPARE);
-	return stop_cpus(cpu_online_mask, stop_machine_cpu_stop, &smdata);
+	set_state(&msdata, MULTI_STOP_PREPARE);
+	return stop_cpus(cpu_online_mask, multi_cpu_stop, &msdata);
 }
 
 int stop_machine(int (*fn)(void *), void *data, const struct cpumask *cpus)
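
With __stop_machine() now driving the shared multi_cpu_stop(), the external stop_machine() API is unchanged by this patch. For orientation, a hypothetical module-side caller; the demo_* names are invented and only stop_machine()'s signature comes from this file.

/* Hypothetical stop_machine() caller; demo_* names are invented. */
#include <linux/module.h>
#include <linux/stop_machine.h>

static int demo_patch(void *arg)
{
	/*
	 * Runs on one CPU while every other online CPU spins in
	 * multi_cpu_stop() with interrupts disabled, so nothing else
	 * executes concurrently with this function.
	 */
	*(int *)arg = 1;
	return 0;
}

static int __init demo_init(void)
{
	static int applied;

	/* NULL cpumask: run demo_patch on the first online CPU. */
	return stop_machine(demo_patch, &applied, NULL);
}
module_init(demo_init);

MODULE_LICENSE("GPL");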
@@ -513,25 +605,25 @@ EXPORT_SYMBOL_GPL(stop_machine);
 int stop_machine_from_inactive_cpu(int (*fn)(void *), void *data,
 				   const struct cpumask *cpus)
 {
-	struct stop_machine_data smdata = { .fn = fn, .data = data,
-					    .active_cpus = cpus };
+	struct multi_stop_data msdata = { .fn = fn, .data = data,
+					  .active_cpus = cpus };
 	struct cpu_stop_done done;
 	int ret;
 
 	/* Local CPU must be inactive and CPU hotplug in progress. */
 	BUG_ON(cpu_active(raw_smp_processor_id()));
-	smdata.num_threads = num_active_cpus() + 1;	/* +1 for local */
+	msdata.num_threads = num_active_cpus() + 1;	/* +1 for local */
 
 	/* No proper task established and can't sleep - busy wait for lock. */
 	while (!mutex_trylock(&stop_cpus_mutex))
 		cpu_relax();
 
 	/* Schedule work on other CPUs and execute directly for local CPU */
-	set_state(&smdata, STOPMACHINE_PREPARE);
+	set_state(&msdata, MULTI_STOP_PREPARE);
 	cpu_stop_init_done(&done, num_active_cpus());
-	queue_stop_cpus_work(cpu_active_mask, stop_machine_cpu_stop, &smdata,
+	queue_stop_cpus_work(cpu_active_mask, multi_cpu_stop, &msdata,
 			     &done);
-	ret = stop_machine_cpu_stop(&smdata);
+	ret = multi_cpu_stop(&msdata);
 
 	/* Busy wait for completion. */
 	while (!completion_done(&done.completion))
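
On the "queuing needs to be done by the lowest numbered CPU" rule in stop_two_cpus() above: if two overlapping calls each queued their own CPU's work first, the two per-CPU stopper FIFOs could hold the work pairs in opposite orders, and each multi_cpu_stop() would spin in MULTI_STOP_PREPARE waiting on a partner queued behind the other pair. Routing both enqueues through one CPU is an ordered-acquisition discipline. A minimal userspace analogy, assuming nothing from this file beyond the idea, is taking two locks in a fixed global order:

/* Ordered acquisition: the same idea as queueing from the lowest CPU. */
#include <pthread.h>
#include <stdint.h>

static pthread_mutex_t m1 = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t m2 = PTHREAD_MUTEX_INITIALIZER;

static void lock_pair(pthread_mutex_t *a, pthread_mutex_t *b)
{
	/*
	 * Always acquire in a global (address) order so two callers
	 * locking the same pair from opposite ends cannot deadlock.
	 */
	if ((uintptr_t)a > (uintptr_t)b) {
		pthread_mutex_t *tmp = a;
		a = b;
		b = tmp;
	}
	pthread_mutex_lock(a);
	pthread_mutex_lock(b);
}

int main(void)
{
	lock_pair(&m2, &m1);	/* acquires in the same order as (&m1, &m2) */
	pthread_mutex_unlock(&m1);
	pthread_mutex_unlock(&m2);
	return 0;
}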