author		Thomas Gleixner <tglx@linutronix.de>	2013-01-31 07:11:14 -0500
committer	Thomas Gleixner <tglx@linutronix.de>	2013-02-14 09:29:38 -0500
commit		14e568e78f6f80ca1e27256641ddf524c7dbdc51 (patch)
tree		1f75f09c7f8adfa6dd55ef9bd0b547fcaf700f45 /kernel/stop_machine.c
parent		860a0ffaa3e1a9cf0ebb5f43d6a2a2ce67463e93 (diff)
stop_machine: Use smpboot threads
Use the smpboot thread infrastructure. Mark the stopper thread
selfparking and park it after it has finished the take_cpu_down()
work.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Rusty Russell <rusty@rustcorp.com.au>
Cc: Paul McKenney <paulmck@linux.vnet.ibm.com>
Cc: Srivatsa S. Bhat <srivatsa.bhat@linux.vnet.ibm.com>
Cc: Arjan van de Veen <arjan@infradead.org>
Cc: Paul Turner <pjt@google.com>
Cc: Richard Weinberger <rw@linutronix.de>
Cc: Magnus Damm <magnus.damm@gmail.com>
Link: http://lkml.kernel.org/r/20130131120741.686315164@linutronix.de
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
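The core of the conversion: instead of an open-coded kthread loop plus a
hotplug notifier, the smpboot infrastructure drives one per-cpu thread
through a pair of callbacks — thread_should_run() is polled to decide
whether there is work, thread_fn() is invoked to do it — and handles
creation, cpu binding, parking and unparking across hotplug itself. As a
minimal sketch of the pattern (not part of this patch; the demo_* names
are invented for illustration):

#include <linux/init.h>
#include <linux/percpu.h>
#include <linux/sched.h>
#include <linux/smpboot.h>

static DEFINE_PER_CPU(struct task_struct *, demo_task);
static DEFINE_PER_CPU(unsigned long, demo_pending);

/* Polled by the smpboot core with preemption disabled; must not sleep. */
static int demo_should_run(unsigned int cpu)
{
	return this_cpu_read(demo_pending) != 0;
}

/* Runs in the per-cpu thread whenever demo_should_run() returned true. */
static void demo_thread_fn(unsigned int cpu)
{
	this_cpu_write(demo_pending, 0);
}

static struct smp_hotplug_thread demo_threads = {
	.store			= &demo_task,
	.thread_should_run	= demo_should_run,
	.thread_fn		= demo_thread_fn,
	.thread_comm		= "demo/%u",
};

static int __init demo_init(void)
{
	/* Creates demo/N for each cpu; smpboot parks/unparks on hotplug. */
	return smpboot_register_percpu_thread(&demo_threads);
}
early_initcall(demo_init);

This is exactly the bookkeeping the notifier-based code removed below used
to do by hand with kthread_create_on_node(), kthread_bind() and
kthread_stop().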
Diffstat (limited to 'kernel/stop_machine.c')
-rw-r--r--	kernel/stop_machine.c	136
1 file changed, 50 insertions(+), 86 deletions(-)
diff --git a/kernel/stop_machine.c b/kernel/stop_machine.c
index aaac68c5c3be..95d178c62d5a 100644
--- a/kernel/stop_machine.c
+++ b/kernel/stop_machine.c
@@ -18,7 +18,7 @@
 #include <linux/stop_machine.h>
 #include <linux/interrupt.h>
 #include <linux/kallsyms.h>
-
+#include <linux/smpboot.h>
 #include <linux/atomic.h>
 
 /*
@@ -245,20 +245,25 @@ int try_stop_cpus(const struct cpumask *cpumask, cpu_stop_fn_t fn, void *arg)
 	return ret;
 }
 
-static int cpu_stopper_thread(void *data)
+static int cpu_stop_should_run(unsigned int cpu)
+{
+	struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu);
+	unsigned long flags;
+	int run;
+
+	spin_lock_irqsave(&stopper->lock, flags);
+	run = !list_empty(&stopper->works);
+	spin_unlock_irqrestore(&stopper->lock, flags);
+	return run;
+}
+
+static void cpu_stopper_thread(unsigned int cpu)
 {
-	struct cpu_stopper *stopper = data;
+	struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu);
 	struct cpu_stop_work *work;
 	int ret;
 
 repeat:
-	set_current_state(TASK_INTERRUPTIBLE);	/* mb paired w/ kthread_stop */
-
-	if (kthread_should_stop()) {
-		__set_current_state(TASK_RUNNING);
-		return 0;
-	}
-
 	work = NULL;
 	spin_lock_irq(&stopper->lock);
 	if (!list_empty(&stopper->works)) {
@@ -274,8 +279,6 @@ repeat:
 		struct cpu_stop_done *done = work->done;
 		char ksym_buf[KSYM_NAME_LEN] __maybe_unused;
 
-		__set_current_state(TASK_RUNNING);
-
 		/* cpu stop callbacks are not allowed to sleep */
 		preempt_disable();
 
@@ -291,87 +294,55 @@ repeat:
 					  ksym_buf), arg);
 
 		cpu_stop_signal_done(done, true);
-	} else
-		schedule();
-
-	goto repeat;
+		goto repeat;
+	}
 }
 
 extern void sched_set_stop_task(int cpu, struct task_struct *stop);
 
-/* manage stopper for a cpu, mostly lifted from sched migration thread mgmt */
-static int __cpuinit cpu_stop_cpu_callback(struct notifier_block *nfb,
-					   unsigned long action, void *hcpu)
+static void cpu_stop_create(unsigned int cpu)
+{
+	sched_set_stop_task(cpu, per_cpu(cpu_stopper_task, cpu));
+}
+
+static void cpu_stop_park(unsigned int cpu)
 {
-	unsigned int cpu = (unsigned long)hcpu;
 	struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu);
-	struct task_struct *p = per_cpu(cpu_stopper_task, cpu);
+	struct cpu_stop_work *work;
+	unsigned long flags;
 
-	switch (action & ~CPU_TASKS_FROZEN) {
-	case CPU_UP_PREPARE:
-		BUG_ON(p || stopper->enabled || !list_empty(&stopper->works));
-		p = kthread_create_on_node(cpu_stopper_thread,
-					   stopper,
-					   cpu_to_node(cpu),
-					   "migration/%d", cpu);
-		if (IS_ERR(p))
-			return notifier_from_errno(PTR_ERR(p));
-		get_task_struct(p);
-		kthread_bind(p, cpu);
-		sched_set_stop_task(cpu, p);
-		per_cpu(cpu_stopper_task, cpu) = p;
-		break;
-
-	case CPU_ONLINE:
-		/* strictly unnecessary, as first user will wake it */
-		wake_up_process(p);
-		/* mark enabled */
-		spin_lock_irq(&stopper->lock);
-		stopper->enabled = true;
-		spin_unlock_irq(&stopper->lock);
-		break;
-
-#ifdef CONFIG_HOTPLUG_CPU
-	case CPU_UP_CANCELED:
-	case CPU_POST_DEAD:
-	{
-		struct cpu_stop_work *work;
-
-		sched_set_stop_task(cpu, NULL);
-		/* kill the stopper */
-		kthread_stop(p);
-		/* drain remaining works */
-		spin_lock_irq(&stopper->lock);
-		list_for_each_entry(work, &stopper->works, list)
-			cpu_stop_signal_done(work->done, false);
-		stopper->enabled = false;
-		spin_unlock_irq(&stopper->lock);
-		/* release the stopper */
-		put_task_struct(p);
-		per_cpu(cpu_stopper_task, cpu) = NULL;
-		break;
-	}
-#endif
-	}
+	/* drain remaining works */
+	spin_lock_irqsave(&stopper->lock, flags);
+	list_for_each_entry(work, &stopper->works, list)
+		cpu_stop_signal_done(work->done, false);
+	stopper->enabled = false;
+	spin_unlock_irqrestore(&stopper->lock, flags);
+}
 
-	return NOTIFY_OK;
+static void cpu_stop_unpark(unsigned int cpu)
+{
+	struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu);
+
+	spin_lock_irq(&stopper->lock);
+	stopper->enabled = true;
+	spin_unlock_irq(&stopper->lock);
 }
 
-/*
- * Give it a higher priority so that cpu stopper is available to other
- * cpu notifiers. It currently shares the same priority as sched
- * migration_notifier.
- */
-static struct notifier_block __cpuinitdata cpu_stop_cpu_notifier = {
-	.notifier_call	= cpu_stop_cpu_callback,
-	.priority	= 10,
+static struct smp_hotplug_thread cpu_stop_threads = {
+	.store			= &cpu_stopper_task,
+	.thread_should_run	= cpu_stop_should_run,
+	.thread_fn		= cpu_stopper_thread,
+	.thread_comm		= "migration/%u",
+	.create			= cpu_stop_create,
+	.setup			= cpu_stop_unpark,
+	.park			= cpu_stop_park,
+	.unpark			= cpu_stop_unpark,
+	.selfparking		= true,
 };
 
 static int __init cpu_stop_init(void)
 {
-	void *bcpu = (void *)(long)smp_processor_id();
 	unsigned int cpu;
-	int err;
 
 	for_each_possible_cpu(cpu) {
 		struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu);
@@ -380,15 +351,8 @@ static int __init cpu_stop_init(void)
 		INIT_LIST_HEAD(&stopper->works);
 	}
 
-	/* start one for the boot cpu */
-	err = cpu_stop_cpu_callback(&cpu_stop_cpu_notifier, CPU_UP_PREPARE,
-				    bcpu);
-	BUG_ON(err != NOTIFY_OK);
-	cpu_stop_cpu_callback(&cpu_stop_cpu_notifier, CPU_ONLINE, bcpu);
-	register_cpu_notifier(&cpu_stop_cpu_notifier);
-
+	BUG_ON(smpboot_register_percpu_thread(&cpu_stop_threads));
 	stop_machine_initialized = true;
-
 	return 0;
 }
 early_initcall(cpu_stop_init);
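Note that this page's diffstat is filtered to kernel/stop_machine.c, so
the selfparking half of the change is not visible above. Because
cpu_stop_threads sets .selfparking, the smpboot core deliberately does
not park the stopper along with the other per-cpu threads when a cpu goes
down: the stopper is the thread that executes take_cpu_down() on the
dying cpu, so it can only park itself once that work is finished. The
kernel/cpu.c side of the commit does this by parking current from within
take_cpu_down(); roughly (a reconstructed sketch, not taken from the diff
shown on this page):

static int __ref take_cpu_down(void *_param)
{
	struct take_cpu_down_param *param = _param;
	int err;

	/* Ensure this CPU doesn't handle any more interrupts. */
	err = __cpu_disable();
	if (err < 0)
		return err;

	cpu_notify(CPU_DYING | param->mod, param->hcpu);
	/* Park the stopper thread: current is this cpu's migration/N */
	kthread_park(current);
	return 0;
}

Parking instead of killing the thread is why cpu_stop_park() above only
needs to drain pending works and clear stopper->enabled; the task itself
survives offline/online cycles and is simply unparked again via
cpu_stop_unpark().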