diff options
author | Thomas Gleixner <tglx@linutronix.de> | 2013-01-31 07:11:13 -0500 |
---|---|---|
committer | Thomas Gleixner <tglx@linutronix.de> | 2013-02-14 09:29:37 -0500 |
commit | 860a0ffaa3e1a9cf0ebb5f43d6a2a2ce67463e93 (patch) | |
tree | 004c5b928e59605276609459d92db21bb41204e3 /kernel | |
parent | 7d7e499f7333f68b7e7f67d14b9c1480913b4afb (diff) |
stop_machine: Store task reference in a separate per cpu variable
To allow the stopper thread to be managed by the smpboot thread
infrastructure, separate the task storage out of the stopper data
structure.
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Rusty Russell <rusty@rustcorp.com.au>
Cc: Paul McKenney <paulmck@linux.vnet.ibm.com>
Cc: Srivatsa S. Bhat <srivatsa.bhat@linux.vnet.ibm.com>
Cc: Arjan van de Veen <arjan@infradead.org>
Cc: Paul Turner <pjt@google.com>
Cc: Richard Weinberger <rw@linutronix.de>
Cc: Magnus Damm <magnus.damm@gmail.com>
Link: http://lkml.kernel.org/r/20130131120741.626690384@linutronix.de
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Diffstat (limited to 'kernel')
-rw-r--r-- | kernel/stop_machine.c | 32 |
1 file changed, 16 insertions(+), 16 deletions(-)
diff --git a/kernel/stop_machine.c b/kernel/stop_machine.c index 2f194e965715..aaac68c5c3be 100644 --- a/kernel/stop_machine.c +++ b/kernel/stop_machine.c | |||
@@ -37,10 +37,10 @@ struct cpu_stopper { | |||
37 | spinlock_t lock; | 37 | spinlock_t lock; |
38 | bool enabled; /* is this stopper enabled? */ | 38 | bool enabled; /* is this stopper enabled? */ |
39 | struct list_head works; /* list of pending works */ | 39 | struct list_head works; /* list of pending works */ |
40 | struct task_struct *thread; /* stopper thread */ | ||
41 | }; | 40 | }; |
42 | 41 | ||
43 | static DEFINE_PER_CPU(struct cpu_stopper, cpu_stopper); | 42 | static DEFINE_PER_CPU(struct cpu_stopper, cpu_stopper); |
43 | static DEFINE_PER_CPU(struct task_struct *, cpu_stopper_task); | ||
44 | static bool stop_machine_initialized = false; | 44 | static bool stop_machine_initialized = false; |
45 | 45 | ||
46 | static void cpu_stop_init_done(struct cpu_stop_done *done, unsigned int nr_todo) | 46 | static void cpu_stop_init_done(struct cpu_stop_done *done, unsigned int nr_todo) |
@@ -62,16 +62,18 @@ static void cpu_stop_signal_done(struct cpu_stop_done *done, bool executed) | |||
62 | } | 62 | } |
63 | 63 | ||
64 | /* queue @work to @stopper. if offline, @work is completed immediately */ | 64 | /* queue @work to @stopper. if offline, @work is completed immediately */ |
65 | static void cpu_stop_queue_work(struct cpu_stopper *stopper, | 65 | static void cpu_stop_queue_work(unsigned int cpu, struct cpu_stop_work *work) |
66 | struct cpu_stop_work *work) | ||
67 | { | 66 | { |
67 | struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu); | ||
68 | struct task_struct *p = per_cpu(cpu_stopper_task, cpu); | ||
69 | |||
68 | unsigned long flags; | 70 | unsigned long flags; |
69 | 71 | ||
70 | spin_lock_irqsave(&stopper->lock, flags); | 72 | spin_lock_irqsave(&stopper->lock, flags); |
71 | 73 | ||
72 | if (stopper->enabled) { | 74 | if (stopper->enabled) { |
73 | list_add_tail(&work->list, &stopper->works); | 75 | list_add_tail(&work->list, &stopper->works); |
74 | wake_up_process(stopper->thread); | 76 | wake_up_process(p); |
75 | } else | 77 | } else |
76 | cpu_stop_signal_done(work->done, false); | 78 | cpu_stop_signal_done(work->done, false); |
77 | 79 | ||
@@ -108,7 +110,7 @@ int stop_one_cpu(unsigned int cpu, cpu_stop_fn_t fn, void *arg) | |||
108 | struct cpu_stop_work work = { .fn = fn, .arg = arg, .done = &done }; | 110 | struct cpu_stop_work work = { .fn = fn, .arg = arg, .done = &done }; |
109 | 111 | ||
110 | cpu_stop_init_done(&done, 1); | 112 | cpu_stop_init_done(&done, 1); |
111 | cpu_stop_queue_work(&per_cpu(cpu_stopper, cpu), &work); | 113 | cpu_stop_queue_work(cpu, &work); |
112 | wait_for_completion(&done.completion); | 114 | wait_for_completion(&done.completion); |
113 | return done.executed ? done.ret : -ENOENT; | 115 | return done.executed ? done.ret : -ENOENT; |
114 | } | 116 | } |
@@ -130,7 +132,7 @@ void stop_one_cpu_nowait(unsigned int cpu, cpu_stop_fn_t fn, void *arg, | |||
130 | struct cpu_stop_work *work_buf) | 132 | struct cpu_stop_work *work_buf) |
131 | { | 133 | { |
132 | *work_buf = (struct cpu_stop_work){ .fn = fn, .arg = arg, }; | 134 | *work_buf = (struct cpu_stop_work){ .fn = fn, .arg = arg, }; |
133 | cpu_stop_queue_work(&per_cpu(cpu_stopper, cpu), work_buf); | 135 | cpu_stop_queue_work(cpu, work_buf); |
134 | } | 136 | } |
135 | 137 | ||
136 | /* static data for stop_cpus */ | 138 | /* static data for stop_cpus */ |
@@ -159,8 +161,7 @@ static void queue_stop_cpus_work(const struct cpumask *cpumask, | |||
159 | */ | 161 | */ |
160 | preempt_disable(); | 162 | preempt_disable(); |
161 | for_each_cpu(cpu, cpumask) | 163 | for_each_cpu(cpu, cpumask) |
162 | cpu_stop_queue_work(&per_cpu(cpu_stopper, cpu), | 164 | cpu_stop_queue_work(cpu, &per_cpu(stop_cpus_work, cpu)); |
163 | &per_cpu(stop_cpus_work, cpu)); | ||
164 | preempt_enable(); | 165 | preempt_enable(); |
165 | } | 166 | } |
166 | 167 | ||
@@ -304,12 +305,11 @@ static int __cpuinit cpu_stop_cpu_callback(struct notifier_block *nfb, | |||
304 | { | 305 | { |
305 | unsigned int cpu = (unsigned long)hcpu; | 306 | unsigned int cpu = (unsigned long)hcpu; |
306 | struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu); | 307 | struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu); |
307 | struct task_struct *p; | 308 | struct task_struct *p = per_cpu(cpu_stopper_task, cpu); |
308 | 309 | ||
309 | switch (action & ~CPU_TASKS_FROZEN) { | 310 | switch (action & ~CPU_TASKS_FROZEN) { |
310 | case CPU_UP_PREPARE: | 311 | case CPU_UP_PREPARE: |
311 | BUG_ON(stopper->thread || stopper->enabled || | 312 | BUG_ON(p || stopper->enabled || !list_empty(&stopper->works)); |
312 | !list_empty(&stopper->works)); | ||
313 | p = kthread_create_on_node(cpu_stopper_thread, | 313 | p = kthread_create_on_node(cpu_stopper_thread, |
314 | stopper, | 314 | stopper, |
315 | cpu_to_node(cpu), | 315 | cpu_to_node(cpu), |
@@ -319,12 +319,12 @@ static int __cpuinit cpu_stop_cpu_callback(struct notifier_block *nfb, | |||
319 | get_task_struct(p); | 319 | get_task_struct(p); |
320 | kthread_bind(p, cpu); | 320 | kthread_bind(p, cpu); |
321 | sched_set_stop_task(cpu, p); | 321 | sched_set_stop_task(cpu, p); |
322 | stopper->thread = p; | 322 | per_cpu(cpu_stopper_task, cpu) = p; |
323 | break; | 323 | break; |
324 | 324 | ||
325 | case CPU_ONLINE: | 325 | case CPU_ONLINE: |
326 | /* strictly unnecessary, as first user will wake it */ | 326 | /* strictly unnecessary, as first user will wake it */ |
327 | wake_up_process(stopper->thread); | 327 | wake_up_process(p); |
328 | /* mark enabled */ | 328 | /* mark enabled */ |
329 | spin_lock_irq(&stopper->lock); | 329 | spin_lock_irq(&stopper->lock); |
330 | stopper->enabled = true; | 330 | stopper->enabled = true; |
@@ -339,7 +339,7 @@ static int __cpuinit cpu_stop_cpu_callback(struct notifier_block *nfb, | |||
339 | 339 | ||
340 | sched_set_stop_task(cpu, NULL); | 340 | sched_set_stop_task(cpu, NULL); |
341 | /* kill the stopper */ | 341 | /* kill the stopper */ |
342 | kthread_stop(stopper->thread); | 342 | kthread_stop(p); |
343 | /* drain remaining works */ | 343 | /* drain remaining works */ |
344 | spin_lock_irq(&stopper->lock); | 344 | spin_lock_irq(&stopper->lock); |
345 | list_for_each_entry(work, &stopper->works, list) | 345 | list_for_each_entry(work, &stopper->works, list) |
@@ -347,8 +347,8 @@ static int __cpuinit cpu_stop_cpu_callback(struct notifier_block *nfb, | |||
347 | stopper->enabled = false; | 347 | stopper->enabled = false; |
348 | spin_unlock_irq(&stopper->lock); | 348 | spin_unlock_irq(&stopper->lock); |
349 | /* release the stopper */ | 349 | /* release the stopper */ |
350 | put_task_struct(stopper->thread); | 350 | put_task_struct(p); |
351 | stopper->thread = NULL; | 351 | per_cpu(cpu_stopper_task, cpu) = NULL; |
352 | break; | 352 | break; |
353 | } | 353 | } |
354 | #endif | 354 | #endif |