diff options
author | Peter Zijlstra <peterz@infradead.org> | 2013-10-11 08:38:20 -0400 |
---|---|---|
committer | Ingo Molnar <mingo@kernel.org> | 2013-10-16 08:22:16 -0400 |
commit | 6acce3ef84520537f8a09a12c9ddbe814a584dd2 (patch) | |
tree | b4e117df4a57be6a040529c148480227c3d100cc /kernel/stop_machine.c | |
parent | 746023159c40c523b08a3bc3d213dac212385895 (diff) |
sched: Remove get_online_cpus() usage
Remove get_online_cpus() usage from the scheduler; there are 4 sites that
use it:
- sched_init_smp(); where it's completely superfluous since we're in
'early' boot and there simply cannot be any hotplugging.
- sched_getaffinity(); we already take a raw spinlock to protect the
task cpus_allowed mask, this disables preemption and therefore
also stabilizes cpu_online_mask as that's modified using
stop_machine. However switch to active mask for symmetry with
sched_setaffinity()/set_cpus_allowed_ptr(). We guarantee active
mask stability by inserting sync_rcu/sched() into _cpu_down.
- sched_setaffinity(); we don't appear to need get_online_cpus()
either; there are two sites where hotplug appears relevant:
* cpuset_cpus_allowed(); for the !cpuset case we use possible_mask,
for the cpuset case we hold task_lock, which is a spinlock and
thus for mainline disables preemption (might cause pain on RT).
* set_cpus_allowed_ptr(); Holds all scheduler locks and thus has
preemption properly disabled; also it already deals with hotplug
races explicitly where it releases them.
- migrate_swap(); we can make stop_two_cpus() do the heavy lifting for
us with a little trickery. By adding a sync_sched/rcu() after the
CPU_DOWN_PREPARE notifier we can provide preempt/rcu guarantees for
cpu_active_mask. Use these to validate that both our cpus are active
when queueing the stop work before we queue the stop_machine works
for take_cpu_down().
Signed-off-by: Peter Zijlstra <peterz@infradead.org>
Cc: "Srivatsa S. Bhat" <srivatsa.bhat@linux.vnet.ibm.com>
Cc: Paul McKenney <paulmck@linux.vnet.ibm.com>
Cc: Mel Gorman <mgorman@suse.de>
Cc: Rik van Riel <riel@redhat.com>
Cc: Srikar Dronamraju <srikar@linux.vnet.ibm.com>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Steven Rostedt <rostedt@goodmis.org>
Cc: Oleg Nesterov <oleg@redhat.com>
Link: http://lkml.kernel.org/r/20131011123820.GV3081@twins.programming.kicks-ass.net
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'kernel/stop_machine.c')
-rw-r--r-- | kernel/stop_machine.c | 26 |
1 files changed, 21 insertions, 5 deletions
diff --git a/kernel/stop_machine.c b/kernel/stop_machine.c index 32a6c44d8f78..c530bc5be7cf 100644 --- a/kernel/stop_machine.c +++ b/kernel/stop_machine.c | |||
@@ -234,11 +234,13 @@ static void irq_cpu_stop_queue_work(void *arg) | |||
234 | */ | 234 | */ |
235 | int stop_two_cpus(unsigned int cpu1, unsigned int cpu2, cpu_stop_fn_t fn, void *arg) | 235 | int stop_two_cpus(unsigned int cpu1, unsigned int cpu2, cpu_stop_fn_t fn, void *arg) |
236 | { | 236 | { |
237 | int call_cpu; | ||
238 | struct cpu_stop_done done; | 237 | struct cpu_stop_done done; |
239 | struct cpu_stop_work work1, work2; | 238 | struct cpu_stop_work work1, work2; |
240 | struct irq_cpu_stop_queue_work_info call_args; | 239 | struct irq_cpu_stop_queue_work_info call_args; |
241 | struct multi_stop_data msdata = { | 240 | struct multi_stop_data msdata; |
241 | |||
242 | preempt_disable(); | ||
243 | msdata = (struct multi_stop_data){ | ||
242 | .fn = fn, | 244 | .fn = fn, |
243 | .data = arg, | 245 | .data = arg, |
244 | .num_threads = 2, | 246 | .num_threads = 2, |
@@ -262,16 +264,30 @@ int stop_two_cpus(unsigned int cpu1, unsigned int cpu2, cpu_stop_fn_t fn, void * | |||
262 | set_state(&msdata, MULTI_STOP_PREPARE); | 264 | set_state(&msdata, MULTI_STOP_PREPARE); |
263 | 265 | ||
264 | /* | 266 | /* |
267 | * If we observe both CPUs active we know _cpu_down() cannot yet have | ||
268 | * queued its stop_machine works and therefore ours will get executed | ||
269 | * first. Or its not either one of our CPUs that's getting unplugged, | ||
270 | * in which case we don't care. | ||
271 | * | ||
272 | * This relies on the stopper workqueues to be FIFO. | ||
273 | */ | ||
274 | if (!cpu_active(cpu1) || !cpu_active(cpu2)) { | ||
275 | preempt_enable(); | ||
276 | return -ENOENT; | ||
277 | } | ||
278 | |||
279 | /* | ||
265 | * Queuing needs to be done by the lowest numbered CPU, to ensure | 280 | * Queuing needs to be done by the lowest numbered CPU, to ensure |
266 | * that works are always queued in the same order on every CPU. | 281 | * that works are always queued in the same order on every CPU. |
267 | * This prevents deadlocks. | 282 | * This prevents deadlocks. |
268 | */ | 283 | */ |
269 | call_cpu = min(cpu1, cpu2); | 284 | smp_call_function_single(min(cpu1, cpu2), |
270 | 285 | &irq_cpu_stop_queue_work, | |
271 | smp_call_function_single(call_cpu, &irq_cpu_stop_queue_work, | ||
272 | &call_args, 0); | 286 | &call_args, 0); |
287 | preempt_enable(); | ||
273 | 288 | ||
274 | wait_for_completion(&done.completion); | 289 | wait_for_completion(&done.completion); |
290 | |||
275 | return done.executed ? done.ret : -ENOENT; | 291 | return done.executed ? done.ret : -ENOENT; |
276 | } | 292 | } |
277 | 293 | ||