path: root/kernel/sched.c
author     Peter Zijlstra <a.p.zijlstra@chello.nl>  2009-12-16 12:04:38 -0500
committer  Ingo Molnar <mingo@elte.hu>              2009-12-16 13:01:57 -0500
commit     5da9a0fb673a0ea0a093862f95f6b89b3390c31e (patch)
tree       fa4465d5ce050cdc42e63a2518ce2e1b58fa9c30 /kernel/sched.c
parent     3802290628348674985d14914f9bfee7b9084548 (diff)
sched: Fix select_task_rq() vs hotplug issues
Since select_task_rq() is now responsible for guaranteeing ->cpus_allowed
and cpu_active_mask, we need to verify this.

select_task_rq_rt() can blindly return smp_processor_id()/task_cpu()
without checking the valid masks, select_task_rq_fair() can do the same
in the rare case that all SD_flags are disabled.

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Mike Galbraith <efault@gmx.de>
LKML-Reference: <20091216170517.961475466@chello.nl>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
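[Editorial note] As an illustration of the new helper's cascade, not part of the patch: select_fallback_rq() tries, in order, (1) an allowed, active CPU on the same node, (2) any allowed, active CPU, (3) widening the allowed set and retrying. A minimal userspace sketch under those assumptions, using 64-bit masks in place of struct cpumask; pick_fallback_cpu() and first_common_cpu() are illustrative names, not kernel APIs:

/* Sketch of the select_fallback_rq() cascade; not kernel code. */
#include <stdint.h>
#include <stdio.h>

#define NR_CPUS 64

/* Lowest set bit of (a & b), or NR_CPUS if the intersection is empty. */
static int first_common_cpu(uint64_t a, uint64_t b)
{
	uint64_t both = a & b;
	return both ? __builtin_ctzll(both) : NR_CPUS;
}

static int pick_fallback_cpu(uint64_t allowed, uint64_t active, uint64_t node)
{
	int cpu;

	/* 1) Prefer an allowed, active CPU on the same node. */
	cpu = first_common_cpu(allowed & node, active);
	if (cpu < NR_CPUS)
		return cpu;

	/* 2) Any allowed, active CPU. */
	cpu = first_common_cpu(allowed, active);
	if (cpu < NR_CPUS)
		return cpu;

	/*
	 * 3) "No more Mr. Nice Guy": the kernel refreshes the mask from
	 * the task's cpuset; here we simply allow every CPU and retry.
	 */
	return first_common_cpu(~0ULL, active);
}

int main(void)
{
	/* Allowed on CPUs 2-3, node spans CPUs 0-3, only CPU 5 is active. */
	printf("fallback cpu: %d\n",
	       pick_fallback_cpu(0xcULL, 1ULL << 5, 0xfULL));	/* prints 5 */
	return 0;
}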
Diffstat (limited to 'kernel/sched.c')
-rw-r--r--  kernel/sched.c  75
1 file changed, 40 insertions(+), 35 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index 63e55ac242d1..cc40bdadee7a 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -2317,6 +2317,43 @@ void task_oncpu_function_call(struct task_struct *p,
 }
 
 #ifdef CONFIG_SMP
+static int select_fallback_rq(int cpu, struct task_struct *p)
+{
+	int dest_cpu;
+	const struct cpumask *nodemask = cpumask_of_node(cpu_to_node(cpu));
+
+	/* Look for allowed, online CPU in same node. */
+	for_each_cpu_and(dest_cpu, nodemask, cpu_active_mask)
+		if (cpumask_test_cpu(dest_cpu, &p->cpus_allowed))
+			return dest_cpu;
+
+	/* Any allowed, online CPU? */
+	dest_cpu = cpumask_any_and(&p->cpus_allowed, cpu_active_mask);
+	if (dest_cpu < nr_cpu_ids)
+		return dest_cpu;
+
+	/* No more Mr. Nice Guy. */
+	if (dest_cpu >= nr_cpu_ids) {
+		rcu_read_lock();
+		cpuset_cpus_allowed_locked(p, &p->cpus_allowed);
+		rcu_read_unlock();
+		dest_cpu = cpumask_any_and(cpu_active_mask, &p->cpus_allowed);
+
+		/*
+		 * Don't tell them about moving exiting tasks or
+		 * kernel threads (both mm NULL), since they never
+		 * leave kernel.
+		 */
+		if (p->mm && printk_ratelimit()) {
+			printk(KERN_INFO "process %d (%s) no "
+			       "longer affine to cpu%d\n",
+			       task_pid_nr(p), p->comm, cpu);
+		}
+	}
+
+	return dest_cpu;
+}
+
 /*
  * Called from:
  *
@@ -2343,14 +2380,8 @@ int select_task_rq(struct task_struct *p, int sd_flags, int wake_flags)
 	 *   not worry about this generic constraint ]
 	 */
 	if (unlikely(!cpumask_test_cpu(cpu, &p->cpus_allowed) ||
-		     !cpu_active(cpu))) {
-
-		cpu = cpumask_any_and(&p->cpus_allowed, cpu_active_mask);
-		/*
-		 * XXX: race against hot-plug modifying cpu_active_mask
-		 */
-		BUG_ON(cpu >= nr_cpu_ids);
-	}
+		     !cpu_active(cpu)))
+		cpu = select_fallback_rq(task_cpu(p), p);
 
 	return cpu;
 }
@@ -7319,36 +7350,10 @@ static int __migrate_task_irq(struct task_struct *p, int src_cpu, int dest_cpu)
 static void move_task_off_dead_cpu(int dead_cpu, struct task_struct *p)
 {
 	int dest_cpu;
-	const struct cpumask *nodemask = cpumask_of_node(cpu_to_node(dead_cpu));
 
 again:
-	/* Look for allowed, online CPU in same node. */
-	for_each_cpu_and(dest_cpu, nodemask, cpu_active_mask)
-		if (cpumask_test_cpu(dest_cpu, &p->cpus_allowed))
-			goto move;
-
-	/* Any allowed, online CPU? */
-	dest_cpu = cpumask_any_and(&p->cpus_allowed, cpu_active_mask);
-	if (dest_cpu < nr_cpu_ids)
-		goto move;
-
-	/* No more Mr. Nice Guy. */
-	if (dest_cpu >= nr_cpu_ids) {
-		cpuset_cpus_allowed_locked(p, &p->cpus_allowed);
-		dest_cpu = cpumask_any_and(cpu_active_mask, &p->cpus_allowed);
-
-		/*
-		 * Don't tell them about moving exiting tasks or
-		 * kernel threads (both mm NULL), since they never
-		 * leave kernel.
-		 */
-		if (p->mm && printk_ratelimit()) {
-			pr_info("process %d (%s) no longer affine to cpu%d\n",
-				task_pid_nr(p), p->comm, dead_cpu);
-		}
-	}
+	dest_cpu = select_fallback_rq(dead_cpu, p);
 
-move:
 	/* It can have affinity changed while we were choosing. */
 	if (unlikely(!__migrate_task_irq(p, dead_cpu, dest_cpu)))
 		goto again;
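
[Editorial note] A companion sketch of the caller-side shape of the fix: select_task_rq()'s result is trusted only when it lies in the intersection of the task's allowed mask and cpu_active_mask; anything else is now routed through the fallback path rather than the old racy BUG_ON(). validate_cpu() and fallback_cpu() are illustrative stand-ins (fallback_cpu() models only the "any allowed, active CPU" step), not kernel functions:

/* Sketch of the post-condition enforced on select_task_rq(); not kernel code. */
#include <stdint.h>
#include <assert.h>

/* Stand-in for select_fallback_rq(): pick any allowed, active CPU. */
static int fallback_cpu(uint64_t allowed, uint64_t active)
{
	uint64_t both = allowed & active;
	return both ? __builtin_ctzll(both) : -1;
}

/* Accept the scheduling class's pick only if it is allowed and active. */
static int validate_cpu(int cpu, uint64_t allowed, uint64_t active)
{
	if (!((allowed >> cpu) & 1) || !((active >> cpu) & 1))
		cpu = fallback_cpu(allowed, active);
	return cpu;
}

int main(void)
{
	/* The class blindly picked CPU 0, but CPU 0 just went inactive. */
	assert(validate_cpu(0, 0x3ULL, 0x2ULL) == 1);
	return 0;
}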