diff options
Diffstat (limited to 'kernel/sched.c')
-rw-r--r-- | kernel/sched.c | 18 |
1 file changed, 14 insertions(+), 4 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c index 235c421631d6..678335a8b390 100644 --- a/kernel/sched.c +++ b/kernel/sched.c | |||
@@ -4412,13 +4412,16 @@ EXPORT_SYMBOL_GPL(set_cpus_allowed); | |||
4412 | * | 4412 | * |
4413 | * So we race with normal scheduler movements, but that's OK, as long | 4413 | * So we race with normal scheduler movements, but that's OK, as long |
4414 | * as the task is no longer on this CPU. | 4414 | * as the task is no longer on this CPU. |
4415 | * | ||
4416 | * Returns non-zero if task was successfully migrated. | ||
4415 | */ | 4417 | */ |
4416 | static void __migrate_task(struct task_struct *p, int src_cpu, int dest_cpu) | 4418 | static int __migrate_task(struct task_struct *p, int src_cpu, int dest_cpu) |
4417 | { | 4419 | { |
4418 | runqueue_t *rq_dest, *rq_src; | 4420 | runqueue_t *rq_dest, *rq_src; |
4421 | int ret = 0; | ||
4419 | 4422 | ||
4420 | if (unlikely(cpu_is_offline(dest_cpu))) | 4423 | if (unlikely(cpu_is_offline(dest_cpu))) |
4421 | return; | 4424 | return ret; |
4422 | 4425 | ||
4423 | rq_src = cpu_rq(src_cpu); | 4426 | rq_src = cpu_rq(src_cpu); |
4424 | rq_dest = cpu_rq(dest_cpu); | 4427 | rq_dest = cpu_rq(dest_cpu); |
@@ -4446,9 +4449,10 @@ static void __migrate_task(struct task_struct *p, int src_cpu, int dest_cpu) | |||
4446 | if (TASK_PREEMPTS_CURR(p, rq_dest)) | 4449 | if (TASK_PREEMPTS_CURR(p, rq_dest)) |
4447 | resched_task(rq_dest->curr); | 4450 | resched_task(rq_dest->curr); |
4448 | } | 4451 | } |
4449 | 4452 | ret = 1; | |
4450 | out: | 4453 | out: |
4451 | double_rq_unlock(rq_src, rq_dest); | 4454 | double_rq_unlock(rq_src, rq_dest); |
4455 | return ret; | ||
4452 | } | 4456 | } |
4453 | 4457 | ||
4454 | /* | 4458 | /* |
@@ -4518,9 +4522,12 @@ wait_to_die: | |||
4518 | /* Figure out where task on dead CPU should go, use force if necessary. */ | 4522 | /* Figure out where task on dead CPU should go, use force if necessary. */ |
4519 | static void move_task_off_dead_cpu(int dead_cpu, struct task_struct *tsk) | 4523 | static void move_task_off_dead_cpu(int dead_cpu, struct task_struct *tsk) |
4520 | { | 4524 | { |
4525 | runqueue_t *rq; | ||
4526 | unsigned long flags; | ||
4521 | int dest_cpu; | 4527 | int dest_cpu; |
4522 | cpumask_t mask; | 4528 | cpumask_t mask; |
4523 | 4529 | ||
4530 | restart: | ||
4524 | /* On same node? */ | 4531 | /* On same node? */ |
4525 | mask = node_to_cpumask(cpu_to_node(dead_cpu)); | 4532 | mask = node_to_cpumask(cpu_to_node(dead_cpu)); |
4526 | cpus_and(mask, mask, tsk->cpus_allowed); | 4533 | cpus_and(mask, mask, tsk->cpus_allowed); |
@@ -4532,8 +4539,10 @@ static void move_task_off_dead_cpu(int dead_cpu, struct task_struct *tsk) | |||
4532 | 4539 | ||
4533 | /* No more Mr. Nice Guy. */ | 4540 | /* No more Mr. Nice Guy. */ |
4534 | if (dest_cpu == NR_CPUS) { | 4541 | if (dest_cpu == NR_CPUS) { |
4542 | rq = task_rq_lock(tsk, &flags); | ||
4535 | cpus_setall(tsk->cpus_allowed); | 4543 | cpus_setall(tsk->cpus_allowed); |
4536 | dest_cpu = any_online_cpu(tsk->cpus_allowed); | 4544 | dest_cpu = any_online_cpu(tsk->cpus_allowed); |
4545 | task_rq_unlock(rq, &flags); | ||
4537 | 4546 | ||
4538 | /* | 4547 | /* |
4539 | * Don't tell them about moving exiting tasks or | 4548 | * Don't tell them about moving exiting tasks or |
@@ -4545,7 +4554,8 @@ static void move_task_off_dead_cpu(int dead_cpu, struct task_struct *tsk) | |||
4545 | "longer affine to cpu%d\n", | 4554 | "longer affine to cpu%d\n", |
4546 | tsk->pid, tsk->comm, dead_cpu); | 4555 | tsk->pid, tsk->comm, dead_cpu); |
4547 | } | 4556 | } |
4548 | __migrate_task(tsk, dead_cpu, dest_cpu); | 4557 | if (!__migrate_task(tsk, dead_cpu, dest_cpu)) |
4558 | goto restart; | ||
4549 | } | 4559 | } |
4550 | 4560 | ||
4551 | /* | 4561 | /* |