author     Peter Zijlstra <a.p.zijlstra@chello.nl>    2009-12-16 12:04:37 -0500
committer  Ingo Molnar <mingo@elte.hu>                2009-12-16 13:01:56 -0500
commit     3802290628348674985d14914f9bfee7b9084548 (patch)
tree       b6b513fa5651f570013f3eff86e843a9d52d1dcb /kernel/sched.c
parent     e2912009fb7b715728311b0d8fe327a1432b3f79 (diff)
sched: Fix sched_exec() balancing
Since we access ->cpus_allowed without holding rq->lock we need
a retry loop to validate the result; this comes nearly for free
when we merge sched_migrate_task() into sched_exec(), since that
already does the needed check.
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Mike Galbraith <efault@gmx.de>
LKML-Reference: <20091216170517.884743662@chello.nl>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
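
The core pattern of the fix — pick a destination from state read without the
lock, then take the lock and revalidate before acting, retrying if the state
changed underneath — can be illustrated outside the kernel. A minimal
userspace C sketch, assuming illustrative stand-in names (struct task,
allowed_mask, pick_target are not kernel APIs):

/* Minimal userspace sketch of the retry-loop pattern; all names here
 * (struct task, allowed_mask, pick_target) are illustrative stand-ins,
 * not kernel APIs. */
#include <pthread.h>

struct task {
        pthread_mutex_t lock;
        unsigned long allowed_mask;     /* analogous to ->cpus_allowed */
};

/* Stand-in for select_task_rq(): reads allowed_mask without the lock,
 * so the result may be stale by the time we act on it. */
static int pick_target(struct task *t)
{
        unsigned long mask = t->allowed_mask;   /* racy read */

        for (int i = 0; i < (int)(8 * sizeof(mask)); i++)
                if (mask & (1UL << i))
                        return i;
        return 0;
}

static void migrate_with_retry(struct task *t)
{
        int target;

again:
        target = pick_target(t);        /* computed without t->lock */

        pthread_mutex_lock(&t->lock);
        if (!(t->allowed_mask & (1UL << target))) {
                /* mask changed under us: drop the lock and retry,
                 * exactly what the patch's "goto again" does; assumes
                 * the mask stays non-empty, as ->cpus_allowed does */
                pthread_mutex_unlock(&t->lock);
                goto again;
        }
        /* target is now validated under the lock; act on it here */
        pthread_mutex_unlock(&t->lock);
}

The unlocked read keeps the common case cheap; the lock is only taken to make
the final check-and-act atomic, and the retry handles the rare race.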
Diffstat (limited to 'kernel/sched.c')
-rw-r--r--  kernel/sched.c  45
1 files changed, 23 insertions, 22 deletions
diff --git a/kernel/sched.c b/kernel/sched.c
index 33d7965f63f0..63e55ac242d1 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -2322,7 +2322,7 @@ void task_oncpu_function_call(struct task_struct *p,
  *
  *  - fork, @p is stable because it isn't on the tasklist yet
  *
- *  - exec, @p is unstable XXX
+ *  - exec, @p is unstable, retry loop
  *
  *  - wake-up, we serialize ->cpus_allowed against TASK_WAKING so
  *    we should be good.
@@ -3132,21 +3132,36 @@ static void double_rq_unlock(struct rq *rq1, struct rq *rq2)
 }
 
 /*
- * If dest_cpu is allowed for this process, migrate the task to it.
- * This is accomplished by forcing the cpu_allowed mask to only
- * allow dest_cpu, which will force the cpu onto dest_cpu. Then
- * the cpu_allowed mask is restored.
+ * sched_exec - execve() is a valuable balancing opportunity, because at
+ * this point the task has the smallest effective memory and cache footprint.
  */
-static void sched_migrate_task(struct task_struct *p, int dest_cpu)
+void sched_exec(void)
 {
+        struct task_struct *p = current;
         struct migration_req req;
+        int dest_cpu, this_cpu;
         unsigned long flags;
         struct rq *rq;
 
+again:
+        this_cpu = get_cpu();
+        dest_cpu = select_task_rq(p, SD_BALANCE_EXEC, 0);
+        if (dest_cpu == this_cpu) {
+                put_cpu();
+                return;
+        }
+
         rq = task_rq_lock(p, &flags);
+        put_cpu();
+
+        /*
+         * select_task_rq() can race against ->cpus_allowed
+         */
         if (!cpumask_test_cpu(dest_cpu, &p->cpus_allowed)
-            || unlikely(!cpu_active(dest_cpu)))
-                goto out;
+            || unlikely(!cpu_active(dest_cpu))) {
+                task_rq_unlock(rq, &flags);
+                goto again;
+        }
 
         /* force the process onto the specified CPU */
         if (migrate_task(p, dest_cpu, &req)) {
@@ -3161,24 +3176,10 @@ static void sched_migrate_task(struct task_struct *p, int dest_cpu)
 
                 return;
         }
-out:
         task_rq_unlock(rq, &flags);
 }
 
 /*
- * sched_exec - execve() is a valuable balancing opportunity, because at
- * this point the task has the smallest effective memory and cache footprint.
- */
-void sched_exec(void)
-{
-        int new_cpu, this_cpu = get_cpu();
-        new_cpu = select_task_rq(current, SD_BALANCE_EXEC, 0);
-        put_cpu();
-        if (new_cpu != this_cpu)
-                sched_migrate_task(current, new_cpu);
-}
-
-/*
  * pull_task - move a task from a remote runqueue to the local runqueue.
  * Both runqueues must be locked.
  */
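
Stitched together from the two hunks above, the post-patch sched_exec() reads
as follows; the lines between the hunks (the migration-thread hand-off inside
the migrate_task() branch) are outside the patch context and only summarized
in a comment:

void sched_exec(void)
{
        struct task_struct *p = current;
        struct migration_req req;
        int dest_cpu, this_cpu;
        unsigned long flags;
        struct rq *rq;

again:
        this_cpu = get_cpu();
        dest_cpu = select_task_rq(p, SD_BALANCE_EXEC, 0);
        if (dest_cpu == this_cpu) {
                put_cpu();
                return;
        }

        rq = task_rq_lock(p, &flags);
        put_cpu();

        /*
         * select_task_rq() can race against ->cpus_allowed
         */
        if (!cpumask_test_cpu(dest_cpu, &p->cpus_allowed)
            || unlikely(!cpu_active(dest_cpu))) {
                task_rq_unlock(rq, &flags);
                goto again;
        }

        /* force the process onto the specified CPU */
        if (migrate_task(p, dest_cpu, &req)) {
                /* unchanged by this patch (between the hunks): wake the
                 * migration thread, wait for req.done, then return */
                return;
        }
        task_rq_unlock(rq, &flags);
}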