diff options
| author | Oleg Nesterov <oleg@redhat.com> | 2010-03-15 05:10:19 -0400 |
|---|---|---|
| committer | Ingo Molnar <mingo@elte.hu> | 2010-04-02 14:12:02 -0400 |
| commit | 30da688ef6b76e01969b00608202fff1eed2accc (patch) | |
| tree | f4068cb8cf29f1d93d8489b162f41b7ac15a3d0c | |
| parent | c1804d547dc098363443667609c272d1e4d15ee8 (diff) | |
sched: sched_exec(): Remove the select_fallback_rq() logic
sched_exec()->select_task_rq() reads/updates ->cpus_allowed lockless.
This can race with other CPUs updating our ->cpus_allowed, and this
looks meaningless to me.
The task is current and running, it must have online cpus in ->cpus_allowed,
the fallback mode is bogus. And, if ->sched_class returns the "wrong" cpu,
this likely means we raced with set_cpus_allowed() which was called
for a reason — why should sched_exec() retry and call ->select_task_rq()
again?
Change the code to call sched_class->select_task_rq() directly and do
nothing if the returned cpu is wrong after re-checking under rq->lock.
From now on, task_struct->cpus_allowed is always stable under TASK_WAKING, and
select_fallback_rq() is always called either under rq->lock or by a caller
that owns TASK_WAKING (select_task_rq).
Signed-off-by: Oleg Nesterov <oleg@redhat.com>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
LKML-Reference: <20100315091019.GA9141@redhat.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
| -rw-r--r-- | kernel/sched.c | 25 |
1 files changed, 8 insertions, 17 deletions
diff --git a/kernel/sched.c b/kernel/sched.c index f475c608b073..165b532dd8c2 100644 --- a/kernel/sched.c +++ b/kernel/sched.c | |||
| @@ -2280,6 +2280,9 @@ void task_oncpu_function_call(struct task_struct *p, | |||
| 2280 | } | 2280 | } |
| 2281 | 2281 | ||
| 2282 | #ifdef CONFIG_SMP | 2282 | #ifdef CONFIG_SMP |
| 2283 | /* | ||
| 2284 | * ->cpus_allowed is protected by either TASK_WAKING or rq->lock held. | ||
| 2285 | */ | ||
| 2283 | static int select_fallback_rq(int cpu, struct task_struct *p) | 2286 | static int select_fallback_rq(int cpu, struct task_struct *p) |
| 2284 | { | 2287 | { |
| 2285 | int dest_cpu; | 2288 | int dest_cpu; |
| @@ -2316,12 +2319,7 @@ static int select_fallback_rq(int cpu, struct task_struct *p) | |||
| 2316 | } | 2319 | } |
| 2317 | 2320 | ||
| 2318 | /* | 2321 | /* |
| 2319 | * Gets called from 3 sites (exec, fork, wakeup), since it is called without | 2322 | * The caller (fork, wakeup) owns TASK_WAKING, ->cpus_allowed is stable. |
| 2320 | * holding rq->lock we need to ensure ->cpus_allowed is stable, this is done | ||
| 2321 | * by: | ||
| 2322 | * | ||
| 2323 | * exec: is unstable, retry loop | ||
| 2324 | * fork & wake-up: serialize ->cpus_allowed against TASK_WAKING | ||
| 2325 | */ | 2323 | */ |
| 2326 | static inline | 2324 | static inline |
| 2327 | int select_task_rq(struct task_struct *p, int sd_flags, int wake_flags) | 2325 | int select_task_rq(struct task_struct *p, int sd_flags, int wake_flags) |
| @@ -3076,9 +3074,8 @@ void sched_exec(void) | |||
| 3076 | unsigned long flags; | 3074 | unsigned long flags; |
| 3077 | struct rq *rq; | 3075 | struct rq *rq; |
| 3078 | 3076 | ||
| 3079 | again: | ||
| 3080 | this_cpu = get_cpu(); | 3077 | this_cpu = get_cpu(); |
| 3081 | dest_cpu = select_task_rq(p, SD_BALANCE_EXEC, 0); | 3078 | dest_cpu = p->sched_class->select_task_rq(p, SD_BALANCE_EXEC, 0); |
| 3082 | if (dest_cpu == this_cpu) { | 3079 | if (dest_cpu == this_cpu) { |
| 3083 | put_cpu(); | 3080 | put_cpu(); |
| 3084 | return; | 3081 | return; |
| @@ -3086,18 +3083,12 @@ again: | |||
| 3086 | 3083 | ||
| 3087 | rq = task_rq_lock(p, &flags); | 3084 | rq = task_rq_lock(p, &flags); |
| 3088 | put_cpu(); | 3085 | put_cpu(); |
| 3089 | |||
| 3090 | /* | 3086 | /* |
| 3091 | * select_task_rq() can race against ->cpus_allowed | 3087 | * select_task_rq() can race against ->cpus_allowed |
| 3092 | */ | 3088 | */ |
| 3093 | if (!cpumask_test_cpu(dest_cpu, &p->cpus_allowed) | 3089 | if (cpumask_test_cpu(dest_cpu, &p->cpus_allowed) && |
| 3094 | || unlikely(!cpu_active(dest_cpu))) { | 3090 | likely(cpu_active(dest_cpu)) && |
| 3095 | task_rq_unlock(rq, &flags); | 3091 | migrate_task(p, dest_cpu, &req)) { |
| 3096 | goto again; | ||
| 3097 | } | ||
| 3098 | |||
| 3099 | /* force the process onto the specified CPU */ | ||
| 3100 | if (migrate_task(p, dest_cpu, &req)) { | ||
| 3101 | /* Need to wait for migration thread (might exit: take ref). */ | 3092 | /* Need to wait for migration thread (might exit: take ref). */ |
| 3102 | struct task_struct *mt = rq->migration_thread; | 3093 | struct task_struct *mt = rq->migration_thread; |
| 3103 | 3094 | ||
