diff options
author | Peter Zijlstra <a.p.zijlstra@chello.nl> | 2009-09-15 08:43:03 -0400 |
---|---|---|
committer | Ingo Molnar <mingo@elte.hu> | 2009-09-15 10:01:05 -0400 |
commit | e9c8431185d6c406887190519f6dbdd112641686 (patch) | |
tree | 3c502201451df7bbfcd653fee940fcb40bdaaeb6 | |
parent | 5f3edc1b1ead6d9bd45a85c551f44eff8fe76b9f (diff) |
sched: Add TASK_WAKING
We're going to want to drop rq->lock in try_to_wake_up() for a
longer period of time; however, we also want to deal with concurrent
waking of the same task, which is currently handled by holding
rq->lock.
So introduce a new TASK state, namely TASK_WAKING, which indicates
that someone is already waking the task (other wakers will fail the
p->state & state test).
We also keep preemption disabled over the whole ttwu().
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
LKML-Reference: <new-submission>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
-rw-r--r-- | include/linux/sched.h | 1 | ||||
-rw-r--r-- | kernel/sched.c | 31 |
2 files changed, 16 insertions, 16 deletions
diff --git a/include/linux/sched.h b/include/linux/sched.h index 5d3c9900943e..3b0ca66bd6ce 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h | |||
@@ -190,6 +190,7 @@ extern unsigned long long time_sync_thresh; | |||
190 | /* in tsk->state again */ | 190 | /* in tsk->state again */ |
191 | #define TASK_DEAD 64 | 191 | #define TASK_DEAD 64 |
192 | #define TASK_WAKEKILL 128 | 192 | #define TASK_WAKEKILL 128 |
193 | #define TASK_WAKING 256 | ||
193 | 194 | ||
194 | /* Convenience macros for the sake of set_task_state */ | 195 | /* Convenience macros for the sake of set_task_state */ |
195 | #define TASK_KILLABLE (TASK_WAKEKILL | TASK_UNINTERRUPTIBLE) | 196 | #define TASK_KILLABLE (TASK_WAKEKILL | TASK_UNINTERRUPTIBLE) |
diff --git a/kernel/sched.c b/kernel/sched.c index 32b7a81230c2..fc6fda881d2e 100644 --- a/kernel/sched.c +++ b/kernel/sched.c | |||
@@ -2310,7 +2310,6 @@ static int try_to_wake_up(struct task_struct *p, unsigned int state, int sync) | |||
2310 | { | 2310 | { |
2311 | int cpu, orig_cpu, this_cpu, success = 0; | 2311 | int cpu, orig_cpu, this_cpu, success = 0; |
2312 | unsigned long flags; | 2312 | unsigned long flags; |
2313 | long old_state; | ||
2314 | struct rq *rq; | 2313 | struct rq *rq; |
2315 | 2314 | ||
2316 | if (!sched_feat(SYNC_WAKEUPS)) | 2315 | if (!sched_feat(SYNC_WAKEUPS)) |
@@ -2332,11 +2331,12 @@ static int try_to_wake_up(struct task_struct *p, unsigned int state, int sync) | |||
2332 | } | 2331 | } |
2333 | #endif | 2332 | #endif |
2334 | 2333 | ||
2334 | this_cpu = get_cpu(); | ||
2335 | |||
2335 | smp_wmb(); | 2336 | smp_wmb(); |
2336 | rq = task_rq_lock(p, &flags); | 2337 | rq = task_rq_lock(p, &flags); |
2337 | update_rq_clock(rq); | 2338 | update_rq_clock(rq); |
2338 | old_state = p->state; | 2339 | if (!(p->state & state)) |
2339 | if (!(old_state & state)) | ||
2340 | goto out; | 2340 | goto out; |
2341 | 2341 | ||
2342 | if (p->se.on_rq) | 2342 | if (p->se.on_rq) |
@@ -2344,27 +2344,25 @@ static int try_to_wake_up(struct task_struct *p, unsigned int state, int sync) | |||
2344 | 2344 | ||
2345 | cpu = task_cpu(p); | 2345 | cpu = task_cpu(p); |
2346 | orig_cpu = cpu; | 2346 | orig_cpu = cpu; |
2347 | this_cpu = smp_processor_id(); | ||
2348 | 2347 | ||
2349 | #ifdef CONFIG_SMP | 2348 | #ifdef CONFIG_SMP |
2350 | if (unlikely(task_running(rq, p))) | 2349 | if (unlikely(task_running(rq, p))) |
2351 | goto out_activate; | 2350 | goto out_activate; |
2352 | 2351 | ||
2352 | /* | ||
2353 | * In order to handle concurrent wakeups and release the rq->lock | ||
2354 | * we put the task in TASK_WAKING state. | ||
2355 | */ | ||
2356 | p->state = TASK_WAKING; | ||
2357 | task_rq_unlock(rq, &flags); | ||
2358 | |||
2353 | cpu = p->sched_class->select_task_rq(p, SD_BALANCE_WAKE, sync); | 2359 | cpu = p->sched_class->select_task_rq(p, SD_BALANCE_WAKE, sync); |
2354 | if (cpu != orig_cpu) { | 2360 | if (cpu != orig_cpu) |
2355 | set_task_cpu(p, cpu); | 2361 | set_task_cpu(p, cpu); |
2356 | task_rq_unlock(rq, &flags); | ||
2357 | /* might preempt at this point */ | ||
2358 | rq = task_rq_lock(p, &flags); | ||
2359 | old_state = p->state; | ||
2360 | if (!(old_state & state)) | ||
2361 | goto out; | ||
2362 | if (p->se.on_rq) | ||
2363 | goto out_running; | ||
2364 | 2362 | ||
2365 | this_cpu = smp_processor_id(); | 2363 | rq = task_rq_lock(p, &flags); |
2366 | cpu = task_cpu(p); | 2364 | WARN_ON(p->state != TASK_WAKING); |
2367 | } | 2365 | cpu = task_cpu(p); |
2368 | 2366 | ||
2369 | #ifdef CONFIG_SCHEDSTATS | 2367 | #ifdef CONFIG_SCHEDSTATS |
2370 | schedstat_inc(rq, ttwu_count); | 2368 | schedstat_inc(rq, ttwu_count); |
@@ -2422,6 +2420,7 @@ out_running: | |||
2422 | #endif | 2420 | #endif |
2423 | out: | 2421 | out: |
2424 | task_rq_unlock(rq, &flags); | 2422 | task_rq_unlock(rq, &flags); |
2423 | put_cpu(); | ||
2425 | 2424 | ||
2426 | return success; | 2425 | return success; |
2427 | } | 2426 | } |