author		Peter Zijlstra <a.p.zijlstra@chello.nl>	2009-12-16 12:04:35 -0500
committer	Ingo Molnar <mingo@elte.hu>	2009-12-16 13:01:55 -0500
commit		06b83b5fbea273672822b6ee93e16781046553ec
tree		cd287173f5aacdf69289fe62f397a957c9f678fe /kernel/sched.c
parent		e4f4288842ee12747e10c354d72be7d424c0b627
sched: Use TASK_WAKING for fork wakeups

For later convenience, use TASK_WAKING for fresh tasks.
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Mike Galbraith <efault@gmx.de>
LKML-Reference: <20091216170517.732561278@chello.nl>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'kernel/sched.c')
-rw-r--r--	kernel/sched.c | 18 +++++++++---------
1 file changed, 9 insertions(+), 9 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index 1d8ca25dd6fb..1672823aabfe 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -2540,14 +2540,6 @@ static void __sched_fork(struct task_struct *p)
 #ifdef CONFIG_PREEMPT_NOTIFIERS
 	INIT_HLIST_HEAD(&p->preempt_notifiers);
 #endif
-
-	/*
-	 * We mark the process as running here, but have not actually
-	 * inserted it onto the runqueue yet. This guarantees that
-	 * nobody will actually run it, and a signal or other external
-	 * event cannot wake it up and insert it on the runqueue either.
-	 */
-	p->state = TASK_RUNNING;
 }
 
 /*
@@ -2558,6 +2550,12 @@ void sched_fork(struct task_struct *p, int clone_flags)
 	int cpu = get_cpu();
 
 	__sched_fork(p);
+	/*
+	 * We mark the process as waking here. This guarantees that
+	 * nobody will actually run it, and a signal or other external
+	 * event cannot wake it up and insert it on the runqueue either.
+	 */
+	p->state = TASK_WAKING;
 
 	/*
 	 * Revert to default priority/policy on fork if requested.
@@ -2626,7 +2624,8 @@ void wake_up_new_task(struct task_struct *p, unsigned long clone_flags)
 	struct rq *rq;
 
 	rq = task_rq_lock(p, &flags);
-	BUG_ON(p->state != TASK_RUNNING);
+	BUG_ON(p->state != TASK_WAKING);
+	p->state = TASK_RUNNING;
 	update_rq_clock(rq);
 	activate_task(rq, p, 0);
 	trace_sched_wakeup_new(rq, p, 1);
@@ -6984,6 +6983,7 @@ void __cpuinit init_idle(struct task_struct *idle, int cpu)
 	raw_spin_lock_irqsave(&rq->lock, flags);
 
 	__sched_fork(idle);
+	idle->state = TASK_RUNNING;
 	idle->se.exec_start = sched_clock();
 
 	cpumask_copy(&idle->cpus_allowed, cpumask_of(cpu));
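
Taken together, the hunks leave the fork path with a small state machine: sched_fork() parks a fresh task in TASK_WAKING, wake_up_new_task() is the only place that moves it to TASK_RUNNING (enforced by the new BUG_ON()), and init_idle() marks the idle thread TASK_RUNNING directly because it never passes through wake_up_new_task(). Below is a minimal, compilable sketch of that flow; struct task, the *_sketch helpers, and the plain assert() are stand-ins for illustration, not the kernel's actual structures or locking.

#include <assert.h>

/* Stand-in task states; the real definitions live in <linux/sched.h>. */
enum task_state { TASK_WAKING, TASK_RUNNING };

struct task {
	enum task_state state;
};

/* sched_fork(): a fresh task is parked in TASK_WAKING, so nothing can
 * run it, and no wakeup can slip it onto a runqueue prematurely. */
static void sched_fork_sketch(struct task *p)
{
	p->state = TASK_WAKING;
}

/* wake_up_new_task(): the only transition out of TASK_WAKING for a
 * fresh task, made just before it is activated on a runqueue. */
static void wake_up_new_task_sketch(struct task *p)
{
	assert(p->state == TASK_WAKING);	/* mirrors the new BUG_ON() */
	p->state = TASK_RUNNING;
	/* activate_task(rq, p, 0) would follow here in the kernel. */
}

/* init_idle(): idle threads never pass through wake_up_new_task(),
 * so they are marked TASK_RUNNING explicitly after __sched_fork(). */
static void init_idle_sketch(struct task *idle)
{
	idle->state = TASK_RUNNING;
}

int main(void)
{
	struct task child = { TASK_WAKING };
	struct task idle = { TASK_WAKING };

	sched_fork_sketch(&child);		/* child: TASK_WAKING */
	wake_up_new_task_sketch(&child);	/* child: TASK_RUNNING */
	init_idle_sketch(&idle);		/* idle: TASK_RUNNING */

	return child.state == TASK_RUNNING ? 0 : 1;
}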