author | Matthew Wilcox <matthew@wil.cx> | 2007-12-06 11:07:07 -0500
committer | Matthew Wilcox <willy@linux.intel.com> | 2007-12-06 17:34:59 -0500
commit | d9514f6c6b95b5a747ba902858eff577281e8659
tree | 9fb8130dd44f2a1e95f8de55ab1a24064bab83f0 /kernel/sched.c
parent | 6618a3e275519e10001a2ac4669f46141d4c108b
sched: Use task_contributes_to_load, TASK_ALL and TASK_NORMAL
Signed-off-by: Matthew Wilcox <willy@linux.intel.com>
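This patch switches open-coded task-state masks in kernel/sched.c over to the helper macros introduced earlier in the same series. As a rough sketch, reconstructed from the masks the diff below removes (the authoritative definitions live in include/linux/sched.h and may differ in detail), the helpers expand as follows:

```c
/* Sketch of the helpers used below, reconstructed from the masks this
 * patch replaces; see include/linux/sched.h for the real definitions.
 */

/* States an ordinary wake-up should target. */
#define TASK_NORMAL	(TASK_INTERRUPTIBLE | TASK_UNINTERRUPTIBLE)

/* Every wakeable state, including stopped and traced tasks. */
#define TASK_ALL	(TASK_NORMAL | TASK_STOPPED | TASK_TRACED)

/* Only uninterruptible sleepers count toward the load average. */
#define task_contributes_to_load(task)	\
		((task->state & TASK_UNINTERRUPTIBLE) != 0)
```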
Diffstat (limited to 'kernel/sched.c')
-rw-r--r-- | kernel/sched.c | 13
1 file changed, 5 insertions, 8 deletions
diff --git a/kernel/sched.c b/kernel/sched.c
index 67d9d1799d86..50a0faae585f 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -992,7 +992,7 @@ static int effective_prio(struct task_struct *p)
  */
 static void activate_task(struct rq *rq, struct task_struct *p, int wakeup)
 {
-	if (p->state == TASK_UNINTERRUPTIBLE)
+	if (task_contributes_to_load(p))
 		rq->nr_uninterruptible--;
 
 	enqueue_task(rq, p, wakeup);
@@ -1004,7 +1004,7 @@ static void activate_task(struct rq *rq, struct task_struct *p, int wakeup)
  */
 static void deactivate_task(struct rq *rq, struct task_struct *p, int sleep)
 {
-	if (p->state == TASK_UNINTERRUPTIBLE)
+	if (task_contributes_to_load(p))
 		rq->nr_uninterruptible++;
 
 	dequeue_task(rq, p, sleep);
@@ -1646,8 +1646,7 @@ out:
 
 int fastcall wake_up_process(struct task_struct *p)
 {
-	return try_to_wake_up(p, TASK_STOPPED | TASK_TRACED |
-			      TASK_INTERRUPTIBLE | TASK_UNINTERRUPTIBLE, 0);
+	return try_to_wake_up(p, TASK_ALL, 0);
 }
 EXPORT_SYMBOL(wake_up_process);
 
@@ -3857,8 +3856,7 @@ void complete(struct completion *x)
 
 	spin_lock_irqsave(&x->wait.lock, flags);
 	x->done++;
-	__wake_up_common(&x->wait, TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE,
-			 1, 0, NULL);
+	__wake_up_common(&x->wait, TASK_NORMAL, 1, 0, NULL);
 	spin_unlock_irqrestore(&x->wait.lock, flags);
 }
 EXPORT_SYMBOL(complete);
@@ -3869,8 +3867,7 @@ void complete_all(struct completion *x)
 
 	spin_lock_irqsave(&x->wait.lock, flags);
 	x->done += UINT_MAX/2;
-	__wake_up_common(&x->wait, TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE,
-			 0, 0, NULL);
+	__wake_up_common(&x->wait, TASK_NORMAL, 0, 0, NULL);
 	spin_unlock_irqrestore(&x->wait.lock, flags);
 }
 EXPORT_SYMBOL(complete_all);