author		Linus Torvalds <torvalds@linux-foundation.org>	2008-01-31 19:45:47 -0500
committer	Linus Torvalds <torvalds@linux-foundation.org>	2008-01-31 19:45:47 -0500
commit		75659ca0c10992dcb39258518368a0f6f56e935d (patch)
tree		5d014ceb2f10158061a23d0d976f9a613d85e659 /kernel/sched.c
parent		fbdde7bd274d74729954190f99afcb1e3d9bbfba (diff)
parent		2dfe485a2c8afa54cb069fcf48476f6c90ea3fdf (diff)
Merge branch 'task_killable' of git://git.kernel.org/pub/scm/linux/kernel/git/willy/misc
* 'task_killable' of git://git.kernel.org/pub/scm/linux/kernel/git/willy/misc: (22 commits)
Remove commented-out code copied from NFS
NFS: Switch from intr mount option to TASK_KILLABLE
Add wait_for_completion_killable
Add wait_event_killable
Add schedule_timeout_killable
Use mutex_lock_killable in vfs_readdir
Add mutex_lock_killable
Use lock_page_killable
Add lock_page_killable
Add fatal_signal_pending
Add TASK_WAKEKILL
exit: Use task_is_*
signal: Use task_is_*
sched: Use task_contributes_to_load, TASK_ALL and TASK_NORMAL
ptrace: Use task_is_*
power: Use task_is_*
wait: Use TASK_NORMAL
proc/base.c: Use task_is_*
proc/array.c: Use TASK_REPORT
perfmon: Use task_is_*
...
Fixed up conflicts in NFS/sunrpc manually..
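The series above converts a number of interruptible sleeps into the new killable primitives. As a rough illustration of the pattern (hypothetical driver code, not part of this merge; demo_wq, demo_ready, demo_wait and demo_signal are made-up names), the conversion might look like this:

/*
 * Hypothetical example: converting an interruptible wait to a killable
 * one.  With TASK_KILLABLE the sleeper still counts toward the load
 * average (like TASK_UNINTERRUPTIBLE) but can be woken by a fatal
 * signal, so a hung server no longer leaves the process permanently
 * stuck in 'D' state.
 */
#include <linux/wait.h>
#include <linux/sched.h>

static DECLARE_WAIT_QUEUE_HEAD(demo_wq);
static int demo_ready;

static int demo_wait(void)
{
	/* Before: interruptible, any signal aborts the wait.
	 *	if (wait_event_interruptible(demo_wq, demo_ready))
	 *		return -ERESTARTSYS;
	 */

	/* After: only fatal signals (SIGKILL) abort the wait. */
	if (wait_event_killable(demo_wq, demo_ready))
		return -ERESTARTSYS;
	return 0;
}

/* The waker side is unchanged. */
static void demo_signal(void)
{
	demo_ready = 1;
	wake_up(&demo_wq);
}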
Diffstat (limited to 'kernel/sched.c')
-rw-r--r--	kernel/sched.c	28
1 file changed, 18 insertions(+), 10 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index 8355e007e021..9474b23c28bf 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -1350,7 +1350,7 @@ static int effective_prio(struct task_struct *p)
  */
 static void activate_task(struct rq *rq, struct task_struct *p, int wakeup)
 {
-	if (p->state == TASK_UNINTERRUPTIBLE)
+	if (task_contributes_to_load(p))
 		rq->nr_uninterruptible--;
 
 	enqueue_task(rq, p, wakeup);
@@ -1362,7 +1362,7 @@ static void activate_task(struct rq *rq, struct task_struct *p, int wakeup)
  */
 static void deactivate_task(struct rq *rq, struct task_struct *p, int sleep)
 {
-	if (p->state == TASK_UNINTERRUPTIBLE)
+	if (task_contributes_to_load(p))
 		rq->nr_uninterruptible++;
 
 	dequeue_task(rq, p, sleep);
@@ -1895,8 +1895,7 @@ out:
 
 int fastcall wake_up_process(struct task_struct *p)
 {
-	return try_to_wake_up(p, TASK_STOPPED | TASK_TRACED |
-			      TASK_INTERRUPTIBLE | TASK_UNINTERRUPTIBLE, 0);
+	return try_to_wake_up(p, TASK_ALL, 0);
 }
 EXPORT_SYMBOL(wake_up_process);
 
@@ -4124,8 +4123,7 @@ void complete(struct completion *x)
 
 	spin_lock_irqsave(&x->wait.lock, flags);
 	x->done++;
-	__wake_up_common(&x->wait, TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE,
-			 1, 0, NULL);
+	__wake_up_common(&x->wait, TASK_NORMAL, 1, 0, NULL);
 	spin_unlock_irqrestore(&x->wait.lock, flags);
 }
 EXPORT_SYMBOL(complete);
@@ -4136,8 +4134,7 @@ void complete_all(struct completion *x)
 
 	spin_lock_irqsave(&x->wait.lock, flags);
 	x->done += UINT_MAX/2;
-	__wake_up_common(&x->wait, TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE,
-			 0, 0, NULL);
+	__wake_up_common(&x->wait, TASK_NORMAL, 0, 0, NULL);
 	spin_unlock_irqrestore(&x->wait.lock, flags);
 }
 EXPORT_SYMBOL(complete_all);
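The hunks above lean on helpers and state masks introduced earlier in this series (task_contributes_to_load, TASK_NORMAL, TASK_ALL, TASK_WAKEKILL, TASK_KILLABLE). A sketch of how they are plausibly defined in include/linux/sched.h, reconstructed from the identifiers used here rather than quoted from the patch (exact values and spellings may differ):

/*
 * Reconstructed for illustration only; see include/linux/sched.h in the
 * merged tree for the authoritative definitions.
 */
#define TASK_WAKEKILL		128	/* wake the task on fatal signals */
#define TASK_KILLABLE		(TASK_WAKEKILL | TASK_UNINTERRUPTIBLE)

/* masks used by wake_up_process() and __wake_up_common() above */
#define TASK_NORMAL		(TASK_INTERRUPTIBLE | TASK_UNINTERRUPTIBLE)
#define TASK_ALL		(TASK_NORMAL | __TASK_STOPPED | __TASK_TRACED)

/* killable sleepers still count as uninterruptible for the load average */
#define task_contributes_to_load(task)	\
		((task->state & TASK_UNINTERRUPTIBLE) != 0)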
@@ -4151,8 +4148,10 @@ do_wait_for_common(struct completion *x, long timeout, int state)
 		wait.flags |= WQ_FLAG_EXCLUSIVE;
 		__add_wait_queue_tail(&x->wait, &wait);
 		do {
-			if (state == TASK_INTERRUPTIBLE &&
-			    signal_pending(current)) {
+			if ((state == TASK_INTERRUPTIBLE &&
+			     signal_pending(current)) ||
+			    (state == TASK_KILLABLE &&
+			     fatal_signal_pending(current))) {
 				__remove_wait_queue(&x->wait, &wait);
 				return -ERESTARTSYS;
 			}
@@ -4212,6 +4211,15 @@ wait_for_completion_interruptible_timeout(struct completion *x,
 }
 EXPORT_SYMBOL(wait_for_completion_interruptible_timeout);
 
+int __sched wait_for_completion_killable(struct completion *x)
+{
+	long t = wait_for_common(x, MAX_SCHEDULE_TIMEOUT, TASK_KILLABLE);
+	if (t == -ERESTARTSYS)
+		return t;
+	return 0;
+}
+EXPORT_SYMBOL(wait_for_completion_killable);
+
 static long __sched
 sleep_on_common(wait_queue_head_t *q, int state, long timeout)
 {
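For reference, a hypothetical caller of the wait_for_completion_killable() added above (not from this diff; demo_done and demo_wait_for_done are made-up names) could look like:

/*
 * Wait for a completion but let SIGKILL through, propagating
 * -ERESTARTSYS back to the caller instead of sleeping forever.
 */
#include <linux/completion.h>

static DECLARE_COMPLETION(demo_done);

static int demo_wait_for_done(void)
{
	int ret = wait_for_completion_killable(&demo_done);

	if (ret)	/* -ERESTARTSYS: a fatal signal arrived */
		return ret;

	/* the completion was signalled via complete(&demo_done) */
	return 0;
}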