author	Linus Torvalds <torvalds@linux-foundation.org>	2008-01-31 19:45:47 -0500
committer	Linus Torvalds <torvalds@linux-foundation.org>	2008-01-31 19:45:47 -0500
commit	75659ca0c10992dcb39258518368a0f6f56e935d (patch)
tree	5d014ceb2f10158061a23d0d976f9a613d85e659
parent	fbdde7bd274d74729954190f99afcb1e3d9bbfba (diff)
parent	2dfe485a2c8afa54cb069fcf48476f6c90ea3fdf (diff)
Merge branch 'task_killable' of git://git.kernel.org/pub/scm/linux/kernel/git/willy/misc
* 'task_killable' of git://git.kernel.org/pub/scm/linux/kernel/git/willy/misc: (22 commits)
  Remove commented-out code copied from NFS
  NFS: Switch from intr mount option to TASK_KILLABLE
  Add wait_for_completion_killable
  Add wait_event_killable
  Add schedule_timeout_killable
  Use mutex_lock_killable in vfs_readdir
  Add mutex_lock_killable
  Use lock_page_killable
  Add lock_page_killable
  Add fatal_signal_pending
  Add TASK_WAKEKILL
  exit: Use task_is_*
  signal: Use task_is_*
  sched: Use task_contributes_to_load, TASK_ALL and TASK_NORMAL
  ptrace: Use task_is_*
  power: Use task_is_*
  wait: Use TASK_NORMAL
  proc/base.c: Use task_is_*
  proc/array.c: Use TASK_REPORT
  perfmon: Use task_is_*
  ...

Fixed up conflicts in NFS/sunrpc manually..
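The series converges on one idea: TASK_KILLABLE is a sleep that ignores ordinary signals the way TASK_UNINTERRUPTIBLE does, yet is still woken by a fatal signal such as SIGKILL, which makes "intr"-style NFS mounts unnecessary. A minimal sketch of the resulting calling convention (the waitqueue and flag here are hypothetical, not part of this merge):

	static DECLARE_WAIT_QUEUE_HEAD(reply_wq);
	static int reply_arrived;

	static int wait_for_reply(void)
	{
		/* Sleeps like TASK_UNINTERRUPTIBLE, but SIGKILL breaks the wait. */
		int err = wait_event_killable(reply_wq, reply_arrived);
		if (err)
			return err;	/* -ERESTARTSYS: the caller was killed */
		return 0;
	}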
-rw-r--r--	arch/ia64/kernel/perfmon.c	|  4
-rw-r--r--	fs/eventpoll.c	| 11
-rw-r--r--	fs/nfs/client.c	|  6
-rw-r--r--	fs/nfs/direct.c	|  7
-rw-r--r--	fs/nfs/inode.c	|  6
-rw-r--r--	fs/nfs/mount_clnt.c	|  2
-rw-r--r--	fs/nfs/nfs3proc.c	|  7
-rw-r--r--	fs/nfs/nfs4proc.c	| 27
-rw-r--r--	fs/nfs/nfsroot.c	|  3
-rw-r--r--	fs/nfs/pagelist.c	| 19
-rw-r--r--	fs/nfs/super.c	|  4
-rw-r--r--	fs/nfs/write.c	|  2
-rw-r--r--	fs/proc/array.c	|  7
-rw-r--r--	fs/proc/base.c	|  2
-rw-r--r--	fs/readdir.c	|  5
-rw-r--r--	fs/smbfs/request.c	|  2
-rw-r--r--	include/linux/completion.h	|  1
-rw-r--r--	include/linux/mutex.h	|  5
-rw-r--r--	include/linux/nfs_fs.h	|  9
-rw-r--r--	include/linux/nfs_mount.h	|  2
-rw-r--r--	include/linux/pagemap.h	| 14
-rw-r--r--	include/linux/sched.h	| 36
-rw-r--r--	include/linux/sunrpc/clnt.h	|  4
-rw-r--r--	include/linux/sunrpc/sched.h	|  2
-rw-r--r--	include/linux/wait.h	| 52
-rw-r--r--	kernel/exit.c	| 88
-rw-r--r--	kernel/mutex.c	| 36
-rw-r--r--	kernel/power/process.c	|  6
-rw-r--r--	kernel/ptrace.c	|  8
-rw-r--r--	kernel/sched.c	| 28
-rw-r--r--	kernel/signal.c	| 19
-rw-r--r--	kernel/timer.c	|  7
-rw-r--r--	kernel/wait.c	|  2
-rw-r--r--	mm/filemap.c	| 25
-rw-r--r--	net/sunrpc/auth.c	|  3
-rw-r--r--	net/sunrpc/clnt.c	| 55
-rw-r--r--	net/sunrpc/rpcb_clnt.c	|  3
-rw-r--r--	net/sunrpc/sched.c	| 15
38 files changed, 282 insertions(+), 252 deletions(-)
diff --git a/arch/ia64/kernel/perfmon.c b/arch/ia64/kernel/perfmon.c
index 73e7c2e40b54..5ae177f557d8 100644
--- a/arch/ia64/kernel/perfmon.c
+++ b/arch/ia64/kernel/perfmon.c
@@ -2631,7 +2631,7 @@ pfm_task_incompatible(pfm_context_t *ctx, struct task_struct *task)
 	 */
 	if (task == current) return 0;
 
-	if ((task->state != TASK_STOPPED) && (task->state != TASK_TRACED)) {
+	if (!task_is_stopped_or_traced(task)) {
 		DPRINT(("cannot attach to non-stopped task [%d] state=%ld\n", task_pid_nr(task), task->state));
 		return -EBUSY;
 	}
@@ -4792,7 +4792,7 @@ recheck:
 	 * the task must be stopped.
 	 */
 	if (PFM_CMD_STOPPED(cmd)) {
-		if ((task->state != TASK_STOPPED) && (task->state != TASK_TRACED)) {
+		if (!task_is_stopped_or_traced(task)) {
 			DPRINT(("[%d] task not in stopped state\n", task_pid_nr(task)));
 			return -EBUSY;
 		}
diff --git a/fs/eventpoll.c b/fs/eventpoll.c
index 34f68f3a069a..81c04abfb1aa 100644
--- a/fs/eventpoll.c
+++ b/fs/eventpoll.c
@@ -656,8 +656,7 @@ is_linked:
 		 * wait list.
 		 */
 		if (waitqueue_active(&ep->wq))
-			__wake_up_locked(&ep->wq, TASK_UNINTERRUPTIBLE |
-					 TASK_INTERRUPTIBLE);
+			wake_up_locked(&ep->wq);
 		if (waitqueue_active(&ep->poll_wait))
 			pwake++;
 
@@ -780,7 +779,7 @@ static int ep_insert(struct eventpoll *ep, struct epoll_event *event,
 
 		/* Notify waiting tasks that events are available */
 		if (waitqueue_active(&ep->wq))
-			__wake_up_locked(&ep->wq, TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE);
+			wake_up_locked(&ep->wq);
 		if (waitqueue_active(&ep->poll_wait))
 			pwake++;
 	}
@@ -854,8 +853,7 @@ static int ep_modify(struct eventpoll *ep, struct epitem *epi, struct epoll_even
 
 		/* Notify waiting tasks that events are available */
 		if (waitqueue_active(&ep->wq))
-			__wake_up_locked(&ep->wq, TASK_UNINTERRUPTIBLE |
-					 TASK_INTERRUPTIBLE);
+			wake_up_locked(&ep->wq);
 		if (waitqueue_active(&ep->poll_wait))
 			pwake++;
 	}
@@ -978,8 +976,7 @@ errxit:
 		 * wait list (delayed after we release the lock).
 		 */
 		if (waitqueue_active(&ep->wq))
-			__wake_up_locked(&ep->wq, TASK_UNINTERRUPTIBLE |
-					 TASK_INTERRUPTIBLE);
+			wake_up_locked(&ep->wq);
 		if (waitqueue_active(&ep->poll_wait))
 			pwake++;
 	}
diff --git a/fs/nfs/client.c b/fs/nfs/client.c
index 685c43f810c1..c5c0175898f6 100644
--- a/fs/nfs/client.c
+++ b/fs/nfs/client.c
@@ -386,7 +386,7 @@ found_client:
 	if (new)
 		nfs_free_client(new);
 
-	error = wait_event_interruptible(nfs_client_active_wq,
+	error = wait_event_killable(nfs_client_active_wq,
 			clp->cl_cons_state != NFS_CS_INITING);
 	if (error < 0) {
 		nfs_put_client(clp);
@@ -589,10 +589,6 @@ static int nfs_init_server_rpcclient(struct nfs_server *server,
 	if (server->flags & NFS_MOUNT_SOFT)
 		server->client->cl_softrtry = 1;
 
-	server->client->cl_intr = 0;
-	if (server->flags & NFS4_MOUNT_INTR)
-		server->client->cl_intr = 1;
-
 	return 0;
 }
 
diff --git a/fs/nfs/direct.c b/fs/nfs/direct.c
index f8e165c7d5a6..16844f98f50e 100644
--- a/fs/nfs/direct.c
+++ b/fs/nfs/direct.c
@@ -188,17 +188,12 @@ static void nfs_direct_req_release(struct nfs_direct_req *dreq)
 static ssize_t nfs_direct_wait(struct nfs_direct_req *dreq)
 {
 	ssize_t result = -EIOCBQUEUED;
-	struct rpc_clnt *clnt;
-	sigset_t oldset;
 
 	/* Async requests don't wait here */
 	if (dreq->iocb)
 		goto out;
 
-	clnt = NFS_CLIENT(dreq->inode);
-	rpc_clnt_sigmask(clnt, &oldset);
-	result = wait_for_completion_interruptible(&dreq->completion);
-	rpc_clnt_sigunmask(clnt, &oldset);
+	result = wait_for_completion_killable(&dreq->completion);
 
 	if (!result)
 		result = dreq->error;
diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c
index 3f332e54e760..966a8850aa30 100644
--- a/fs/nfs/inode.c
+++ b/fs/nfs/inode.c
@@ -433,15 +433,11 @@ static int nfs_wait_schedule(void *word)
  */
 static int nfs_wait_on_inode(struct inode *inode)
 {
-	struct rpc_clnt *clnt = NFS_CLIENT(inode);
 	struct nfs_inode *nfsi = NFS_I(inode);
-	sigset_t oldmask;
 	int error;
 
-	rpc_clnt_sigmask(clnt, &oldmask);
 	error = wait_on_bit_lock(&nfsi->flags, NFS_INO_REVALIDATING,
-					nfs_wait_schedule, TASK_INTERRUPTIBLE);
-	rpc_clnt_sigunmask(clnt, &oldmask);
+					nfs_wait_schedule, TASK_KILLABLE);
 
 	return error;
 }
diff --git a/fs/nfs/mount_clnt.c b/fs/nfs/mount_clnt.c
index 8afd9f7e7a97..49c7cd0502cc 100644
--- a/fs/nfs/mount_clnt.c
+++ b/fs/nfs/mount_clnt.c
@@ -56,7 +56,7 @@ int nfs_mount(struct sockaddr *addr, size_t len, char *hostname, char *path,
 		.program	= &mnt_program,
 		.version	= version,
 		.authflavor	= RPC_AUTH_UNIX,
-		.flags		= RPC_CLNT_CREATE_INTR,
+		.flags		= 0,
 	};
 	struct rpc_clnt *mnt_clnt;
 	int status;
diff --git a/fs/nfs/nfs3proc.c b/fs/nfs/nfs3proc.c
index b353c1a05bfd..549dbce714a4 100644
--- a/fs/nfs/nfs3proc.c
+++ b/fs/nfs/nfs3proc.c
@@ -27,17 +27,14 @@
 static int
 nfs3_rpc_wrapper(struct rpc_clnt *clnt, struct rpc_message *msg, int flags)
 {
-	sigset_t oldset;
 	int res;
-	rpc_clnt_sigmask(clnt, &oldset);
 	do {
 		res = rpc_call_sync(clnt, msg, flags);
 		if (res != -EJUKEBOX)
 			break;
-		schedule_timeout_interruptible(NFS_JUKEBOX_RETRY_TIME);
+		schedule_timeout_killable(NFS_JUKEBOX_RETRY_TIME);
 		res = -ERESTARTSYS;
-	} while (!signalled());
-	rpc_clnt_sigunmask(clnt, &oldset);
+	} while (!fatal_signal_pending(current));
 	return res;
 }
 
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
index 5c189bd57eb2..027e1095256e 100644
--- a/fs/nfs/nfs4proc.c
+++ b/fs/nfs/nfs4proc.c
@@ -316,12 +316,9 @@ static void nfs4_opendata_put(struct nfs4_opendata *p)
 
 static int nfs4_wait_for_completion_rpc_task(struct rpc_task *task)
 {
-	sigset_t oldset;
 	int ret;
 
-	rpc_clnt_sigmask(task->tk_client, &oldset);
 	ret = rpc_wait_for_completion_task(task);
-	rpc_clnt_sigunmask(task->tk_client, &oldset);
 	return ret;
 }
 
@@ -2785,9 +2782,9 @@ nfs4_async_handle_error(struct rpc_task *task, const struct nfs_server *server)
 	return 0;
 }
 
-static int nfs4_wait_bit_interruptible(void *word)
+static int nfs4_wait_bit_killable(void *word)
 {
-	if (signal_pending(current))
+	if (fatal_signal_pending(current))
 		return -ERESTARTSYS;
 	schedule();
 	return 0;
@@ -2795,18 +2792,14 @@ static int nfs4_wait_bit_killable(void *word)
 
 static int nfs4_wait_clnt_recover(struct rpc_clnt *clnt, struct nfs_client *clp)
 {
-	sigset_t oldset;
 	int res;
 
 	might_sleep();
 
 	rwsem_acquire(&clp->cl_sem.dep_map, 0, 0, _RET_IP_);
 
-	rpc_clnt_sigmask(clnt, &oldset);
 	res = wait_on_bit(&clp->cl_state, NFS4CLNT_STATE_RECOVER,
-			nfs4_wait_bit_interruptible,
-			TASK_INTERRUPTIBLE);
-	rpc_clnt_sigunmask(clnt, &oldset);
+			nfs4_wait_bit_killable, TASK_KILLABLE);
 
 	rwsem_release(&clp->cl_sem.dep_map, 1, _RET_IP_);
 	return res;
@@ -2814,7 +2807,6 @@ static int nfs4_wait_clnt_recover(struct rpc_clnt *clnt, struct nfs_client *clp)
 
 static int nfs4_delay(struct rpc_clnt *clnt, long *timeout)
 {
-	sigset_t oldset;
 	int res = 0;
 
 	might_sleep();
@@ -2823,14 +2815,9 @@ static int nfs4_delay(struct rpc_clnt *clnt, long *timeout)
 		*timeout = NFS4_POLL_RETRY_MIN;
 	if (*timeout > NFS4_POLL_RETRY_MAX)
 		*timeout = NFS4_POLL_RETRY_MAX;
-	rpc_clnt_sigmask(clnt, &oldset);
-	if (clnt->cl_intr) {
-		schedule_timeout_interruptible(*timeout);
-		if (signalled())
-			res = -ERESTARTSYS;
-	} else
-		schedule_timeout_uninterruptible(*timeout);
-	rpc_clnt_sigunmask(clnt, &oldset);
+	schedule_timeout_killable(*timeout);
+	if (fatal_signal_pending(current))
+		res = -ERESTARTSYS;
 	*timeout <<= 1;
 	return res;
 }
@@ -3069,7 +3056,7 @@ int nfs4_proc_delegreturn(struct inode *inode, struct rpc_cred *cred, const nfs4
 static unsigned long
 nfs4_set_lock_task_retry(unsigned long timeout)
 {
-	schedule_timeout_interruptible(timeout);
+	schedule_timeout_killable(timeout);
 	timeout <<= 1;
 	if (timeout > NFS4_LOCK_MAXTIMEOUT)
 		return NFS4_LOCK_MAXTIMEOUT;
diff --git a/fs/nfs/nfsroot.c b/fs/nfs/nfsroot.c
index 4b0334590ee5..531379d36823 100644
--- a/fs/nfs/nfsroot.c
+++ b/fs/nfs/nfsroot.c
@@ -228,10 +228,7 @@ static int __init root_nfs_parse(char *name, char *buf)
 			nfs_data.flags &= ~NFS_MOUNT_SOFT;
 			break;
 		case Opt_intr:
-			nfs_data.flags |= NFS_MOUNT_INTR;
-			break;
 		case Opt_nointr:
-			nfs_data.flags &= ~NFS_MOUNT_INTR;
 			break;
 		case Opt_posix:
 			nfs_data.flags |= NFS_MOUNT_POSIX;
diff --git a/fs/nfs/pagelist.c b/fs/nfs/pagelist.c
index 3b3dbb94393d..7f079209d70a 100644
--- a/fs/nfs/pagelist.c
+++ b/fs/nfs/pagelist.c
@@ -58,7 +58,6 @@ nfs_create_request(struct nfs_open_context *ctx, struct inode *inode,
 		struct page *page,
 		unsigned int offset, unsigned int count)
 {
-	struct nfs_server *server = NFS_SERVER(inode);
 	struct nfs_page		*req;
 
 	for (;;) {
@@ -67,7 +66,7 @@ nfs_create_request(struct nfs_open_context *ctx, struct inode *inode,
 		if (req != NULL)
 			break;
 
-		if (signalled() && (server->flags & NFS_MOUNT_INTR))
+		if (fatal_signal_pending(current))
 			return ERR_PTR(-ERESTARTSYS);
 		yield();
 	}
@@ -177,11 +176,11 @@ void nfs_release_request(struct nfs_page *req)
 	kref_put(&req->wb_kref, nfs_free_request);
 }
 
-static int nfs_wait_bit_interruptible(void *word)
+static int nfs_wait_bit_killable(void *word)
 {
 	int ret = 0;
 
-	if (signal_pending(current))
+	if (fatal_signal_pending(current))
 		ret = -ERESTARTSYS;
 	else
 		schedule();
@@ -192,26 +191,18 @@ static int nfs_wait_bit_killable(void *word)
  * nfs_wait_on_request - Wait for a request to complete.
  * @req: request to wait upon.
  *
- * Interruptible by signals only if mounted with intr flag.
+ * Interruptible by fatal signals only.
  * The user is responsible for holding a count on the request.
 */
 int
 nfs_wait_on_request(struct nfs_page *req)
 {
-	struct rpc_clnt	*clnt = NFS_CLIENT(req->wb_context->path.dentry->d_inode);
-	sigset_t oldmask;
 	int ret = 0;
 
 	if (!test_bit(PG_BUSY, &req->wb_flags))
 		goto out;
-	/*
-	 * Note: the call to rpc_clnt_sigmask() suffices to ensure that we
-	 *	 are not interrupted if intr flag is not set
-	 */
-	rpc_clnt_sigmask(clnt, &oldmask);
 	ret = out_of_line_wait_on_bit(&req->wb_flags, PG_BUSY,
-			nfs_wait_bit_interruptible, TASK_INTERRUPTIBLE);
-	rpc_clnt_sigunmask(clnt, &oldmask);
+			nfs_wait_bit_killable, TASK_KILLABLE);
 out:
 	return ret;
 }
diff --git a/fs/nfs/super.c b/fs/nfs/super.c
index 22c49c02897d..7f4505f6ac6f 100644
--- a/fs/nfs/super.c
+++ b/fs/nfs/super.c
@@ -448,7 +448,6 @@ static void nfs_show_mount_options(struct seq_file *m, struct nfs_server *nfss,
 		const char *nostr;
 	} nfs_info[] = {
 		{ NFS_MOUNT_SOFT, ",soft", ",hard" },
-		{ NFS_MOUNT_INTR, ",intr", ",nointr" },
 		{ NFS_MOUNT_NOCTO, ",nocto", "" },
 		{ NFS_MOUNT_NOAC, ",noac", "" },
 		{ NFS_MOUNT_NONLM, ",nolock", "" },
@@ -708,10 +707,7 @@ static int nfs_parse_mount_options(char *raw,
 			mnt->flags &= ~NFS_MOUNT_SOFT;
 			break;
 		case Opt_intr:
-			mnt->flags |= NFS_MOUNT_INTR;
-			break;
 		case Opt_nointr:
-			mnt->flags &= ~NFS_MOUNT_INTR;
 			break;
 		case Opt_posix:
 			mnt->flags |= NFS_MOUNT_POSIX;
diff --git a/fs/nfs/write.c b/fs/nfs/write.c
index 5ac5b27b639a..522efff3e2c5 100644
--- a/fs/nfs/write.c
+++ b/fs/nfs/write.c
@@ -488,7 +488,7 @@ int nfs_reschedule_unstable_write(struct nfs_page *req)
 /*
  * Wait for a request to complete.
  *
- * Interruptible by signals only if mounted with intr flag.
+ * Interruptible by fatal signals only.
 */
 static int nfs_wait_on_requests_locked(struct inode *inode, pgoff_t idx_start, unsigned int npages)
 {
diff --git a/fs/proc/array.c b/fs/proc/array.c
index eb97f2897e2b..b380313092bd 100644
--- a/fs/proc/array.c
+++ b/fs/proc/array.c
@@ -141,12 +141,7 @@ static const char *task_state_array[] = {
 
 static inline const char *get_task_state(struct task_struct *tsk)
 {
-	unsigned int state = (tsk->state & (TASK_RUNNING |
-					    TASK_INTERRUPTIBLE |
-					    TASK_UNINTERRUPTIBLE |
-					    TASK_STOPPED |
-					    TASK_TRACED)) |
-			tsk->exit_state;
+	unsigned int state = (tsk->state & TASK_REPORT) | tsk->exit_state;
 	const char **p = &task_state_array[0];
 
 	while (state) {
diff --git a/fs/proc/base.c b/fs/proc/base.c
index 91fa8e6ce8ad..9fa9708cc715 100644
--- a/fs/proc/base.c
+++ b/fs/proc/base.c
@@ -199,7 +199,7 @@ static int proc_root_link(struct inode *inode, struct dentry **dentry, struct vf
 	(task == current || \
 	(task->parent == current && \
 	(task->ptrace & PT_PTRACED) && \
-	(task->state == TASK_STOPPED || task->state == TASK_TRACED) && \
+	(task_is_stopped_or_traced(task)) && \
 	security_ptrace(current,task) == 0))
 
 struct mm_struct *mm_for_maps(struct task_struct *task)
diff --git a/fs/readdir.c b/fs/readdir.c
index efe52e676577..4e026e5407fb 100644
--- a/fs/readdir.c
+++ b/fs/readdir.c
@@ -30,7 +30,10 @@ int vfs_readdir(struct file *file, filldir_t filler, void *buf)
 	if (res)
 		goto out;
 
-	mutex_lock(&inode->i_mutex);
+	res = mutex_lock_killable(&inode->i_mutex);
+	if (res)
+		goto out;
+
 	res = -ENOENT;
 	if (!IS_DEADDIR(inode)) {
 		res = file->f_op->readdir(file, buf, filler);
diff --git a/fs/smbfs/request.c b/fs/smbfs/request.c
index ca4b2d59c0ca..45f45933e862 100644
--- a/fs/smbfs/request.c
+++ b/fs/smbfs/request.c
@@ -105,7 +105,7 @@ struct smb_request *smb_alloc_request(struct smb_sb_info *server, int bufsize)
 		if (nfs_try_to_free_pages(server))
 			continue;
 
-		if (signalled() && (server->flags & NFS_MOUNT_INTR))
+		if (fatal_signal_pending(current))
 			return ERR_PTR(-ERESTARTSYS);
 		current->policy = SCHED_YIELD;
 		schedule();
diff --git a/include/linux/completion.h b/include/linux/completion.h
index 33d6aaf94447..d2961b66d53d 100644
--- a/include/linux/completion.h
+++ b/include/linux/completion.h
@@ -44,6 +44,7 @@ static inline void init_completion(struct completion *x)
 
 extern void wait_for_completion(struct completion *);
 extern int wait_for_completion_interruptible(struct completion *x);
+extern int wait_for_completion_killable(struct completion *x);
 extern unsigned long wait_for_completion_timeout(struct completion *x,
 						   unsigned long timeout);
 extern unsigned long wait_for_completion_interruptible_timeout(
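For context, a hedged sketch of how a caller might use the new declaration; the completion object and the producer side here are hypothetical, not from this merge:

	struct completion setup_done;

	init_completion(&setup_done);
	queue_work(wq, &setup_work);	/* hypothetical producer; calls complete(&setup_done) */

	/* Block like wait_for_completion(), except SIGKILL ends the wait. */
	if (wait_for_completion_killable(&setup_done))
		return -ERESTARTSYS;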
diff --git a/include/linux/mutex.h b/include/linux/mutex.h
index 601479772b98..05c590352dd7 100644
--- a/include/linux/mutex.h
+++ b/include/linux/mutex.h
@@ -125,15 +125,20 @@ static inline int fastcall mutex_is_locked(struct mutex *lock)
 extern void mutex_lock_nested(struct mutex *lock, unsigned int subclass);
 extern int __must_check mutex_lock_interruptible_nested(struct mutex *lock,
 					unsigned int subclass);
+extern int __must_check mutex_lock_killable_nested(struct mutex *lock,
+					unsigned int subclass);
 
 #define mutex_lock(lock) mutex_lock_nested(lock, 0)
 #define mutex_lock_interruptible(lock) mutex_lock_interruptible_nested(lock, 0)
+#define mutex_lock_killable(lock) mutex_lock_killable_nested(lock, 0)
 #else
 extern void fastcall mutex_lock(struct mutex *lock);
 extern int __must_check fastcall mutex_lock_interruptible(struct mutex *lock);
+extern int __must_check fastcall mutex_lock_killable(struct mutex *lock);
 
 # define mutex_lock_nested(lock, subclass) mutex_lock(lock)
 # define mutex_lock_interruptible_nested(lock, subclass) mutex_lock_interruptible(lock)
+# define mutex_lock_killable_nested(lock, subclass) mutex_lock_killable(lock)
 #endif
 
 /*
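A sketch of the caller-side pattern these declarations enable; this is the same shape vfs_readdir takes in the fs/readdir.c hunk above:

	int res;

	res = mutex_lock_killable(&inode->i_mutex);
	if (res)
		return res;	/* a fatal signal arrived before the lock was taken */
	/* ... critical section ... */
	mutex_unlock(&inode->i_mutex);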
diff --git a/include/linux/nfs_fs.h b/include/linux/nfs_fs.h
index 099ddb4481c0..a69ba80f2dfe 100644
--- a/include/linux/nfs_fs.h
+++ b/include/linux/nfs_fs.h
@@ -556,14 +556,7 @@ extern void * nfs_root_data(void);
 
 #define nfs_wait_event(clnt, wq, condition)				\
 ({									\
-	int __retval = 0;						\
-	if (clnt->cl_intr) {						\
-		sigset_t oldmask;					\
-		rpc_clnt_sigmask(clnt, &oldmask);			\
-		__retval = wait_event_interruptible(wq, condition);	\
-		rpc_clnt_sigunmask(clnt, &oldmask);			\
-	} else								\
-		wait_event(wq, condition);				\
+	int __retval = wait_event_killable(wq, condition);		\
 	__retval;							\
 })
 
diff --git a/include/linux/nfs_mount.h b/include/linux/nfs_mount.h
index a3ade89a64d2..df7c6b7a7ebb 100644
--- a/include/linux/nfs_mount.h
+++ b/include/linux/nfs_mount.h
@@ -48,7 +48,7 @@ struct nfs_mount_data {
 /* bits in the flags field */
 
 #define NFS_MOUNT_SOFT		0x0001	/* 1 */
-#define NFS_MOUNT_INTR		0x0002	/* 1 */
+#define NFS_MOUNT_INTR		0x0002	/* 1 */ /* now unused, but ABI */
 #define NFS_MOUNT_SECURE	0x0004	/* 1 */
 #define NFS_MOUNT_POSIX	0x0008	/* 1 */
 #define NFS_MOUNT_NOCTO	0x0010	/* 1 */
diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
index db8a410ae9e1..4b62a105622b 100644
--- a/include/linux/pagemap.h
+++ b/include/linux/pagemap.h
@@ -157,6 +157,7 @@ static inline pgoff_t linear_page_index(struct vm_area_struct *vma,
 }
 
 extern void FASTCALL(__lock_page(struct page *page));
+extern int FASTCALL(__lock_page_killable(struct page *page));
 extern void FASTCALL(__lock_page_nosync(struct page *page));
 extern void FASTCALL(unlock_page(struct page *page));
 
@@ -171,6 +172,19 @@ static inline void lock_page(struct page *page)
 }
 
 /*
+ * lock_page_killable is like lock_page but can be interrupted by fatal
+ * signals.  It returns 0 if it locked the page and -EINTR if it was
+ * killed while waiting.
+ */
+static inline int lock_page_killable(struct page *page)
+{
+	might_sleep();
+	if (TestSetPageLocked(page))
+		return __lock_page_killable(page);
+	return 0;
+}
+
+/*
  * lock_page_nosync should only be used if we can't pin the page's inode.
 * Doesn't play quite so well with block device plugging.
 */
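A hedged usage sketch, mirroring the read path in the mm/filemap.c hunks later in this diff (surrounding error handling elided):

	if (lock_page_killable(page)) {
		/* Returned -EINTR: a fatal signal arrived while waiting. */
		return -EIO;	/* mm/filemap.c maps this case to -EIO */
	}
	/* ... the page is locked here ... */
	unlock_page(page);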
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 9d4797609aa5..6c333579d9da 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -172,13 +172,35 @@ print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
 #define TASK_RUNNING		0
 #define TASK_INTERRUPTIBLE	1
 #define TASK_UNINTERRUPTIBLE	2
-#define TASK_STOPPED		4
-#define TASK_TRACED		8
+#define __TASK_STOPPED		4
+#define __TASK_TRACED		8
 /* in tsk->exit_state */
 #define EXIT_ZOMBIE		16
 #define EXIT_DEAD		32
 /* in tsk->state again */
 #define TASK_DEAD		64
+#define TASK_WAKEKILL		128
+
+/* Convenience macros for the sake of set_task_state */
+#define TASK_KILLABLE		(TASK_WAKEKILL | TASK_UNINTERRUPTIBLE)
+#define TASK_STOPPED		(TASK_WAKEKILL | __TASK_STOPPED)
+#define TASK_TRACED		(TASK_WAKEKILL | __TASK_TRACED)
+
+/* Convenience macros for the sake of wake_up */
+#define TASK_NORMAL		(TASK_INTERRUPTIBLE | TASK_UNINTERRUPTIBLE)
+#define TASK_ALL		(TASK_NORMAL | __TASK_STOPPED | __TASK_TRACED)
+
+/* get_task_state() */
+#define TASK_REPORT		(TASK_RUNNING | TASK_INTERRUPTIBLE | \
+				 TASK_UNINTERRUPTIBLE | __TASK_STOPPED | \
+				 __TASK_TRACED)
+
+#define task_is_traced(task)	((task->state & __TASK_TRACED) != 0)
+#define task_is_stopped(task)	((task->state & __TASK_STOPPED) != 0)
+#define task_is_stopped_or_traced(task)	\
+			((task->state & (__TASK_STOPPED | __TASK_TRACED)) != 0)
+#define task_contributes_to_load(task)	\
+				((task->state & TASK_UNINTERRUPTIBLE) != 0)
 
 #define __set_task_state(tsk, state_value)		\
 	do { (tsk)->state = (state_value); } while (0)
@@ -302,6 +324,7 @@ extern int in_sched_functions(unsigned long addr);
 #define	MAX_SCHEDULE_TIMEOUT	LONG_MAX
 extern signed long FASTCALL(schedule_timeout(signed long timeout));
 extern signed long schedule_timeout_interruptible(signed long timeout);
+extern signed long schedule_timeout_killable(signed long timeout);
 extern signed long schedule_timeout_uninterruptible(signed long timeout);
 asmlinkage void schedule(void);
 
@@ -1892,7 +1915,14 @@ static inline int signal_pending(struct task_struct *p)
 {
 	return unlikely(test_tsk_thread_flag(p,TIF_SIGPENDING));
 }
 
+extern int FASTCALL(__fatal_signal_pending(struct task_struct *p));
+
+static inline int fatal_signal_pending(struct task_struct *p)
+{
+	return signal_pending(p) && __fatal_signal_pending(p);
+}
+
 static inline int need_resched(void)
 {
 	return unlikely(test_thread_flag(TIF_NEED_RESCHED));
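A short sketch of how the new bits compose; nothing here is API beyond the macros defined above, and the "done" flag is hypothetical:

	/*
	 * TASK_KILLABLE == TASK_WAKEKILL | TASK_UNINTERRUPTIBLE, so this sleep
	 * is invisible to ordinary signals but matches wake_up_state(p,
	 * TASK_WAKEKILL), which signal_wake_up() below uses for fatal signals.
	 */
	set_current_state(TASK_KILLABLE);
	if (!done)
		schedule();
	__set_current_state(TASK_RUNNING);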
diff --git a/include/linux/sunrpc/clnt.h b/include/linux/sunrpc/clnt.h
index 3e9addc741c1..129a86e25d29 100644
--- a/include/linux/sunrpc/clnt.h
+++ b/include/linux/sunrpc/clnt.h
@@ -41,7 +41,6 @@ struct rpc_clnt {
 	struct rpc_iostats *	cl_metrics;	/* per-client statistics */
 
 	unsigned int		cl_softrtry : 1,/* soft timeouts */
-				cl_intr     : 1,/* interruptible */
 				cl_discrtry : 1,/* disconnect before retry */
 				cl_autobind : 1;/* use getport() */
 
@@ -111,7 +110,6 @@ struct rpc_create_args {
 
 /* Values for "flags" field */
 #define RPC_CLNT_CREATE_HARDRTRY	(1UL << 0)
-#define RPC_CLNT_CREATE_INTR		(1UL << 1)
 #define RPC_CLNT_CREATE_AUTOBIND	(1UL << 2)
 #define RPC_CLNT_CREATE_NONPRIVPORT	(1UL << 3)
 #define RPC_CLNT_CREATE_NOPING		(1UL << 4)
@@ -137,8 +135,6 @@ int rpc_call_sync(struct rpc_clnt *clnt, struct rpc_message *msg,
 struct rpc_task *rpc_call_null(struct rpc_clnt *clnt, struct rpc_cred *cred,
 			       int flags);
 void		rpc_restart_call(struct rpc_task *);
-void		rpc_clnt_sigmask(struct rpc_clnt *clnt, sigset_t *oldset);
-void		rpc_clnt_sigunmask(struct rpc_clnt *clnt, sigset_t *oldset);
 void		rpc_setbufsize(struct rpc_clnt *, unsigned int, unsigned int);
 size_t		rpc_max_payload(struct rpc_clnt *);
 void		rpc_force_rebind(struct rpc_clnt *);
diff --git a/include/linux/sunrpc/sched.h b/include/linux/sunrpc/sched.h
index ce3d1b132729..f689f02e6793 100644
--- a/include/linux/sunrpc/sched.h
+++ b/include/linux/sunrpc/sched.h
@@ -137,7 +137,6 @@ struct rpc_task_setup {
 #define RPC_TASK_DYNAMIC	0x0080		/* task was kmalloc'ed */
 #define RPC_TASK_KILLED	0x0100		/* task was killed */
 #define RPC_TASK_SOFT		0x0200		/* Use soft timeouts */
-#define RPC_TASK_NOINTR	0x0400		/* uninterruptible task */
 
 #define RPC_IS_ASYNC(t)	((t)->tk_flags & RPC_TASK_ASYNC)
 #define RPC_IS_SWAPPER(t)	((t)->tk_flags & RPC_TASK_SWAPPER)
@@ -145,7 +144,6 @@ struct rpc_task_setup {
 #define RPC_ASSASSINATED(t)	((t)->tk_flags & RPC_TASK_KILLED)
 #define RPC_DO_CALLBACK(t)	((t)->tk_callback != NULL)
 #define RPC_IS_SOFT(t)		((t)->tk_flags & RPC_TASK_SOFT)
-#define RPC_TASK_UNINTERRUPTIBLE(t)	((t)->tk_flags & RPC_TASK_NOINTR)
 
 #define RPC_TASK_RUNNING	0
 #define RPC_TASK_QUEUED	1
diff --git a/include/linux/wait.h b/include/linux/wait.h
index 0e686280450b..1f4fb0a81ecd 100644
--- a/include/linux/wait.h
+++ b/include/linux/wait.h
@@ -152,14 +152,15 @@ int FASTCALL(out_of_line_wait_on_bit(void *, int, int (*)(void *), unsigned));
 int FASTCALL(out_of_line_wait_on_bit_lock(void *, int, int (*)(void *), unsigned));
 wait_queue_head_t *FASTCALL(bit_waitqueue(void *, int));
 
-#define wake_up(x)			__wake_up(x, TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE, 1, NULL)
-#define wake_up_nr(x, nr)		__wake_up(x, TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE, nr, NULL)
-#define wake_up_all(x)			__wake_up(x, TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE, 0, NULL)
+#define wake_up(x)			__wake_up(x, TASK_NORMAL, 1, NULL)
+#define wake_up_nr(x, nr)		__wake_up(x, TASK_NORMAL, nr, NULL)
+#define wake_up_all(x)			__wake_up(x, TASK_NORMAL, 0, NULL)
+#define wake_up_locked(x)		__wake_up_locked((x), TASK_NORMAL)
+
 #define wake_up_interruptible(x)	__wake_up(x, TASK_INTERRUPTIBLE, 1, NULL)
 #define wake_up_interruptible_nr(x, nr)	__wake_up(x, TASK_INTERRUPTIBLE, nr, NULL)
 #define wake_up_interruptible_all(x)	__wake_up(x, TASK_INTERRUPTIBLE, 0, NULL)
-#define wake_up_locked(x)		__wake_up_locked((x), TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE)
-#define wake_up_interruptible_sync(x)   __wake_up_sync((x),TASK_INTERRUPTIBLE, 1)
+#define wake_up_interruptible_sync(x)	__wake_up_sync((x), TASK_INTERRUPTIBLE, 1)
 
 #define __wait_event(wq, condition)					\
 do {									\
@@ -345,6 +346,47 @@ do {									\
 	__ret;								\
 })
 
+#define __wait_event_killable(wq, condition, ret)			\
+do {									\
+	DEFINE_WAIT(__wait);						\
+									\
+	for (;;) {							\
+		prepare_to_wait(&wq, &__wait, TASK_KILLABLE);		\
+		if (condition)						\
+			break;						\
+		if (!fatal_signal_pending(current)) {			\
+			schedule();					\
+			continue;					\
+		}							\
+		ret = -ERESTARTSYS;					\
+		break;							\
+	}								\
+	finish_wait(&wq, &__wait);					\
+} while (0)
+
+/**
+ * wait_event_killable - sleep until a condition gets true
+ * @wq: the waitqueue to wait on
+ * @condition: a C expression for the event to wait for
+ *
+ * The process is put to sleep (TASK_KILLABLE) until the
+ * @condition evaluates to true or a signal is received.
+ * The @condition is checked each time the waitqueue @wq is woken up.
+ *
+ * wake_up() has to be called after changing any variable that could
+ * change the result of the wait condition.
+ *
+ * The function will return -ERESTARTSYS if it was interrupted by a
+ * signal and 0 if @condition evaluated to true.
+ */
+#define wait_event_killable(wq, condition)				\
+({									\
+	int __ret = 0;							\
+	if (!(condition))						\
+		__wait_event_killable(wq, condition, __ret);		\
+	__ret;								\
+})
+
 /*
 * Must be called with the spinlock in the wait_queue_head_t held.
 */
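An end-to-end sketch pairing the new macro with its wake-up side; the queue and flag are hypothetical:

	static DECLARE_WAIT_QUEUE_HEAD(ev_wq);
	static int ev_ready;

	/* Waiter: returns 0 once ev_ready is set, -ERESTARTSYS if killed. */
	err = wait_event_killable(ev_wq, ev_ready);

	/*
	 * Waker: TASK_NORMAL includes TASK_UNINTERRUPTIBLE, and a
	 * TASK_KILLABLE sleeper has that bit set, so plain wake_up() works.
	 */
	ev_ready = 1;
	wake_up(&ev_wq);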
diff --git a/kernel/exit.c b/kernel/exit.c
index 549c0558ba68..bfb1c0e940e8 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -249,7 +249,7 @@ static int has_stopped_jobs(struct pid *pgrp)
 	struct task_struct *p;
 
 	do_each_pid_task(pgrp, PIDTYPE_PGID, p) {
-		if (p->state != TASK_STOPPED)
+		if (!task_is_stopped(p))
 			continue;
 		retval = 1;
 		break;
@@ -614,7 +614,7 @@ reparent_thread(struct task_struct *p, struct task_struct *father, int traced)
 	p->parent = p->real_parent;
 	add_parent(p);
 
-	if (p->state == TASK_TRACED) {
+	if (task_is_traced(p)) {
 		/*
 		 * If it was at a trace stop, turn it into
 		 * a normal stop since it's no longer being
@@ -1563,60 +1563,51 @@ repeat:
 		}
 		allowed = 1;
 
-		switch (p->state) {
-		case TASK_TRACED:
-			/*
-			 * When we hit the race with PTRACE_ATTACH,
-			 * we will not report this child.  But the
-			 * race means it has not yet been moved to
-			 * our ptrace_children list, so we need to
-			 * set the flag here to avoid a spurious ECHILD
-			 * when the race happens with the only child.
-			 */
-			flag = 1;
-			if (!my_ptrace_child(p))
-				continue;
-			/*FALLTHROUGH*/
-		case TASK_STOPPED:
+		if (task_is_stopped_or_traced(p)) {
 			/*
 			 * It's stopped now, so it might later
 			 * continue, exit, or stop again.
+			 *
+			 * When we hit the race with PTRACE_ATTACH, we
+			 * will not report this child.  But the race
+			 * means it has not yet been moved to our
+			 * ptrace_children list, so we need to set the
+			 * flag here to avoid a spurious ECHILD when
+			 * the race happens with the only child.
			 */
 			flag = 1;
-			if (!(options & WUNTRACED) &&
-			    !my_ptrace_child(p))
-				continue;
+
+			if (!my_ptrace_child(p)) {
+				if (task_is_traced(p))
+					continue;
+				if (!(options & WUNTRACED))
+					continue;
+			}
+
 			retval = wait_task_stopped(p, ret == 2,
-						   (options & WNOWAIT),
-						   infop,
-						   stat_addr, ru);
+						   (options & WNOWAIT), infop,
+						   stat_addr, ru);
 			if (retval == -EAGAIN)
 				goto repeat;
 			if (retval != 0) /* He released the lock.  */
 				goto end;
-			break;
-		default:
-		// case EXIT_DEAD:
-			if (p->exit_state == EXIT_DEAD)
+		} else if (p->exit_state == EXIT_DEAD) {
+			continue;
+		} else if (p->exit_state == EXIT_ZOMBIE) {
+			/*
+			 * Eligible but we cannot release it yet:
+			 */
+			if (ret == 2)
+				goto check_continued;
+			if (!likely(options & WEXITED))
 				continue;
-		// case EXIT_ZOMBIE:
-			if (p->exit_state == EXIT_ZOMBIE) {
-				/*
-				 * Eligible but we cannot release
-				 * it yet:
-				 */
-				if (ret == 2)
-					goto check_continued;
-				if (!likely(options & WEXITED))
-					continue;
-				retval = wait_task_zombie(
-					p, (options & WNOWAIT),
-					infop, stat_addr, ru);
-				/* He released the lock.  */
-				if (retval != 0)
-					goto end;
-				break;
-			}
+			retval = wait_task_zombie(p,
+					(options & WNOWAIT), infop,
+					stat_addr, ru);
+			/* He released the lock.  */
+			if (retval != 0)
+				goto end;
+		} else {
 check_continued:
 			/*
 			 * It's running now, so it might later
@@ -1625,12 +1616,11 @@ check_continued:
 			flag = 1;
 			if (!unlikely(options & WCONTINUED))
 				continue;
-			retval = wait_task_continued(
-				p, (options & WNOWAIT),
-				infop, stat_addr, ru);
+			retval = wait_task_continued(p,
					(options & WNOWAIT), infop,
					stat_addr, ru);
 			if (retval != 0) /* He released the lock.  */
 				goto end;
-			break;
 		}
 	}
 	if (!flag) {
diff --git a/kernel/mutex.c b/kernel/mutex.c
index d7fe50cc556f..d9ec9b666250 100644
--- a/kernel/mutex.c
+++ b/kernel/mutex.c
@@ -166,9 +166,12 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
 		 * got a signal? (This code gets eliminated in the
 		 * TASK_UNINTERRUPTIBLE case.)
 		 */
-		if (unlikely(state == TASK_INTERRUPTIBLE &&
-					signal_pending(task))) {
-			mutex_remove_waiter(lock, &waiter, task_thread_info(task));
+		if (unlikely((state == TASK_INTERRUPTIBLE &&
+					signal_pending(task)) ||
+			     (state == TASK_KILLABLE &&
+					fatal_signal_pending(task)))) {
+			mutex_remove_waiter(lock, &waiter,
+					    task_thread_info(task));
 			mutex_release(&lock->dep_map, 1, ip);
 			spin_unlock_mutex(&lock->wait_lock, flags);
 
@@ -211,6 +214,14 @@ mutex_lock_nested(struct mutex *lock, unsigned int subclass)
 EXPORT_SYMBOL_GPL(mutex_lock_nested);
 
 int __sched
+mutex_lock_killable_nested(struct mutex *lock, unsigned int subclass)
+{
+	might_sleep();
+	return __mutex_lock_common(lock, TASK_KILLABLE, subclass, _RET_IP_);
+}
+EXPORT_SYMBOL_GPL(mutex_lock_killable_nested);
+
+int __sched
 mutex_lock_interruptible_nested(struct mutex *lock, unsigned int subclass)
 {
 	might_sleep();
@@ -272,6 +283,9 @@ __mutex_unlock_slowpath(atomic_t *lock_count)
 * mutex_lock_interruptible() and mutex_trylock().
 */
 static int fastcall noinline __sched
+__mutex_lock_killable_slowpath(atomic_t *lock_count);
+
+static noinline int fastcall __sched
 __mutex_lock_interruptible_slowpath(atomic_t *lock_count);
 
 /***
@@ -294,6 +308,14 @@ int fastcall __sched mutex_lock_interruptible(struct mutex *lock)
 
 EXPORT_SYMBOL(mutex_lock_interruptible);
 
+int fastcall __sched mutex_lock_killable(struct mutex *lock)
+{
+	might_sleep();
+	return __mutex_fastpath_lock_retval
+			(&lock->count, __mutex_lock_killable_slowpath);
+}
+EXPORT_SYMBOL(mutex_lock_killable);
+
 static void fastcall noinline __sched
 __mutex_lock_slowpath(atomic_t *lock_count)
 {
@@ -303,6 +325,14 @@ __mutex_lock_slowpath(atomic_t *lock_count)
 }
 
 static int fastcall noinline __sched
+__mutex_lock_killable_slowpath(atomic_t *lock_count)
+{
+	struct mutex *lock = container_of(lock_count, struct mutex, count);
+
+	return __mutex_lock_common(lock, TASK_KILLABLE, 0, _RET_IP_);
+}
+
+static noinline int fastcall __sched
 __mutex_lock_interruptible_slowpath(atomic_t *lock_count)
 {
 	struct mutex *lock = container_of(lock_count, struct mutex, count);
diff --git a/kernel/power/process.c b/kernel/power/process.c
index 6533923e711b..7c2118f9597f 100644
--- a/kernel/power/process.c
+++ b/kernel/power/process.c
@@ -86,9 +86,9 @@ static void fake_signal_wake_up(struct task_struct *p, int resume)
 
 static void send_fake_signal(struct task_struct *p)
 {
-	if (p->state == TASK_STOPPED)
+	if (task_is_stopped(p))
 		force_sig_specific(SIGSTOP, p);
-	fake_signal_wake_up(p, p->state == TASK_STOPPED);
+	fake_signal_wake_up(p, task_is_stopped(p));
 }
 
 static int has_mm(struct task_struct *p)
@@ -182,7 +182,7 @@ static int try_to_freeze_tasks(int freeze_user_space)
 			if (frozen(p) || !freezeable(p))
 				continue;
 
-			if (p->state == TASK_TRACED && frozen(p->parent)) {
+			if (task_is_traced(p) && frozen(p->parent)) {
 				cancel_freezing(p);
 				continue;
 			}
diff --git a/kernel/ptrace.c b/kernel/ptrace.c
index e6e9b8be4b05..b0d4ab4dfd3d 100644
--- a/kernel/ptrace.c
+++ b/kernel/ptrace.c
@@ -51,7 +51,7 @@ void __ptrace_link(struct task_struct *child, struct task_struct *new_parent)
 void ptrace_untrace(struct task_struct *child)
 {
 	spin_lock(&child->sighand->siglock);
-	if (child->state == TASK_TRACED) {
+	if (task_is_traced(child)) {
 		if (child->signal->flags & SIGNAL_STOP_STOPPED) {
 			child->state = TASK_STOPPED;
 		} else {
@@ -79,7 +79,7 @@ void __ptrace_unlink(struct task_struct *child)
 		add_parent(child);
 	}
 
-	if (child->state == TASK_TRACED)
+	if (task_is_traced(child))
 		ptrace_untrace(child);
 }
 
@@ -103,9 +103,9 @@ int ptrace_check_attach(struct task_struct *child, int kill)
 	    && child->signal != NULL) {
 		ret = 0;
 		spin_lock_irq(&child->sighand->siglock);
-		if (child->state == TASK_STOPPED) {
+		if (task_is_stopped(child)) {
 			child->state = TASK_TRACED;
-		} else if (child->state != TASK_TRACED && !kill) {
+		} else if (!task_is_traced(child) && !kill) {
 			ret = -ESRCH;
 		}
 		spin_unlock_irq(&child->sighand->siglock);
diff --git a/kernel/sched.c b/kernel/sched.c
index 8355e007e021..9474b23c28bf 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -1350,7 +1350,7 @@ static int effective_prio(struct task_struct *p)
 */
 static void activate_task(struct rq *rq, struct task_struct *p, int wakeup)
 {
-	if (p->state == TASK_UNINTERRUPTIBLE)
+	if (task_contributes_to_load(p))
 		rq->nr_uninterruptible--;
 
 	enqueue_task(rq, p, wakeup);
@@ -1362,7 +1362,7 @@ static void activate_task(struct rq *rq, struct task_struct *p, int wakeup)
 */
 static void deactivate_task(struct rq *rq, struct task_struct *p, int sleep)
 {
-	if (p->state == TASK_UNINTERRUPTIBLE)
+	if (task_contributes_to_load(p))
 		rq->nr_uninterruptible++;
 
 	dequeue_task(rq, p, sleep);
@@ -1895,8 +1895,7 @@ out:
 
 int fastcall wake_up_process(struct task_struct *p)
 {
-	return try_to_wake_up(p, TASK_STOPPED | TASK_TRACED |
-				 TASK_INTERRUPTIBLE | TASK_UNINTERRUPTIBLE, 0);
+	return try_to_wake_up(p, TASK_ALL, 0);
 }
 EXPORT_SYMBOL(wake_up_process);
 
@@ -4124,8 +4123,7 @@ void complete(struct completion *x)
 
 	spin_lock_irqsave(&x->wait.lock, flags);
 	x->done++;
-	__wake_up_common(&x->wait, TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE,
-			 1, 0, NULL);
+	__wake_up_common(&x->wait, TASK_NORMAL, 1, 0, NULL);
 	spin_unlock_irqrestore(&x->wait.lock, flags);
 }
 EXPORT_SYMBOL(complete);
@@ -4136,8 +4134,7 @@ void complete_all(struct completion *x)
 
 	spin_lock_irqsave(&x->wait.lock, flags);
 	x->done += UINT_MAX/2;
-	__wake_up_common(&x->wait, TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE,
-			 0, 0, NULL);
+	__wake_up_common(&x->wait, TASK_NORMAL, 0, 0, NULL);
 	spin_unlock_irqrestore(&x->wait.lock, flags);
 }
 EXPORT_SYMBOL(complete_all);
@@ -4151,8 +4148,10 @@ do_wait_for_common(struct completion *x, long timeout, int state)
 		wait.flags |= WQ_FLAG_EXCLUSIVE;
 		__add_wait_queue_tail(&x->wait, &wait);
 		do {
-			if (state == TASK_INTERRUPTIBLE &&
-			    signal_pending(current)) {
+			if ((state == TASK_INTERRUPTIBLE &&
+			     signal_pending(current)) ||
+			    (state == TASK_KILLABLE &&
+			     fatal_signal_pending(current))) {
 				__remove_wait_queue(&x->wait, &wait);
 				return -ERESTARTSYS;
 			}
@@ -4212,6 +4211,15 @@ wait_for_completion_interruptible_timeout(struct completion *x,
 }
 EXPORT_SYMBOL(wait_for_completion_interruptible_timeout);
 
+int __sched wait_for_completion_killable(struct completion *x)
+{
+	long t = wait_for_common(x, MAX_SCHEDULE_TIMEOUT, TASK_KILLABLE);
+	if (t == -ERESTARTSYS)
+		return t;
+	return 0;
+}
+EXPORT_SYMBOL(wait_for_completion_killable);
+
 static long __sched
 sleep_on_common(wait_queue_head_t *q, int state, long timeout)
 {
diff --git a/kernel/signal.c b/kernel/signal.c
index bf49ce6f016b..8054dd4e2d76 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -456,15 +456,15 @@ void signal_wake_up(struct task_struct *t, int resume)
 	set_tsk_thread_flag(t, TIF_SIGPENDING);
 
 	/*
-	 * For SIGKILL, we want to wake it up in the stopped/traced case.
-	 * We don't check t->state here because there is a race with it
+	 * For SIGKILL, we want to wake it up in the stopped/traced/killable
+	 * case. We don't check t->state here because there is a race with it
 	 * executing another processor and just now entering stopped state.
 	 * By using wake_up_state, we ensure the process will wake up and
 	 * handle its death signal.
 	 */
 	mask = TASK_INTERRUPTIBLE;
 	if (resume)
-		mask |= TASK_STOPPED | TASK_TRACED;
+		mask |= TASK_WAKEKILL;
 	if (!wake_up_state(t, mask))
 		kick_process(t);
 }
@@ -620,7 +620,7 @@ static void handle_stop_signal(int sig, struct task_struct *p)
 				 * Wake up the stopped thread _after_ setting
 				 * TIF_SIGPENDING
 				 */
-				state = TASK_STOPPED;
+				state = __TASK_STOPPED;
 				if (sig_user_defined(t, SIGCONT) && !sigismember(&t->blocked, SIGCONT)) {
 					set_tsk_thread_flag(t, TIF_SIGPENDING);
 					state |= TASK_INTERRUPTIBLE;
@@ -838,7 +838,7 @@ static inline int wants_signal(int sig, struct task_struct *p)
 		return 0;
 	if (sig == SIGKILL)
 		return 1;
-	if (p->state & (TASK_STOPPED | TASK_TRACED))
+	if (task_is_stopped_or_traced(p))
 		return 0;
 	return task_curr(p) || !signal_pending(p);
 }
@@ -994,6 +994,11 @@ void zap_other_threads(struct task_struct *p)
 	}
 }
 
+int fastcall __fatal_signal_pending(struct task_struct *tsk)
+{
+	return sigismember(&tsk->pending.signal, SIGKILL);
+}
+
 /*
 * Must be called under rcu_read_lock() or with tasklist_lock read-held.
 */
@@ -1441,7 +1446,7 @@ void do_notify_parent(struct task_struct *tsk, int sig)
 	BUG_ON(sig == -1);
 
 	/* do_notify_parent_cldstop should have been called instead.  */
-	BUG_ON(tsk->state & (TASK_STOPPED|TASK_TRACED));
+	BUG_ON(task_is_stopped_or_traced(tsk));
 
 	BUG_ON(!tsk->ptrace &&
 	       (tsk->group_leader != tsk || !thread_group_empty(tsk)));
@@ -1729,7 +1734,7 @@ static int do_signal_stop(int signr)
 			 * so this check has no races.
 			 */
 			if (!t->exit_state &&
-			    !(t->state & (TASK_STOPPED|TASK_TRACED))) {
+			    !task_is_stopped_or_traced(t)) {
 				stop_count++;
 				signal_wake_up(t, 0);
 			}
diff --git a/kernel/timer.c b/kernel/timer.c
index 23f7ead78fae..9fbb472b8cf0 100644
--- a/kernel/timer.c
+++ b/kernel/timer.c
@@ -1099,6 +1099,13 @@ signed long __sched schedule_timeout_interruptible(signed long timeout)
 }
 EXPORT_SYMBOL(schedule_timeout_interruptible);
 
+signed long __sched schedule_timeout_killable(signed long timeout)
+{
+	__set_current_state(TASK_KILLABLE);
+	return schedule_timeout(timeout);
+}
+EXPORT_SYMBOL(schedule_timeout_killable);
+
 signed long __sched schedule_timeout_uninterruptible(signed long timeout)
 {
 	__set_current_state(TASK_UNINTERRUPTIBLE);
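A sketch of the retry loop this helper supports, the same shape as the nfs3_rpc_wrapper() change earlier in this diff; issue_request() is hypothetical:

	do {
		res = issue_request();
		if (res != -EAGAIN)
			break;
		/* Sleep ~1s; a fatal signal wakes us early. */
		schedule_timeout_killable(HZ);
		res = -ERESTARTSYS;
	} while (!fatal_signal_pending(current));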
diff --git a/kernel/wait.c b/kernel/wait.c
index 444ddbfaefc4..f9876888a569 100644
--- a/kernel/wait.c
+++ b/kernel/wait.c
@@ -215,7 +215,7 @@ void fastcall __wake_up_bit(wait_queue_head_t *wq, void *word, int bit)
 {
 	struct wait_bit_key key = __WAIT_BIT_KEY_INITIALIZER(word, bit);
 	if (waitqueue_active(wq))
-		__wake_up(wq, TASK_INTERRUPTIBLE|TASK_UNINTERRUPTIBLE, 1, &key);
+		__wake_up(wq, TASK_NORMAL, 1, &key);
 }
 EXPORT_SYMBOL(__wake_up_bit);
 
diff --git a/mm/filemap.c b/mm/filemap.c
index f4d0cded0e10..89ce6fe5f8be 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -185,6 +185,12 @@ static int sync_page(void *word)
185 return 0; 185 return 0;
186} 186}
187 187
188static int sync_page_killable(void *word)
189{
190 sync_page(word);
191 return fatal_signal_pending(current) ? -EINTR : 0;
192}
193
188/** 194/**
189 * __filemap_fdatawrite_range - start writeback on mapping dirty pages in range 195 * __filemap_fdatawrite_range - start writeback on mapping dirty pages in range
190 * @mapping: address space structure to write 196 * @mapping: address space structure to write
@@ -589,6 +595,14 @@ void fastcall __lock_page(struct page *page)
589} 595}
590EXPORT_SYMBOL(__lock_page); 596EXPORT_SYMBOL(__lock_page);
591 597
598int fastcall __lock_page_killable(struct page *page)
599{
600 DEFINE_WAIT_BIT(wait, &page->flags, PG_locked);
601
602 return __wait_on_bit_lock(page_waitqueue(page), &wait,
603 sync_page_killable, TASK_KILLABLE);
604}
605
592/* 606/*
593 * Variant of lock_page that does not require the caller to hold a reference 607 * Variant of lock_page that does not require the caller to hold a reference
594 * on the page's mapping. 608 * on the page's mapping.
@@ -980,7 +994,8 @@ page_ok:
980 994
981page_not_up_to_date: 995page_not_up_to_date:
982 /* Get exclusive access to the page ... */ 996 /* Get exclusive access to the page ... */
983 lock_page(page); 997 if (lock_page_killable(page))
998 goto readpage_eio;
984 999
985 /* Did it get truncated before we got the lock? */ 1000 /* Did it get truncated before we got the lock? */
986 if (!page->mapping) { 1001 if (!page->mapping) {
@@ -1008,7 +1023,8 @@ readpage:
1008 } 1023 }
1009 1024
1010 if (!PageUptodate(page)) { 1025 if (!PageUptodate(page)) {
1011 lock_page(page); 1026 if (lock_page_killable(page))
1027 goto readpage_eio;
1012 if (!PageUptodate(page)) { 1028 if (!PageUptodate(page)) {
1013 if (page->mapping == NULL) { 1029 if (page->mapping == NULL) {
1014 /* 1030 /*
@@ -1019,15 +1035,16 @@ readpage:
1019 goto find_page; 1035 goto find_page;
1020 } 1036 }
1021 unlock_page(page); 1037 unlock_page(page);
1022 error = -EIO;
1023 shrink_readahead_size_eio(filp, ra); 1038 shrink_readahead_size_eio(filp, ra);
1024 goto readpage_error; 1039 goto readpage_eio;
1025 } 1040 }
1026 unlock_page(page); 1041 unlock_page(page);
1027 } 1042 }
1028 1043
1029 goto page_ok; 1044 goto page_ok;
1030 1045
1046readpage_eio:
1047 error = -EIO;
1031readpage_error: 1048readpage_error:
1032 /* UHHUH! A synchronous read error occurred. Report it */ 1049 /* UHHUH! A synchronous read error occurred. Report it */
1033 desc->error = error; 1050 desc->error = error;
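[Editor's note] The mm/filemap.c hunks establish the caller contract for the new primitive: lock_page_killable() returns 0 with the page locked, or -EINTR if a fatal signal arrived while waiting, and do_generic_mapping_read() now maps that to -EIO through the new readpage_eio label. A minimal sketch of the pattern (the function name is hypothetical):

/* Sketch: take a page lock while remaining killable during the wait. */
static int with_locked_page(struct page *page)
{
	int err = lock_page_killable(page);
	if (err)
		return err;	/* -EINTR: fatal signal while waiting */
	/* ... operate on the locked, pinned page ... */
	unlock_page(page);
	return 0;
}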
diff --git a/net/sunrpc/auth.c b/net/sunrpc/auth.c
index bcd9abdb031c..eca941ce298b 100644
--- a/net/sunrpc/auth.c
+++ b/net/sunrpc/auth.c
@@ -385,7 +385,6 @@ rpcauth_bindcred(struct rpc_task *task)
385 .group_info = current->group_info, 385 .group_info = current->group_info,
386 }; 386 };
387 struct rpc_cred *ret; 387 struct rpc_cred *ret;
388 sigset_t oldset;
389 int flags = 0; 388 int flags = 0;
390 389
391 dprintk("RPC: %5u looking up %s cred\n", 390 dprintk("RPC: %5u looking up %s cred\n",
@@ -393,9 +392,7 @@ rpcauth_bindcred(struct rpc_task *task)
393 get_group_info(acred.group_info); 392 get_group_info(acred.group_info);
394 if (task->tk_flags & RPC_TASK_ROOTCREDS) 393 if (task->tk_flags & RPC_TASK_ROOTCREDS)
395 flags |= RPCAUTH_LOOKUP_ROOTCREDS; 394 flags |= RPCAUTH_LOOKUP_ROOTCREDS;
396 rpc_clnt_sigmask(task->tk_client, &oldset);
397 ret = auth->au_ops->lookup_cred(auth, &acred, flags); 395 ret = auth->au_ops->lookup_cred(auth, &acred, flags);
398 rpc_clnt_sigunmask(task->tk_client, &oldset);
399 if (!IS_ERR(ret)) 396 if (!IS_ERR(ret))
400 task->tk_msg.rpc_cred = ret; 397 task->tk_msg.rpc_cred = ret;
401 else 398 else
diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c
index 924916ceaa43..0998e6d09664 100644
--- a/net/sunrpc/clnt.c
+++ b/net/sunrpc/clnt.c
@@ -313,7 +313,7 @@ struct rpc_clnt *rpc_create(struct rpc_create_args *args)
313 return clnt; 313 return clnt;
314 314
315 if (!(args->flags & RPC_CLNT_CREATE_NOPING)) { 315 if (!(args->flags & RPC_CLNT_CREATE_NOPING)) {
316 int err = rpc_ping(clnt, RPC_TASK_SOFT|RPC_TASK_NOINTR); 316 int err = rpc_ping(clnt, RPC_TASK_SOFT);
317 if (err != 0) { 317 if (err != 0) {
318 rpc_shutdown_client(clnt); 318 rpc_shutdown_client(clnt);
319 return ERR_PTR(err); 319 return ERR_PTR(err);
@@ -324,8 +324,6 @@ struct rpc_clnt *rpc_create(struct rpc_create_args *args)
324 if (args->flags & RPC_CLNT_CREATE_HARDRTRY) 324 if (args->flags & RPC_CLNT_CREATE_HARDRTRY)
325 clnt->cl_softrtry = 0; 325 clnt->cl_softrtry = 0;
326 326
327 if (args->flags & RPC_CLNT_CREATE_INTR)
328 clnt->cl_intr = 1;
329 if (args->flags & RPC_CLNT_CREATE_AUTOBIND) 327 if (args->flags & RPC_CLNT_CREATE_AUTOBIND)
330 clnt->cl_autobind = 1; 328 clnt->cl_autobind = 1;
331 if (args->flags & RPC_CLNT_CREATE_DISCRTRY) 329 if (args->flags & RPC_CLNT_CREATE_DISCRTRY)
@@ -493,7 +491,7 @@ struct rpc_clnt *rpc_bind_new_program(struct rpc_clnt *old,
493 clnt->cl_prog = program->number; 491 clnt->cl_prog = program->number;
494 clnt->cl_vers = version->number; 492 clnt->cl_vers = version->number;
495 clnt->cl_stats = program->stats; 493 clnt->cl_stats = program->stats;
496 err = rpc_ping(clnt, RPC_TASK_SOFT|RPC_TASK_NOINTR); 494 err = rpc_ping(clnt, RPC_TASK_SOFT);
497 if (err != 0) { 495 if (err != 0) {
498 rpc_shutdown_client(clnt); 496 rpc_shutdown_client(clnt);
499 clnt = ERR_PTR(err); 497 clnt = ERR_PTR(err);
@@ -515,46 +513,6 @@ static const struct rpc_call_ops rpc_default_ops = {
515 .rpc_call_done = rpc_default_callback, 513 .rpc_call_done = rpc_default_callback,
516}; 514};
517 515
518/*
519 * Export the signal mask handling for synchronous code that
520 * sleeps on RPC calls
521 */
522#define RPC_INTR_SIGNALS (sigmask(SIGHUP) | sigmask(SIGINT) | sigmask(SIGQUIT) | sigmask(SIGTERM))
523
524static void rpc_save_sigmask(sigset_t *oldset, int intr)
525{
526 unsigned long sigallow = sigmask(SIGKILL);
527 sigset_t sigmask;
528
529 /* Block all signals except those listed in sigallow */
530 if (intr)
531 sigallow |= RPC_INTR_SIGNALS;
532 siginitsetinv(&sigmask, sigallow);
533 sigprocmask(SIG_BLOCK, &sigmask, oldset);
534}
535
536static void rpc_task_sigmask(struct rpc_task *task, sigset_t *oldset)
537{
538 rpc_save_sigmask(oldset, !RPC_TASK_UNINTERRUPTIBLE(task));
539}
540
541static void rpc_restore_sigmask(sigset_t *oldset)
542{
543 sigprocmask(SIG_SETMASK, oldset, NULL);
544}
545
546void rpc_clnt_sigmask(struct rpc_clnt *clnt, sigset_t *oldset)
547{
548 rpc_save_sigmask(oldset, clnt->cl_intr);
549}
550EXPORT_SYMBOL_GPL(rpc_clnt_sigmask);
551
552void rpc_clnt_sigunmask(struct rpc_clnt *clnt, sigset_t *oldset)
553{
554 rpc_restore_sigmask(oldset);
555}
556EXPORT_SYMBOL_GPL(rpc_clnt_sigunmask);
557
558/** 516/**
559 * rpc_run_task - Allocate a new RPC task, then run rpc_execute against it 517 * rpc_run_task - Allocate a new RPC task, then run rpc_execute against it
560 * @task_setup_data: pointer to task initialisation data 518 * @task_setup_data: pointer to task initialisation data
@@ -562,7 +520,6 @@ EXPORT_SYMBOL_GPL(rpc_clnt_sigunmask);
562struct rpc_task *rpc_run_task(const struct rpc_task_setup *task_setup_data) 520struct rpc_task *rpc_run_task(const struct rpc_task_setup *task_setup_data)
563{ 521{
564 struct rpc_task *task, *ret; 522 struct rpc_task *task, *ret;
565 sigset_t oldset;
566 523
567 task = rpc_new_task(task_setup_data); 524 task = rpc_new_task(task_setup_data);
568 if (task == NULL) { 525 if (task == NULL) {
@@ -578,13 +535,7 @@ struct rpc_task *rpc_run_task(const struct rpc_task_setup *task_setup_data)
578 goto out; 535 goto out;
579 } 536 }
580 atomic_inc(&task->tk_count); 537 atomic_inc(&task->tk_count);
581 /* Mask signals on synchronous RPC calls and RPCSEC_GSS upcalls */ 538 rpc_execute(task);
582 if (!RPC_IS_ASYNC(task)) {
583 rpc_task_sigmask(task, &oldset);
584 rpc_execute(task);
585 rpc_restore_sigmask(&oldset);
586 } else
587 rpc_execute(task);
588 ret = task; 539 ret = task;
589out: 540out:
590 return ret; 541 return ret;
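[Editor's note] The clnt.c deletions are the heart of the series for NFS: synchronous RPC no longer juggles sigprocmask() to simulate killability, it simply sleeps in TASK_KILLABLE. The same shift in miniature, using the killable mutex primitive added elsewhere in this series (the lock and function names are illustrative, not from this patch):

/* Before: mask everything except SIGKILL around an interruptible sleep. */
static int take_lock_old_style(struct mutex *m)
{
	sigset_t mask, old;
	int err;

	siginitsetinv(&mask, sigmask(SIGKILL));
	sigprocmask(SIG_BLOCK, &mask, &old);
	err = mutex_lock_interruptible(m);
	sigprocmask(SIG_SETMASK, &old, NULL);
	return err;
}

/* After: the killable primitive expresses the intent directly. */
static int take_lock_killable(struct mutex *m)
{
	return mutex_lock_killable(m);	/* -EINTR only on a fatal signal */
}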
diff --git a/net/sunrpc/rpcb_clnt.c b/net/sunrpc/rpcb_clnt.c
index fa5b8f202d5b..3164a0871cf0 100644
--- a/net/sunrpc/rpcb_clnt.c
+++ b/net/sunrpc/rpcb_clnt.c
@@ -120,8 +120,7 @@ static struct rpc_clnt *rpcb_create(char *hostname, struct sockaddr *srvaddr,
120 .program = &rpcb_program, 120 .program = &rpcb_program,
121 .version = version, 121 .version = version,
122 .authflavor = RPC_AUTH_UNIX, 122 .authflavor = RPC_AUTH_UNIX,
123 .flags = (RPC_CLNT_CREATE_NOPING | 123 .flags = RPC_CLNT_CREATE_NOPING,
124 RPC_CLNT_CREATE_INTR),
125 }; 124 };
126 125
127 switch (srvaddr->sa_family) { 126 switch (srvaddr->sa_family) {
diff --git a/net/sunrpc/sched.c b/net/sunrpc/sched.c
index 40ce6f6672d6..4c669121e607 100644
--- a/net/sunrpc/sched.c
+++ b/net/sunrpc/sched.c
@@ -245,9 +245,9 @@ void rpc_init_wait_queue(struct rpc_wait_queue *queue, const char *qname)
245} 245}
246EXPORT_SYMBOL_GPL(rpc_init_wait_queue); 246EXPORT_SYMBOL_GPL(rpc_init_wait_queue);
247 247
248static int rpc_wait_bit_interruptible(void *word) 248static int rpc_wait_bit_killable(void *word)
249{ 249{
250 if (signal_pending(current)) 250 if (fatal_signal_pending(current))
251 return -ERESTARTSYS; 251 return -ERESTARTSYS;
252 schedule(); 252 schedule();
253 return 0; 253 return 0;
@@ -299,9 +299,9 @@ static void rpc_mark_complete_task(struct rpc_task *task)
299int __rpc_wait_for_completion_task(struct rpc_task *task, int (*action)(void *)) 299int __rpc_wait_for_completion_task(struct rpc_task *task, int (*action)(void *))
300{ 300{
301 if (action == NULL) 301 if (action == NULL)
302 action = rpc_wait_bit_interruptible; 302 action = rpc_wait_bit_killable;
303 return wait_on_bit(&task->tk_runstate, RPC_TASK_ACTIVE, 303 return wait_on_bit(&task->tk_runstate, RPC_TASK_ACTIVE,
304 action, TASK_INTERRUPTIBLE); 304 action, TASK_KILLABLE);
305} 305}
306EXPORT_SYMBOL_GPL(__rpc_wait_for_completion_task); 306EXPORT_SYMBOL_GPL(__rpc_wait_for_completion_task);
307 307
@@ -696,10 +696,9 @@ static void __rpc_execute(struct rpc_task *task)
696 696
697 /* sync task: sleep here */ 697 /* sync task: sleep here */
698 dprintk("RPC: %5u sync task going to sleep\n", task->tk_pid); 698 dprintk("RPC: %5u sync task going to sleep\n", task->tk_pid);
699 /* Note: Caller should be using rpc_clnt_sigmask() */
700 status = out_of_line_wait_on_bit(&task->tk_runstate, 699 status = out_of_line_wait_on_bit(&task->tk_runstate,
701 RPC_TASK_QUEUED, rpc_wait_bit_interruptible, 700 RPC_TASK_QUEUED, rpc_wait_bit_killable,
702 TASK_INTERRUPTIBLE); 701 TASK_KILLABLE);
703 if (status == -ERESTARTSYS) { 702 if (status == -ERESTARTSYS) {
704 /* 703 /*
705 * When a sync task receives a signal, it exits with 704 * When a sync task receives a signal, it exits with
@@ -840,8 +839,6 @@ static void rpc_init_task(struct rpc_task *task, const struct rpc_task_setup *ta
840 kref_get(&task->tk_client->cl_kref); 839 kref_get(&task->tk_client->cl_kref);
841 if (task->tk_client->cl_softrtry) 840 if (task->tk_client->cl_softrtry)
842 task->tk_flags |= RPC_TASK_SOFT; 841 task->tk_flags |= RPC_TASK_SOFT;
843 if (!task->tk_client->cl_intr)
844 task->tk_flags |= RPC_TASK_NOINTR;
845 } 842 }
846 843
847 if (task->tk_ops->rpc_call_prepare != NULL) 844 if (task->tk_ops->rpc_call_prepare != NULL)
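[Editor's note] net/sunrpc/sched.c converts both of its sleeps to the same generic recipe: a killable action callback handed to the wait_on_bit machinery, with TASK_KILLABLE as the sleep state. The pattern reduced to its skeleton (the bit and names are hypothetical):

/* Sketch: wait for a bit to clear, waking early only for SIGKILL. */
static int my_wait_bit_killable(void *word)
{
	if (fatal_signal_pending(current))
		return -ERESTARTSYS;	/* abort the wait */
	schedule();
	return 0;			/* retest the bit */
}

static int wait_for_my_bit(unsigned long *flags)
{
	return wait_on_bit(flags, MY_BIT, my_wait_bit_killable,
			   TASK_KILLABLE);	/* MY_BIT is illustrative */
}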