author      Matthew Wilcox <matthew@wil.cx>          2007-12-06 16:24:39 -0500
committer   Matthew Wilcox <willy@linux.intel.com>   2007-12-06 17:40:25 -0500
commit      150030b78a454ba50d5e267b0dcf01b162809192 (patch)
tree        4de766e7abbfd73a052f14f8efd3a26eb7b59d87 /fs/nfs/nfs4proc.c
parent      009e577e079656d51d0fe9b15e61e41b00816c29 (diff)
NFS: Switch from intr mount option to TASK_KILLABLE
By using the TASK_KILLABLE infrastructure, we can get rid of the 'intr'
mount option. Since rpc_clnt_sigmask/sigunmask go away, we have to use the
_killable wait variants everywhere instead of the _interruptible ones.
Signed-off-by: Liam R. Howlett <howlett@gmail.com>
Signed-off-by: Matthew Wilcox <willy@linux.intel.com>
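For context, the conversion the message describes boils down to the following pattern. This is an illustrative sketch condensed from the nfs4_delay() hunks below, not code from the patch itself; my_delay_old() and my_delay_new() are made-up names for the example.

/*
 * Sketch only: the old 'intr'-style sleep versus the new killable sleep.
 * Bodies are condensed from nfs4_delay() before and after this patch.
 */
#include <linux/sched.h>
#include <linux/sunrpc/clnt.h>

/* Before: honour 'intr' by unblocking signals around an interruptible sleep. */
static int my_delay_old(struct rpc_clnt *clnt, long timeout)
{
        sigset_t oldset;
        int res = 0;

        rpc_clnt_sigmask(clnt, &oldset);
        if (clnt->cl_intr) {
                schedule_timeout_interruptible(timeout);
                if (signalled())
                        res = -ERESTARTSYS;
        } else
                schedule_timeout_uninterruptible(timeout);
        rpc_clnt_sigunmask(clnt, &oldset);
        return res;
}

/* After: always sleep killably; only a fatal signal (SIGKILL) ends the wait early. */
static int my_delay_new(long timeout)
{
        int res = 0;

        schedule_timeout_killable(timeout);
        if (fatal_signal_pending(current))
                res = -ERESTARTSYS;
        return res;
}

Because a TASK_KILLABLE sleep ignores ordinary signals, there is no longer anything for rpc_clnt_sigmask()/rpc_clnt_sigunmask() to mask, which is why both the signal juggling and the 'intr' special case disappear.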
Diffstat (limited to 'fs/nfs/nfs4proc.c')
-rw-r--r--   fs/nfs/nfs4proc.c   27
1 file changed, 7 insertions, 20 deletions
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
index f03d9d5f5ba4..c4faa43b36de 100644
--- a/fs/nfs/nfs4proc.c
+++ b/fs/nfs/nfs4proc.c
@@ -316,12 +316,9 @@ static void nfs4_opendata_put(struct nfs4_opendata *p)
 
 static int nfs4_wait_for_completion_rpc_task(struct rpc_task *task)
 {
-        sigset_t oldset;
         int ret;
 
-        rpc_clnt_sigmask(task->tk_client, &oldset);
         ret = rpc_wait_for_completion_task(task);
-        rpc_clnt_sigunmask(task->tk_client, &oldset);
         return ret;
 }
 
@@ -2806,9 +2803,9 @@ nfs4_async_handle_error(struct rpc_task *task, const struct nfs_server *server)
         return 0;
 }
 
-static int nfs4_wait_bit_interruptible(void *word)
+static int nfs4_wait_bit_killable(void *word)
 {
-        if (signal_pending(current))
+        if (fatal_signal_pending(current))
                 return -ERESTARTSYS;
         schedule();
         return 0;
@@ -2816,18 +2813,14 @@ static int nfs4_wait_bit_interruptible(void *word)
 
 static int nfs4_wait_clnt_recover(struct rpc_clnt *clnt, struct nfs_client *clp)
 {
-        sigset_t oldset;
         int res;
 
         might_sleep();
 
         rwsem_acquire(&clp->cl_sem.dep_map, 0, 0, _RET_IP_);
 
-        rpc_clnt_sigmask(clnt, &oldset);
         res = wait_on_bit(&clp->cl_state, NFS4CLNT_STATE_RECOVER,
-                        nfs4_wait_bit_interruptible,
-                        TASK_INTERRUPTIBLE);
-        rpc_clnt_sigunmask(clnt, &oldset);
+                        nfs4_wait_bit_killable, TASK_KILLABLE);
 
         rwsem_release(&clp->cl_sem.dep_map, 1, _RET_IP_);
         return res;
@@ -2835,7 +2828,6 @@ static int nfs4_wait_clnt_recover(struct rpc_clnt *clnt, struct nfs_client *clp)
 
 static int nfs4_delay(struct rpc_clnt *clnt, long *timeout)
 {
-        sigset_t oldset;
         int res = 0;
 
         might_sleep();
@@ -2844,14 +2836,9 @@ static int nfs4_delay(struct rpc_clnt *clnt, long *timeout)
                 *timeout = NFS4_POLL_RETRY_MIN;
         if (*timeout > NFS4_POLL_RETRY_MAX)
                 *timeout = NFS4_POLL_RETRY_MAX;
-        rpc_clnt_sigmask(clnt, &oldset);
-        if (clnt->cl_intr) {
-                schedule_timeout_interruptible(*timeout);
-                if (signalled())
-                        res = -ERESTARTSYS;
-        } else
-                schedule_timeout_uninterruptible(*timeout);
-        rpc_clnt_sigunmask(clnt, &oldset);
+        schedule_timeout_killable(*timeout);
+        if (fatal_signal_pending(current))
+                res = -ERESTARTSYS;
         *timeout <<= 1;
         return res;
 }
@@ -3085,7 +3072,7 @@ int nfs4_proc_delegreturn(struct inode *inode, struct rpc_cred *cred, const nfs4
 static unsigned long
 nfs4_set_lock_task_retry(unsigned long timeout)
 {
-        schedule_timeout_interruptible(timeout);
+        schedule_timeout_killable(timeout);
         timeout <<= 1;
         if (timeout > NFS4_LOCK_MAXTIMEOUT)
                 return NFS4_LOCK_MAXTIMEOUT;
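Read as plain code rather than a diff, the killable wait-on-bit idiom the file ends up with looks roughly like this. It is condensed from the nfs4_wait_clnt_recover hunks above, with the lockdep rwsem_acquire()/rwsem_release() annotations omitted for brevity, so it is not the verbatim resulting file.

/* Condensed from the hunks above; lockdep annotations omitted. */
static int nfs4_wait_bit_killable(void *word)
{
        /* Wake early only for a fatal signal; otherwise keep sleeping. */
        if (fatal_signal_pending(current))
                return -ERESTARTSYS;
        schedule();
        return 0;
}

static int nfs4_wait_clnt_recover(struct rpc_clnt *clnt, struct nfs_client *clp)
{
        int res;

        might_sleep();
        /* Sleep in TASK_KILLABLE until the recovery bit is cleared. */
        res = wait_on_bit(&clp->cl_state, NFS4CLNT_STATE_RECOVER,
                        nfs4_wait_bit_killable, TASK_KILLABLE);
        return res;
}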