path: root/fs/nfs
author    Jeff Layton <jlayton@redhat.com>        2008-02-20 08:55:30 -0500
committer J. Bruce Fields <bfields@citi.umich.edu>    2008-04-23 16:13:40 -0400
commit    a277e33cbe3fdfb9a77b448ea3043be22f000dfd (patch)
tree      2da3b18f69c7f2d8979a33c666977cd23592e98d /fs/nfs
parent    3ba1514815817f93a4f09615726dd4bcd0ddbbc9 (diff)
NFS: convert nfs4 callback thread to kthread API
There's a general push to convert kernel threads to use the (much cleaner)
kthread API. This patch converts the NFSv4 callback kernel thread to the
kthread API. In addition to being generally cleaner, this also removes the
dependency on signals when shutting down the thread.

Note that this patch depends on the recent patches to svc_recv() to make it
check kthread_should_stop() periodically. Those patches are in Bruce's tree
at the moment and are slated for 2.6.26 along with the lockd conversion, so
this conversion is probably also appropriate for 2.6.26.

Signed-off-by: Jeff Layton <jlayton@redhat.com>
Acked-by: Trond Myklebust <Trond.Myklebust@netapp.com>
Signed-off-by: J. Bruce Fields <bfields@citi.umich.edu>
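For context, here is a minimal, self-contained sketch of the generic kthread
pattern this patch adopts (illustrative only, not part of the patch; all
demo_* names are hypothetical): the creator starts the thread with
kthread_run(), the thread loops on kthread_should_stop() instead of waiting
for SIGKILL, and shutdown is a single kthread_stop() call that wakes the
thread, waits for it to exit, and reaps it.

/*
 * Illustrative sketch only (not part of the patch): the generic kthread
 * API pattern the callback thread is being converted to.
 */
#include <linux/err.h>
#include <linux/delay.h>
#include <linux/kthread.h>
#include <linux/module.h>

static struct task_struct *demo_task;

static int demo_thread(void *data)
{
	/* Loop until kthread_stop() is called; no SIGKILL handling needed. */
	while (!kthread_should_stop()) {
		/* ... wait for and service one request here ... */
		msleep_interruptible(1000);
	}
	return 0;	/* exit code is handed back to kthread_stop() */
}

static int __init demo_init(void)
{
	/* Create and immediately wake the thread. */
	demo_task = kthread_run(demo_thread, NULL, "demo-thread");
	if (IS_ERR(demo_task))
		return PTR_ERR(demo_task);
	return 0;
}

static void __exit demo_exit(void)
{
	/* Wakes the thread, waits for it to finish, and reaps it. */
	kthread_stop(demo_task);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");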
Diffstat (limited to 'fs/nfs')
-rw-r--r--	fs/nfs/callback.c	73
1 file changed, 35 insertions(+), 38 deletions(-)
diff --git a/fs/nfs/callback.c b/fs/nfs/callback.c
index 66648dd92d97..2e5de77ff030 100644
--- a/fs/nfs/callback.c
+++ b/fs/nfs/callback.c
@@ -15,6 +15,7 @@
 #include <linux/nfs_fs.h>
 #include <linux/mutex.h>
 #include <linux/freezer.h>
+#include <linux/kthread.h>
 
 #include <net/inet_sock.h>
 
@@ -27,9 +28,7 @@
 struct nfs_callback_data {
 	unsigned int users;
 	struct svc_serv *serv;
-	pid_t pid;
-	struct completion started;
-	struct completion stopped;
+	struct task_struct *task;
 };
 
 static struct nfs_callback_data nfs_callback_info;
@@ -57,27 +56,20 @@ module_param_call(callback_tcpport, param_set_port, param_get_int,
 /*
  * This is the callback kernel thread.
  */
-static void nfs_callback_svc(struct svc_rqst *rqstp)
+static int
+nfs_callback_svc(void *vrqstp)
 {
 	int err;
+	struct svc_rqst *rqstp = vrqstp;
 
-	__module_get(THIS_MODULE);
-	lock_kernel();
-
-	nfs_callback_info.pid = current->pid;
-	daemonize("nfsv4-svc");
-	/* Process request with signals blocked, but allow SIGKILL. */
-	allow_signal(SIGKILL);
 	set_freezable();
 
-	complete(&nfs_callback_info.started);
-
-	for(;;) {
-		if (signalled()) {
-			if (nfs_callback_info.users == 0)
-				break;
-			flush_signals(current);
-		}
+	/*
+	 * FIXME: do we really need to run this under the BKL? If so, please
+	 * add a comment about what it's intended to protect.
+	 */
+	lock_kernel();
+	while (!kthread_should_stop()) {
 		/*
 		 * Listen for a request on the socket
 		 */
@@ -92,13 +84,10 @@ static void nfs_callback_svc(struct svc_rqst *rqstp)
 		}
 		svc_process(rqstp);
 	}
-
-	flush_signals(current);
-	svc_exit_thread(rqstp);
-	nfs_callback_info.pid = 0;
-	complete(&nfs_callback_info.stopped);
 	unlock_kernel();
-	module_put_and_exit(0);
+	nfs_callback_info.task = NULL;
+	svc_exit_thread(rqstp);
+	return 0;
 }
 
 /*
@@ -107,14 +96,13 @@ static void nfs_callback_svc(struct svc_rqst *rqstp)
 int nfs_callback_up(void)
 {
 	struct svc_serv *serv = NULL;
+	struct svc_rqst *rqstp;
 	int ret = 0;
 
 	lock_kernel();
 	mutex_lock(&nfs_callback_mutex);
-	if (nfs_callback_info.users++ || nfs_callback_info.pid != 0)
+	if (nfs_callback_info.users++ || nfs_callback_info.task != NULL)
 		goto out;
-	init_completion(&nfs_callback_info.started);
-	init_completion(&nfs_callback_info.stopped);
 	serv = svc_create(&nfs4_callback_program, NFS4_CALLBACK_BUFSIZE, NULL);
 	ret = -ENOMEM;
 	if (!serv)
@@ -127,15 +115,28 @@ int nfs_callback_up(void)
 	nfs_callback_tcpport = ret;
 	dprintk("Callback port = 0x%x\n", nfs_callback_tcpport);
 
-	ret = svc_create_thread(nfs_callback_svc, serv);
-	if (ret < 0)
+	rqstp = svc_prepare_thread(serv, &serv->sv_pools[0]);
+	if (IS_ERR(rqstp)) {
+		ret = PTR_ERR(rqstp);
 		goto out_err;
+	}
+
+	svc_sock_update_bufs(serv);
 	nfs_callback_info.serv = serv;
-	wait_for_completion(&nfs_callback_info.started);
+
+	nfs_callback_info.task = kthread_run(nfs_callback_svc, rqstp,
+					     "nfsv4-svc");
+	if (IS_ERR(nfs_callback_info.task)) {
+		ret = PTR_ERR(nfs_callback_info.task);
+		nfs_callback_info.serv = NULL;
+		nfs_callback_info.task = NULL;
+		svc_exit_thread(rqstp);
+		goto out_err;
+	}
 out:
 	/*
 	 * svc_create creates the svc_serv with sv_nrthreads == 1, and then
-	 * svc_create_thread increments that. So we need to call svc_destroy
+	 * svc_prepare_thread increments that. So we need to call svc_destroy
 	 * on both success and failure so that the refcount is 1 when the
 	 * thread exits.
 	 */
@@ -159,12 +160,8 @@ void nfs_callback_down(void)
 	lock_kernel();
 	mutex_lock(&nfs_callback_mutex);
 	nfs_callback_info.users--;
-	do {
-		if (nfs_callback_info.users != 0 || nfs_callback_info.pid == 0)
-			break;
-		if (kill_proc(nfs_callback_info.pid, SIGKILL, 1) < 0)
-			break;
-	} while (wait_for_completion_timeout(&nfs_callback_info.stopped, 5*HZ) == 0);
+	if (nfs_callback_info.users == 0 && nfs_callback_info.task != NULL)
+		kthread_stop(nfs_callback_info.task);
 	mutex_unlock(&nfs_callback_mutex);
 	unlock_kernel();
 }