author     Jeff Layton <jlayton@redhat.com>           2008-02-07 16:34:55 -0500
committer  J. Bruce Fields <bfields@citi.umich.edu>   2008-04-23 16:13:36 -0400
commit     d751a7cd0695554498f25d3026ca6710dbb3698f (patch)
tree       31ec617dfb9fb70160bb8d2d9c9fd2e5c1e50c15 /fs
parent     7086721f9c8b59331e164e534f588e075cfd9d3f (diff)
NLM: Convert lockd to use kthreads
Have lockd_up start lockd using kthread_run. With this change, lockd_down
now blocks until lockd actually exits, so there's no longer any need for
the waitqueue code at the end of lockd_down. This also means that only one
lockd can be running at a time, which simplifies the code within lockd's
main loop.

This also adds a check for kthread_should_stop in the main loop of
nlmsvc_retry_blocked and after that function returns. There's no sense in
continuing to retry blocks if lockd is coming down anyway.

Signed-off-by: Jeff Layton <jlayton@redhat.com>
Signed-off-by: J. Bruce Fields <bfields@citi.umich.edu>
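[Editorial note, not part of the patch.] The conversion follows the generic kthread pattern: the thread function loops on kthread_should_stop(), kthread_run() starts the thread, and kthread_stop() both requests termination and waits for the thread function to return, which is why lockd_down() can now simply block until lockd has exited instead of signalling and sleeping on a waitqueue. Below is a minimal, self-contained sketch of that pattern only; every identifier in it (example_task, example_thread, example_init, example_exit) is invented for illustration and does not appear in the patch.

/*
 * Illustrative sketch of the kthread_run()/kthread_should_stop()/
 * kthread_stop() pattern this patch adopts -- not lockd code; the
 * identifiers are made up for the example.
 */
#include <linux/err.h>
#include <linux/init.h>
#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/sched.h>

static struct task_struct *example_task;

static int example_thread(void *data)
{
	/* Loop until kthread_stop() is called on this task. */
	while (!kthread_should_stop()) {
		/* ... do one unit of work ... */

		/* Sleep; kthread_stop() wakes us so the flag is rechecked. */
		schedule_timeout_interruptible(HZ);
	}
	/* Returning here completes the kthread_stop() in the stopping task. */
	return 0;
}

static int __init example_init(void)
{
	example_task = kthread_run(example_thread, NULL, "example");
	if (IS_ERR(example_task)) {
		int error = PTR_ERR(example_task);

		example_task = NULL;
		return error;
	}
	return 0;
}

static void __exit example_exit(void)
{
	/* Blocks until example_thread() returns, like the new lockd_down(). */
	kthread_stop(example_task);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");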
Diffstat (limited to 'fs')
-rw-r--r--  fs/lockd/svc.c      | 132
-rw-r--r--  fs/lockd/svclock.c  |   3
2 files changed, 60 insertions, 75 deletions
diff --git a/fs/lockd/svc.c b/fs/lockd/svc.c
index 1ed8bd4de941..66b5c98c7ff5 100644
--- a/fs/lockd/svc.c
+++ b/fs/lockd/svc.c
@@ -25,6 +25,7 @@
 #include <linux/smp.h>
 #include <linux/smp_lock.h>
 #include <linux/mutex.h>
+#include <linux/kthread.h>
 #include <linux/freezer.h>
 
 #include <linux/sunrpc/types.h>
@@ -48,14 +49,11 @@ EXPORT_SYMBOL(nlmsvc_ops);
 
 static DEFINE_MUTEX(nlmsvc_mutex);
 static unsigned int nlmsvc_users;
-static pid_t nlmsvc_pid;
+static struct task_struct *nlmsvc_task;
 static struct svc_serv *nlmsvc_serv;
 int nlmsvc_grace_period;
 unsigned long nlmsvc_timeout;
 
-static DECLARE_COMPLETION(lockd_start_done);
-static DECLARE_WAIT_QUEUE_HEAD(lockd_exit);
-
 /*
  * These can be set at insmod time (useful for NFS as root filesystem),
  * and also changed through the sysctl interface. -- Jamie Lokier, Aug 2003
@@ -111,35 +109,30 @@ static inline void clear_grace_period(void)
 /*
  * This is the lockd kernel thread
  */
-static void
-lockd(struct svc_rqst *rqstp)
+static int
+lockd(void *vrqstp)
 {
 	int err = 0;
+	struct svc_rqst *rqstp = vrqstp;
 	unsigned long grace_period_expire;
 
-	/* Lock module and set up kernel thread */
-	/* lockd_up is waiting for us to startup, so will
-	 * be holding a reference to this module, so it
-	 * is safe to just claim another reference
-	 */
-	__module_get(THIS_MODULE);
-	lock_kernel();
-
-	/*
-	 * Let our maker know we're running.
-	 */
-	nlmsvc_pid = current->pid;
-	nlmsvc_serv = rqstp->rq_server;
-	complete(&lockd_start_done);
-
-	daemonize("lockd");
+	/* try_to_freeze() is called from svc_recv() */
 	set_freezable();
 
-	/* Process request with signals blocked, but allow SIGKILL. */
+	/* Allow SIGKILL to tell lockd to drop all of its locks */
 	allow_signal(SIGKILL);
 
 	dprintk("NFS locking service started (ver " LOCKD_VERSION ").\n");
 
+	/*
+	 * FIXME: it would be nice if lockd didn't spend its entire life
+	 * running under the BKL. At the very least, it would be good to
+	 * have someone clarify what it's intended to protect here. I've
+	 * seen some handwavy posts about posix locking needing to be
+	 * done under the BKL, but it's far from clear.
+	 */
+	lock_kernel();
+
 	if (!nlm_timeout)
 		nlm_timeout = LOCKD_DFLT_TIMEO;
 	nlmsvc_timeout = nlm_timeout * HZ;
@@ -148,10 +141,9 @@ lockd(struct svc_rqst *rqstp)
 
 	/*
 	 * The main request loop. We don't terminate until the last
-	 * NFS mount or NFS daemon has gone away, and we've been sent a
-	 * signal, or else another process has taken over our job.
+	 * NFS mount or NFS daemon has gone away.
 	 */
-	while ((nlmsvc_users || !signalled()) && nlmsvc_pid == current->pid) {
+	while (!kthread_should_stop()) {
 		long timeout = MAX_SCHEDULE_TIMEOUT;
 		RPC_IFDEBUG(char buf[RPC_MAX_ADDRBUFLEN]);
 
@@ -161,6 +153,7 @@ lockd(struct svc_rqst *rqstp)
 				nlmsvc_invalidate_all();
 				grace_period_expire = set_grace_period();
 			}
+			continue;
 		}
 
 		/*
@@ -195,28 +188,19 @@ lockd(struct svc_rqst *rqstp)
 	}
 
 	flush_signals(current);
+	if (nlmsvc_ops)
+		nlmsvc_invalidate_all();
+	nlm_shutdown_hosts();
 
-	/*
-	 * Check whether there's a new lockd process before
-	 * shutting down the hosts and clearing the slot.
-	 */
-	if (!nlmsvc_pid || current->pid == nlmsvc_pid) {
-		if (nlmsvc_ops)
-			nlmsvc_invalidate_all();
-		nlm_shutdown_hosts();
-		nlmsvc_pid = 0;
-		nlmsvc_serv = NULL;
-	} else
-		printk(KERN_DEBUG
-			"lockd: new process, skipping host shutdown\n");
-	wake_up(&lockd_exit);
+	unlock_kernel();
+
+	nlmsvc_task = NULL;
+	nlmsvc_serv = NULL;
 
 	/* Exit the RPC thread */
 	svc_exit_thread(rqstp);
 
-	/* Release module */
-	unlock_kernel();
-	module_put_and_exit(0);
+	return 0;
 }
 
 /*
@@ -261,14 +245,15 @@ static int make_socks(struct svc_serv *serv, int proto)
 int
 lockd_up(int proto) /* Maybe add a 'family' option when IPv6 is supported ?? */
 {
-	struct svc_serv *	serv;
-	int			error = 0;
+	struct svc_serv *serv;
+	struct svc_rqst *rqstp;
+	int error = 0;
 
 	mutex_lock(&nlmsvc_mutex);
 	/*
 	 * Check whether we're already up and running.
 	 */
-	if (nlmsvc_pid) {
+	if (nlmsvc_serv) {
 		if (proto)
 			error = make_socks(nlmsvc_serv, proto);
 		goto out;
@@ -295,13 +280,28 @@ lockd_up(int proto) /* Maybe add a 'family' option when IPv6 is supported ?? */
 	/*
 	 * Create the kernel thread and wait for it to start.
 	 */
-	error = svc_create_thread(lockd, serv);
-	if (error) {
+	rqstp = svc_prepare_thread(serv, &serv->sv_pools[0]);
+	if (IS_ERR(rqstp)) {
+		error = PTR_ERR(rqstp);
 		printk(KERN_WARNING
-			"lockd_up: create thread failed, error=%d\n", error);
+			"lockd_up: svc_rqst allocation failed, error=%d\n",
+				error);
+		goto destroy_and_out;
+	}
+
+	svc_sock_update_bufs(serv);
+	nlmsvc_serv = rqstp->rq_server;
+
+	nlmsvc_task = kthread_run(lockd, rqstp, serv->sv_name);
+	if (IS_ERR(nlmsvc_task)) {
+		error = PTR_ERR(nlmsvc_task);
+		nlmsvc_task = NULL;
+		nlmsvc_serv = NULL;
+		printk(KERN_WARNING
+			"lockd_up: kthread_run failed, error=%d\n", error);
+		svc_exit_thread(rqstp);
 		goto destroy_and_out;
 	}
-	wait_for_completion(&lockd_start_done);
 
 	/*
 	 * Note: svc_serv structures have an initial use count of 1,
@@ -323,37 +323,21 @@ EXPORT_SYMBOL(lockd_up);
 void
 lockd_down(void)
 {
-	static int warned;
-
 	mutex_lock(&nlmsvc_mutex);
 	if (nlmsvc_users) {
 		if (--nlmsvc_users)
 			goto out;
-	} else
-		printk(KERN_WARNING "lockd_down: no users! pid=%d\n", nlmsvc_pid);
-
-	if (!nlmsvc_pid) {
-		if (warned++ == 0)
-			printk(KERN_WARNING "lockd_down: no lockd running.\n");
-		goto out;
+	} else {
+		printk(KERN_ERR "lockd_down: no users! task=%p\n",
+			nlmsvc_task);
+		BUG();
 	}
-	warned = 0;
 
-	kill_proc(nlmsvc_pid, SIGKILL, 1);
-	/*
-	 * Wait for the lockd process to exit, but since we're holding
-	 * the lockd semaphore, we can't wait around forever ...
-	 */
-	clear_thread_flag(TIF_SIGPENDING);
-	interruptible_sleep_on_timeout(&lockd_exit, HZ);
-	if (nlmsvc_pid) {
-		printk(KERN_WARNING
-			"lockd_down: lockd failed to exit, clearing pid\n");
-		nlmsvc_pid = 0;
-	}
-	spin_lock_irq(&current->sighand->siglock);
-	recalc_sigpending();
-	spin_unlock_irq(&current->sighand->siglock);
+	if (!nlmsvc_task) {
+		printk(KERN_ERR "lockd_down: no lockd running.\n");
+		BUG();
+	}
+	kthread_stop(nlmsvc_task);
 out:
 	mutex_unlock(&nlmsvc_mutex);
 }
diff --git a/fs/lockd/svclock.c b/fs/lockd/svclock.c
index fe9bdb4a220c..4da7c4c27064 100644
--- a/fs/lockd/svclock.c
+++ b/fs/lockd/svclock.c
@@ -29,6 +29,7 @@
 #include <linux/sunrpc/svc.h>
 #include <linux/lockd/nlm.h>
 #include <linux/lockd/lockd.h>
+#include <linux/kthread.h>
 
 #define NLMDBG_FACILITY NLMDBG_SVCLOCK
 
@@ -887,7 +888,7 @@ nlmsvc_retry_blocked(void)
 	unsigned long timeout = MAX_SCHEDULE_TIMEOUT;
 	struct nlm_block *block;
 
-	while (!list_empty(&nlm_blocked)) {
+	while (!list_empty(&nlm_blocked) && !kthread_should_stop()) {
 		block = list_entry(nlm_blocked.next, struct nlm_block, b_list);
 
 		if (block->b_when == NLM_NEVER)