Diffstat (limited to 'fs/lockd')
-rw-r--r--   fs/lockd/host.c       73
-rw-r--r--   fs/lockd/svc.c       150
-rw-r--r--   fs/lockd/svclock.c     6
-rw-r--r--   fs/lockd/svcshare.c    3
4 files changed, 111 insertions(+), 121 deletions(-)
diff --git a/fs/lockd/host.c b/fs/lockd/host.c
index f23750db1650..a17664c7eacc 100644
--- a/fs/lockd/host.c
+++ b/fs/lockd/host.c
@@ -19,12 +19,11 @@
 
 
 #define NLMDBG_FACILITY		NLMDBG_HOSTCACHE
-#define NLM_HOST_MAX		64
 #define NLM_HOST_NRHASH		32
 #define NLM_ADDRHASH(addr)	(ntohl(addr) & (NLM_HOST_NRHASH-1))
 #define NLM_HOST_REBIND		(60 * HZ)
-#define NLM_HOST_EXPIRE		((nrhosts > NLM_HOST_MAX)? 300 * HZ : 120 * HZ)
-#define NLM_HOST_COLLECT	((nrhosts > NLM_HOST_MAX)? 120 * HZ : 60 * HZ)
+#define NLM_HOST_EXPIRE		(300 * HZ)
+#define NLM_HOST_COLLECT	(120 * HZ)
 
 static struct hlist_head nlm_hosts[NLM_HOST_NRHASH];
 static unsigned long next_gc;
@@ -143,9 +142,7 @@ static struct nlm_host *nlm_lookup_host(int server,
 	INIT_LIST_HEAD(&host->h_granted);
 	INIT_LIST_HEAD(&host->h_reclaim);
 
-	if (++nrhosts > NLM_HOST_MAX)
-		next_gc = 0;
-
+	nrhosts++;
 out:
 	mutex_unlock(&nlm_host_mutex);
 	return host;
@@ -462,7 +459,7 @@ nlm_gc_hosts(void)
  * Manage NSM handles
  */
 static LIST_HEAD(nsm_handles);
-static DEFINE_MUTEX(nsm_mutex);
+static DEFINE_SPINLOCK(nsm_lock);
 
 static struct nsm_handle *
 __nsm_find(const struct sockaddr_in *sin,
@@ -470,7 +467,7 @@ __nsm_find(const struct sockaddr_in *sin,
 		int create)
 {
 	struct nsm_handle *nsm = NULL;
-	struct list_head *pos;
+	struct nsm_handle *pos;
 
 	if (!sin)
 		return NULL;
@@ -484,38 +481,43 @@ __nsm_find(const struct sockaddr_in *sin,
 		return NULL;
 	}
 
-	mutex_lock(&nsm_mutex);
-	list_for_each(pos, &nsm_handles) {
-		nsm = list_entry(pos, struct nsm_handle, sm_link);
+retry:
+	spin_lock(&nsm_lock);
+	list_for_each_entry(pos, &nsm_handles, sm_link) {
 
 		if (hostname && nsm_use_hostnames) {
-			if (strlen(nsm->sm_name) != hostname_len
-			 || memcmp(nsm->sm_name, hostname, hostname_len))
+			if (strlen(pos->sm_name) != hostname_len
+			 || memcmp(pos->sm_name, hostname, hostname_len))
 				continue;
-		} else if (!nlm_cmp_addr(&nsm->sm_addr, sin))
+		} else if (!nlm_cmp_addr(&pos->sm_addr, sin))
 			continue;
-		atomic_inc(&nsm->sm_count);
-		goto out;
+		atomic_inc(&pos->sm_count);
+		kfree(nsm);
+		nsm = pos;
+		goto found;
 	}
-
-	if (!create) {
-		nsm = NULL;
-		goto out;
+	if (nsm) {
+		list_add(&nsm->sm_link, &nsm_handles);
+		goto found;
 	}
+	spin_unlock(&nsm_lock);
+
+	if (!create)
+		return NULL;
 
 	nsm = kzalloc(sizeof(*nsm) + hostname_len + 1, GFP_KERNEL);
-	if (nsm != NULL) {
-		nsm->sm_addr = *sin;
-		nsm->sm_name = (char *) (nsm + 1);
-		memcpy(nsm->sm_name, hostname, hostname_len);
-		nsm->sm_name[hostname_len] = '\0';
-		atomic_set(&nsm->sm_count, 1);
+	if (nsm == NULL)
+		return NULL;
 
-		list_add(&nsm->sm_link, &nsm_handles);
-	}
+	nsm->sm_addr = *sin;
+	nsm->sm_name = (char *) (nsm + 1);
+	memcpy(nsm->sm_name, hostname, hostname_len);
+	nsm->sm_name[hostname_len] = '\0';
+	atomic_set(&nsm->sm_count, 1);
+	goto retry;
 
-out:
-	mutex_unlock(&nsm_mutex);
+found:
+	spin_unlock(&nsm_lock);
 	return nsm;
 }
 
@@ -534,12 +536,9 @@ nsm_release(struct nsm_handle *nsm)
 {
 	if (!nsm)
 		return;
-	if (atomic_dec_and_test(&nsm->sm_count)) {
-		mutex_lock(&nsm_mutex);
-		if (atomic_read(&nsm->sm_count) == 0) {
-			list_del(&nsm->sm_link);
-			kfree(nsm);
-		}
-		mutex_unlock(&nsm_mutex);
+	if (atomic_dec_and_lock(&nsm->sm_count, &nsm_lock)) {
+		list_del(&nsm->sm_link);
+		spin_unlock(&nsm_lock);
+		kfree(nsm);
 	}
 }
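
Note: the __nsm_find()/nsm_release() hunks above follow a common kernel pattern. The GFP_KERNEL allocation is done with the lock dropped, the lookup is retried under the spinlock, the preallocated handle is freed if another thread raced in, and teardown uses atomic_dec_and_lock() so the list is only locked for the final put. Below is a minimal sketch of that pattern, not part of the patch; the names (cache_entry, cache_find, cache_put) are invented and stand in for lockd's nsm_handle handling.

/*
 * Hypothetical illustration of the "allocate outside the lock,
 * then retry the lookup" pattern used by __nsm_find() above.
 */
#include <linux/atomic.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

struct cache_entry {
	struct list_head	link;
	atomic_t		count;
	int			key;
};

static LIST_HEAD(cache);
static DEFINE_SPINLOCK(cache_lock);

static struct cache_entry *cache_find(int key)
{
	struct cache_entry *new = NULL;
	struct cache_entry *pos;

retry:
	spin_lock(&cache_lock);
	list_for_each_entry(pos, &cache, link) {
		if (pos->key != key)
			continue;
		/* existing entry found: take a reference, drop any preallocation */
		atomic_inc(&pos->count);
		kfree(new);
		new = pos;
		goto found;
	}
	if (new) {
		/* second pass: nobody raced us, publish the new entry */
		list_add(&new->link, &cache);
		goto found;
	}
	spin_unlock(&cache_lock);

	/* GFP_KERNEL may sleep, so allocate with the spinlock dropped */
	new = kzalloc(sizeof(*new), GFP_KERNEL);
	if (new == NULL)
		return NULL;
	new->key = key;
	atomic_set(&new->count, 1);
	goto retry;

found:
	spin_unlock(&cache_lock);
	return new;
}

static void cache_put(struct cache_entry *entry)
{
	/* take the lock only when the last reference goes away */
	if (atomic_dec_and_lock(&entry->count, &cache_lock)) {
		list_del(&entry->link);
		spin_unlock(&cache_lock);
		kfree(entry);
	}
}

The retry keeps the sleeping allocation out of the critical section while still guaranteeing that only one handle per key ends up on the list.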
diff --git a/fs/lockd/svc.c b/fs/lockd/svc.c
index 38c2f0b1dd7d..2169af4d5455 100644
--- a/fs/lockd/svc.c
+++ b/fs/lockd/svc.c
@@ -25,6 +25,7 @@
 #include <linux/smp.h>
 #include <linux/smp_lock.h>
 #include <linux/mutex.h>
+#include <linux/kthread.h>
 #include <linux/freezer.h>
 
 #include <linux/sunrpc/types.h>
@@ -48,14 +49,11 @@ EXPORT_SYMBOL(nlmsvc_ops);
 
 static DEFINE_MUTEX(nlmsvc_mutex);
 static unsigned int		nlmsvc_users;
-static pid_t			nlmsvc_pid;
+static struct task_struct	*nlmsvc_task;
 static struct svc_serv		*nlmsvc_serv;
 int				nlmsvc_grace_period;
 unsigned long			nlmsvc_timeout;
 
-static DECLARE_COMPLETION(lockd_start_done);
-static DECLARE_WAIT_QUEUE_HEAD(lockd_exit);
-
 /*
  * These can be set at insmod time (useful for NFS as root filesystem),
  * and also changed through the sysctl interface. -- Jamie Lokier, Aug 2003
@@ -113,35 +111,30 @@ static inline void clear_grace_period(void)
 /*
  * This is the lockd kernel thread
  */
-static void
-lockd(struct svc_rqst *rqstp)
+static int
+lockd(void *vrqstp)
 {
-	int err = 0;
+	int err = 0, preverr = 0;
+	struct svc_rqst *rqstp = vrqstp;
 	unsigned long grace_period_expire;
 
-	/* Lock module and set up kernel thread */
-	/* lockd_up is waiting for us to startup, so will
-	 * be holding a reference to this module, so it
-	 * is safe to just claim another reference
-	 */
-	__module_get(THIS_MODULE);
-	lock_kernel();
-
-	/*
-	 * Let our maker know we're running.
-	 */
-	nlmsvc_pid = current->pid;
-	nlmsvc_serv = rqstp->rq_server;
-	complete(&lockd_start_done);
-
-	daemonize("lockd");
+	/* try_to_freeze() is called from svc_recv() */
 	set_freezable();
 
-	/* Process request with signals blocked, but allow SIGKILL.  */
+	/* Allow SIGKILL to tell lockd to drop all of its locks */
 	allow_signal(SIGKILL);
 
 	dprintk("NFS locking service started (ver " LOCKD_VERSION ").\n");
 
+	/*
+	 * FIXME: it would be nice if lockd didn't spend its entire life
+	 * running under the BKL. At the very least, it would be good to
+	 * have someone clarify what it's intended to protect here. I've
+	 * seen some handwavy posts about posix locking needing to be
+	 * done under the BKL, but it's far from clear.
+	 */
+	lock_kernel();
+
 	if (!nlm_timeout)
 		nlm_timeout = LOCKD_DFLT_TIMEO;
 	nlmsvc_timeout = nlm_timeout * HZ;
@@ -150,10 +143,9 @@ lockd(struct svc_rqst *rqstp)
 
 	/*
 	 * The main request loop. We don't terminate until the last
-	 * NFS mount or NFS daemon has gone away, and we've been sent a
-	 * signal, or else another process has taken over our job.
+	 * NFS mount or NFS daemon has gone away.
 	 */
-	while ((nlmsvc_users || !signalled()) && nlmsvc_pid == current->pid) {
+	while (!kthread_should_stop()) {
 		long timeout = MAX_SCHEDULE_TIMEOUT;
 		RPC_IFDEBUG(char buf[RPC_MAX_ADDRBUFLEN]);
 
@@ -163,6 +155,7 @@ lockd(struct svc_rqst *rqstp)
 				nlmsvc_invalidate_all();
 				grace_period_expire = set_grace_period();
 			}
+			continue;
 		}
 
 		/*
@@ -181,14 +174,20 @@ lockd(struct svc_rqst *rqstp)
 		 * recvfrom routine.
 		 */
 		err = svc_recv(rqstp, timeout);
-		if (err == -EAGAIN || err == -EINTR)
+		if (err == -EAGAIN || err == -EINTR) {
+			preverr = err;
 			continue;
+		}
 		if (err < 0) {
-			printk(KERN_WARNING
-			       "lockd: terminating on error %d\n",
-			       -err);
-			break;
+			if (err != preverr) {
+				printk(KERN_WARNING "%s: unexpected error "
+					"from svc_recv (%d)\n", __func__, err);
+				preverr = err;
+			}
+			schedule_timeout_interruptible(HZ);
+			continue;
 		}
+		preverr = err;
 
 		dprintk("lockd: request from %s\n",
 			svc_print_addr(rqstp, buf, sizeof(buf)));
@@ -197,28 +196,19 @@ lockd(struct svc_rqst *rqstp)
 	}
 
 	flush_signals(current);
+	if (nlmsvc_ops)
+		nlmsvc_invalidate_all();
+	nlm_shutdown_hosts();
 
-	/*
-	 * Check whether there's a new lockd process before
-	 * shutting down the hosts and clearing the slot.
-	 */
-	if (!nlmsvc_pid || current->pid == nlmsvc_pid) {
-		if (nlmsvc_ops)
-			nlmsvc_invalidate_all();
-		nlm_shutdown_hosts();
-		nlmsvc_pid = 0;
-		nlmsvc_serv = NULL;
-	} else
-		printk(KERN_DEBUG
-			"lockd: new process, skipping host shutdown\n");
-	wake_up(&lockd_exit);
+	unlock_kernel();
+
+	nlmsvc_task = NULL;
+	nlmsvc_serv = NULL;
 
 	/* Exit the RPC thread */
 	svc_exit_thread(rqstp);
 
-	/* Release module */
-	unlock_kernel();
-	module_put_and_exit(0);
+	return 0;
 }
 
 /*
@@ -263,14 +253,15 @@ static int make_socks(struct svc_serv *serv, int proto)
 int
 lockd_up(int proto) /* Maybe add a 'family' option when IPv6 is supported ?? */
 {
-	struct svc_serv *	serv;
-	int			error = 0;
+	struct svc_serv *serv;
+	struct svc_rqst *rqstp;
+	int		error = 0;
 
 	mutex_lock(&nlmsvc_mutex);
 	/*
 	 * Check whether we're already up and running.
 	 */
-	if (nlmsvc_pid) {
+	if (nlmsvc_serv) {
 		if (proto)
 			error = make_socks(nlmsvc_serv, proto);
 		goto out;
@@ -297,13 +288,28 @@ lockd_up(int proto) /* Maybe add a 'family' option when IPv6 is supported ?? */
 	/*
 	 * Create the kernel thread and wait for it to start.
 	 */
-	error = svc_create_thread(lockd, serv);
-	if (error) {
+	rqstp = svc_prepare_thread(serv, &serv->sv_pools[0]);
+	if (IS_ERR(rqstp)) {
+		error = PTR_ERR(rqstp);
 		printk(KERN_WARNING
-			"lockd_up: create thread failed, error=%d\n", error);
+			"lockd_up: svc_rqst allocation failed, error=%d\n",
+			error);
+		goto destroy_and_out;
+	}
+
+	svc_sock_update_bufs(serv);
+	nlmsvc_serv = rqstp->rq_server;
+
+	nlmsvc_task = kthread_run(lockd, rqstp, serv->sv_name);
+	if (IS_ERR(nlmsvc_task)) {
+		error = PTR_ERR(nlmsvc_task);
+		nlmsvc_task = NULL;
+		nlmsvc_serv = NULL;
+		printk(KERN_WARNING
+			"lockd_up: kthread_run failed, error=%d\n", error);
+		svc_exit_thread(rqstp);
 		goto destroy_and_out;
 	}
-	wait_for_completion(&lockd_start_done);
 
 	/*
 	 * Note: svc_serv structures have an initial use count of 1,
@@ -325,37 +331,21 @@ EXPORT_SYMBOL(lockd_up);
 void
 lockd_down(void)
 {
-	static int warned;
-
 	mutex_lock(&nlmsvc_mutex);
 	if (nlmsvc_users) {
 		if (--nlmsvc_users)
 			goto out;
-	} else
-		printk(KERN_WARNING "lockd_down: no users! pid=%d\n", nlmsvc_pid);
-
-	if (!nlmsvc_pid) {
-		if (warned++ == 0)
-			printk(KERN_WARNING "lockd_down: no lockd running.\n");
-		goto out;
+	} else {
+		printk(KERN_ERR "lockd_down: no users! task=%p\n",
+			nlmsvc_task);
+		BUG();
 	}
-	warned = 0;
 
-	kill_proc(nlmsvc_pid, SIGKILL, 1);
-	/*
-	 * Wait for the lockd process to exit, but since we're holding
-	 * the lockd semaphore, we can't wait around forever ...
-	 */
-	clear_thread_flag(TIF_SIGPENDING);
-	interruptible_sleep_on_timeout(&lockd_exit, HZ);
-	if (nlmsvc_pid) {
-		printk(KERN_WARNING
-			"lockd_down: lockd failed to exit, clearing pid\n");
-		nlmsvc_pid = 0;
+	if (!nlmsvc_task) {
+		printk(KERN_ERR "lockd_down: no lockd running.\n");
+		BUG();
 	}
-	spin_lock_irq(&current->sighand->siglock);
-	recalc_sigpending();
-	spin_unlock_irq(&current->sighand->siglock);
+	kthread_stop(nlmsvc_task);
 out:
 	mutex_unlock(&nlmsvc_mutex);
 }
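
Note: the svc.c hunks above replace the pid/signal/completion bookkeeping with the kthread API: lockd_up() starts the thread with kthread_run(), the thread loops on kthread_should_stop(), and lockd_down() tears it down with kthread_stop(). Below is a minimal sketch of that lifecycle, not part of the patch; the names (my_thread, my_start, my_stop) are invented, and a sleep stands in for the real work done in svc_recv().

/*
 * Hypothetical illustration of the kthread start/stop lifecycle
 * that lockd now follows.
 */
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/kthread.h>

static struct task_struct *my_task;

static int my_thread(void *data)
{
	/* loop until someone calls kthread_stop() on this task */
	while (!kthread_should_stop()) {
		/* ... service one request; here we just sleep ... */
		msleep_interruptible(1000);
	}
	/* this return value is handed back to kthread_stop() */
	return 0;
}

static int my_start(void)
{
	my_task = kthread_run(my_thread, NULL, "my_thread");
	if (IS_ERR(my_task)) {
		int err = PTR_ERR(my_task);

		my_task = NULL;
		return err;
	}
	return 0;
}

static void my_stop(void)
{
	/* kthread_stop() wakes the thread and waits for it to exit */
	if (my_task)
		kthread_stop(my_task);
	my_task = NULL;
}

Because kthread_stop() blocks until the thread function returns, the shutdown path no longer needs the old lockd_exit waitqueue or the "failed to exit" fallback.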
diff --git a/fs/lockd/svclock.c b/fs/lockd/svclock.c
index fe9bdb4a220c..1f122c1940af 100644
--- a/fs/lockd/svclock.c
+++ b/fs/lockd/svclock.c
@@ -29,6 +29,7 @@
 #include <linux/sunrpc/svc.h>
 #include <linux/lockd/nlm.h>
 #include <linux/lockd/lockd.h>
+#include <linux/kthread.h>
 
 #define NLMDBG_FACILITY		NLMDBG_SVCLOCK
 
@@ -226,8 +227,7 @@ failed:
 }
 
 /*
- * Delete a block. If the lock was cancelled or the grant callback
- * failed, unlock is set to 1.
+ * Delete a block.
  * It is the caller's responsibility to check whether the file
  * can be closed hereafter.
  */
@@ -887,7 +887,7 @@ nlmsvc_retry_blocked(void)
 	unsigned long	timeout = MAX_SCHEDULE_TIMEOUT;
 	struct nlm_block *block;
 
-	while (!list_empty(&nlm_blocked)) {
+	while (!list_empty(&nlm_blocked) && !kthread_should_stop()) {
 		block = list_entry(nlm_blocked.next, struct nlm_block, b_list);
 
 		if (block->b_when == NLM_NEVER)
diff --git a/fs/lockd/svcshare.c b/fs/lockd/svcshare.c
index 068886de4dda..b0ae07008700 100644
--- a/fs/lockd/svcshare.c
+++ b/fs/lockd/svcshare.c
@@ -71,7 +71,8 @@ nlmsvc_unshare_file(struct nlm_host *host, struct nlm_file *file,
 	struct nlm_share	*share, **shpp;
 	struct xdr_netobj	*oh = &argp->lock.oh;
 
-	for (shpp = &file->f_shares; (share = *shpp) != 0; shpp = &share->s_next) {
+	for (shpp = &file->f_shares; (share = *shpp) != NULL;
+						shpp = &share->s_next) {
 		if (share->s_host == host && nlm_cmp_owner(share, oh)) {
 			*shpp = share->s_next;
 			kfree(share);