author	Trond Myklebust <Trond.Myklebust@netapp.com>	2006-06-09 09:40:27 -0400
committer	Trond Myklebust <Trond.Myklebust@netapp.com>	2006-06-09 09:40:27 -0400
commit	28df955a2ad484d602314b30183ea8496a9aa34a (patch)
tree	c62632b2a0a49df114283f10764244c1b1b5f506 /fs/lockd
parent	5046791417dcac1ba126b77b8062af15a2f0b8e1 (diff)
NLM: Fix reclaim races
Currently it is possible for a task to remove its locks at the same time
as the NLM recovery thread is trying to recover them. This quickly leads
to an Oops.

Protect the locks using an rw semaphore while they are being recovered.

Signed-off-by: Trond Myklebust <Trond.Myklebust@netapp.com>
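The locking pattern the patch relies on can be sketched as follows. This is a minimal illustration only, with hypothetical demo_* names rather than the real fs/lockd declarations: the recovery thread holds the per-host semaphore for writing for the whole reclaim pass, while ordinary lock and unlock paths take it for reading around their VFS lock updates, so a task can no longer drop its locks while the reclaimer is walking them.

#include <linux/rwsem.h>

/* Hypothetical stand-in for struct nlm_host; only the fields needed to
 * show the pattern are included. The real structure gains an h_rwsem
 * field initialized with init_rwsem() in nlm_lookup_host(). */
struct demo_host {
	struct rw_semaphore	h_rwsem;	/* guards granted/reclaim lists */
	int			h_reclaiming;
};

/* Recovery side: exclude every lock/unlock caller while the lists are
 * being rebuilt. */
static void demo_reclaim_locks(struct demo_host *host)
{
	down_write(&host->h_rwsem);
	/* ... re-establish each granted lock with the server ... */
	host->h_reclaiming = 0;
	up_write(&host->h_rwsem);
}

/* Regular path: readers may run concurrently with each other, but never
 * overlap a reclaim in progress. */
static void demo_unlock(struct demo_host *host)
{
	down_read(&host->h_rwsem);
	/* ... drop the local VFS lock state here ... */
	up_read(&host->h_rwsem);
}

A read/write semaphore rather than a spinlock fits here because the lock paths may sleep (do_vfs_lock() can block) and independent lock requests can still proceed in parallel; only the recovery pass needs exclusive access.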
Diffstat (limited to 'fs/lockd')
-rw-r--r--  fs/lockd/clntlock.c | 39
-rw-r--r--  fs/lockd/clntproc.c | 14
-rw-r--r--  fs/lockd/host.c     |  1
3 files changed, 39 insertions(+), 15 deletions(-)
diff --git a/fs/lockd/clntlock.c b/fs/lockd/clntlock.c
index bce744468708..52774feab93f 100644
--- a/fs/lockd/clntlock.c
+++ b/fs/lockd/clntlock.c
@@ -147,11 +147,10 @@ u32 nlmclnt_grant(const struct sockaddr_in *addr, const struct nlm_lock *lock)
  * Someone has sent us an SM_NOTIFY. Ensure we bind to the new port number,
  * that we mark locks for reclaiming, and that we bump the pseudo NSM state.
  */
-static inline
-void nlmclnt_prepare_reclaim(struct nlm_host *host, u32 newstate)
+static void nlmclnt_prepare_reclaim(struct nlm_host *host)
 {
+	down_write(&host->h_rwsem);
 	host->h_monitored = 0;
-	host->h_nsmstate = newstate;
 	host->h_state++;
 	host->h_nextrebind = 0;
 	nlm_rebind_host(host);
@@ -164,6 +163,13 @@ void nlmclnt_prepare_reclaim(struct nlm_host *host, u32 newstate)
 	dprintk("NLM: reclaiming locks for host %s", host->h_name);
 }
 
+static void nlmclnt_finish_reclaim(struct nlm_host *host)
+{
+	host->h_reclaiming = 0;
+	up_write(&host->h_rwsem);
+	dprintk("NLM: done reclaiming locks for host %s", host->h_name);
+}
+
 /*
  * Reclaim all locks on server host. We do this by spawning a separate
  * reclaimer thread.
@@ -171,12 +177,10 @@ void nlmclnt_prepare_reclaim(struct nlm_host *host, u32 newstate)
 void
 nlmclnt_recovery(struct nlm_host *host, u32 newstate)
 {
-	if (host->h_reclaiming++) {
-		if (host->h_nsmstate == newstate)
-			return;
-		nlmclnt_prepare_reclaim(host, newstate);
-	} else {
-		nlmclnt_prepare_reclaim(host, newstate);
+	if (host->h_nsmstate == newstate)
+		return;
+	host->h_nsmstate = newstate;
+	if (!host->h_reclaiming++) {
 		nlm_get_host(host);
 		__module_get(THIS_MODULE);
 		if (kernel_thread(reclaimer, host, CLONE_KERNEL) < 0)
@@ -190,6 +194,7 @@ reclaimer(void *ptr)
 	struct nlm_host *host = (struct nlm_host *) ptr;
 	struct nlm_wait *block;
 	struct file_lock *fl, *next;
+	u32 nsmstate;
 
 	daemonize("%s-reclaim", host->h_name);
 	allow_signal(SIGKILL);
@@ -199,19 +204,25 @@
 	lock_kernel();
 	lockd_up();
 
+	nlmclnt_prepare_reclaim(host);
 	/* First, reclaim all locks that have been marked. */
 restart:
+	nsmstate = host->h_nsmstate;
 	list_for_each_entry_safe(fl, next, &host->h_reclaim, fl_u.nfs_fl.list) {
 		list_del_init(&fl->fl_u.nfs_fl.list);
 
 		if (signalled())
 			continue;
-		if (nlmclnt_reclaim(host, fl) == 0)
-			list_add_tail(&fl->fl_u.nfs_fl.list, &host->h_granted);
-		goto restart;
+		if (nlmclnt_reclaim(host, fl) != 0)
+			continue;
+		list_add_tail(&fl->fl_u.nfs_fl.list, &host->h_granted);
+		if (host->h_nsmstate != nsmstate) {
+			/* Argh! The server rebooted again! */
+			list_splice_init(&host->h_granted, &host->h_reclaim);
+			goto restart;
+		}
 	}
-
-	host->h_reclaiming = 0;
+	nlmclnt_finish_reclaim(host);
 
 	/* Now, wake up all processes that sleep on a blocked lock */
 	list_for_each_entry(block, &nlm_blocked, b_list) {
diff --git a/fs/lockd/clntproc.c b/fs/lockd/clntproc.c
index f96e38155b5c..4db62098d3f4 100644
--- a/fs/lockd/clntproc.c
+++ b/fs/lockd/clntproc.c
@@ -508,7 +508,10 @@ nlmclnt_lock(struct nlm_rqst *req, struct file_lock *fl)
 	}
 
 	block = nlmclnt_prepare_block(host, fl);
+again:
 	for(;;) {
+		/* Reboot protection */
+		fl->fl_u.nfs_fl.state = host->h_state;
 		status = nlmclnt_call(req, NLMPROC_LOCK);
 		if (status < 0)
 			goto out_unblock;
@@ -531,10 +534,16 @@ nlmclnt_lock(struct nlm_rqst *req, struct file_lock *fl)
 	}
 
 	if (resp->status == NLM_LCK_GRANTED) {
-		fl->fl_u.nfs_fl.state = host->h_state;
+		down_read(&host->h_rwsem);
+		/* Check whether or not the server has rebooted */
+		if (fl->fl_u.nfs_fl.state != host->h_state) {
+			up_read(&host->h_rwsem);
+			goto again;
+		}
 		fl->fl_flags |= FL_SLEEP;
 		/* Ensure the resulting lock will get added to granted list */
 		do_vfs_lock(fl);
+		up_read(&host->h_rwsem);
 	}
 	status = nlm_stat_to_errno(resp->status);
 out_unblock:
@@ -596,6 +605,7 @@ nlmclnt_reclaim(struct nlm_host *host, struct file_lock *fl)
 static int
 nlmclnt_unlock(struct nlm_rqst *req, struct file_lock *fl)
 {
+	struct nlm_host *host = req->a_host;
 	struct nlm_res *resp = &req->a_res;
 	int status;
 
@@ -604,7 +614,9 @@ nlmclnt_unlock(struct nlm_rqst *req, struct file_lock *fl)
 	 * request, or to deny it with NLM_LCK_DENIED_GRACE_PERIOD. In either
 	 * case, we want to unlock.
 	 */
+	down_read(&host->h_rwsem);
 	do_vfs_lock(fl);
+	up_read(&host->h_rwsem);
 
 	if (req->a_flags & RPC_TASK_ASYNC)
 		return nlm_async_call(req, NLMPROC_UNLOCK, &nlmclnt_unlock_ops);
diff --git a/fs/lockd/host.c b/fs/lockd/host.c
index 5242743c9403..38b0e8a1aec0 100644
--- a/fs/lockd/host.c
+++ b/fs/lockd/host.c
@@ -117,6 +117,7 @@ nlm_lookup_host(int server, struct sockaddr_in *sin,
 	host->h_expires = jiffies + NLM_HOST_EXPIRE;
 	atomic_set(&host->h_count, 1);
 	init_waitqueue_head(&host->h_gracewait);
+	init_rwsem(&host->h_rwsem);
 	host->h_state = 0;	/* pseudo NSM state */
 	host->h_nsmstate = 0;	/* real NSM state */
 	host->h_server = server;