-rw-r--r--  fs/lockd/clntlock.c          39
-rw-r--r--  fs/lockd/clntproc.c          14
-rw-r--r--  fs/lockd/host.c               1
-rw-r--r--  include/linux/lockd/lockd.h   1
4 files changed, 40 insertions(+), 15 deletions(-)
diff --git a/fs/lockd/clntlock.c b/fs/lockd/clntlock.c
index bce744468708..52774feab93f 100644
--- a/fs/lockd/clntlock.c
+++ b/fs/lockd/clntlock.c
@@ -147,11 +147,10 @@ u32 nlmclnt_grant(const struct sockaddr_in *addr, const struct nlm_lock *lock)
  * Someone has sent us an SM_NOTIFY. Ensure we bind to the new port number,
  * that we mark locks for reclaiming, and that we bump the pseudo NSM state.
  */
-static inline
-void nlmclnt_prepare_reclaim(struct nlm_host *host, u32 newstate)
+static void nlmclnt_prepare_reclaim(struct nlm_host *host)
 {
+        down_write(&host->h_rwsem);
         host->h_monitored = 0;
-        host->h_nsmstate = newstate;
         host->h_state++;
         host->h_nextrebind = 0;
         nlm_rebind_host(host);
@@ -164,6 +163,13 @@ void nlmclnt_prepare_reclaim(struct nlm_host *host, u32 newstate)
         dprintk("NLM: reclaiming locks for host %s", host->h_name);
 }
 
+static void nlmclnt_finish_reclaim(struct nlm_host *host)
+{
+        host->h_reclaiming = 0;
+        up_write(&host->h_rwsem);
+        dprintk("NLM: done reclaiming locks for host %s", host->h_name);
+}
+
 /*
  * Reclaim all locks on server host. We do this by spawning a separate
  * reclaimer thread.
@@ -171,12 +177,10 @@ void nlmclnt_prepare_reclaim(struct nlm_host *host, u32 newstate)
 void
 nlmclnt_recovery(struct nlm_host *host, u32 newstate)
 {
-        if (host->h_reclaiming++) {
-                if (host->h_nsmstate == newstate)
-                        return;
-                nlmclnt_prepare_reclaim(host, newstate);
-        } else {
-                nlmclnt_prepare_reclaim(host, newstate);
+        if (host->h_nsmstate == newstate)
+                return;
+        host->h_nsmstate = newstate;
+        if (!host->h_reclaiming++) {
                 nlm_get_host(host);
                 __module_get(THIS_MODULE);
                 if (kernel_thread(reclaimer, host, CLONE_KERNEL) < 0)
@@ -190,6 +194,7 @@ reclaimer(void *ptr)
         struct nlm_host *host = (struct nlm_host *) ptr;
         struct nlm_wait *block;
         struct file_lock *fl, *next;
+        u32 nsmstate;
 
         daemonize("%s-reclaim", host->h_name);
         allow_signal(SIGKILL);
@@ -199,19 +204,25 @@ reclaimer(void *ptr)
         lock_kernel();
         lockd_up();
 
+        nlmclnt_prepare_reclaim(host);
         /* First, reclaim all locks that have been marked. */
 restart:
+        nsmstate = host->h_nsmstate;
         list_for_each_entry_safe(fl, next, &host->h_reclaim, fl_u.nfs_fl.list) {
                 list_del_init(&fl->fl_u.nfs_fl.list);
 
                 if (signalled())
                         continue;
-                if (nlmclnt_reclaim(host, fl) == 0)
-                        list_add_tail(&fl->fl_u.nfs_fl.list, &host->h_granted);
-                goto restart;
+                if (nlmclnt_reclaim(host, fl) != 0)
+                        continue;
+                list_add_tail(&fl->fl_u.nfs_fl.list, &host->h_granted);
+                if (host->h_nsmstate != nsmstate) {
+                        /* Argh! The server rebooted again! */
+                        list_splice_init(&host->h_granted, &host->h_reclaim);
+                        goto restart;
+                }
         }
-
-        host->h_reclaiming = 0;
+        nlmclnt_finish_reclaim(host);
 
         /* Now, wake up all processes that sleep on a blocked lock */
         list_for_each_entry(block, &nlm_blocked, b_list) {
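
The clntlock.c hunks above turn reclaim into a writer-side critical section: nlmclnt_prepare_reclaim() now takes h_rwsem for writing before bumping the pseudo NSM state, the new nlmclnt_finish_reclaim() drops it once every marked lock has been re-sent, and the lock/unlock paths (see the clntproc.c hunks below) only ever take the semaphore for reading. The following is a minimal userspace sketch of that reader/writer split, assuming a POSIX rwlock as a stand-in for the kernel rw_semaphore; the model_* names are hypothetical and are not lockd APIs.

/*
 * Illustrative userspace model of the h_rwsem scheme; build with -lpthread.
 * model_host, model_reclaim and model_lock are hypothetical names that only
 * mirror the shape of the kernel code above.
 */
#include <pthread.h>
#include <stdio.h>

struct model_host {
        pthread_rwlock_t rwsem;   /* stands in for host->h_rwsem */
        unsigned int state;       /* stands in for host->h_state */
};

/* Reclaimer: exclusive writer for the whole reclaim pass. */
static void *model_reclaim(void *arg)
{
        struct model_host *host = arg;

        pthread_rwlock_wrlock(&host->rwsem);   /* nlmclnt_prepare_reclaim() */
        host->state++;                          /* bump pseudo NSM state */
        /* ... re-send a LOCK request for every lock on the reclaim list ... */
        pthread_rwlock_unlock(&host->rwsem);    /* nlmclnt_finish_reclaim() */
        return NULL;
}

/* Lock/unlock paths: shared readers, excluded while reclaim runs. */
static void model_lock(struct model_host *host)
{
        pthread_rwlock_rdlock(&host->rwsem);
        /* ... safe to touch local lock state: no reclaim is in progress ... */
        pthread_rwlock_unlock(&host->rwsem);
}

int main(void)
{
        struct model_host host = { .state = 0 };
        pthread_t t;

        pthread_rwlock_init(&host.rwsem, NULL);
        pthread_create(&t, NULL, model_reclaim, &host);
        model_lock(&host);
        pthread_join(t, NULL);
        pthread_rwlock_destroy(&host.rwsem);
        printf("state after reclaim: %u\n", host.state);
        return 0;
}

The point of the exclusive pass is that no lock or unlock call can observe the per-host lock state while the reclaimer is rewriting it; readers simply wait until reclaim finishes.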
diff --git a/fs/lockd/clntproc.c b/fs/lockd/clntproc.c
index f96e38155b5c..4db62098d3f4 100644
--- a/fs/lockd/clntproc.c
+++ b/fs/lockd/clntproc.c
@@ -508,7 +508,10 @@ nlmclnt_lock(struct nlm_rqst *req, struct file_lock *fl)
         }
 
         block = nlmclnt_prepare_block(host, fl);
+again:
         for(;;) {
+                /* Reboot protection */
+                fl->fl_u.nfs_fl.state = host->h_state;
                 status = nlmclnt_call(req, NLMPROC_LOCK);
                 if (status < 0)
                         goto out_unblock;
@@ -531,10 +534,16 @@ nlmclnt_lock(struct nlm_rqst *req, struct file_lock *fl)
         }
 
         if (resp->status == NLM_LCK_GRANTED) {
-                fl->fl_u.nfs_fl.state = host->h_state;
+                down_read(&host->h_rwsem);
+                /* Check whether or not the server has rebooted */
+                if (fl->fl_u.nfs_fl.state != host->h_state) {
+                        up_read(&host->h_rwsem);
+                        goto again;
+                }
                 fl->fl_flags |= FL_SLEEP;
                 /* Ensure the resulting lock will get added to granted list */
                 do_vfs_lock(fl);
+                up_read(&host->h_rwsem);
         }
         status = nlm_stat_to_errno(resp->status);
 out_unblock:
@@ -596,6 +605,7 @@ nlmclnt_reclaim(struct nlm_host *host, struct file_lock *fl)
 static int
 nlmclnt_unlock(struct nlm_rqst *req, struct file_lock *fl)
 {
+        struct nlm_host *host = req->a_host;
         struct nlm_res  *resp = &req->a_res;
         int             status;
 
@@ -604,7 +614,9 @@ nlmclnt_unlock(struct nlm_rqst *req, struct file_lock *fl)
          * request, or to deny it with NLM_LCK_DENIED_GRACE_PERIOD. In either
          * case, we want to unlock.
          */
+        down_read(&host->h_rwsem);
         do_vfs_lock(fl);
+        up_read(&host->h_rwsem);
 
         if (req->a_flags & RPC_TASK_ASYNC)
                 return nlm_async_call(req, NLMPROC_UNLOCK, &nlmclnt_unlock_ops);
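
The nlmclnt_lock() hunk above combines the semaphore with an optimistic retry: the request records host->h_state before each NLMPROC_LOCK call, and when the server answers GRANTED the client re-reads the counter under the read lock; if a reclaim bumped it in the meantime, the grant may predate the reboot, so the code jumps back to again: and re-sends the request. A compiling userspace sketch of that pattern follows, under the same assumptions as the previous model; send_lock_request() is a hypothetical stub for the RPC round trip.

/* Minimal sketch of the record/re-check/retry pattern; build with -lpthread. */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_rwlock_t rwsem = PTHREAD_RWLOCK_INITIALIZER; /* h_rwsem */
static unsigned int host_state;                             /* h_state */

/* Stub standing in for the NLMPROC_LOCK round trip; always "GRANTED". */
static bool send_lock_request(void) { return true; }

static void lock_with_reboot_protection(void)
{
        unsigned int seen;
again:
        seen = host_state;            /* remember the state before the call */
        if (!send_lock_request())
                return;
        pthread_rwlock_rdlock(&rwsem);
        if (seen != host_state) {     /* a reclaim ran while we waited */
                pthread_rwlock_unlock(&rwsem);
                goto again;           /* the grant may predate the reboot */
        }
        /* ... record the granted lock locally, still under the read lock ... */
        pthread_rwlock_unlock(&rwsem);
}

int main(void)
{
        lock_with_reboot_protection();
        printf("lock recorded against state %u\n", host_state);
        return 0;
}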
diff --git a/fs/lockd/host.c b/fs/lockd/host.c
index 5242743c9403..38b0e8a1aec0 100644
--- a/fs/lockd/host.c
+++ b/fs/lockd/host.c
@@ -117,6 +117,7 @@ nlm_lookup_host(int server, struct sockaddr_in *sin,
         host->h_expires = jiffies + NLM_HOST_EXPIRE;
         atomic_set(&host->h_count, 1);
         init_waitqueue_head(&host->h_gracewait);
+        init_rwsem(&host->h_rwsem);
         host->h_state = 0;              /* pseudo NSM state */
         host->h_nsmstate = 0;           /* real NSM state */
         host->h_server = server;
diff --git a/include/linux/lockd/lockd.h b/include/linux/lockd/lockd.h
index a6c1a33e5ae3..6b2684763fc7 100644
--- a/include/linux/lockd/lockd.h
+++ b/include/linux/lockd/lockd.h
@@ -50,6 +50,7 @@ struct nlm_host {
                                 h_killed : 1,
                                 h_monitored : 1;
         wait_queue_head_t       h_gracewait;    /* wait while reclaiming */
+        struct rw_semaphore     h_rwsem;        /* Reboot recovery lock */
         u32                     h_state;        /* pseudo-state counter */
         u32                     h_nsmstate;     /* true remote NSM state */
         u32                     h_pidcount;     /* Pseudopids */