path: root/fs/lockd/clntproc.c
Diffstat (limited to 'fs/lockd/clntproc.c')
-rw-r--r--	fs/lockd/clntproc.c	195
1 file changed, 74 insertions, 121 deletions
diff --git a/fs/lockd/clntproc.c b/fs/lockd/clntproc.c
index 7a239864b8bf..3f8ad7c54efa 100644
--- a/fs/lockd/clntproc.c
+++ b/fs/lockd/clntproc.c
@@ -152,9 +152,8 @@ static void nlmclnt_release_lockargs(struct nlm_rqst *req)
 int
 nlmclnt_proc(struct inode *inode, int cmd, struct file_lock *fl)
 {
-	struct nfs_server *nfssrv = NFS_SERVER(inode);
 	struct nlm_host *host;
-	struct nlm_rqst reqst, *call = &reqst;
+	struct nlm_rqst *call;
 	sigset_t oldset;
 	unsigned long flags;
 	int status, proto, vers;
@@ -168,23 +167,17 @@ nlmclnt_proc(struct inode *inode, int cmd, struct file_lock *fl)
 	/* Retrieve transport protocol from NFS client */
 	proto = NFS_CLIENT(inode)->cl_xprt->prot;
 
-	if (!(host = nlmclnt_lookup_host(NFS_ADDR(inode), proto, vers)))
+	host = nlmclnt_lookup_host(NFS_ADDR(inode), proto, vers);
+	if (host == NULL)
 		return -ENOLCK;
 
-	/* Create RPC client handle if not there, and copy soft
-	 * and intr flags from NFS client. */
-	if (host->h_rpcclnt == NULL) {
-		struct rpc_clnt *clnt;
+	call = nlm_alloc_call(host);
+	if (call == NULL)
+		return -ENOMEM;
 
-		/* Bind an rpc client to this host handle (does not
-		 * perform a portmapper lookup) */
-		if (!(clnt = nlm_bind_host(host))) {
-			status = -ENOLCK;
-			goto done;
-		}
-		clnt->cl_softrtry = nfssrv->client->cl_softrtry;
-		clnt->cl_intr = nfssrv->client->cl_intr;
-	}
+	nlmclnt_locks_init_private(fl, host);
+	/* Set up the argument struct */
+	nlmclnt_setlockargs(call, fl);
 
 	/* Keep the old signal mask */
 	spin_lock_irqsave(&current->sighand->siglock, flags);
@@ -197,26 +190,10 @@ nlmclnt_proc(struct inode *inode, int cmd, struct file_lock *fl)
 	    && (current->flags & PF_EXITING)) {
 		sigfillset(&current->blocked);	/* Mask all signals */
 		recalc_sigpending();
-		spin_unlock_irqrestore(&current->sighand->siglock, flags);
 
-		call = nlmclnt_alloc_call();
-		if (!call) {
-			status = -ENOMEM;
-			goto out_restore;
-		}
 		call->a_flags = RPC_TASK_ASYNC;
-	} else {
-		spin_unlock_irqrestore(&current->sighand->siglock, flags);
-		memset(call, 0, sizeof(*call));
-		locks_init_lock(&call->a_args.lock.fl);
-		locks_init_lock(&call->a_res.lock.fl);
 	}
-	call->a_host = host;
-
-	nlmclnt_locks_init_private(fl, host);
-
-	/* Set up the argument struct */
-	nlmclnt_setlockargs(call, fl);
+	spin_unlock_irqrestore(&current->sighand->siglock, flags);
 
 	if (IS_SETLK(cmd) || IS_SETLKW(cmd)) {
 		if (fl->fl_type != F_UNLCK) {
@@ -229,24 +206,26 @@ nlmclnt_proc(struct inode *inode, int cmd, struct file_lock *fl)
 	else
 		status = -EINVAL;
 
-out_restore:
+	fl->fl_ops->fl_release_private(fl);
+	fl->fl_ops = NULL;
+
 	spin_lock_irqsave(&current->sighand->siglock, flags);
 	current->blocked = oldset;
 	recalc_sigpending();
 	spin_unlock_irqrestore(&current->sighand->siglock, flags);
 
-done:
 	dprintk("lockd: clnt proc returns %d\n", status);
-	nlm_release_host(host);
 	return status;
 }
 EXPORT_SYMBOL(nlmclnt_proc);
 
 /*
  * Allocate an NLM RPC call struct
+ *
+ * Note: the caller must hold a reference to host. In case of failure,
+ * this reference will be released.
  */
-struct nlm_rqst *
-nlmclnt_alloc_call(void)
+struct nlm_rqst *nlm_alloc_call(struct nlm_host *host)
 {
 	struct nlm_rqst *call;
 
@@ -255,16 +234,30 @@ nlmclnt_alloc_call(void)
 		if (call != NULL) {
 			locks_init_lock(&call->a_args.lock.fl);
 			locks_init_lock(&call->a_res.lock.fl);
+			call->a_host = host;
 			return call;
 		}
 		if (signalled())
 			break;
-		printk("nlmclnt_alloc_call: failed, waiting for memory\n");
+		printk("nlm_alloc_call: failed, waiting for memory\n");
 		schedule_timeout_interruptible(5*HZ);
 	}
+	nlm_release_host(host);
 	return NULL;
 }
 
+void nlm_release_call(struct nlm_rqst *call)
+{
+	nlm_release_host(call->a_host);
+	nlmclnt_release_lockargs(call);
+	kfree(call);
+}
+
+static void nlmclnt_rpc_release(void *data)
+{
+	return nlm_release_call(data);
+}
+
 static int nlm_wait_on_grace(wait_queue_head_t *queue)
 {
 	DEFINE_WAIT(wait);
@@ -361,7 +354,7 @@ in_grace_period:
 /*
  * Generic NLM call, async version.
  */
-int nlmsvc_async_call(struct nlm_rqst *req, u32 proc, const struct rpc_call_ops *tk_ops)
+int nlm_async_call(struct nlm_rqst *req, u32 proc, const struct rpc_call_ops *tk_ops)
 {
 	struct nlm_host *host = req->a_host;
 	struct rpc_clnt *clnt;
@@ -369,48 +362,23 @@ int nlmsvc_async_call(struct nlm_rqst *req, u32 proc, const struct rpc_call_ops
 		.rpc_argp = &req->a_args,
 		.rpc_resp = &req->a_res,
 	};
-	int status;
-
-	dprintk("lockd: call procedure %d on %s (async)\n",
-			(int)proc, host->h_name);
-
-	/* If we have no RPC client yet, create one. */
-	if ((clnt = nlm_bind_host(host)) == NULL)
-		return -ENOLCK;
-	msg.rpc_proc = &clnt->cl_procinfo[proc];
-
-	/* bootstrap and kick off the async RPC call */
-	status = rpc_call_async(clnt, &msg, RPC_TASK_ASYNC, tk_ops, req);
-
-	return status;
-}
-
-static int nlmclnt_async_call(struct nlm_rqst *req, u32 proc, const struct rpc_call_ops *tk_ops)
-{
-	struct nlm_host *host = req->a_host;
-	struct rpc_clnt *clnt;
-	struct nlm_args *argp = &req->a_args;
-	struct nlm_res *resp = &req->a_res;
-	struct rpc_message msg = {
-		.rpc_argp = argp,
-		.rpc_resp = resp,
-	};
-	int status;
+	int status = -ENOLCK;
 
 	dprintk("lockd: call procedure %d on %s (async)\n",
 			(int)proc, host->h_name);
 
 	/* If we have no RPC client yet, create one. */
-	if ((clnt = nlm_bind_host(host)) == NULL)
-		return -ENOLCK;
+	clnt = nlm_bind_host(host);
+	if (clnt == NULL)
+		goto out_err;
 	msg.rpc_proc = &clnt->cl_procinfo[proc];
 
-	/* Increment host refcount */
-	nlm_get_host(host);
 	/* bootstrap and kick off the async RPC call */
 	status = rpc_call_async(clnt, &msg, RPC_TASK_ASYNC, tk_ops, req);
-	if (status < 0)
-		nlm_release_host(host);
+	if (status == 0)
+		return 0;
+out_err:
+	nlm_release_call(req);
 	return status;
 }
 
@@ -423,26 +391,28 @@ nlmclnt_test(struct nlm_rqst *req, struct file_lock *fl)
 	int status;
 
 	status = nlmclnt_call(req, NLMPROC_TEST);
-	nlmclnt_release_lockargs(req);
 	if (status < 0)
-		return status;
+		goto out;
 
-	status = req->a_res.status;
-	if (status == NLM_LCK_GRANTED) {
-		fl->fl_type = F_UNLCK;
-	} if (status == NLM_LCK_DENIED) {
-		/*
-		 * Report the conflicting lock back to the application.
-		 */
-		fl->fl_start = req->a_res.lock.fl.fl_start;
-		fl->fl_end = req->a_res.lock.fl.fl_start;
-		fl->fl_type = req->a_res.lock.fl.fl_type;
-		fl->fl_pid = 0;
-	} else {
-		return nlm_stat_to_errno(req->a_res.status);
+	switch (req->a_res.status) {
+		case NLM_LCK_GRANTED:
+			fl->fl_type = F_UNLCK;
+			break;
+		case NLM_LCK_DENIED:
+			/*
+			 * Report the conflicting lock back to the application.
+			 */
+			fl->fl_start = req->a_res.lock.fl.fl_start;
+			fl->fl_end = req->a_res.lock.fl.fl_start;
+			fl->fl_type = req->a_res.lock.fl.fl_type;
+			fl->fl_pid = 0;
+			break;
+		default:
+			status = nlm_stat_to_errno(req->a_res.status);
 	}
-
-	return 0;
+out:
+	nlm_release_call(req);
+	return status;
 }
 
 static void nlmclnt_locks_copy_lock(struct file_lock *new, struct file_lock *fl)
@@ -560,7 +530,7 @@ out_unblock:
 	if (resp->status == NLM_LCK_BLOCKED)
 		nlmclnt_cancel(host, req->a_args.block, fl);
 out:
-	nlmclnt_release_lockargs(req);
+	nlm_release_call(req);
 	return status;
 }
 
@@ -623,32 +593,24 @@ nlmclnt_unlock(struct nlm_rqst *req, struct file_lock *fl)
 	 */
 	do_vfs_lock(fl);
 
-	if (req->a_flags & RPC_TASK_ASYNC) {
-		status = nlmclnt_async_call(req, NLMPROC_UNLOCK,
-					&nlmclnt_unlock_ops);
-		/* Hrmf... Do the unlock early since locks_remove_posix()
-		 * really expects us to free the lock synchronously */
-		if (status < 0) {
-			nlmclnt_release_lockargs(req);
-			kfree(req);
-		}
-		return status;
-	}
+	if (req->a_flags & RPC_TASK_ASYNC)
+		return nlm_async_call(req, NLMPROC_UNLOCK, &nlmclnt_unlock_ops);
 
 	status = nlmclnt_call(req, NLMPROC_UNLOCK);
-	nlmclnt_release_lockargs(req);
 	if (status < 0)
-		return status;
+		goto out;
 
+	status = 0;
 	if (resp->status == NLM_LCK_GRANTED)
-		return 0;
+		goto out;
 
 	if (resp->status != NLM_LCK_DENIED_NOLOCKS)
 		printk("lockd: unexpected unlock status: %d\n", resp->status);
-
 	/* What to do now? I'm out of my depth... */
-
-	return -ENOLCK;
+	status = -ENOLCK;
+out:
+	nlm_release_call(req);
+	return status;
 }
 
 static void nlmclnt_unlock_callback(struct rpc_task *task, void *data)
@@ -670,9 +632,6 @@ static void nlmclnt_unlock_callback(struct rpc_task *task, void *data)
 	if (status != NLM_LCK_GRANTED)
 		printk(KERN_WARNING "lockd: unexpected unlock status: %d\n", status);
 die:
-	nlm_release_host(req->a_host);
-	nlmclnt_release_lockargs(req);
-	kfree(req);
 	return;
  retry_rebind:
 	nlm_rebind_host(req->a_host);
@@ -682,6 +641,7 @@ die:
 
 static const struct rpc_call_ops nlmclnt_unlock_ops = {
 	.rpc_call_done = nlmclnt_unlock_callback,
+	.rpc_release = nlmclnt_rpc_release,
 };
 
 /*
@@ -703,20 +663,15 @@ static int nlmclnt_cancel(struct nlm_host *host, int block, struct file_lock *fl
 	recalc_sigpending();
 	spin_unlock_irqrestore(&current->sighand->siglock, flags);
 
-	req = nlmclnt_alloc_call();
+	req = nlm_alloc_call(nlm_get_host(host));
 	if (!req)
 		return -ENOMEM;
-	req->a_host = host;
 	req->a_flags = RPC_TASK_ASYNC;
 
 	nlmclnt_setlockargs(req, fl);
 	req->a_args.block = block;
 
-	status = nlmclnt_async_call(req, NLMPROC_CANCEL, &nlmclnt_cancel_ops);
-	if (status < 0) {
-		nlmclnt_release_lockargs(req);
-		kfree(req);
-	}
+	status = nlm_async_call(req, NLMPROC_CANCEL, &nlmclnt_cancel_ops);
 
 	spin_lock_irqsave(&current->sighand->siglock, flags);
 	current->blocked = oldset;
@@ -757,9 +712,6 @@ static void nlmclnt_cancel_callback(struct rpc_task *task, void *data)
 	}
 
 die:
-	nlm_release_host(req->a_host);
-	nlmclnt_release_lockargs(req);
-	kfree(req);
 	return;
 
 retry_cancel:
@@ -773,6 +725,7 @@ retry_cancel:
 
 static const struct rpc_call_ops nlmclnt_cancel_ops = {
 	.rpc_call_done = nlmclnt_cancel_callback,
+	.rpc_release = nlmclnt_rpc_release,
 };
 
 /*
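
The sketch below is illustrative only and not part of the patch: demo_nlm_request() is a hypothetical caller, while every function and constant it uses appears in the diff above. It summarises the ownership contract the patch establishes: nlm_alloc_call() takes over the caller's host reference and releases it itself on allocation failure, synchronous callers free the request with nlm_release_call(), and asynchronous callers go through nlm_async_call(), which releases the request on error and otherwise defers cleanup to the .rpc_release callback (nlmclnt_rpc_release). In the file itself NLMPROC_CANCEL is only ever sent asynchronously; the synchronous branch here merely mirrors the pattern used by nlmclnt_test() and nlmclnt_unlock().

/*
 * Illustrative sketch, not part of this patch: a hypothetical caller
 * exercising the nlm_alloc_call()/nlm_release_call() ownership rules.
 */
static int demo_nlm_request(struct nlm_host *host, struct file_lock *fl, int async)
{
	struct nlm_rqst *req;
	int status;

	/* The reference taken here is handed over to nlm_alloc_call(),
	 * which drops it if the allocation fails. */
	req = nlm_alloc_call(nlm_get_host(host));
	if (req == NULL)
		return -ENOMEM;
	nlmclnt_setlockargs(req, fl);

	if (async) {
		req->a_flags = RPC_TASK_ASYNC;
		/* nlm_async_call() frees req via nlm_release_call() on error;
		 * on success the .rpc_release callback frees it once the RPC
		 * task completes. */
		return nlm_async_call(req, NLMPROC_CANCEL, &nlmclnt_cancel_ops);
	}

	status = nlmclnt_call(req, NLMPROC_CANCEL);
	/* Synchronous callers drop the request themselves. */
	nlm_release_call(req);
	return status;
}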