author     Trond Myklebust <Trond.Myklebust@netapp.com>    2008-03-28 16:04:36 -0400
committer  Trond Myklebust <Trond.Myklebust@netapp.com>    2008-04-19 16:53:39 -0400
commit     dc9d8d048168ff61c458bec06b28996cb90b182a
tree       f8160237a79f96837696bc7dfa91efc96e9b84ed /fs/lockd
parent     5e7f37a76fa5b604949020b7317962262812b2dd
NLM/lockd: convert __nlm_async_call to use rpc_run_task()
Peter Staubach comments:
> In the course of investigating testing failures in the locking phase of
> the Connectathon testsuite, I discovered a couple of things. One was
> that one of the tests in the locking tests was racy when it didn't seem
> to need to be and two, that the NFS client asynchronously releases locks
> when a process is exiting.
...
> The Single UNIX Specification Version 3 specifies that: "All locks
> associated with a file for a given process shall be removed when a file
> descriptor for that file is closed by that process or the process holding
> that file descriptor terminates.".
>
> This does not specify whether those locks must be released prior to the
> completion of the exit processing for the process or not. However,
> general assumptions seem to be that those locks will be released. This
> leads to more deterministic behavior under normal circumstances.
The following patch converts the NFSv2/v3 locking code to use the same
mechanism as NFSv4 for sending asynchronous RPC calls and then waiting for
them to complete. This ensures that the UNLOCK and CANCEL RPC calls will
complete even if the user interrupts the call, yet satisfies the
above request for synchronous behaviour on process exit.
Signed-off-by: Trond Myklebust <Trond.Myklebust@netapp.com>
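
For reference, the core of the mechanism being adopted here is the rpc_run_task() / rpc_wait_for_completion_task() pairing from the sunrpc layer. The sketch below is illustrative only: the helper name example_async_call and its argument list are made up, while the rpc_task_setup fields and rpc_* calls are the ones used in the diff below. The task is started with RPC_TASK_ASYNC so it runs to completion on its own; the caller then waits for it, so an interrupted wait does not cancel the RPC itself.

/*
 * Illustrative sketch only -- example_async_call is a hypothetical helper;
 * the rpc_* interfaces and rpc_task_setup fields are those used in the
 * diff below.
 */
#include <linux/err.h>
#include <linux/sunrpc/clnt.h>
#include <linux/sunrpc/sched.h>

static int example_async_call(struct rpc_clnt *clnt, struct rpc_message *msg,
                              const struct rpc_call_ops *ops, void *data)
{
        struct rpc_task_setup task_setup_data = {
                .rpc_client = clnt,
                .rpc_message = msg,
                .callback_ops = ops,
                .callback_data = data,
                .flags = RPC_TASK_ASYNC,        /* task completes on its own */
        };
        struct rpc_task *task;
        int err;

        task = rpc_run_task(&task_setup_data);
        if (IS_ERR(task))
                return PTR_ERR(task);
        /*
         * The wait may be interrupted, but the asynchronous task keeps
         * running and its callback_ops are still invoked on completion.
         */
        err = rpc_wait_for_completion_task(task);
        rpc_put_task(task);
        return err;
}
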
Diffstat (limited to 'fs/lockd')
-rw-r--r--  fs/lockd/clntproc.c | 64
1 file changed, 54 insertions(+), 10 deletions(-)
diff --git a/fs/lockd/clntproc.c b/fs/lockd/clntproc.c
index 749eb5328cb0..a34b709006a1 100644
--- a/fs/lockd/clntproc.c
+++ b/fs/lockd/clntproc.c
@@ -346,10 +346,16 @@ in_grace_period:
 /*
  * Generic NLM call, async version.
  */
-static int __nlm_async_call(struct nlm_rqst *req, u32 proc, struct rpc_message *msg, const struct rpc_call_ops *tk_ops)
+static struct rpc_task *__nlm_async_call(struct nlm_rqst *req, u32 proc, struct rpc_message *msg, const struct rpc_call_ops *tk_ops)
 {
        struct nlm_host *host = req->a_host;
        struct rpc_clnt *clnt;
+       struct rpc_task_setup task_setup_data = {
+               .rpc_message = msg,
+               .callback_ops = tk_ops,
+               .callback_data = req,
+               .flags = RPC_TASK_ASYNC,
+       };

        dprintk("lockd: call procedure %d on %s (async)\n",
                        (int)proc, host->h_name);
@@ -359,21 +365,36 @@ static int __nlm_async_call(struct nlm_rqst *req, u32 proc, struct rpc_message *
        if (clnt == NULL)
                goto out_err;
        msg->rpc_proc = &clnt->cl_procinfo[proc];
+       task_setup_data.rpc_client = clnt;

        /* bootstrap and kick off the async RPC call */
-       return rpc_call_async(clnt, msg, RPC_TASK_ASYNC, tk_ops, req);
+       return rpc_run_task(&task_setup_data);
 out_err:
        tk_ops->rpc_release(req);
-       return -ENOLCK;
+       return ERR_PTR(-ENOLCK);
+}
+
+static int nlm_do_async_call(struct nlm_rqst *req, u32 proc, struct rpc_message *msg, const struct rpc_call_ops *tk_ops)
+{
+       struct rpc_task *task;
+
+       task = __nlm_async_call(req, proc, msg, tk_ops);
+       if (IS_ERR(task))
+               return PTR_ERR(task);
+       rpc_put_task(task);
+       return 0;
 }

+/*
+ * NLM asynchronous call.
+ */
 int nlm_async_call(struct nlm_rqst *req, u32 proc, const struct rpc_call_ops *tk_ops)
 {
        struct rpc_message msg = {
                .rpc_argp = &req->a_args,
                .rpc_resp = &req->a_res,
        };
-       return __nlm_async_call(req, proc, &msg, tk_ops);
+       return nlm_do_async_call(req, proc, &msg, tk_ops);
 }

 int nlm_async_reply(struct nlm_rqst *req, u32 proc, const struct rpc_call_ops *tk_ops)
@@ -381,7 +402,32 @@ int nlm_async_reply(struct nlm_rqst *req, u32 proc, const struct rpc_call_ops *t
        struct rpc_message msg = {
                .rpc_argp = &req->a_res,
        };
-       return __nlm_async_call(req, proc, &msg, tk_ops);
+       return nlm_do_async_call(req, proc, &msg, tk_ops);
+}
+
+/*
+ * NLM client asynchronous call.
+ *
+ * Note that although the calls are asynchronous, and are therefore
+ *      guaranteed to complete, we still always attempt to wait for
+ *      completion in order to be able to correctly track the lock
+ *      state.
+ */
+static int nlmclnt_async_call(struct nlm_rqst *req, u32 proc, const struct rpc_call_ops *tk_ops)
+{
+       struct rpc_message msg = {
+               .rpc_argp = &req->a_args,
+               .rpc_resp = &req->a_res,
+       };
+       struct rpc_task *task;
+       int err;
+
+       task = __nlm_async_call(req, proc, &msg, tk_ops);
+       if (IS_ERR(task))
+               return PTR_ERR(task);
+       err = rpc_wait_for_completion_task(task);
+       rpc_put_task(task);
+       return err;
 }

 /*
@@ -620,10 +666,8 @@ nlmclnt_unlock(struct nlm_rqst *req, struct file_lock *fl)
                goto out;
        }

-       if (req->a_flags & RPC_TASK_ASYNC)
-               return nlm_async_call(req, NLMPROC_UNLOCK, &nlmclnt_unlock_ops);
-
-       status = nlmclnt_call(req, NLMPROC_UNLOCK);
+       atomic_inc(&req->a_count);
+       status = nlmclnt_async_call(req, NLMPROC_UNLOCK, &nlmclnt_unlock_ops);
        if (status < 0)
                goto out;

@@ -697,7 +741,7 @@ static int nlmclnt_cancel(struct nlm_host *host, int block, struct file_lock *fl
        nlmclnt_setlockargs(req, fl);
        req->a_args.block = block;

-       status = nlm_async_call(req, NLMPROC_CANCEL, &nlmclnt_cancel_ops);
+       status = nlmclnt_async_call(req, NLMPROC_CANCEL, &nlmclnt_cancel_ops);

        spin_lock_irqsave(&current->sighand->siglock, flags);
        current->blocked = oldset;