author    J. Bruce Fields <bfields@citi.umich.edu>  2010-03-03 14:52:55 -0500
committer J. Bruce Fields <bfields@citi.umich.edu>  2010-04-22 11:34:01 -0400
commit    b5a1a81e5c25fb6bb3fdc1812ba69ff6ab638fcf (patch)
tree      c524a75d111f4060eb985161478362ac18c17169 /fs/nfsd/nfs4callback.c
parent    3c4ab2aaa90826060b1e8d4036f9bb8325f8759e (diff)
nfsd4: don't sleep in lease-break callback
The NFSv4 server's fl_break callback can sleep (dropping the BKL), in order to allocate a new rpc task to send a recall to the client.  As far as I can tell this doesn't cause any races in the current code, but the analysis is difficult.  Also, the sleep here may complicate the move away from the BKL.

So, just schedule some work to do the job for us instead.  The work will later also prove useful for restarting a call after the callback information is changed.

Signed-off-by: J. Bruce Fields <bfields@citi.umich.edu>
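For illustration only, a minimal sketch of the caller side this change assumes (not part of this diff; the real lease-break hook lives in fs/nfsd/nfs4state.c, and the function name below is hypothetical). The point is that the ->fl_break path no longer blocks: it just takes a reference and queues work, and the RPC is set up later in workqueue context. The sketch also assumes dp->dl_recall.cb_work was initialized with INIT_WORK(..., nfsd4_do_callback_rpc) when the delegation was allocated.

/* Hypothetical caller-side sketch; the real ->fl_break hook is in nfs4state.c. */
static void example_break_delegation(struct nfs4_delegation *dp)
{
        atomic_inc(&dp->dl_count);      /* per the comment below: "called with dp->dl_count inc'ed" */
        nfsd4_cb_recall(dp);            /* non-blocking now: just queue_work() onto callback_wq */
}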
Diffstat (limited to 'fs/nfsd/nfs4callback.c')
-rw-r--r--  fs/nfsd/nfs4callback.c | 54
1 file changed, 52 insertions(+), 2 deletions(-)
diff --git a/fs/nfsd/nfs4callback.c b/fs/nfsd/nfs4callback.c
index 91eb2ea9ef0a..e078c747f49d 100644
--- a/fs/nfsd/nfs4callback.c
+++ b/fs/nfsd/nfs4callback.c
@@ -32,6 +32,7 @@
  */
 
 #include <linux/sunrpc/clnt.h>
+#include <linux/sunrpc/svc_xprt.h>
 #include "nfsd.h"
 #include "state.h"
 
@@ -692,11 +693,41 @@ static const struct rpc_call_ops nfsd4_cb_recall_ops = {
 	.rpc_release = nfsd4_cb_recall_release,
 };
 
+static struct workqueue_struct *callback_wq;
+
+int nfsd4_create_callback_queue(void)
+{
+	callback_wq = create_singlethread_workqueue("nfsd4_callbacks");
+	if (!callback_wq)
+		return -ENOMEM;
+	return 0;
+}
+
+void nfsd4_destroy_callback_queue(void)
+{
+	destroy_workqueue(callback_wq);
+}
+
+void nfsd4_set_callback_client(struct nfs4_client *clp, struct rpc_clnt
+*new)
+{
+	struct rpc_clnt *old = clp->cl_cb_conn.cb_client;
+
+	clp->cl_cb_conn.cb_client = new;
+	/*
+	 * After this, any work that saw the old value of cb_client will
+	 * be gone:
+	 */
+	flush_workqueue(callback_wq);
+	/* So we can safely shut it down: */
+	if (old)
+		rpc_shutdown_client(old);
+}
+
 /*
  * called with dp->dl_count inc'ed.
  */
-void
-nfsd4_cb_recall(struct nfs4_delegation *dp)
+static void _nfsd4_cb_recall(struct nfs4_delegation *dp)
 {
 	struct nfs4_client *clp = dp->dl_client;
 	struct rpc_clnt *clnt = clp->cl_cb_conn.cb_client;
@@ -707,6 +738,9 @@ nfsd4_cb_recall(struct nfs4_delegation *dp)
 	};
 	int status;
 
+	if (clnt == NULL)
+		return; /* Client is shutting down; give up. */
+
 	args->args_op = dp;
 	msg.rpc_argp = args;
 	dp->dl_retries = 1;
@@ -717,3 +751,19 @@ nfsd4_cb_recall(struct nfs4_delegation *dp)
 		nfs4_put_delegation(dp);
 	}
 }
+
+void nfsd4_do_callback_rpc(struct work_struct *w)
+{
+	/* XXX: for now, just send off delegation recall. */
+	/* In future, generalize to handle any sort of callback. */
+	struct nfsd4_callback *c = container_of(w, struct nfsd4_callback, cb_work);
+	struct nfs4_delegation *dp = container_of(c, struct nfs4_delegation, dl_recall);
+
+	_nfsd4_cb_recall(dp);
+}
+
+
+void nfsd4_cb_recall(struct nfs4_delegation *dp)
+{
+	queue_work(callback_wq, &dp->dl_recall.cb_work);
+}
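
As a usage note, the following is a sketch under assumptions (call-site names are hypothetical; the real callers are expected in fs/nfsd/nfs4state.c) of the intended lifecycle of the new helpers: the workqueue is created once at startup, nfsd4_set_callback_client() is used whenever a client's callback rpc_clnt changes so that the internal flush_workqueue() guarantees no queued recall still references the old client before rpc_shutdown_client(), and the queue is destroyed at shutdown.

/* Sketch only: assumed call sites, shown to illustrate the new interface. */
int example_nfs4_state_start(void)
{
        int ret;

        ret = nfsd4_create_callback_queue();    /* allocate callback_wq once */
        if (ret)
                return ret;
        /* ... bring up the rest of the NFSv4 state machinery ... */
        return 0;
}

void example_expire_client(struct nfs4_client *clp)
{
        /*
         * Swap out the callback client; the flush inside the helper ensures
         * no queued recall still holds the old rpc_clnt when it is shut down.
         */
        nfsd4_set_callback_client(clp, NULL);
        /* ... release the client's other state ... */
}

void example_nfs4_state_shutdown(void)
{
        nfsd4_destroy_callback_queue();         /* after all clients are gone */
}

Passing NULL through nfsd4_set_callback_client() is also why _nfsd4_cb_recall() tolerates a NULL cb_client: a recall queued just before the swap may run after it and simply gives up.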