about | summary | refs | log | tree | commit | diff | stats
path: root/fs/nfsd/nfs4callback.c
diff options
context:
space:
mode:
authorChristoph Hellwig <hch@lst.de>2014-09-24 06:19:18 -0400
committerJ. Bruce Fields <bfields@redhat.com>2014-09-26 16:29:28 -0400
commitf0b5de1b6b8b66552bcc7ae692f45940d411cf05 (patch)
treef3529730175ae2e4cca4ae1e2e99b4a6f51e1f3f /fs/nfsd/nfs4callback.c
parent326129d02aea8efa1dfd1a210653a744e7c85239 (diff)
nfsd: split nfsd4_callback initialization and use
Split out initializing the nfs4_callback structure from using it. For the NULL callback this gets rid of tons of pointless re-initializations. Note that I don't quite understand what protects us from running multiple NULL callbacks at the same time, but at least this change doesn't make it worse. Signed-off-by: Christoph Hellwig <hch@lst.de> | Reviewed-by: Jeff Layton <jlayton@primarydata.com> | Signed-off-by: J. Bruce Fields <bfields@redhat.com>
Diffstat (limited to 'fs/nfsd/nfs4callback.c')
-rw-r--r--fs/nfsd/nfs4callback.c14
1 files changed, 6 insertions, 8 deletions
diff --git a/fs/nfsd/nfs4callback.c b/fs/nfsd/nfs4callback.c
index 03d9f4f298ec..d97e2009e310 100644
--- a/fs/nfsd/nfs4callback.c
+++ b/fs/nfsd/nfs4callback.c
@@ -743,11 +743,6 @@ static const struct rpc_call_ops nfsd4_cb_probe_ops = {
743 743
744static struct workqueue_struct *callback_wq; 744static struct workqueue_struct *callback_wq;
745 745
746static void do_probe_callback(struct nfs4_client *clp)
747{
748 return nfsd4_cb(&clp->cl_cb_null, clp, NFSPROC4_CLNT_CB_NULL);
749}
750
751/* 746/*
752 * Poke the callback thread to process any updates to the callback 747 * Poke the callback thread to process any updates to the callback
753 * parameters, and send a null probe. 748 * parameters, and send a null probe.
@@ -756,7 +751,7 @@ void nfsd4_probe_callback(struct nfs4_client *clp)
756{ 751{
757 clp->cl_cb_state = NFSD4_CB_UNKNOWN; 752 clp->cl_cb_state = NFSD4_CB_UNKNOWN;
758 set_bit(NFSD4_CLIENT_CB_UPDATE, &clp->cl_flags); 753 set_bit(NFSD4_CLIENT_CB_UPDATE, &clp->cl_flags);
759 do_probe_callback(clp); 754 nfsd4_run_cb(&clp->cl_cb_null);
760} 755}
761 756
762void nfsd4_probe_callback_sync(struct nfs4_client *clp) 757void nfsd4_probe_callback_sync(struct nfs4_client *clp)
@@ -912,7 +907,7 @@ void nfsd4_shutdown_callback(struct nfs4_client *clp)
912 * instead, nfsd4_run_cb_null() will detect the killed 907 * instead, nfsd4_run_cb_null() will detect the killed
913 * client, destroy the rpc client, and stop: 908 * client, destroy the rpc client, and stop:
914 */ 909 */
915 do_probe_callback(clp); 910 nfsd4_run_cb(&clp->cl_cb_null);
916 flush_workqueue(callback_wq); 911 flush_workqueue(callback_wq);
917} 912}
918 913
@@ -1025,7 +1020,7 @@ nfsd4_run_cb_recall(struct work_struct *w)
1025 nfsd4_run_callback_rpc(cb); 1020 nfsd4_run_callback_rpc(cb);
1026} 1021}
1027 1022
1028void nfsd4_cb(struct nfsd4_callback *cb, struct nfs4_client *clp, 1023void nfsd4_init_cb(struct nfsd4_callback *cb, struct nfs4_client *clp,
1029 enum nfsd4_cb_op op) 1024 enum nfsd4_cb_op op)
1030{ 1025{
1031 cb->cb_clp = clp; 1026 cb->cb_clp = clp;
@@ -1038,6 +1033,9 @@ void nfsd4_cb(struct nfsd4_callback *cb, struct nfs4_client *clp,
1038 cb->cb_ops = &nfsd4_cb_recall_ops; 1033 cb->cb_ops = &nfsd4_cb_recall_ops;
1039 INIT_LIST_HEAD(&cb->cb_per_client); 1034 INIT_LIST_HEAD(&cb->cb_per_client);
1040 cb->cb_done = true; 1035 cb->cb_done = true;
1036}
1041 1037
1038void nfsd4_run_cb(struct nfsd4_callback *cb)
1039{
1042 queue_work(callback_wq, &cb->cb_work); 1040 queue_work(callback_wq, &cb->cb_work);
1043} 1041}