author	Alexandros Batsakis <batsakis@netapp.com>	2010-02-05 06:45:04 -0500
committer	Trond Myklebust <Trond.Myklebust@netapp.com>	2010-03-02 12:44:07 -0500
commit	dc96aef96a75348b4d1b01c4c0429ab52780683e (patch)
tree	1bd4755b97367a8db0e2ea949cb3a4fd84ec3a66 /fs/nfs
parent	888ef2e3f8b7b8daeb031bfb4ad1fd4fa817e193 (diff)
nfs: prevent backlogging of renewd requests
If the renewd send queue gets backlogged (e.g., if the server goes down), we will keep filling the queue with periodic RENEW/SEQUENCE requests. This patch schedules a new renewd request if and only if the previous one returns (either success or failure).

Signed-off-by: Alexandros Batsakis <batsakis@netapp.com>
[Trond.Myklebust@netapp.com: moved nfs4_schedule_state_renewal() into separate nfs4_renew_release() and nfs41_sequence_release() callbacks to ensure correct behaviour on call setup failure]
Signed-off-by: Trond Myklebust <Trond.Myklebust@netapp.com>
Diffstat (limited to 'fs/nfs')
-rw-r--r--	fs/nfs/nfs4proc.c	24
-rw-r--r--	fs/nfs/nfs4renewd.c	24
2 files changed, 27 insertions(+), 21 deletions(-)
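In short, renewd no longer re-arms its own delayed work before the RPC completes; rescheduling moves into the rpc_release callback of the RENEW/SEQUENCE task, which the RPC layer invokes once the task is torn down, whether the call succeeded, failed, or was never set up. A condensed view of the NFSv4.0 half of the change (the full diff follows):

static void nfs4_renew_release(void *data)
{
	struct nfs_client *clp = data;

	/* Re-arm renewd only now that the previous RENEW task is gone. */
	nfs4_schedule_state_renewal(clp);
}

static const struct rpc_call_ops nfs4_renew_ops = {
	.rpc_call_done = nfs4_renew_done,
	.rpc_release   = nfs4_renew_release,
};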
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
index 84b53d38f50b..726bc195039d 100644
--- a/fs/nfs/nfs4proc.c
+++ b/fs/nfs/nfs4proc.c
@@ -3147,10 +3147,17 @@ static void nfs4_proc_commit_setup(struct nfs_write_data *data, struct rpc_messa
  * nfs4_proc_async_renew(): This is not one of the nfs_rpc_ops; it is a special
  * standalone procedure for queueing an asynchronous RENEW.
  */
 +static void nfs4_renew_release(void *data)
 +{
 +	struct nfs_client *clp = data;
 +
 +	nfs4_schedule_state_renewal(clp);
 +}
 +
  static void nfs4_renew_done(struct rpc_task *task, void *data)
  {
 -	struct nfs_client *clp = (struct nfs_client *)task->tk_msg.rpc_argp;
 -	unsigned long timestamp = (unsigned long)data;
 +	struct nfs_client *clp = data;
 +	unsigned long timestamp = task->tk_start;
 
  	if (task->tk_status < 0) {
  		/* Unless we're shutting down, schedule state recovery! */
@@ -3166,6 +3173,7 @@ static void nfs4_renew_done(struct rpc_task *task, void *data)
 
  static const struct rpc_call_ops nfs4_renew_ops = {
  	.rpc_call_done = nfs4_renew_done,
 +	.rpc_release = nfs4_renew_release,
  };
 
  int nfs4_proc_async_renew(struct nfs_client *clp, struct rpc_cred *cred)
@@ -3177,7 +3185,7 @@ int nfs4_proc_async_renew(struct nfs_client *clp, struct rpc_cred *cred)
  	};
 
  	return rpc_call_async(clp->cl_rpcclient, &msg, RPC_TASK_SOFT,
 -			&nfs4_renew_ops, (void *)jiffies);
 +			&nfs4_renew_ops, clp);
  }
 
  int nfs4_proc_renew(struct nfs_client *clp, struct rpc_cred *cred)
@@ -5023,7 +5031,14 @@ static int nfs4_proc_sequence(struct nfs_client *clp, struct rpc_cred *cred)
  			&res, args.sa_cache_this, 1);
  }
 
 -void nfs41_sequence_call_done(struct rpc_task *task, void *data)
 +static void nfs41_sequence_release(void *data)
 +{
 +	struct nfs_client *clp = (struct nfs_client *)data;
 +
 +	nfs4_schedule_state_renewal(clp);
 +}
 +
 +static void nfs41_sequence_call_done(struct rpc_task *task, void *data)
  {
  	struct nfs_client *clp = (struct nfs_client *)data;
 
@@ -5064,6 +5079,7 @@ static void nfs41_sequence_prepare(struct rpc_task *task, void *data)
  static const struct rpc_call_ops nfs41_sequence_ops = {
  	.rpc_call_done = nfs41_sequence_call_done,
  	.rpc_call_prepare = nfs41_sequence_prepare,
 +	.rpc_release = nfs41_sequence_release,
  };
 
  static int nfs41_proc_async_sequence(struct nfs_client *clp,
diff --git a/fs/nfs/nfs4renewd.c b/fs/nfs/nfs4renewd.c
index 0156c01c212c..d87f10327b72 100644
--- a/fs/nfs/nfs4renewd.c
+++ b/fs/nfs/nfs4renewd.c
@@ -36,11 +36,6 @@
   * as an rpc_task, not a real kernel thread, so it always runs in rpciod's
   * context. There is one renewd per nfs_server.
   *
 - * TODO: If the send queue gets backlogged (e.g., if the server goes down),
 - * we will keep filling the queue with periodic RENEW requests. We need a
 - * mechanism for ensuring that if renewd successfully sends off a request,
 - * then it only wakes up when the request is finished. Maybe use the
 - * child task framework of the RPC layer?
   */
 
  #include <linux/mm.h>
@@ -63,7 +58,7 @@ nfs4_renew_state(struct work_struct *work)
  	struct nfs_client *clp =
  		container_of(work, struct nfs_client, cl_renewd.work);
  	struct rpc_cred *cred;
 -	long lease, timeout;
 +	long lease;
  	unsigned long last, now;
 
  	ops = nfs4_state_renewal_ops[clp->cl_minorversion];
@@ -75,7 +70,6 @@ nfs4_renew_state(struct work_struct *work)
  	lease = clp->cl_lease_time;
  	last = clp->cl_last_renewal;
  	now = jiffies;
 -	timeout = (2 * lease) / 3 + (long)last - (long)now;
  	/* Are we close to a lease timeout? */
  	if (time_after(now, last + lease/3)) {
  		cred = ops->get_state_renewal_cred_locked(clp);
@@ -90,19 +84,15 @@ nfs4_renew_state(struct work_struct *work)
  			/* Queue an asynchronous RENEW. */
  			ops->sched_state_renewal(clp, cred);
  			put_rpccred(cred);
 +			goto out_exp;
  		}
 -		timeout = (2 * lease) / 3;
 -		spin_lock(&clp->cl_lock);
 -	} else
 +	} else {
  		dprintk("%s: failed to call renewd. Reason: lease not expired \n",
  				__func__);
 -	if (timeout < 5 * HZ)    /* safeguard */
 -		timeout = 5 * HZ;
 -	dprintk("%s: requeueing work. Lease period = %ld\n",
 -			__func__, (timeout + HZ - 1) / HZ);
 -	cancel_delayed_work(&clp->cl_renewd);
 -	schedule_delayed_work(&clp->cl_renewd, timeout);
 -	spin_unlock(&clp->cl_lock);
 +		spin_unlock(&clp->cl_lock);
 +	}
 +	nfs4_schedule_state_renewal(clp);
 +out_exp:
  	nfs_expire_unreferenced_delegations(clp);
  out:
  	dprintk("%s: done\n", __func__);