diff options
Diffstat (limited to 'fs/nfs/nfs4renewd.c')
| -rw-r--r-- | fs/nfs/nfs4renewd.c | 24 |
1 file changed, 7 insertions, 17 deletions
diff --git a/fs/nfs/nfs4renewd.c b/fs/nfs/nfs4renewd.c index 0156c01c212c..d87f10327b72 100644 --- a/fs/nfs/nfs4renewd.c +++ b/fs/nfs/nfs4renewd.c | |||
| @@ -36,11 +36,6 @@ | |||
| 36 | * as an rpc_task, not a real kernel thread, so it always runs in rpciod's | 36 | * as an rpc_task, not a real kernel thread, so it always runs in rpciod's |
| 37 | * context. There is one renewd per nfs_server. | 37 | * context. There is one renewd per nfs_server. |
| 38 | * | 38 | * |
| 39 | * TODO: If the send queue gets backlogged (e.g., if the server goes down), | ||
| 40 | * we will keep filling the queue with periodic RENEW requests. We need a | ||
| 41 | * mechanism for ensuring that if renewd successfully sends off a request, | ||
| 42 | * then it only wakes up when the request is finished. Maybe use the | ||
| 43 | * child task framework of the RPC layer? | ||
| 44 | */ | 39 | */ |
| 45 | 40 | ||
| 46 | #include <linux/mm.h> | 41 | #include <linux/mm.h> |
| @@ -63,7 +58,7 @@ nfs4_renew_state(struct work_struct *work) | |||
| 63 | struct nfs_client *clp = | 58 | struct nfs_client *clp = |
| 64 | container_of(work, struct nfs_client, cl_renewd.work); | 59 | container_of(work, struct nfs_client, cl_renewd.work); |
| 65 | struct rpc_cred *cred; | 60 | struct rpc_cred *cred; |
| 66 | long lease, timeout; | 61 | long lease; |
| 67 | unsigned long last, now; | 62 | unsigned long last, now; |
| 68 | 63 | ||
| 69 | ops = nfs4_state_renewal_ops[clp->cl_minorversion]; | 64 | ops = nfs4_state_renewal_ops[clp->cl_minorversion]; |
| @@ -75,7 +70,6 @@ nfs4_renew_state(struct work_struct *work) | |||
| 75 | lease = clp->cl_lease_time; | 70 | lease = clp->cl_lease_time; |
| 76 | last = clp->cl_last_renewal; | 71 | last = clp->cl_last_renewal; |
| 77 | now = jiffies; | 72 | now = jiffies; |
| 78 | timeout = (2 * lease) / 3 + (long)last - (long)now; | ||
| 79 | /* Are we close to a lease timeout? */ | 73 | /* Are we close to a lease timeout? */ |
| 80 | if (time_after(now, last + lease/3)) { | 74 | if (time_after(now, last + lease/3)) { |
| 81 | cred = ops->get_state_renewal_cred_locked(clp); | 75 | cred = ops->get_state_renewal_cred_locked(clp); |
| @@ -90,19 +84,15 @@ nfs4_renew_state(struct work_struct *work) | |||
| 90 | /* Queue an asynchronous RENEW. */ | 84 | /* Queue an asynchronous RENEW. */ |
| 91 | ops->sched_state_renewal(clp, cred); | 85 | ops->sched_state_renewal(clp, cred); |
| 92 | put_rpccred(cred); | 86 | put_rpccred(cred); |
| 87 | goto out_exp; | ||
| 93 | } | 88 | } |
| 94 | timeout = (2 * lease) / 3; | 89 | } else { |
| 95 | spin_lock(&clp->cl_lock); | ||
| 96 | } else | ||
| 97 | dprintk("%s: failed to call renewd. Reason: lease not expired \n", | 90 | dprintk("%s: failed to call renewd. Reason: lease not expired \n", |
| 98 | __func__); | 91 | __func__); |
| 99 | if (timeout < 5 * HZ) /* safeguard */ | 92 | spin_unlock(&clp->cl_lock); |
| 100 | timeout = 5 * HZ; | 93 | } |
| 101 | dprintk("%s: requeueing work. Lease period = %ld\n", | 94 | nfs4_schedule_state_renewal(clp); |
| 102 | __func__, (timeout + HZ - 1) / HZ); | 95 | out_exp: |
| 103 | cancel_delayed_work(&clp->cl_renewd); | ||
| 104 | schedule_delayed_work(&clp->cl_renewd, timeout); | ||
| 105 | spin_unlock(&clp->cl_lock); | ||
| 106 | nfs_expire_unreferenced_delegations(clp); | 96 | nfs_expire_unreferenced_delegations(clp); |
| 107 | out: | 97 | out: |
| 108 | dprintk("%s: done\n", __func__); | 98 | dprintk("%s: done\n", __func__); |
