path: root/fs/nfsd/nfscache.c
author	Jeff Layton <jlayton@redhat.com>	2013-02-04 08:18:05 -0500
committer	J. Bruce Fields <bfields@redhat.com>	2013-02-04 17:19:12 -0500
commit	aca8a23de60c705e2458b2c6731ad59aa0717f83 (patch)
tree	9b6d8ae1402b9ee701b907023d72b4f29fd41fb7 /fs/nfsd/nfscache.c
parent	2c6b691c05bf77c4bc7c9f1a9b6d93a160928421 (diff)
nfsd: add recurring workqueue job to clean the cache
It's not sufficient to only clean the cache when requests come in. What if
we have a flurry of activity and then the server goes idle? Add a workqueue
job that will clean the cache every RC_EXPIRE period.

Care is taken to only run this when we expect to have entries expiring.

Signed-off-by: Jeff Layton <jlayton@redhat.com>
Signed-off-by: J. Bruce Fields <bfields@redhat.com>
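For readers unfamiliar with the delayed-workqueue API the patch builds on, here is a minimal, self-contained sketch (not part of this commit; the demo_* names and the DEMO_EXPIRE interval are made up) of the same lifecycle used below: declare the work statically, arm it with schedule_delayed_work(), rearm it from the handler with mod_delayed_work(), and tear it down with cancel_delayed_work_sync() on shutdown.

#include <linux/module.h>
#include <linux/workqueue.h>
#include <linux/jiffies.h>

#define DEMO_EXPIRE	msecs_to_jiffies(10 * 1000)	/* stand-in for RC_EXPIRE */

static void demo_cleaner_func(struct work_struct *unused);
static DECLARE_DELAYED_WORK(demo_cleaner, demo_cleaner_func);

/* runs on the shared system workqueue, like cache_cleaner_func() in the patch */
static void demo_cleaner_func(struct work_struct *unused)
{
	pr_info("demo: expired entries would be pruned here\n");

	/* rearm so the job keeps firing every DEMO_EXPIRE while there is work */
	mod_delayed_work(system_wq, &demo_cleaner, DEMO_EXPIRE);
}

static int __init demo_init(void)
{
	/* arm the first run; if the work were already pending this is a no-op */
	schedule_delayed_work(&demo_cleaner, DEMO_EXPIRE);
	return 0;
}

static void __exit demo_exit(void)
{
	/* cancel and wait; safe even though the handler rearms itself */
	cancel_delayed_work_sync(&demo_cleaner);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");

Two properties of the API matter for the patch: schedule_delayed_work() does nothing if the work is already pending, which is why lru_put_end() can call it unconditionally, and cancel_delayed_work_sync() copes with a handler that requeues itself, which is why it is safe in nfsd_reply_cache_shutdown().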
Diffstat (limited to 'fs/nfsd/nfscache.c')
-rw-r--r--	fs/nfsd/nfscache.c	| 50
1 file changed, 47 insertions(+), 3 deletions(-)
diff --git a/fs/nfsd/nfscache.c b/fs/nfsd/nfscache.c
index e8ea785e295d..d7b088bee684 100644
--- a/fs/nfsd/nfscache.c
+++ b/fs/nfsd/nfscache.c
@@ -36,6 +36,7 @@ static inline u32 request_hash(u32 xid)
 }
 
 static int nfsd_cache_append(struct svc_rqst *rqstp, struct kvec *vec);
+static void cache_cleaner_func(struct work_struct *unused);
 
 /*
  * locking for the reply cache:
@@ -43,6 +44,7 @@ static int nfsd_cache_append(struct svc_rqst *rqstp, struct kvec *vec);
  * Otherwise, it when accessing _prev or _next, the lock must be held.
  */
 static DEFINE_SPINLOCK(cache_lock);
+static DECLARE_DELAYED_WORK(cache_cleaner, cache_cleaner_func);
 
 /*
  * Put a cap on the size of the DRC based on the amount of available
@@ -131,6 +133,8 @@ void nfsd_reply_cache_shutdown(void)
 {
 	struct svc_cacherep *rp;
 
+	cancel_delayed_work_sync(&cache_cleaner);
+
 	while (!list_empty(&lru_head)) {
 		rp = list_entry(lru_head.next, struct svc_cacherep, c_lru);
 		nfsd_reply_cache_free_locked(rp);
@@ -146,13 +150,15 @@ void nfsd_reply_cache_shutdown(void)
 }
 
 /*
- * Move cache entry to end of LRU list
+ * Move cache entry to end of LRU list, and queue the cleaner to run if it's
+ * not already scheduled.
  */
 static void
 lru_put_end(struct svc_cacherep *rp)
 {
 	rp->c_timestamp = jiffies;
 	list_move_tail(&rp->c_lru, &lru_head);
+	schedule_delayed_work(&cache_cleaner, RC_EXPIRE);
 }
 
 /*
@@ -173,6 +179,42 @@ nfsd_cache_entry_expired(struct svc_cacherep *rp)
 }
 
 /*
+ * Walk the LRU list and prune off entries that are older than RC_EXPIRE.
+ * Also prune the oldest ones when the total exceeds the max number of entries.
+ */
+static void
+prune_cache_entries(void)
+{
+	struct svc_cacherep *rp, *tmp;
+
+	list_for_each_entry_safe(rp, tmp, &lru_head, c_lru) {
+		if (!nfsd_cache_entry_expired(rp) &&
+		    num_drc_entries <= max_drc_entries)
+			break;
+		nfsd_reply_cache_free_locked(rp);
+	}
+
+	/*
+	 * Conditionally rearm the job. If we cleaned out the list, then
+	 * cancel any pending run (since there won't be any work to do).
+	 * Otherwise, we rearm the job or modify the existing one to run in
+	 * RC_EXPIRE since we just ran the pruner.
+	 */
+	if (list_empty(&lru_head))
+		cancel_delayed_work(&cache_cleaner);
+	else
+		mod_delayed_work(system_wq, &cache_cleaner, RC_EXPIRE);
+}
+
+static void
+cache_cleaner_func(struct work_struct *unused)
+{
+	spin_lock(&cache_lock);
+	prune_cache_entries();
+	spin_unlock(&cache_lock);
+}
+
+/*
  * Search the request hash for an entry that matches the given rqstp.
  * Must be called with cache_lock held. Returns the found entry or
  * NULL on failure.
@@ -192,7 +234,6 @@ nfsd_cache_search(struct svc_rqst *rqstp)
 	hlist_for_each_entry(rp, hn, rh, c_hash) {
 		if (xid == rp->c_xid && proc == rp->c_proc &&
 		    proto == rp->c_prot && vers == rp->c_vers &&
-		    !nfsd_cache_entry_expired(rp) &&
 		    rpc_cmp_addr(svc_addr(rqstp), (struct sockaddr *)&rp->c_addr) &&
 		    rpc_get_port(svc_addr(rqstp)) == rpc_get_port((struct sockaddr *)&rp->c_addr))
 			return rp;
@@ -234,8 +275,11 @@ nfsd_cache_lookup(struct svc_rqst *rqstp)
 	if (!list_empty(&lru_head)) {
 		rp = list_first_entry(&lru_head, struct svc_cacherep, c_lru);
 		if (nfsd_cache_entry_expired(rp) ||
-		    num_drc_entries >= max_drc_entries)
+		    num_drc_entries >= max_drc_entries) {
+			lru_put_end(rp);
+			prune_cache_entries();
 			goto setup_entry;
+		}
 	}
 
 	spin_unlock(&cache_lock);
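A note on the pruning loop added in prune_cache_entries() above: because lru_put_end() keeps the list ordered oldest-to-newest, the walk can stop at the first entry that is neither expired nor over the cap, since every later entry is newer. Below is a simplified userspace analogue (illustrative only; the entry layout, EXPIRE_SECS and MAX_ENTRIES are made up, and the kernel list and locking machinery is omitted).

#include <stdio.h>
#include <stdlib.h>
#include <time.h>

#define EXPIRE_SECS	120	/* stand-in for RC_EXPIRE */
#define MAX_ENTRIES	3	/* stand-in for max_drc_entries */

struct entry {
	struct entry *next;
	time_t timestamp;
	int id;
};

static struct entry *lru_head;	/* oldest first */
static struct entry *lru_tail;
static unsigned int num_entries;

/* append a new entry at the tail, i.e. the "most recently used" end */
static void lru_put_end(int id, time_t now)
{
	struct entry *e = calloc(1, sizeof(*e));

	if (!e)
		abort();
	e->id = id;
	e->timestamp = now;
	if (lru_tail)
		lru_tail->next = e;
	else
		lru_head = e;
	lru_tail = e;
	num_entries++;
}

static void prune_entries(time_t now)
{
	while (lru_head) {
		struct entry *e = lru_head;

		/* stop at the first entry that is fresh and under the cap */
		if (now - e->timestamp <= EXPIRE_SECS &&
		    num_entries <= MAX_ENTRIES)
			break;
		printf("pruning entry %d\n", e->id);
		lru_head = e->next;
		if (!lru_head)
			lru_tail = NULL;
		free(e);
		num_entries--;
	}
}

int main(void)
{
	time_t now = time(NULL);

	lru_put_end(1, now - 300);	/* already expired */
	lru_put_end(2, now - 10);
	lru_put_end(3, now);
	lru_put_end(4, now);
	lru_put_end(5, now);		/* pushes the total over the cap of 3 */

	prune_entries(now);		/* frees entry 1 (expired) and entry 2 (over cap) */
	printf("%u entries remain\n", num_entries);
	return 0;
}

Built with a plain C compiler (e.g. cc prune.c), this prints that the expired entry and one over-cap entry are pruned and three entries remain, mirroring the break condition in the patch.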