author     David Chinner <dgc@sgi.com>                  2007-08-16 01:21:11 -0400
committer  Tim Shimmin <tes@chook.melbourne.sgi.com>    2007-09-17 02:42:02 -0400
commit     65de5567564e70edd01b6d4e95e548d7ba284872
tree       977aef83761c4ef87d95f1e192bc1ae9c7cbdeac
parent     c2f828977ba5d17c13debba374ea252d18e5ccfb
[XFS] On-demand reaping of the MRU cache
Instead of running the mru cache reaper all the time based on a timeout,
we should only run it when the cache has active objects. This allows CPUs
to sleep when there is no activity rather than be woken repeatedly just to
check if there is anything to do.
SGI-PV: 968554
SGI-Modid: xfs-linux-melb:xfs-kern:29305a
Signed-off-by: David Chinner <dgc@sgi.com>
Signed-off-by: Donald Douwsma <donaldd@sgi.com>
Signed-off-by: Tim Shimmin <tes@sgi.com>
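The scheduling pattern the commit message describes can be shown outside the kernel. Below is a minimal single-threaded C model (the names and the one-entry-per-pass reaper are illustrative assumptions, not the patch's code): work is queued only on the empty to non-empty transition, and the reap pass re-arms itself only while entries remain, so an idle cache schedules nothing and wakes no CPU.

/*
 * Illustrative single-threaded model of on-demand reaping; not the
 * patch's code. schedule_reap() stands in for queue_delayed_work().
 */
#include <stdbool.h>
#include <stdio.h>

struct mru_model {
	unsigned int	nr_entries;	/* live objects in the cache */
	bool		queued;		/* reap work currently scheduled? */
};

static void schedule_reap(struct mru_model *mru, unsigned long delay)
{
	mru->queued = true;
	printf("reap scheduled in %lu ticks\n", delay);
}

static void cache_insert(struct mru_model *mru)
{
	mru->nr_entries++;
	if (!mru->queued)		/* empty -> non-empty: arm the reaper */
		schedule_reap(mru, 10);
}

static void reap(struct mru_model *mru)
{
	if (mru->nr_entries)
		mru->nr_entries--;	/* model: expire one entry per pass */
	if (mru->nr_entries) {
		schedule_reap(mru, 10);	/* more to do: stay armed */
	} else {
		mru->queued = false;	/* cache empty: go idle, no wakeups */
		printf("cache empty, reaper idle\n");
	}
}

int main(void)
{
	struct mru_model mru = { 0, false };

	cache_insert(&mru);	/* arms the reaper */
	cache_insert(&mru);	/* already armed: nothing extra queued */
	reap(&mru);		/* one entry left: reaper re-arms */
	reap(&mru);		/* empty: reaper goes idle */
	return 0;
}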
-rw-r--r--  fs/xfs/xfs_filestream.c |  3
-rw-r--r--  fs/xfs/xfs_mru_cache.c  | 72
-rw-r--r--  fs/xfs/xfs_mru_cache.h  |  6

3 files changed, 31 insertions(+), 50 deletions(-)
diff --git a/fs/xfs/xfs_filestream.c b/fs/xfs/xfs_filestream.c
index ce2278611bb..16f8e175167 100644
--- a/fs/xfs/xfs_filestream.c
+++ b/fs/xfs/xfs_filestream.c
@@ -467,8 +467,7 @@ void
 xfs_filestream_flush(
 	xfs_mount_t	*mp)
 {
-	/* point in time flush, so keep the reaper running */
-	xfs_mru_cache_flush(mp->m_filestream, 1);
+	xfs_mru_cache_flush(mp->m_filestream);
 }
 
 /*
diff --git a/fs/xfs/xfs_mru_cache.c b/fs/xfs/xfs_mru_cache.c
index 7deb9e3cbbd..e0b358c1c53 100644
--- a/fs/xfs/xfs_mru_cache.c
+++ b/fs/xfs/xfs_mru_cache.c
@@ -206,8 +206,11 @@ _xfs_mru_cache_list_insert(
 	 */
 	if (!_xfs_mru_cache_migrate(mru, now)) {
 		mru->time_zero = now;
-		if (!mru->next_reap)
-			mru->next_reap = mru->grp_count * mru->grp_time;
+		if (!mru->queued) {
+			mru->queued = 1;
+			queue_delayed_work(xfs_mru_reap_wq, &mru->work,
+					mru->grp_count * mru->grp_time);
+		}
 	} else {
 		grp = (now - mru->time_zero) / mru->grp_time;
 		grp = (mru->lru_grp + grp) % mru->grp_count;
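For the else branch above: the cache's lists form a circular array of grp_count buckets, each spanning grp_time ticks, with lru_grp indexing the bucket that holds time_zero. A standalone sketch of just that arithmetic, with made-up example values:

#include <assert.h>

/* Which bucket does a timestamp land in? Mirrors the two lines above. */
static unsigned int bucket_for(unsigned long now, unsigned long time_zero,
			       unsigned int lru_grp, unsigned int grp_time,
			       unsigned int grp_count)
{
	unsigned int grp = (now - time_zero) / grp_time; /* buckets elapsed */
	return (lru_grp + grp) % grp_count;		 /* wrap the ring */
}

int main(void)
{
	/* 5 buckets of 100 ticks each; time zero lives in bucket 3 */
	assert(bucket_for(250, 0, 3, 100, 5) == 0);	/* (3 + 2) % 5 */
	return 0;
}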
@@ -271,29 +274,26 @@ _xfs_mru_cache_reap(
 	struct work_struct	*work)
 {
 	xfs_mru_cache_t		*mru = container_of(work, xfs_mru_cache_t, work.work);
-	unsigned long		now;
+	unsigned long		now, next;
 
 	ASSERT(mru && mru->lists);
 	if (!mru || !mru->lists)
 		return;
 
 	mutex_spinlock(&mru->lock);
-	now = jiffies;
-	if (mru->reap_all ||
-	    (mru->next_reap && time_after(now, mru->next_reap))) {
-		if (mru->reap_all)
-			now += mru->grp_count * mru->grp_time * 2;
-		mru->next_reap = _xfs_mru_cache_migrate(mru, now);
-		_xfs_mru_cache_clear_reap_list(mru);
+	next = _xfs_mru_cache_migrate(mru, jiffies);
+	_xfs_mru_cache_clear_reap_list(mru);
+
+	mru->queued = next;
+	if ((mru->queued > 0)) {
+		now = jiffies;
+		if (next <= now)
+			next = 0;
+		else
+			next -= now;
+		queue_delayed_work(xfs_mru_reap_wq, &mru->work, next);
 	}
 
-	/*
-	 * the process that triggered the reap_all is responsible
-	 * for restating the periodic reap if it is required.
-	 */
-	if (!mru->reap_all)
-		queue_delayed_work(xfs_mru_reap_wq, &mru->work, mru->grp_time);
-	mru->reap_all = 0;
 	mutex_spinunlock(&mru->lock, 0);
 }
 
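The clamp in the new reap path exists because _xfs_mru_cache_migrate() returns an absolute jiffies deadline (zero when the cache is empty) while queue_delayed_work() takes a relative delay. A standalone restatement of just that conversion, using the same plain comparison as the patch (jiffies wraparound is not handled here either):

#include <assert.h>

/* Convert an absolute deadline to the relative delay a workqueue wants. */
static unsigned long reap_delay(unsigned long next, unsigned long now)
{
	return next > now ? next - now : 0;	/* past deadline: run now */
}

int main(void)
{
	assert(reap_delay(150, 100) == 50);	/* future: sleep 50 ticks */
	assert(reap_delay(90, 100) == 0);	/* missed: fire immediately */
	return 0;
}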
@@ -352,7 +352,7 @@ xfs_mru_cache_create(
 
 	/* An extra list is needed to avoid reaping up to a grp_time early. */
 	mru->grp_count = grp_count + 1;
-	mru->lists = kmem_alloc(mru->grp_count * sizeof(*mru->lists), KM_SLEEP);
+	mru->lists = kmem_zalloc(mru->grp_count * sizeof(*mru->lists), KM_SLEEP);
 
 	if (!mru->lists) {
 		err = ENOMEM;
@@ -374,11 +374,6 @@ xfs_mru_cache_create(
 	mru->grp_time = grp_time;
 	mru->free_func = free_func;
 
-	/* start up the reaper event */
-	mru->next_reap = 0;
-	mru->reap_all = 0;
-	queue_delayed_work(xfs_mru_reap_wq, &mru->work, mru->grp_time);
-
 	*mrup = mru;
 
 exit:
@@ -394,35 +389,25 @@ exit:
  * Call xfs_mru_cache_flush() to flush out all cached entries, calling their
  * free functions as they're deleted. When this function returns, the caller is
  * guaranteed that all the free functions for all the elements have finished
- * executing.
- *
- * While we are flushing, we stop the periodic reaper event from triggering.
- * Normally, we want to restart this periodic event, but if we are shutting
- * down the cache we do not want it restarted. hence the restart parameter
- * where 0 = do not restart reaper and 1 = restart reaper.
+ * executing and the reaper is not running.
  */
 void
 xfs_mru_cache_flush(
-	xfs_mru_cache_t		*mru,
-	int			restart)
+	xfs_mru_cache_t		*mru)
 {
 	if (!mru || !mru->lists)
 		return;
 
-	cancel_rearming_delayed_workqueue(xfs_mru_reap_wq, &mru->work);
-
 	mutex_spinlock(&mru->lock);
-	mru->reap_all = 1;
-	mutex_spinunlock(&mru->lock, 0);
+	if (mru->queued) {
+		mutex_spinunlock(&mru->lock, 0);
+		cancel_rearming_delayed_workqueue(xfs_mru_reap_wq, &mru->work);
+		mutex_spinlock(&mru->lock);
+	}
 
-	queue_work(xfs_mru_reap_wq, &mru->work.work);
-	flush_workqueue(xfs_mru_reap_wq);
+	_xfs_mru_cache_migrate(mru, jiffies + mru->grp_count * mru->grp_time);
+	_xfs_mru_cache_clear_reap_list(mru);
 
-	mutex_spinlock(&mru->lock);
-	WARN_ON_ONCE(mru->reap_all != 0);
-	mru->reap_all = 0;
-	if (restart)
-		queue_delayed_work(xfs_mru_reap_wq, &mru->work, mru->grp_time);
 	mutex_spinunlock(&mru->lock, 0);
 }
 
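The lock dance in the new xfs_mru_cache_flush() is deliberate: the reap work acquires mru->lock itself, and cancel_rearming_delayed_workqueue() waits for a running work item to finish, so cancelling with the lock held could deadlock. A compilable miniature of the pattern, with stand-in names and trivially stubbed helpers (an assumption-laden sketch, not the patch's code):

#include <stdbool.h>
#include <pthread.h>

struct cache {
	pthread_mutex_t	lock;
	bool		queued;		/* reap work scheduled or running */
};

/* Stand-in for cancel_rearming_delayed_workqueue(): in the kernel this
 * blocks until any running reap pass finishes; here it just clears the
 * flag so the model stays self-contained. */
static void cancel_pending_reap(struct cache *c)
{
	c->queued = false;
}

static void expire_all_entries(struct cache *c)	/* caller holds c->lock */
{
	(void)c;
}

static void cache_flush(struct cache *c)
{
	pthread_mutex_lock(&c->lock);
	if (c->queued) {
		/* Drop the lock the worker needs, or waiting for it
		 * to finish could deadlock. */
		pthread_mutex_unlock(&c->lock);
		cancel_pending_reap(c);
		pthread_mutex_lock(&c->lock);
	}
	expire_all_entries(c);	/* no reaper can race with us now */
	pthread_mutex_unlock(&c->lock);
}

int main(void)
{
	struct cache c = { PTHREAD_MUTEX_INITIALIZER, true };

	cache_flush(&c);
	return 0;
}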
@@ -433,8 +418,7 @@ xfs_mru_cache_destroy(
 	if (!mru || !mru->lists)
 		return;
 
-	/* we don't want the reaper to restart here */
-	xfs_mru_cache_flush(mru, 0);
+	xfs_mru_cache_flush(mru);
 
 	kmem_free(mru->lists, mru->grp_count * sizeof(*mru->lists));
 	kmem_free(mru, sizeof(*mru));
diff --git a/fs/xfs/xfs_mru_cache.h b/fs/xfs/xfs_mru_cache.h
index 624fd10ee8e..dd58ea1bbeb 100644
--- a/fs/xfs/xfs_mru_cache.h
+++ b/fs/xfs/xfs_mru_cache.h
@@ -32,11 +32,9 @@ typedef struct xfs_mru_cache
 	unsigned int		grp_time;  /* Time period spanned by grps.  */
 	unsigned int		lru_grp;   /* Group containing time zero.   */
 	unsigned long		time_zero; /* Time first element was added. */
-	unsigned long		next_reap; /* Time that the reaper should
-					      next do something. */
-	unsigned int		reap_all;  /* if set, reap all lists */
 	xfs_mru_cache_free_func_t free_func; /* Function pointer for freeing. */
 	struct delayed_work	work;      /* Workqueue data for reaping.   */
+	unsigned int		queued;    /* work has been queued */
 } xfs_mru_cache_t;
 
 int xfs_mru_cache_init(void);
@@ -44,7 +42,7 @@ void xfs_mru_cache_uninit(void);
 int xfs_mru_cache_create(struct xfs_mru_cache **mrup, unsigned int lifetime_ms,
 		unsigned int grp_count,
 		xfs_mru_cache_free_func_t free_func);
-void xfs_mru_cache_flush(xfs_mru_cache_t *mru, int restart);
+void xfs_mru_cache_flush(xfs_mru_cache_t *mru);
 void xfs_mru_cache_destroy(struct xfs_mru_cache *mru);
 int xfs_mru_cache_insert(struct xfs_mru_cache *mru, unsigned long key,
 		void *value);