path: root/fs/xfs/xfs_mru_cache.c
author	Christoph Hellwig <hch@lst.de>	2014-04-22 17:11:51 -0400
committer	Dave Chinner <david@fromorbit.com>	2014-04-22 17:11:51 -0400
commit	22328d712dd7fdc984d17da2121be840d1f844cd (patch)
tree	678de2634238b10bfcbf72f1ded34faee9919f32 /fs/xfs/xfs_mru_cache.c
parent	ce695c6551f9488e75247ac1eefcd173fda0c029 (diff)
xfs: embed mru_elem into parent structure
There is no need to do a separate allocation for each MRU element; just embed the structure into the parent one in the user. Besides saving a memory allocation and the infrastructure required for it, this also simplifies the API.

While we are doing major surgery on xfs_mru_cache.c, also de-typedef it and make struct xfs_mru_cache private to the implementation file.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Dave Chinner <dchinner@redhat.com>
Signed-off-by: Dave Chinner <david@fromorbit.com>
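As a sketch of what the new API asks of a cache user (the names xfs_fstrm_item, xfs_fstrm_free_func and the payload field below are hypothetical, loosely modelled on the filestreams code; they are not part of this patch): the element is embedded in the caller's own structure, and the free function recovers the container with container_of() instead of receiving a key/value pair.

/*
 * Hypothetical user-side structure: the MRU element is embedded,
 * so no separate allocation is needed for it.
 */
struct xfs_fstrm_item {
	struct xfs_mru_cache_elem	mru;	/* embedded element */
	struct xfs_perag		*pag;	/* illustrative payload */
};

/* The free function now takes the element itself. */
STATIC void
xfs_fstrm_free_func(
	struct xfs_mru_cache_elem	*mru)
{
	struct xfs_fstrm_item	*item =
		container_of(mru, struct xfs_fstrm_item, mru);

	/* Drop whatever the item references, then free the item. */
	kmem_free(item);
}

Insertion then passes the embedded element rather than a value pointer, e.g. xfs_mru_cache_insert(cache, key, &item->mru).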
Diffstat (limited to 'fs/xfs/xfs_mru_cache.c')
-rw-r--r--	fs/xfs/xfs_mru_cache.c	155
1 file changed, 61 insertions(+), 94 deletions(-)
diff --git a/fs/xfs/xfs_mru_cache.c b/fs/xfs/xfs_mru_cache.c
index 4aa316644721..f99b4933dc22 100644
--- a/fs/xfs/xfs_mru_cache.c
+++ b/fs/xfs/xfs_mru_cache.c
@@ -100,14 +100,20 @@
  * likely result in a loop in one of the lists. That's a sure-fire recipe for
  * an infinite loop in the code.
  */
-typedef struct xfs_mru_cache_elem
-{
-	struct list_head list_node;
-	unsigned long	key;
-	void		*value;
-} xfs_mru_cache_elem_t;
+struct xfs_mru_cache {
+	struct radix_tree_root	store;     /* Core storage data structure.  */
+	struct list_head	*lists;    /* Array of lists, one per grp.  */
+	struct list_head	reap_list; /* Elements overdue for reaping. */
+	spinlock_t		lock;      /* Lock to protect this struct.  */
+	unsigned int		grp_count; /* Number of discrete groups.    */
+	unsigned int		grp_time;  /* Time period spanned by grps.  */
+	unsigned int		lru_grp;   /* Group containing time zero.   */
+	unsigned long		time_zero; /* Time first element was added. */
+	xfs_mru_cache_free_func_t free_func; /* Function pointer for freeing. */
+	struct delayed_work	work;      /* Workqueue data for reaping.   */
+	unsigned int		queued;    /* work has been queued */
+};
 
-static kmem_zone_t		*xfs_mru_elem_zone;
 static struct workqueue_struct	*xfs_mru_reap_wq;
 
 /*
@@ -129,12 +135,12 @@ static struct workqueue_struct *xfs_mru_reap_wq;
  */
 STATIC unsigned long
 _xfs_mru_cache_migrate(
-	xfs_mru_cache_t	*mru,
+	struct xfs_mru_cache	*mru,
 	unsigned long	now)
 {
 	unsigned int	grp;
 	unsigned int	migrated = 0;
 	struct list_head *lru_list;
 
 	/* Nothing to do if the data store is empty. */
 	if (!mru->time_zero)
@@ -193,11 +199,11 @@ _xfs_mru_cache_migrate(
  */
 STATIC void
 _xfs_mru_cache_list_insert(
-	xfs_mru_cache_t		*mru,
-	xfs_mru_cache_elem_t	*elem)
+	struct xfs_mru_cache	*mru,
+	struct xfs_mru_cache_elem *elem)
 {
 	unsigned int	grp = 0;
 	unsigned long	now = jiffies;
 
 	/*
 	 * If the data store is empty, initialise time zero, leave grp set to
@@ -231,10 +237,10 @@ _xfs_mru_cache_list_insert(
  */
 STATIC void
 _xfs_mru_cache_clear_reap_list(
-	xfs_mru_cache_t		*mru) __releases(mru->lock) __acquires(mru->lock)
-
+	struct xfs_mru_cache	*mru)
+		__releases(mru->lock) __acquires(mru->lock)
 {
-	xfs_mru_cache_elem_t	*elem, *next;
+	struct xfs_mru_cache_elem *elem, *next;
 	struct list_head	tmp;
 
 	INIT_LIST_HEAD(&tmp);
@@ -252,15 +258,8 @@ _xfs_mru_cache_clear_reap_list(
 	spin_unlock(&mru->lock);
 
 	list_for_each_entry_safe(elem, next, &tmp, list_node) {
-
-		/* Remove the element from the reap list. */
 		list_del_init(&elem->list_node);
-
-		/* Call the client's free function with the key and value pointer. */
-		mru->free_func(elem->key, elem->value);
-
-		/* Free the element structure. */
-		kmem_zone_free(xfs_mru_elem_zone, elem);
+		mru->free_func(elem);
 	}
 
 	spin_lock(&mru->lock);
@@ -277,7 +276,8 @@ STATIC void
 _xfs_mru_cache_reap(
 	struct work_struct	*work)
 {
-	xfs_mru_cache_t		*mru = container_of(work, xfs_mru_cache_t, work.work);
+	struct xfs_mru_cache	*mru =
+		container_of(work, struct xfs_mru_cache, work.work);
 	unsigned long		now, next;
 
 	ASSERT(mru && mru->lists);
@@ -304,28 +304,16 @@ _xfs_mru_cache_reap(
 int
 xfs_mru_cache_init(void)
 {
-	xfs_mru_elem_zone = kmem_zone_init(sizeof(xfs_mru_cache_elem_t),
-					 "xfs_mru_cache_elem");
-	if (!xfs_mru_elem_zone)
-		goto out;
-
 	xfs_mru_reap_wq = alloc_workqueue("xfs_mru_cache", WQ_MEM_RECLAIM, 1);
 	if (!xfs_mru_reap_wq)
-		goto out_destroy_mru_elem_zone;
-
+		return -ENOMEM;
 	return 0;
-
- out_destroy_mru_elem_zone:
-	kmem_zone_destroy(xfs_mru_elem_zone);
- out:
-	return -ENOMEM;
 }
 
 void
 xfs_mru_cache_uninit(void)
 {
 	destroy_workqueue(xfs_mru_reap_wq);
-	kmem_zone_destroy(xfs_mru_elem_zone);
 }
 
 /*
@@ -336,14 +324,14 @@ xfs_mru_cache_uninit(void)
  */
 int
 xfs_mru_cache_create(
-	xfs_mru_cache_t		**mrup,
+	struct xfs_mru_cache	**mrup,
 	unsigned int		lifetime_ms,
 	unsigned int		grp_count,
 	xfs_mru_cache_free_func_t free_func)
 {
-	xfs_mru_cache_t		*mru = NULL;
+	struct xfs_mru_cache	*mru = NULL;
 	int			err = 0, grp;
 	unsigned int		grp_time;
 
 	if (mrup)
 		*mrup = NULL;
@@ -400,7 +388,7 @@ exit:
  */
 static void
 xfs_mru_cache_flush(
-	xfs_mru_cache_t		*mru)
+	struct xfs_mru_cache	*mru)
 {
 	if (!mru || !mru->lists)
 		return;
@@ -420,7 +408,7 @@ xfs_mru_cache_flush(
 
 void
 xfs_mru_cache_destroy(
-	xfs_mru_cache_t		*mru)
+	struct xfs_mru_cache	*mru)
 {
 	if (!mru || !mru->lists)
 		return;
@@ -438,45 +426,29 @@ xfs_mru_cache_destroy(
  */
 int
 xfs_mru_cache_insert(
-	xfs_mru_cache_t	*mru,
+	struct xfs_mru_cache	*mru,
 	unsigned long	key,
-	void		*value)
+	struct xfs_mru_cache_elem *elem)
 {
-	xfs_mru_cache_elem_t *elem;
-	int		error;
+	int		error;
 
 	ASSERT(mru && mru->lists);
 	if (!mru || !mru->lists)
 		return EINVAL;
 
-	elem = kmem_zone_zalloc(xfs_mru_elem_zone, KM_SLEEP);
-	if (!elem)
+	if (radix_tree_preload(GFP_KERNEL))
 		return ENOMEM;
 
-	if (radix_tree_preload(GFP_KERNEL)) {
-		error = ENOMEM;
-		goto out_free_item;
-	}
-
 	INIT_LIST_HEAD(&elem->list_node);
 	elem->key = key;
-	elem->value = value;
 
 	spin_lock(&mru->lock);
-
 	error = -radix_tree_insert(&mru->store, key, elem);
 	radix_tree_preload_end();
-	if (error) {
-		spin_unlock(&mru->lock);
-		goto out_free_item;
-	}
-	_xfs_mru_cache_list_insert(mru, elem);
-
+	if (!error)
+		_xfs_mru_cache_list_insert(mru, elem);
 	spin_unlock(&mru->lock);
 
-	return 0;
-out_free_item:
-	kmem_zone_free(xfs_mru_elem_zone, elem);
 	return error;
 }
 
@@ -486,13 +458,12 @@ out_free_item:
  * the client data pointer for the removed element is returned, otherwise this
  * function will return a NULL pointer.
  */
-void *
+struct xfs_mru_cache_elem *
 xfs_mru_cache_remove(
-	xfs_mru_cache_t		*mru,
+	struct xfs_mru_cache	*mru,
 	unsigned long		key)
 {
-	xfs_mru_cache_elem_t	*elem;
-	void			*value = NULL;
+	struct xfs_mru_cache_elem *elem;
 
 	ASSERT(mru && mru->lists);
 	if (!mru || !mru->lists)
@@ -500,17 +471,11 @@ xfs_mru_cache_remove(
 
 	spin_lock(&mru->lock);
 	elem = radix_tree_delete(&mru->store, key);
-	if (elem) {
-		value = elem->value;
+	if (elem)
 		list_del(&elem->list_node);
-	}
-
 	spin_unlock(&mru->lock);
 
-	if (elem)
-		kmem_zone_free(xfs_mru_elem_zone, elem);
-
-	return value;
+	return elem;
 }
 
 /*
@@ -519,13 +484,14 @@ xfs_mru_cache_remove(
  */
 void
 xfs_mru_cache_delete(
-	xfs_mru_cache_t		*mru,
+	struct xfs_mru_cache	*mru,
 	unsigned long		key)
 {
-	void			*value = xfs_mru_cache_remove(mru, key);
+	struct xfs_mru_cache_elem *elem;
 
-	if (value)
-		mru->free_func(key, value);
+	elem = xfs_mru_cache_remove(mru, key);
+	if (elem)
+		mru->free_func(elem);
 }
 
 /*
@@ -548,12 +514,12 @@ xfs_mru_cache_delete(
  * status, we need to help it get it right by annotating the path that does
  * not release the lock.
  */
-void *
+struct xfs_mru_cache_elem *
 xfs_mru_cache_lookup(
-	xfs_mru_cache_t		*mru,
+	struct xfs_mru_cache	*mru,
 	unsigned long		key)
 {
-	xfs_mru_cache_elem_t	*elem;
+	struct xfs_mru_cache_elem *elem;
 
 	ASSERT(mru && mru->lists);
 	if (!mru || !mru->lists)
@@ -568,7 +534,7 @@ xfs_mru_cache_lookup(
 	} else
 		spin_unlock(&mru->lock);
 
-	return elem ? elem->value : NULL;
+	return elem;
 }
 
 /*
@@ -578,7 +544,8 @@ xfs_mru_cache_lookup(
  */
 void
 xfs_mru_cache_done(
-	xfs_mru_cache_t		*mru) __releases(mru->lock)
+	struct xfs_mru_cache	*mru)
+		__releases(mru->lock)
 {
 	spin_unlock(&mru->lock);
 }
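With lookup returning the element as well, a caller uses the same container_of() pattern; a minimal sketch with the hypothetical names from above, keeping in mind that xfs_mru_cache_lookup() returns with the cache's internal lock held and xfs_mru_cache_done() releases it:

	struct xfs_mru_cache_elem	*mru_elem;
	struct xfs_fstrm_item		*item;

	mru_elem = xfs_mru_cache_lookup(cache, key);
	if (mru_elem) {
		item = container_of(mru_elem, struct xfs_fstrm_item, mru);
		/* ... use item while the internal lock is held ... */
		xfs_mru_cache_done(cache);
	}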