Diffstat (limited to 'fs/xfs/xfs_mru_cache.c')
-rw-r--r--	fs/xfs/xfs_mru_cache.c	54
1 file changed, 32 insertions, 22 deletions
diff --git a/fs/xfs/xfs_mru_cache.c b/fs/xfs/xfs_mru_cache.c
index e0b358c1c533..a0b2c0a2589a 100644
--- a/fs/xfs/xfs_mru_cache.c
+++ b/fs/xfs/xfs_mru_cache.c
@@ -225,10 +225,14 @@ _xfs_mru_cache_list_insert(
  * list need to be deleted. For each element this involves removing it from the
  * data store, removing it from the reap list, calling the client's free
  * function and deleting the element from the element zone.
+ *
+ * We get called holding the mru->lock, which we drop and then reacquire.
+ * Sparse need special help with this to tell it we know what we are doing.
  */
 STATIC void
 _xfs_mru_cache_clear_reap_list(
-	xfs_mru_cache_t		*mru)
+	xfs_mru_cache_t		*mru) __releases(mru->lock) __acquires(mru->lock)
+
 {
 	xfs_mru_cache_elem_t	*elem, *next;
 	struct list_head	tmp;
@@ -245,7 +249,7 @@ _xfs_mru_cache_clear_reap_list(
 		 */
 		list_move(&elem->list_node, &tmp);
 	}
-	mutex_spinunlock(&mru->lock, 0);
+	spin_unlock(&mru->lock);
 
 	list_for_each_entry_safe(elem, next, &tmp, list_node) {
 
@@ -259,7 +263,7 @@ _xfs_mru_cache_clear_reap_list(
 		kmem_zone_free(xfs_mru_elem_zone, elem);
 	}
 
-	mutex_spinlock(&mru->lock);
+	spin_lock(&mru->lock);
 }
 
 /*
@@ -280,7 +284,7 @@ _xfs_mru_cache_reap(
 	if (!mru || !mru->lists)
 		return;
 
-	mutex_spinlock(&mru->lock);
+	spin_lock(&mru->lock);
 	next = _xfs_mru_cache_migrate(mru, jiffies);
 	_xfs_mru_cache_clear_reap_list(mru);
 
@@ -294,7 +298,7 @@ _xfs_mru_cache_reap(
 		queue_delayed_work(xfs_mru_reap_wq, &mru->work, next);
 	}
 
-	mutex_spinunlock(&mru->lock, 0);
+	spin_unlock(&mru->lock);
 }
 
 int
@@ -368,7 +372,7 @@ xfs_mru_cache_create(
 	 */
 	INIT_RADIX_TREE(&mru->store, GFP_ATOMIC);
 	INIT_LIST_HEAD(&mru->reap_list);
-	spinlock_init(&mru->lock, "xfs_mru_cache");
+	spin_lock_init(&mru->lock);
 	INIT_DELAYED_WORK(&mru->work, _xfs_mru_cache_reap);
 
 	mru->grp_time = grp_time;
@@ -398,17 +402,17 @@ xfs_mru_cache_flush(
 	if (!mru || !mru->lists)
 		return;
 
-	mutex_spinlock(&mru->lock);
+	spin_lock(&mru->lock);
 	if (mru->queued) {
-		mutex_spinunlock(&mru->lock, 0);
+		spin_unlock(&mru->lock);
 		cancel_rearming_delayed_workqueue(xfs_mru_reap_wq, &mru->work);
-		mutex_spinlock(&mru->lock);
+		spin_lock(&mru->lock);
 	}
 
 	_xfs_mru_cache_migrate(mru, jiffies + mru->grp_count * mru->grp_time);
 	_xfs_mru_cache_clear_reap_list(mru);
 
-	mutex_spinunlock(&mru->lock, 0);
+	spin_unlock(&mru->lock);
 }
 
 void
@@ -454,13 +458,13 @@ xfs_mru_cache_insert(
 	elem->key = key;
 	elem->value = value;
 
-	mutex_spinlock(&mru->lock);
+	spin_lock(&mru->lock);
 
 	radix_tree_insert(&mru->store, key, elem);
 	radix_tree_preload_end();
 	_xfs_mru_cache_list_insert(mru, elem);
 
-	mutex_spinunlock(&mru->lock, 0);
+	spin_unlock(&mru->lock);
 
 	return 0;
 }
@@ -483,14 +487,14 @@ xfs_mru_cache_remove(
 	if (!mru || !mru->lists)
 		return NULL;
 
-	mutex_spinlock(&mru->lock);
+	spin_lock(&mru->lock);
 	elem = radix_tree_delete(&mru->store, key);
 	if (elem) {
 		value = elem->value;
 		list_del(&elem->list_node);
 	}
 
-	mutex_spinunlock(&mru->lock, 0);
+	spin_unlock(&mru->lock);
 
 	if (elem)
 		kmem_zone_free(xfs_mru_elem_zone, elem);
@@ -528,6 +532,10 @@ xfs_mru_cache_delete(
  *
  * If the element isn't found, this function returns NULL and the spinlock is
  * released. xfs_mru_cache_done() should NOT be called when this occurs.
+ *
+ * Because sparse isn't smart enough to know about conditional lock return
+ * status, we need to help it get it right by annotating the path that does
+ * not release the lock.
  */
 void *
 xfs_mru_cache_lookup(
@@ -540,14 +548,14 @@ xfs_mru_cache_lookup(
 	if (!mru || !mru->lists)
 		return NULL;
 
-	mutex_spinlock(&mru->lock);
+	spin_lock(&mru->lock);
 	elem = radix_tree_lookup(&mru->store, key);
 	if (elem) {
 		list_del(&elem->list_node);
 		_xfs_mru_cache_list_insert(mru, elem);
-	}
-	else
-		mutex_spinunlock(&mru->lock, 0);
+		__release(mru_lock); /* help sparse not be stupid */
+	} else
+		spin_unlock(&mru->lock);
 
 	return elem ? elem->value : NULL;
 }
@@ -571,10 +579,12 @@ xfs_mru_cache_peek(
 	if (!mru || !mru->lists)
 		return NULL;
 
-	mutex_spinlock(&mru->lock);
+	spin_lock(&mru->lock);
 	elem = radix_tree_lookup(&mru->store, key);
 	if (!elem)
-		mutex_spinunlock(&mru->lock, 0);
+		spin_unlock(&mru->lock);
+	else
+		__release(mru_lock); /* help sparse not be stupid */
 
 	return elem ? elem->value : NULL;
 }
@@ -586,7 +596,7 @@ xfs_mru_cache_peek(
  */
 void
 xfs_mru_cache_done(
-	xfs_mru_cache_t	*mru)
+	xfs_mru_cache_t	*mru) __releases(mru->lock)
 {
-	mutex_spinunlock(&mru->lock, 0);
+	spin_unlock(&mru->lock);
 }
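
Note on the annotations this patch adds: __releases()/__acquires() on a function declaration tell sparse that the function is entered with the named lock held (or not) and exits with it dropped (or taken), and a bare __release() statement balances a code path that deliberately returns with the lock still held. The sketch below is not part of the patch; it is a minimal, self-contained illustration of the same two patterns, using made-up names (demo_lock, demo_drop_and_reacquire, demo_cond_lookup, demo_done) and a pthread mutex standing in for the kernel spinlock.

/*
 * Minimal sketch (not from the XFS code) of the sparse lock annotations
 * used in the patch above.  When the file is not being checked by sparse
 * (__CHECKER__ undefined) the annotations expand to nothing, so this
 * builds as ordinary C with pthreads.
 */
#include <pthread.h>
#include <stddef.h>

#ifdef __CHECKER__
# define __releases(x)	__attribute__((context(x, 1, 0)))
# define __acquires(x)	__attribute__((context(x, 0, 1)))
# define __release(x)	__context__(x, -1)
#else
# define __releases(x)
# define __acquires(x)
# define __release(x)
#endif

static pthread_mutex_t demo_lock = PTHREAD_MUTEX_INITIALIZER;
static int demo_table[16];

/*
 * Pattern 1: called with demo_lock held, drops it around some work and
 * reacquires it before returning, as _xfs_mru_cache_clear_reap_list() does.
 */
static void demo_drop_and_reacquire(void)
	__releases(demo_lock) __acquires(demo_lock)
{
	pthread_mutex_unlock(&demo_lock);
	/* ... slow work done without the lock held ... */
	pthread_mutex_lock(&demo_lock);
}

/*
 * Pattern 2: returns with demo_lock still held on success (the caller must
 * call demo_done()), but releases it on failure, as xfs_mru_cache_lookup()
 * does.  The __release() statement tells sparse that the imbalance on the
 * success path is intentional.
 */
static int *demo_cond_lookup(unsigned int key)
{
	pthread_mutex_lock(&demo_lock);
	if (key < 16) {
		__release(demo_lock);	/* lock deliberately left held */
		return &demo_table[key];
	}
	pthread_mutex_unlock(&demo_lock);
	return NULL;
}

/* Pairs with a successful demo_cond_lookup(), like xfs_mru_cache_done(). */
static void demo_done(void) __releases(demo_lock)
{
	pthread_mutex_unlock(&demo_lock);
}

int main(void)
{
	int *val;

	pthread_mutex_lock(&demo_lock);
	demo_drop_and_reacquire();
	pthread_mutex_unlock(&demo_lock);

	val = demo_cond_lookup(3);
	if (val)
		demo_done();
	return 0;
}

In a kernel tree the corresponding context-imbalance warnings are what "make C=2" reports; a standalone file like the sketch above can be fed to sparse directly, since sparse defines __CHECKER__ itself when it runs.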