Diffstat (limited to 'fs/xfs/xfs_mru_cache.c')
-rw-r--r--  fs/xfs/xfs_mru_cache.c | 18 ++++++++++++++----
1 file changed, 14 insertions(+), 4 deletions(-)
diff --git a/fs/xfs/xfs_mru_cache.c b/fs/xfs/xfs_mru_cache.c
index 012209eda35d..a0b2c0a2589a 100644
--- a/fs/xfs/xfs_mru_cache.c
+++ b/fs/xfs/xfs_mru_cache.c
@@ -225,10 +225,14 @@ _xfs_mru_cache_list_insert(
  * list need to be deleted. For each element this involves removing it from the
  * data store, removing it from the reap list, calling the client's free
  * function and deleting the element from the element zone.
+ *
+ * We get called holding the mru->lock, which we drop and then reacquire.
+ * Sparse needs special help with this to tell it we know what we are doing.
  */
 STATIC void
 _xfs_mru_cache_clear_reap_list(
-	xfs_mru_cache_t		*mru)
+	xfs_mru_cache_t		*mru) __releases(mru->lock) __acquires(mru->lock)
+
 {
 	xfs_mru_cache_elem_t	*elem, *next;
 	struct list_head	tmp;
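
The hunk above is entered with mru->lock held, drops it internally, and retakes it before returning; the __releases()/__acquires() pair tells sparse the context imbalance is intentional. A minimal sketch of the same pattern outside XFS (struct my_ctx and reap_pending are made-up names, not kernel APIs):

#include <linux/spinlock.h>

struct my_ctx {
	spinlock_t	lock;
};

/* Entered with ctx->lock held: drop it, do unlocked work, retake it. */
static void reap_pending(struct my_ctx *ctx)
		__releases(ctx->lock) __acquires(ctx->lock)
{
	spin_unlock(&ctx->lock);	/* pairs with __releases(ctx->lock) */
	/* ... work that must not run under the lock ... */
	spin_lock(&ctx->lock);		/* pairs with __acquires(ctx->lock) */
}
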
@@ -528,6 +532,10 @@ xfs_mru_cache_delete(
  *
  * If the element isn't found, this function returns NULL and the spinlock is
  * released. xfs_mru_cache_done() should NOT be called when this occurs.
+ *
+ * Because sparse isn't smart enough to know about conditional lock return
+ * status, we need to help it get it right by annotating the path that does
+ * not release the lock.
  */
 void *
 xfs_mru_cache_lookup(
@@ -545,8 +553,8 @@ xfs_mru_cache_lookup(
 	if (elem) {
 		list_del(&elem->list_node);
 		_xfs_mru_cache_list_insert(mru, elem);
-	}
-	else
+		__release(mru_lock); /* help sparse not be stupid */
+	} else
 		spin_unlock(&mru->lock);
 
 	return elem ? elem->value : NULL;
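
In the hunk above, a hit returns with mru->lock still held (the caller drops it later via xfs_mru_cache_done()), while a miss unlocks before returning; the fake __release() does not unlock anything, it only balances sparse's context counting on the held path. A hedged sketch of this conditional-return pattern, with hypothetical names my_lookup/my_done and struct my_ctx:

#include <linux/spinlock.h>

struct my_ctx {
	spinlock_t	lock;
	int		item;
};

/* Returns with ctx->lock held on success, released on failure. */
static int *my_lookup(struct my_ctx *ctx)
{
	spin_lock(&ctx->lock);
	if (ctx->item) {
		/* No real unlock here: this only tells sparse that
		 * returning with the lock held is intentional. */
		__release(ctx_lock);
		return &ctx->item;
	}
	spin_unlock(&ctx->lock);
	return NULL;
}

/* Pairs with a successful my_lookup() and actually drops the lock. */
static void my_done(struct my_ctx *ctx) __releases(ctx->lock)
{
	spin_unlock(&ctx->lock);
}
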
@@ -575,6 +583,8 @@ xfs_mru_cache_peek(
 	elem = radix_tree_lookup(&mru->store, key);
 	if (!elem)
 		spin_unlock(&mru->lock);
+	else
+		__release(mru_lock); /* help sparse not be stupid */
 
 	return elem ? elem->value : NULL;
 }
@@ -586,7 +596,7 @@ xfs_mru_cache_peek(
  */
 void
 xfs_mru_cache_done(
-	xfs_mru_cache_t		*mru)
+	xfs_mru_cache_t		*mru) __releases(mru->lock)
 {
 	spin_unlock(&mru->lock);
 }
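
Taken together, the annotations encode this file's calling convention: a non-NULL return from xfs_mru_cache_lookup() or xfs_mru_cache_peek() leaves mru->lock held and must be paired with xfs_mru_cache_done(); a NULL return means the lock was already dropped. A hypothetical caller, assuming the signatures of this era (example_use and key are illustrative):

static void example_use(xfs_mru_cache_t *mru, unsigned long key)
{
	void	*value = xfs_mru_cache_lookup(mru, key);

	if (value) {
		/* mru->lock is held here; use value, then drop the lock. */
		xfs_mru_cache_done(mru);
	}
	/* On NULL, the lookup already released mru->lock. */
}
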