author		Dave Chinner <dchinner@redhat.com>	2010-09-21 20:47:20 -0400
committer	Alex Elder <aelder@sgi.com>		2010-10-18 16:07:49 -0400
commit		26af655233dd486659235f3049959d2f7dafc5a1 (patch)
tree		590b4976871582d6cad34880705a9a4021e204f5 /fs/xfs/linux-2.6/xfs_buf.c
parent		ebad861b5702c3e2332a3e906978f47144d22f70 (diff)
xfs: kill XBF_FS_MANAGED buffers
Filesystem-level managed buffers are buffers that have their lifecycle controlled by the filesystem layer, not the buffer cache. We currently cache these buffers, which makes cleanup and cache walking somewhat troublesome. Convert the fs managed buffers to uncached buffers obtained via xfs_buf_get_uncached(), and remove the XBF_FS_MANAGED special cases from the buffer cache.

Signed-off-by: Dave Chinner <dchinner@redhat.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Alex Elder <aelder@sgi.com>
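For context, a minimal caller-side sketch of the conversion described above. This is an illustration, not code from this patch (the real call-site conversions happen elsewhere in the series); the length argument, flags, and error handling are assumptions:

	xfs_buf_t	*bp;

	/*
	 * Before: the buffer was obtained from the cache and pinned there
	 * with XBF_FS_MANAGED, so xfs_buf_rele() would never tear it down:
	 *
	 *	bp = xfs_buf_get(btp, blkno, numblks, XBF_FS_MANAGED);
	 *
	 * After: the buffer bypasses the cache entirely, and its lifetime
	 * is wholly owned by the filesystem layer.
	 */
	bp = xfs_buf_get_uncached(btp, len, 0);	/* len and flags illustrative */
	if (!bp)
		return ENOMEM;			/* XFS uses positive errnos */

	/* ... use bp; the final release frees the uncached buffer ... */
	xfs_buf_relse(bp);

Because such buffers never appear in the hash chains, the XBF_FS_MANAGED special cases in xfs_buf_rele() and xfs_wait_buftarg() below can simply be deleted.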
Diffstat (limited to 'fs/xfs/linux-2.6/xfs_buf.c')
-rw-r--r-- fs/xfs/linux-2.6/xfs_buf.c | 20 ++++----------------
1 file changed, 4 insertions(+), 16 deletions(-)
diff --git a/fs/xfs/linux-2.6/xfs_buf.c b/fs/xfs/linux-2.6/xfs_buf.c
index d6928970097f..975d6589394a 100644
--- a/fs/xfs/linux-2.6/xfs_buf.c
+++ b/fs/xfs/linux-2.6/xfs_buf.c
@@ -826,8 +826,6 @@ xfs_buf_rele(
 		atomic_inc(&bp->b_hold);
 		spin_unlock(&hash->bh_lock);
 		(*(bp->b_relse)) (bp);
-	} else if (bp->b_flags & XBF_FS_MANAGED) {
-		spin_unlock(&hash->bh_lock);
 	} else {
 		ASSERT(!(bp->b_flags & (XBF_DELWRI|_XBF_DELWRI_Q)));
 		list_del_init(&bp->b_hash_list);
@@ -1433,26 +1431,16 @@ void
 xfs_wait_buftarg(
 	xfs_buftarg_t	*btp)
 {
-	xfs_buf_t	*bp, *n;
 	xfs_bufhash_t	*hash;
 	uint		i;
 
 	for (i = 0; i < (1 << btp->bt_hashshift); i++) {
 		hash = &btp->bt_hash[i];
-again:
 		spin_lock(&hash->bh_lock);
-		list_for_each_entry_safe(bp, n, &hash->bh_list, b_hash_list) {
-			ASSERT(btp == bp->b_target);
-			if (!(bp->b_flags & XBF_FS_MANAGED)) {
-				spin_unlock(&hash->bh_lock);
-				/*
-				 * Catch superblock reference count leaks
-				 * immediately
-				 */
-				BUG_ON(bp->b_bn == 0);
-				delay(100);
-				goto again;
-			}
+		while (!list_empty(&hash->bh_list)) {
+			spin_unlock(&hash->bh_lock);
+			delay(100);
+			spin_lock(&hash->bh_lock);
 		}
 		spin_unlock(&hash->bh_lock);
 	}
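Design note: with XBF_FS_MANAGED gone, any buffer still on a hash chain when xfs_wait_buftarg() runs is by definition a transient hold, so the walk degenerates into "drop the lock, delay, re-check until the chain is empty". That also lets the BUG_ON(bp->b_bn == 0) superblock-leak check go away, which is presumably moot now that the superblock buffer is no longer cached and so never appears in the hash.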