Diffstat (limited to 'fs/ext3/balloc.c')
-rw-r--r--  fs/ext3/balloc.c  16
1 file changed, 8 insertions(+), 8 deletions(-)
diff --git a/fs/ext3/balloc.c b/fs/ext3/balloc.c
index 063d994bda0b..e6b983707008 100644
--- a/fs/ext3/balloc.c
+++ b/fs/ext3/balloc.c
@@ -74,7 +74,7 @@ struct ext3_group_desc * ext3_get_group_desc(struct super_block * sb,
 }
 
 /*
  * Read the bitmap for a given block_group, reading into the specified
  * slot in the superblock's bitmap cache.
  *
  * Return buffer_head on success or NULL in case of failure.
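The comment in this hunk documents the helper that loads a group's block bitmap: it returns the buffer_head on success and NULL on failure. As a rough, illustrative sketch of that contract (not the exact code from balloc.c), using ext3_get_group_desc() from the hunk header above together with the generic buffer-cache read sb_bread(), such a helper could look like the following; the name read_group_bitmap() is made up for the example:

/* Illustrative sketch only: load the block bitmap of one group. */
static struct buffer_head *read_group_bitmap(struct super_block *sb,
					     unsigned int block_group)
{
	struct ext3_group_desc *desc;
	struct buffer_head *bh;

	desc = ext3_get_group_desc(sb, block_group, NULL);
	if (!desc)
		return NULL;		/* invalid group number */

	/* bg_block_bitmap holds the on-disk block number of the bitmap. */
	bh = sb_bread(sb, le32_to_cpu(desc->bg_block_bitmap));
	return bh;			/* NULL if the read failed */
}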
@@ -419,8 +419,8 @@ do_more:
 }
 /* @@@ This prevents newly-allocated data from being
  * freed and then reallocated within the same
  * transaction.
  *
  * Ideally we would want to allow that to happen, but to
  * do so requires making journal_forget() capable of
  * revoking the queued write of a data block, which
@@ -433,7 +433,7 @@ do_more:
 * safe not to set the allocation bit in the committed
 * bitmap, because we know that there is no outstanding
 * activity on the buffer any more and so it is safe to
 * reallocate it.
 */
 BUFFER_TRACE(bitmap_bh, "set in b_committed_data");
 J_ASSERT_BH(bitmap_bh,
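The two hunks above explain why a block freed in the current transaction must not be handed out again before that transaction commits, and why it is enough to keep it marked busy in the committed copy of the bitmap (b_committed_data). Below is a small user-space model of that two-bitmap idea, with made-up names and none of the kernel's locking: a freed block stays busy in the committed copy until commit, so the allocator skips it.

#include <stdbool.h>
#include <stdio.h>

#define NBLOCKS 64

static unsigned char live[NBLOCKS];      /* 1 = in use in the live bitmap */
static unsigned char committed[NBLOCKS]; /* 1 = in use as of the last commit */

static void free_block(int blk)
{
	live[blk] = 0;       /* free in the live bitmap ...         */
	committed[blk] = 1;  /* ... but still busy until the commit */
}

static bool allocatable(int blk)
{
	/* Only blocks free in *both* copies may be reallocated. */
	return !live[blk] && !committed[blk];
}

static void commit_transaction(void)
{
	/* At commit the committed copy catches up with the live state. */
	for (int i = 0; i < NBLOCKS; i++)
		committed[i] = live[i];
}

int main(void)
{
	live[10] = committed[10] = 1;   /* block 10 starts out allocated */
	free_block(10);
	printf("before commit: %d\n", allocatable(10));  /* prints 0 */
	commit_transaction();
	printf("after commit:  %d\n", allocatable(10));  /* prints 1 */
	return 0;
}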
@@ -518,7 +518,7 @@ void ext3_free_blocks(handle_t *handle, struct inode *inode,
 * data would allow the old block to be overwritten before the
 * transaction committed (because we force data to disk before commit).
 * This would lead to corruption if we crashed between overwriting the
 * data and committing the delete.
 *
 * @@@ We may want to make this allocation behaviour conditional on
 * data-writes at some point, and disable it for metadata allocations or
@@ -584,7 +584,7 @@ find_next_usable_block(ext3_grpblk_t start, struct buffer_head *bh,
 
 if (start > 0) {
 	/*
 	 * The goal was occupied; search forward for a free
 	 * block within the next XX blocks.
 	 *
 	 * end_goal is more or less random, but it has to be
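The comment here describes the fallback when the goal block itself is taken: scan forward a short, bounded distance for a free block before giving up on the neighbourhood of the goal. A minimal user-space model of that bounded forward scan follows; the byte-per-block bitmap and the 64-block window are assumptions made for the example, not values taken from this hunk.

#include <stdio.h>

/* Return the first free block in [start, start + window), or -1. */
static int find_near_goal(const unsigned char *bitmap, int nbits,
			  int start, int window)
{
	int end = start + window < nbits ? start + window : nbits;

	for (int i = start; i < end; i++)
		if (!bitmap[i])		/* first free block after the goal */
			return i;
	return -1;			/* nothing free near the goal */
}

int main(void)
{
	unsigned char bitmap[128] = { 0 };

	for (int i = 0; i < 24; i++)
		bitmap[i] = 1;		/* blocks 0..23 busy; the goal is 16 */
	printf("%d\n", find_near_goal(bitmap, 128, 16, 64));	/* prints 24 */
	return 0;
}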
@@ -1194,7 +1194,7 @@ int ext3_should_retry_alloc(struct super_block *sb, int *retries)
 /*
  * ext3_new_block uses a goal block to assist allocation. If the goal is
  * free, or there is a free block within 32 blocks of the goal, that block
  * is allocated. Otherwise a forward search is made for a free block; within
  * each block group the search first looks for an entire free byte in the block
  * bitmap, and then for any free bit if that fails.
  * This function also updates quota and i_blocks field.
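This comment summarises the allocation strategy of ext3_new_block: try the goal (or a block near it), and otherwise search each group's bitmap first for a whole free byte (eight adjacent free blocks) and only then for any single free bit. The user-space sketch below models that two-phase bitmap search only; it is not the kernel implementation.

#include <stdio.h>
#include <string.h>

static int find_free_block(const unsigned char *bitmap, int nbytes)
{
	/* Phase 1: a whole free byte means 8 contiguous free blocks. */
	for (int i = 0; i < nbytes; i++)
		if (bitmap[i] == 0x00)
			return i * 8;

	/* Phase 2: settle for any single free bit. */
	for (int i = 0; i < nbytes; i++)
		for (int b = 0; b < 8; b++)
			if (!(bitmap[i] & (1 << b)))
				return i * 8 + b;

	return -1;	/* the group is full */
}

int main(void)
{
	unsigned char bitmap[16];

	memset(bitmap, 0xff, sizeof(bitmap));
	bitmap[3] = 0xfe;	/* a single free bit at block 24 */
	bitmap[7] = 0x00;	/* a fully free byte at blocks 56..63 */
	printf("%d\n", find_free_block(bitmap, sizeof(bitmap)));  /* prints 56 */
	return 0;
}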
@@ -1303,7 +1303,7 @@ retry_alloc:
 smp_rmb();
 
 /*
  * Now search the rest of the groups. We assume that
  * i and gdp correctly point to the last group visited.
  */
 for (bgi = 0; bgi < ngroups; bgi++) {
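The loop that begins in this hunk visits every remaining block group once, continuing from the group that was already tried and wrapping back to group 0 after the last one. A tiny user-space model of that wrap-around scan order, with illustrative values only:

#include <stdio.h>

int main(void)
{
	int ngroups = 8;	/* example filesystem with 8 block groups   */
	int i = 5;		/* group already tried (the goal's group)   */

	for (int bgi = 0; bgi < ngroups; bgi++) {
		i++;
		if (i >= ngroups)
			i = 0;	/* wrap around to the first group */
		printf("trying group %d\n", i);
	}
	return 0;
}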