about summary refs log tree commit diff stats
diff options
context:
space:
mode:
authorAneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>2009-05-13 15:13:42 -0400
committerTheodore Ts'o <tytso@mit.edu>2009-05-13 15:13:42 -0400
commit79ffab34391933ee3b95dac7f25c0478fa2f8f1e (patch)
tree8bc139928e172ef2ebd38e01f97dc01f886d8526
parent9fa7eb283c5cdc2b0f4a8cfe6387ed82e5e9a3d3 (diff)
ext4: Properly initialize the buffer_head state
These struct buffer_heads are allocated on the stack (and hence are initialized with stack garbage). They are only used to call a get_blocks() function, so that's mostly OK, but b_state must be initialized to be 0 so we don't have any unexpected BH_* flags set by accident, such as BH_Unwritten or BH_Delay. Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com> Signed-off-by: "Theodore Ts'o" <tytso@mit.edu>
-rw-r--r--fs/ext4/extents.c1
-rw-r--r--fs/ext4/inode.c15
-rw-r--r--fs/mpage.c6
3 files changed, 19 insertions(+), 3 deletions(-)
diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c
index e3a55eb8b26a..a953214f2829 100644
--- a/fs/ext4/extents.c
+++ b/fs/ext4/extents.c
@@ -3150,6 +3150,7 @@ retry:
 			ret = PTR_ERR(handle);
 			break;
 		}
+		map_bh.b_state = 0;
 		ret = ext4_get_blocks_wrap(handle, inode, block,
 				max_blocks, &map_bh,
 				EXT4_CREATE_UNINITIALIZED_EXT, 0, 0);
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
index 2a9ffd528dd1..d7ad0bb73cd5 100644
--- a/fs/ext4/inode.c
+++ b/fs/ext4/inode.c
@@ -2055,7 +2055,20 @@ static int mpage_da_map_blocks(struct mpage_da_data *mpd)
 	if ((mpd->b_state & (1 << BH_Mapped)) &&
 	    !(mpd->b_state & (1 << BH_Delay)))
 		return 0;
-	new.b_state = mpd->b_state;
+	/*
+	 * We need to make sure the BH_Delay flag is passed down to
+	 * ext4_da_get_block_write(), since it calls
+	 * ext4_get_blocks_wrap() with the EXT4_DELALLOC_RSVED flag.
+	 * This flag causes ext4_get_blocks_wrap() to call
+	 * ext4_da_update_reserve_space() if the passed buffer head
+	 * has the BH_Delay flag set.  In the future, once we clean up
+	 * the interfaces to ext4_get_blocks_wrap(), we should pass in
+	 * a separate flag which requests that the delayed allocation
+	 * statistics should be updated, instead of depending on the
+	 * state information getting passed down via the map_bh's
+	 * state bitmasks plus the magic EXT4_DELALLOC_RSVED flag.
+	 */
+	new.b_state = mpd->b_state & (1 << BH_Delay);
 	new.b_blocknr = 0;
 	new.b_size = mpd->b_size;
 	next = mpd->b_blocknr;
diff --git a/fs/mpage.c b/fs/mpage.c
index 680ba60863ff..42381bd6543b 100644
--- a/fs/mpage.c
+++ b/fs/mpage.c
@@ -379,7 +379,8 @@ mpage_readpages(struct address_space *mapping, struct list_head *pages,
 	struct buffer_head map_bh;
 	unsigned long first_logical_block = 0;
 
-	clear_buffer_mapped(&map_bh);
+	map_bh.b_state = 0;
+	map_bh.b_size = 0;
 	for (page_idx = 0; page_idx < nr_pages; page_idx++) {
 		struct page *page = list_entry(pages->prev, struct page, lru);
 
@@ -412,7 +413,8 @@ int mpage_readpage(struct page *page, get_block_t get_block)
 	struct buffer_head map_bh;
 	unsigned long first_logical_block = 0;
 
-	clear_buffer_mapped(&map_bh);
+	map_bh.b_state = 0;
+	map_bh.b_size = 0;
 	bio = do_mpage_readpage(bio, page, 1, &last_block_in_bio,
 			&map_bh, &first_logical_block, get_block);
 	if (bio)