author      Theodore Ts'o <tytso@mit.edu>    2009-05-14 09:29:45 -0400
committer   Theodore Ts'o <tytso@mit.edu>    2009-05-14 09:29:45 -0400
commit      2fa3cdfb319055fd8b25abdafa413e16f00ad493 (patch)
tree        c85e56fa92eecc163a85084d11aef356a686c9b9 /fs/ext4/inode.c
parent      a2dc52b5d1d8cc280b3e795abf1c80ac8c49f30c (diff)
ext4: Merge ext4_da_get_block_write() into mpage_da_map_blocks()
The static function ext4_da_get_block_write() was only used by
mpage_da_map_blocks(). So to simplify the code, merge that function
into mpage_da_map_blocks().
Signed-off-by: "Theodore Ts'o" <tytso@mit.edu>
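
The pattern at work is simple: a static helper with exactly one caller can be folded back into that caller, turning its early returns into the caller's own error paths. A minimal sketch of the pattern with hypothetical names (not the ext4 code):

#include <errno.h>

struct ctx { int len; };

static int write_out(struct ctx *c) { return c->len; }

/* Before: a static helper with exactly one caller. */
static int fill_len(struct ctx *c, int n)
{
        if (n <= 0)
                return -EINVAL;
        c->len = n;
        return 0;
}

static int process_before(struct ctx *c, int n)
{
        int err = fill_len(c, n);

        if (err)
                return err;
        return write_out(c);
}

/* After: the helper's body is inlined into its only caller and the
 * helper itself is deleted (kept above only for comparison). */
static int process_after(struct ctx *c, int n)
{
        if (n <= 0)
                return -EINVAL;
        c->len = n;
        return write_out(c);
}

In the patch below, the same move also lets mpage_da_map_blocks() feed its own fields (b_blocknr, b_size, inode) straight into ext4_get_blocks() instead of smuggling the request size through new.b_size first.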
Diffstat (limited to 'fs/ext4/inode.c')
-rw-r--r--   fs/ext4/inode.c   110
1 file changed, 43 insertions, 67 deletions
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
index e6113c3a126f..bfe50a22363b 100644
--- a/fs/ext4/inode.c
+++ b/fs/ext4/inode.c
@@ -2006,57 +2006,6 @@ static void ext4_print_free_blocks(struct inode *inode)
 }
 
 /*
- * This function is used by mpage_da_map_blocks().  We separate it out
- * as a separate function just to make life easier, and because
- * mpage_da_map_blocks() used to be a generic function that took a
- * get_block_t.
- */
-static int ext4_da_get_block_write(struct inode *inode, sector_t iblock,
-                                   struct buffer_head *bh_result)
-{
-        int ret;
-        unsigned max_blocks = bh_result->b_size >> inode->i_blkbits;
-        loff_t disksize = EXT4_I(inode)->i_disksize;
-        handle_t *handle = NULL;
-
-        handle = ext4_journal_current_handle();
-        BUG_ON(!handle);
-        ret = ext4_get_blocks(handle, inode, iblock, max_blocks,
-                              bh_result, EXT4_GET_BLOCKS_CREATE|
-                              EXT4_GET_BLOCKS_DELALLOC_RESERVE);
-        if (ret <= 0)
-                return ret;
-
-        bh_result->b_size = (ret << inode->i_blkbits);
-
-        if (ext4_should_order_data(inode)) {
-                int retval;
-                retval = ext4_jbd2_file_inode(handle, inode);
-                if (retval)
-                        /*
-                         * Failed to add inode for ordered mode. Don't
-                         * update file size
-                         */
-                        return retval;
-        }
-
-        /*
-         * Update on-disk size along with block allocation we don't
-         * use EXT4_GET_BLOCKS_EXTEND_DISKSIZE as size may change
-         * within already allocated block -bzzz
-         */
-        disksize = ((loff_t) iblock + ret) << inode->i_blkbits;
-        if (disksize > i_size_read(inode))
-                disksize = i_size_read(inode);
-        if (disksize > EXT4_I(inode)->i_disksize) {
-                ext4_update_i_disksize(inode, disksize);
-                ret = ext4_mark_inode_dirty(handle, inode);
-                return ret;
-        }
-        return 0;
-}
-
-/*
  * mpage_da_map_blocks - go through given space
  *
  * @mpd - bh describing space
@@ -2066,9 +2015,12 @@ static int ext4_da_get_block_write(struct inode *inode, sector_t iblock,
  */
 static int mpage_da_map_blocks(struct mpage_da_data *mpd)
 {
-        int err = 0;
+        int err, blks;
         struct buffer_head new;
-        sector_t next;
+        sector_t next = mpd->b_blocknr;
+        unsigned max_blocks = mpd->b_size >> mpd->inode->i_blkbits;
+        loff_t disksize = EXT4_I(mpd->inode)->i_disksize;
+        handle_t *handle = NULL;
 
         /*
          * We consider only non-mapped and non-allocated blocks
@@ -2077,6 +2029,16 @@ static int mpage_da_map_blocks(struct mpage_da_data *mpd)
             !(mpd->b_state & (1 << BH_Delay)) &&
             !(mpd->b_state & (1 << BH_Unwritten)))
                 return 0;
+
+        /*
+         * If we didn't accumulate anything to write simply return
+         */
+        if (!mpd->b_size)
+                return 0;
+
+        handle = ext4_journal_current_handle();
+        BUG_ON(!handle);
+
         /*
          * We need to make sure the BH_Delay flag is passed down to
          * ext4_da_get_block_write(), since it calls ext4_get_blocks()
@@ -2092,18 +2054,11 @@ static int mpage_da_map_blocks(struct mpage_da_data *mpd)
          * EXT4_GET_BLOCKS_DELALLOC_RESERVE flag.
          */
         new.b_state = mpd->b_state & (1 << BH_Delay);
-        new.b_blocknr = 0;
-        new.b_size = mpd->b_size;
-        next = mpd->b_blocknr;
-        /*
-         * If we didn't accumulate anything
-         * to write simply return
-         */
-        if (!new.b_size)
-                return 0;
-
-        err = ext4_da_get_block_write(mpd->inode, next, &new);
-        if (err) {
+        blks = ext4_get_blocks(handle, mpd->inode, next, max_blocks,
+                               &new, EXT4_GET_BLOCKS_CREATE|
+                               EXT4_GET_BLOCKS_DELALLOC_RESERVE);
+        if (blks < 0) {
+                err = blks;
                 /*
                  * If get block returns with error we simply
                  * return. Later writepage will redirty the page and
@@ -2136,12 +2091,14 @@ static int mpage_da_map_blocks(struct mpage_da_data *mpd)
                 if (err == -ENOSPC) {
                         ext4_print_free_blocks(mpd->inode);
                 }
-                /* invlaidate all the pages */
+                /* invalidate all the pages */
                 ext4_da_block_invalidatepages(mpd, next,
                                 mpd->b_size >> mpd->inode->i_blkbits);
                 return err;
         }
-        BUG_ON(new.b_size == 0);
+        BUG_ON(blks == 0);
+
+        new.b_size = (blks << mpd->inode->i_blkbits);
 
         if (buffer_new(&new))
                 __unmap_underlying_blocks(mpd->inode, &new);
@@ -2154,6 +2111,25 @@ static int mpage_da_map_blocks(struct mpage_da_data *mpd)
             (mpd->b_state & (1 << BH_Unwritten)))
                 mpage_put_bnr_to_bhs(mpd, next, &new);
 
+        if (ext4_should_order_data(mpd->inode)) {
+                err = ext4_jbd2_file_inode(handle, mpd->inode);
+                if (err)
+                        return err;
+        }
+
+        /*
+         * Update on-disk size along with block allocation we don't
+         * use EXT4_GET_BLOCKS_EXTEND_DISKSIZE as size may change
+         * within already allocated block -bzzz
+         */
+        disksize = ((loff_t) next + blks) << mpd->inode->i_blkbits;
+        if (disksize > i_size_read(mpd->inode))
+                disksize = i_size_read(mpd->inode);
+        if (disksize > EXT4_I(mpd->inode)->i_disksize) {
+                ext4_update_i_disksize(mpd->inode, disksize);
+                return ext4_mark_inode_dirty(handle, mpd->inode);
+        }
+
         return 0;
 }
 
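
A note on the logic that moves in the final hunk: the i_disksize update reduces to a clamp. The end of the just-mapped range, limited to the in-core i_size, only ever ratchets the on-disk size forward. A standalone model of that arithmetic (hypothetical helper, not the kernel code):

#include <stdint.h>

/* Hypothetical model of the i_disksize update done after a successful
 * block allocation: take the end of the newly mapped range, clamp it to
 * the in-core file size, and only move the on-disk size forward. */
static uint64_t updated_disksize(uint64_t next, uint64_t blks, unsigned blkbits,
                                 uint64_t i_size, uint64_t i_disksize)
{
        uint64_t end = (next + blks) << blkbits;

        if (end > i_size)
                end = i_size;
        return end > i_disksize ? end : i_disksize;
}

In the patch itself the inode is only marked dirty when i_disksize actually grows, which is why the merged code returns from inside that final if block.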