author    Theodore Ts'o <tytso@mit.edu>  2010-10-27 21:30:10 -0400
committer Theodore Ts'o <tytso@mit.edu>  2010-10-27 21:30:10 -0400
commit    1de3e3df917459422cb2aecac440febc8879d410
tree      3ff2584fa323ec855dec74b7e842e5e4abe39597
parent    3ecdb3a193a5f224f084c04a63aa28cdccf4d7d0
ext4: move mpage_put_bnr_to_bhs()'s functionality to mpage_da_submit_io()
This massively simplifies the ext4_da_writepages() code path by completely removing mpage_put_bnr_to_bhs(), which was almost 100 lines of code iterating over a set of pages using pagevec_lookup(), and folds that functionality into mpage_da_submit_io()'s existing pagevec_lookup() loop.

Signed-off-by: "Theodore Ts'o" <tytso@mit.edu>
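The consolidation is easiest to see in miniature. Below is a small userspace C model of the folded-in logic, a sketch only: struct buffer, struct map_blocks, and assign_blocks() are simplified stand-ins for the kernel's buffer_head, ext4_map_blocks, and the new code in mpage_da_submit_io(), and none of these names appear in the patch itself. It shows the core idea: rather than a second pagevec walk, each buffer visited by the existing submission loop is checked against the allocation map; a buffer whose logical block falls inside the mapped range gets its physical block number assigned and its delayed bit dropped, and any buffer still unallocated afterwards forces the page to be redirtied.

#include <stdio.h>
#include <stdbool.h>

/* Simplified stand-ins for ext4_map_blocks and buffer_head. */
struct map_blocks {
	unsigned long m_lblk;	/* first logical block the map covers */
	unsigned long m_pblk;	/* physical block it was allocated at */
	unsigned long m_len;	/* number of blocks in the mapping */
};

struct buffer {
	unsigned long blocknr;	/* physical block number, once known */
	bool delayed;		/* allocation deferred (BH_Delay) */
};

/*
 * Model of the consolidated loop: walk one page's buffers and, for
 * each buffer whose logical block lies inside the map, assign the
 * physical block and clear the delayed bit.  A NULL map means no
 * blocks were just allocated, so nothing is assigned.  Returns true
 * if any buffer is still unallocated afterwards (the "redirty the
 * page" case in the patch).
 */
static bool assign_blocks(struct buffer *bufs, int nbufs,
			  unsigned long cur_logical,
			  const struct map_blocks *map)
{
	bool redirty = false;

	for (int i = 0; i < nbufs; i++, cur_logical++) {
		if (map && cur_logical >= map->m_lblk &&
		    cur_logical < map->m_lblk + map->m_len &&
		    bufs[i].delayed) {
			bufs[i].delayed = false;
			bufs[i].blocknr = map->m_pblk +
					  (cur_logical - map->m_lblk);
		}
		if (bufs[i].delayed)	/* allocation still undone */
			redirty = true;
	}
	return redirty;
}

int main(void)
{
	/* A "page" covering logical blocks 8..11; only 8..9 got mapped. */
	struct map_blocks map = { .m_lblk = 8, .m_pblk = 1000, .m_len = 2 };
	struct buffer bufs[4] = {
		{ 0, true }, { 0, true }, { 0, true }, { 0, true }
	};
	bool redirty = assign_blocks(bufs, 4, 8, &map);

	for (int i = 0; i < 4; i++)
		printf("buf %d: delayed=%d blocknr=%lu\n",
		       i, (int)bufs[i].delayed, bufs[i].blocknr);
	printf("redirty=%d\n", (int)redirty);
	return 0;
}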
Diffstat (limited to 'fs/ext4/inode.c')
-rw-r--r--  fs/ext4/inode.c | 139
1 file changed, 38 insertions(+), 101 deletions(-)
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
index 5da6cfcecd83..c65d647378f9 100644
--- a/fs/ext4/inode.c
+++ b/fs/ext4/inode.c
@@ -2003,7 +2003,8 @@ static void ext4_da_page_release_reservation(struct page *page,
  *
  * As pages are already locked by write_cache_pages(), we can't use it
  */
-static int mpage_da_submit_io(struct mpage_da_data *mpd)
+static int mpage_da_submit_io(struct mpage_da_data *mpd,
+			      struct ext4_map_blocks *map)
 {
 	struct pagevec pvec;
 	unsigned long index, end;
@@ -2014,6 +2015,7 @@ static int mpage_da_submit_io(struct mpage_da_data *mpd)
 	unsigned int len, block_start;
 	struct buffer_head *bh, *page_bufs = NULL;
 	int journal_data = ext4_should_journal_data(inode);
+	sector_t pblock = 0, cur_logical = 0;
 
 	BUG_ON(mpd->next_page <= mpd->first_page);
 	/*
@@ -2031,7 +2033,7 @@ static int mpage_da_submit_io(struct mpage_da_data *mpd)
 		if (nr_pages == 0)
 			break;
 		for (i = 0; i < nr_pages; i++) {
-			int commit_write = 0;
+			int commit_write = 0, redirty_page = 0;
 			struct page *page = pvec.pages[i];
 
 			index = page->index;
@@ -2042,6 +2044,12 @@ static int mpage_da_submit_io(struct mpage_da_data *mpd)
 				len = size & ~PAGE_CACHE_MASK;
 			else
 				len = PAGE_CACHE_SIZE;
+			if (map) {
+				cur_logical = index << (PAGE_CACHE_SHIFT -
+							inode->i_blkbits);
+				pblock = map->m_pblk + (cur_logical -
+							map->m_lblk);
+			}
 			index++;
 
 			BUG_ON(!PageLocked(page));
@@ -2068,13 +2076,34 @@ static int mpage_da_submit_io(struct mpage_da_data *mpd)
 			bh = page_bufs = page_buffers(page);
 			block_start = 0;
 			do {
-				/* redirty page if block allocation undone */
-				if (!bh || buffer_delay(bh) ||
-				    buffer_unwritten(bh))
+				if (!bh)
 					goto redirty_page;
+				if (map && (cur_logical >= map->m_lblk) &&
+				    (cur_logical <= (map->m_lblk +
+						     (map->m_len - 1)))) {
+					if (buffer_delay(bh)) {
+						clear_buffer_delay(bh);
+						bh->b_blocknr = pblock;
+					}
+					if (buffer_unwritten(bh) ||
+					    buffer_mapped(bh))
+						BUG_ON(bh->b_blocknr != pblock);
+					if (map->m_flags & EXT4_MAP_UNINIT)
+						set_buffer_uninit(bh);
+					clear_buffer_unwritten(bh);
+				}
+
+				/* redirty page if block allocation undone */
+				if (buffer_delay(bh) || buffer_unwritten(bh))
+					redirty_page = 1;
 				bh = bh->b_this_page;
 				block_start += bh->b_size;
-			} while ((bh != page_bufs) && (block_start < len));
+				cur_logical++;
+				pblock++;
+			} while (bh != page_bufs);
+
+			if (redirty_page)
+				goto redirty_page;
 
 			if (commit_write)
 				/* mark the buffer_heads as dirty & uptodate */
@@ -2105,91 +2134,6 @@ static int mpage_da_submit_io(struct mpage_da_data *mpd)
 	return ret;
 }
 
-/*
- * mpage_put_bnr_to_bhs - walk blocks and assign them actual numbers
- *
- * the function goes through all passed space and put actual disk
- * block numbers into buffer heads, dropping BH_Delay and BH_Unwritten
- */
-static void mpage_put_bnr_to_bhs(struct mpage_da_data *mpd,
-				 struct ext4_map_blocks *map)
-{
-	struct inode *inode = mpd->inode;
-	struct address_space *mapping = inode->i_mapping;
-	int blocks = map->m_len;
-	sector_t pblock = map->m_pblk, cur_logical;
-	struct buffer_head *head, *bh;
-	pgoff_t index, end;
-	struct pagevec pvec;
-	int nr_pages, i;
-
-	index = map->m_lblk >> (PAGE_CACHE_SHIFT - inode->i_blkbits);
-	end = (map->m_lblk + blocks - 1) >> (PAGE_CACHE_SHIFT - inode->i_blkbits);
-	cur_logical = index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
-
-	pagevec_init(&pvec, 0);
-
-	while (index <= end) {
-		/* XXX: optimize tail */
-		nr_pages = pagevec_lookup(&pvec, mapping, index, PAGEVEC_SIZE);
-		if (nr_pages == 0)
-			break;
-		for (i = 0; i < nr_pages; i++) {
-			struct page *page = pvec.pages[i];
-
-			index = page->index;
-			if (index > end)
-				break;
-			index++;
-
-			BUG_ON(!PageLocked(page));
-			BUG_ON(PageWriteback(page));
-			BUG_ON(!page_has_buffers(page));
-
-			bh = page_buffers(page);
-			head = bh;
-
-			/* skip blocks out of the range */
-			do {
-				if (cur_logical >= map->m_lblk)
-					break;
-				cur_logical++;
-			} while ((bh = bh->b_this_page) != head);
-
-			do {
-				if (cur_logical > map->m_lblk + (blocks - 1))
-					break;
-
-				if (buffer_delay(bh) || buffer_unwritten(bh)) {
-
-					BUG_ON(bh->b_bdev != inode->i_sb->s_bdev);
-
-					if (buffer_delay(bh)) {
-						clear_buffer_delay(bh);
-						bh->b_blocknr = pblock;
-					} else {
-						/*
-						 * unwritten already should have
-						 * blocknr assigned. Verify that
-						 */
-						clear_buffer_unwritten(bh);
-						BUG_ON(bh->b_blocknr != pblock);
-					}
-
-				} else if (buffer_mapped(bh))
-					BUG_ON(bh->b_blocknr != pblock);
-
-				if (map->m_flags & EXT4_MAP_UNINIT)
-					set_buffer_uninit(bh);
-				cur_logical++;
-				pblock++;
-			} while ((bh = bh->b_this_page) != head);
-		}
-		pagevec_release(&pvec);
-	}
-}
-
-
 static void ext4_da_block_invalidatepages(struct mpage_da_data *mpd,
 					  sector_t logical, long blk_cnt)
 {
@@ -2252,7 +2196,7 @@ static void ext4_print_free_blocks(struct inode *inode)
 static void mpage_da_map_and_submit(struct mpage_da_data *mpd)
 {
 	int err, blks, get_blocks_flags;
-	struct ext4_map_blocks map;
+	struct ext4_map_blocks map, *mapp = NULL;
 	sector_t next = mpd->b_blocknr;
 	unsigned max_blocks = mpd->b_size >> mpd->inode->i_blkbits;
 	loff_t disksize = EXT4_I(mpd->inode)->i_disksize;
@@ -2343,6 +2287,7 @@ static void mpage_da_map_and_submit(struct mpage_da_data *mpd)
 	}
 	BUG_ON(blks == 0);
 
+	mapp = &map;
 	if (map.m_flags & EXT4_MAP_NEW) {
 		struct block_device *bdev = mpd->inode->i_sb->s_bdev;
 		int i;
@@ -2351,14 +2296,6 @@ static void mpage_da_map_and_submit(struct mpage_da_data *mpd)
 			unmap_underlying_metadata(bdev, map.m_pblk + i);
 	}
 
-	/*
-	 * If blocks are delayed marked, we need to
-	 * put actual blocknr and drop delayed bit
-	 */
-	if ((mpd->b_state & (1 << BH_Delay)) ||
-	    (mpd->b_state & (1 << BH_Unwritten)))
-		mpage_put_bnr_to_bhs(mpd, &map);
-
 	if (ext4_should_order_data(mpd->inode)) {
 		err = ext4_jbd2_file_inode(handle, mpd->inode);
 		if (err)
@@ -2382,7 +2319,7 @@ static void mpage_da_map_and_submit(struct mpage_da_data *mpd)
 	}
 
 submit_io:
-	mpage_da_submit_io(mpd);
+	mpage_da_submit_io(mpd, mapp);
 	mpd->io_done = 1;
 }
 
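On the caller side, the patch establishes a simple convention: mapp in mpage_da_map_and_submit() stays NULL unless ext4_map_blocks() actually allocated something, and mpage_da_submit_io() is invoked unconditionally with whatever the pointer ended up being, treating a NULL map as "nothing new to assign". A hedged sketch of that shape, reusing the hypothetical types and assign_blocks() from the model above (map_and_submit() and the allocated flag are likewise made-up names, not kernel code):

/* Caller-side shape of the patch: mapp is NULL unless blocks were
 * just allocated, and the submit path runs either way. */
static void map_and_submit(struct buffer *bufs, int nbufs,
			   unsigned long first_logical, bool allocated)
{
	struct map_blocks map, *mapp = NULL;

	if (allocated) {
		/* Pretend the allocator returned one contiguous extent
		 * covering the whole range, starting at block 1000. */
		map.m_lblk = first_logical;
		map.m_pblk = 1000;
		map.m_len = (unsigned long)nbufs;
		mapp = &map;	/* mirrors "mapp = &map;" in the patch */
	}
	if (assign_blocks(bufs, nbufs, first_logical, mapp))
		printf("page would be redirtied\n");	/* blocks still delayed */
}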