diff options
Diffstat (limited to 'fs/mpage.c')
-rw-r--r-- | fs/mpage.c | 104
1 files changed, 86 insertions, 18 deletions
diff --git a/fs/mpage.c b/fs/mpage.c
index e431cb3878d6..9bf2eb30e6f4 100644
--- a/fs/mpage.c
+++ b/fs/mpage.c
@@ -163,9 +163,19 @@ map_buffer_to_page(struct page *page, struct buffer_head *bh, int page_block)
163 | } while (page_bh != head); | 163 | } while (page_bh != head); |
164 | } | 164 | } |
165 | 165 | ||
166 | /* | ||
167 | * This is the worker routine which does all the work of mapping the disk | ||
168 | * blocks and constructs largest possible bios, submits them for IO if the | ||
169 | * blocks are not contiguous on the disk. | ||
170 | * | ||
171 | * We pass a buffer_head back and forth and use its buffer_mapped() flag to | ||
172 | * represent the validity of its disk mapping and to decide when to do the next | ||
173 | * get_block() call. | ||
174 | */ | ||
166 | static struct bio * | 175 | static struct bio * |
167 | do_mpage_readpage(struct bio *bio, struct page *page, unsigned nr_pages, | 176 | do_mpage_readpage(struct bio *bio, struct page *page, unsigned nr_pages, |
168 | sector_t *last_block_in_bio, get_block_t get_block) | 177 | sector_t *last_block_in_bio, struct buffer_head *map_bh, |
178 | unsigned long *first_logical_block, get_block_t get_block) | ||
169 | { | 179 | { |
170 | struct inode *inode = page->mapping->host; | 180 | struct inode *inode = page->mapping->host; |
171 | const unsigned blkbits = inode->i_blkbits; | 181 | const unsigned blkbits = inode->i_blkbits; |
@@ -173,33 +183,72 @@ do_mpage_readpage(struct bio *bio, struct page *page, unsigned nr_pages,
173 | const unsigned blocksize = 1 << blkbits; | 183 | const unsigned blocksize = 1 << blkbits; |
174 | sector_t block_in_file; | 184 | sector_t block_in_file; |
175 | sector_t last_block; | 185 | sector_t last_block; |
186 | sector_t last_block_in_file; | ||
176 | sector_t blocks[MAX_BUF_PER_PAGE]; | 187 | sector_t blocks[MAX_BUF_PER_PAGE]; |
177 | unsigned page_block; | 188 | unsigned page_block; |
178 | unsigned first_hole = blocks_per_page; | 189 | unsigned first_hole = blocks_per_page; |
179 | struct block_device *bdev = NULL; | 190 | struct block_device *bdev = NULL; |
180 | struct buffer_head bh; | ||
181 | int length; | 191 | int length; |
182 | int fully_mapped = 1; | 192 | int fully_mapped = 1; |
193 | unsigned nblocks; | ||
194 | unsigned relative_block; | ||
183 | 195 | ||
184 | if (page_has_buffers(page)) | 196 | if (page_has_buffers(page)) |
185 | goto confused; | 197 | goto confused; |
186 | 198 | ||
187 | block_in_file = (sector_t)page->index << (PAGE_CACHE_SHIFT - blkbits); | 199 | block_in_file = (sector_t)page->index << (PAGE_CACHE_SHIFT - blkbits); |
188 | last_block = (i_size_read(inode) + blocksize - 1) >> blkbits; | 200 | last_block = block_in_file + nr_pages * blocks_per_page; |
201 | last_block_in_file = (i_size_read(inode) + blocksize - 1) >> blkbits; | ||
202 | if (last_block > last_block_in_file) | ||
203 | last_block = last_block_in_file; | ||
204 | page_block = 0; | ||
205 | |||
206 | /* | ||
207 | * Map blocks using the result from the previous get_blocks call first. | ||
208 | */ | ||
209 | nblocks = map_bh->b_size >> blkbits; | ||
210 | if (buffer_mapped(map_bh) && block_in_file > *first_logical_block && | ||
211 | block_in_file < (*first_logical_block + nblocks)) { | ||
212 | unsigned map_offset = block_in_file - *first_logical_block; | ||
213 | unsigned last = nblocks - map_offset; | ||
214 | |||
215 | for (relative_block = 0; ; relative_block++) { | ||
216 | if (relative_block == last) { | ||
217 | clear_buffer_mapped(map_bh); | ||
218 | break; | ||
219 | } | ||
220 | if (page_block == blocks_per_page) | ||
221 | break; | ||
222 | blocks[page_block] = map_bh->b_blocknr + map_offset + | ||
223 | relative_block; | ||
224 | page_block++; | ||
225 | block_in_file++; | ||
226 | } | ||
227 | bdev = map_bh->b_bdev; | ||
228 | } | ||
229 | |||
230 | /* | ||
231 | * Then do more get_blocks calls until we are done with this page. | ||
232 | */ | ||
233 | map_bh->b_page = page; | ||
234 | while (page_block < blocks_per_page) { | ||
235 | map_bh->b_state = 0; | ||
236 | map_bh->b_size = 0; | ||
189 | 237 | ||
190 | bh.b_page = page; | ||
191 | for (page_block = 0; page_block < blocks_per_page; | ||
192 | page_block++, block_in_file++) { | ||
193 | bh.b_state = 0; | ||
194 | if (block_in_file < last_block) { | 238 | if (block_in_file < last_block) { |
195 | if (get_block(inode, block_in_file, &bh, 0)) | 239 | map_bh->b_size = (last_block-block_in_file) << blkbits; |
240 | if (get_block(inode, block_in_file, map_bh, 0)) | ||
196 | goto confused; | 241 | goto confused; |
242 | *first_logical_block = block_in_file; | ||
197 | } | 243 | } |
198 | 244 | ||
199 | if (!buffer_mapped(&bh)) { | 245 | if (!buffer_mapped(map_bh)) { |
200 | fully_mapped = 0; | 246 | fully_mapped = 0; |
201 | if (first_hole == blocks_per_page) | 247 | if (first_hole == blocks_per_page) |
202 | first_hole = page_block; | 248 | first_hole = page_block; |
249 | page_block++; | ||
250 | block_in_file++; | ||
251 | clear_buffer_mapped(map_bh); | ||
203 | continue; | 252 | continue; |
204 | } | 253 | } |
205 | 254 | ||
@@ -209,8 +258,8 @@ do_mpage_readpage(struct bio *bio, struct page *page, unsigned nr_pages,
209 | * we just collected from get_block into the page's buffers | 258 | * we just collected from get_block into the page's buffers |
210 | * so readpage doesn't have to repeat the get_block call | 259 | * so readpage doesn't have to repeat the get_block call |
211 | */ | 260 | */ |
212 | if (buffer_uptodate(&bh)) { | 261 | if (buffer_uptodate(map_bh)) { |
213 | map_buffer_to_page(page, &bh, page_block); | 262 | map_buffer_to_page(page, map_bh, page_block); |
214 | goto confused; | 263 | goto confused; |
215 | } | 264 | } |
216 | 265 | ||
@@ -218,10 +267,20 @@ do_mpage_readpage(struct bio *bio, struct page *page, unsigned nr_pages,
218 | goto confused; /* hole -> non-hole */ | 267 | goto confused; /* hole -> non-hole */ |
219 | 268 | ||
220 | /* Contiguous blocks? */ | 269 | /* Contiguous blocks? */ |
221 | if (page_block && blocks[page_block-1] != bh.b_blocknr-1) | 270 | if (page_block && blocks[page_block-1] != map_bh->b_blocknr-1) |
222 | goto confused; | 271 | goto confused; |
223 | blocks[page_block] = bh.b_blocknr; | 272 | nblocks = map_bh->b_size >> blkbits; |
224 | bdev = bh.b_bdev; | 273 | for (relative_block = 0; ; relative_block++) { |
274 | if (relative_block == nblocks) { | ||
275 | clear_buffer_mapped(map_bh); | ||
276 | break; | ||
277 | } else if (page_block == blocks_per_page) | ||
278 | break; | ||
279 | blocks[page_block] = map_bh->b_blocknr+relative_block; | ||
280 | page_block++; | ||
281 | block_in_file++; | ||
282 | } | ||
283 | bdev = map_bh->b_bdev; | ||
225 | } | 284 | } |
226 | 285 | ||
227 | if (first_hole != blocks_per_page) { | 286 | if (first_hole != blocks_per_page) { |
@@ -260,7 +319,7 @@ alloc_new:
260 | goto alloc_new; | 319 | goto alloc_new; |
261 | } | 320 | } |
262 | 321 | ||
263 | if (buffer_boundary(&bh) || (first_hole != blocks_per_page)) | 322 | if (buffer_boundary(map_bh) || (first_hole != blocks_per_page)) |
264 | bio = mpage_bio_submit(READ, bio); | 323 | bio = mpage_bio_submit(READ, bio); |
265 | else | 324 | else |
266 | *last_block_in_bio = blocks[blocks_per_page - 1]; | 325 | *last_block_in_bio = blocks[blocks_per_page - 1]; |
@@ -331,7 +390,10 @@ mpage_readpages(struct address_space *mapping, struct list_head *pages,
331 | unsigned page_idx; | 390 | unsigned page_idx; |
332 | sector_t last_block_in_bio = 0; | 391 | sector_t last_block_in_bio = 0; |
333 | struct pagevec lru_pvec; | 392 | struct pagevec lru_pvec; |
393 | struct buffer_head map_bh; | ||
394 | unsigned long first_logical_block = 0; | ||
334 | 395 | ||
396 | clear_buffer_mapped(&map_bh); | ||
335 | pagevec_init(&lru_pvec, 0); | 397 | pagevec_init(&lru_pvec, 0); |
336 | for (page_idx = 0; page_idx < nr_pages; page_idx++) { | 398 | for (page_idx = 0; page_idx < nr_pages; page_idx++) { |
337 | struct page *page = list_entry(pages->prev, struct page, lru); | 399 | struct page *page = list_entry(pages->prev, struct page, lru); |
@@ -342,7 +404,9 @@ mpage_readpages(struct address_space *mapping, struct list_head *pages,
342 | page->index, GFP_KERNEL)) { | 404 | page->index, GFP_KERNEL)) { |
343 | bio = do_mpage_readpage(bio, page, | 405 | bio = do_mpage_readpage(bio, page, |
344 | nr_pages - page_idx, | 406 | nr_pages - page_idx, |
345 | &last_block_in_bio, get_block); | 407 | &last_block_in_bio, &map_bh, |
408 | &first_logical_block, | ||
409 | get_block); | ||
346 | if (!pagevec_add(&lru_pvec, page)) | 410 | if (!pagevec_add(&lru_pvec, page)) |
347 | __pagevec_lru_add(&lru_pvec); | 411 | __pagevec_lru_add(&lru_pvec); |
348 | } else { | 412 | } else { |
@@ -364,9 +428,12 @@ int mpage_readpage(struct page *page, get_block_t get_block)
364 | { | 428 | { |
365 | struct bio *bio = NULL; | 429 | struct bio *bio = NULL; |
366 | sector_t last_block_in_bio = 0; | 430 | sector_t last_block_in_bio = 0; |
431 | struct buffer_head map_bh; | ||
432 | unsigned long first_logical_block = 0; | ||
367 | 433 | ||
368 | bio = do_mpage_readpage(bio, page, 1, | 434 | clear_buffer_mapped(&map_bh); |
369 | &last_block_in_bio, get_block); | 435 | bio = do_mpage_readpage(bio, page, 1, &last_block_in_bio, |
436 | &map_bh, &first_logical_block, get_block); | ||
370 | if (bio) | 437 | if (bio) |
371 | mpage_bio_submit(READ, bio); | 438 | mpage_bio_submit(READ, bio); |
372 | return 0; | 439 | return 0; |
@@ -472,6 +539,7 @@ __mpage_writepage(struct bio *bio, struct page *page, get_block_t get_block,
472 | for (page_block = 0; page_block < blocks_per_page; ) { | 539 | for (page_block = 0; page_block < blocks_per_page; ) { |
473 | 540 | ||
474 | map_bh.b_state = 0; | 541 | map_bh.b_state = 0; |
542 | map_bh.b_size = 1 << blkbits; | ||
475 | if (get_block(inode, block_in_file, &map_bh, 1)) | 543 | if (get_block(inode, block_in_file, &map_bh, 1)) |
476 | goto confused; | 544 | goto confused; |
477 | if (buffer_new(&map_bh)) | 545 | if (buffer_new(&map_bh)) |