author     Badari Pulavarty <pbadari@us.ibm.com>  2006-03-26 04:38:01 -0500
committer  Linus Torvalds <torvalds@g5.osdl.org>  2006-03-26 11:57:01 -0500
commit     fa30bd058b746c0e2318a77ff8b4977faa924c2c
tree       a91abdcc4909da5dd16ed1bab91b5764eafed174
parent     b0cf2321c6599138f860517745503691556d8453
[PATCH] map multiple blocks for mpage_readpages()
This patch changes mpage_readpages() and get_block() to get the disk mapping
information for multiple blocks at the same time.
On entry, b_size represents the amount of disk mapping that needs to be
mapped.  On a successful get_block(), b_size indicates the amount of disk
mapping that was actually mapped.  Only filesystems that care to use this
information and can provide multiple disk blocks at a time need to do so.
No changes are needed for filesystems that want to ignore this.
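As a rough illustration of this contract (a sketch, not part of the patch:
myfs_get_block() and myfs_map_extent() are hypothetical names), a
multi-block-aware get_block() reads the requested size from bh_result->b_size
on entry and writes back the size it actually mapped:

#include <linux/fs.h>
#include <linux/buffer_head.h>

/*
 * Hypothetical example of a get_block() that honours the multi-block
 * protocol described above.  myfs_map_extent() is a made-up helper that
 * maps up to max_blocks contiguous blocks starting at iblock and reports
 * the physical start block and how many blocks it covered.
 */
static int myfs_get_block(struct inode *inode, sector_t iblock,
			  struct buffer_head *bh_result, int create)
{
	/* amount of mapping requested by the caller, in fs blocks */
	unsigned long max_blocks = bh_result->b_size >> inode->i_blkbits;
	unsigned long nmapped;
	sector_t phys;
	int err;

	err = myfs_map_extent(inode, iblock, max_blocks, &phys, &nmapped, create);
	if (err)
		return err;
	if (!nmapped)		/* a hole: leave bh_result unmapped */
		return 0;

	map_bh(bh_result, inode->i_sb, phys);
	/* report back how much of the request was actually mapped */
	bh_result->b_size = nmapped << inode->i_blkbits;
	return 0;
}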
[akpm@osdl.org: cleanups]
Signed-off-by: Badari Pulavarty <pbadari@us.ibm.com>
Cc: Mingming Cao <cmm@us.ibm.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
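For context (again a hypothetical sketch using made-up myfs names, not code
from the patch), a filesystem opts in simply by passing such a get_block() to
the generic mpage helpers from its address_space_operations:

static int myfs_readpage(struct file *file, struct page *page)
{
	return mpage_readpage(page, myfs_get_block);
}

static int myfs_readpages(struct file *file, struct address_space *mapping,
			  struct list_head *pages, unsigned nr_pages)
{
	return mpage_readpages(mapping, pages, nr_pages, myfs_get_block);
}

static struct address_space_operations myfs_aops = {
	.readpage	= myfs_readpage,
	.readpages	= myfs_readpages,
};

A filesystem whose get_block() keeps mapping a single block at a time
continues to work unchanged, since do_mpage_readpage() only consumes as much
of b_size as was actually mapped.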
 fs/jfs/inode.c              |   3
 fs/mpage.c                  | 104
 fs/xfs/linux-2.6/xfs_aops.c |   5
 3 files changed, 90 insertions, 22 deletions
diff --git a/fs/jfs/inode.c b/fs/jfs/inode.c
index 51a5fed90cca..7239ef339489 100644
--- a/fs/jfs/inode.c
+++ b/fs/jfs/inode.c
@@ -258,7 +258,8 @@ jfs_get_blocks(struct inode *ip, sector_t lblock, unsigned long max_blocks,
 static int jfs_get_block(struct inode *ip, sector_t lblock,
 			struct buffer_head *bh_result, int create)
 {
-	return jfs_get_blocks(ip, lblock, 1, bh_result, create);
+	return jfs_get_blocks(ip, lblock, bh_result->b_size >> ip->i_blkbits,
+			bh_result, create);
 }
 
 static int jfs_writepage(struct page *page, struct writeback_control *wbc)
diff --git a/fs/mpage.c b/fs/mpage.c
index 7903b740cc11..9bf2eb30e6f4 100644
--- a/fs/mpage.c
+++ b/fs/mpage.c
@@ -163,9 +163,19 @@ map_buffer_to_page(struct page *page, struct buffer_head *bh, int page_block)
 	} while (page_bh != head);
 }
 
+/*
+ * This is the worker routine which does all the work of mapping the disk
+ * blocks and constructs largest possible bios, submits them for IO if the
+ * blocks are not contiguous on the disk.
+ *
+ * We pass a buffer_head back and forth and use its buffer_mapped() flag to
+ * represent the validity of its disk mapping and to decide when to do the next
+ * get_block() call.
+ */
 static struct bio *
 do_mpage_readpage(struct bio *bio, struct page *page, unsigned nr_pages,
-		sector_t *last_block_in_bio, get_block_t get_block)
+		sector_t *last_block_in_bio, struct buffer_head *map_bh,
+		unsigned long *first_logical_block, get_block_t get_block)
 {
 	struct inode *inode = page->mapping->host;
 	const unsigned blkbits = inode->i_blkbits;
@@ -173,34 +183,72 @@ do_mpage_readpage(struct bio *bio, struct page *page, unsigned nr_pages,
 	const unsigned blocksize = 1 << blkbits;
 	sector_t block_in_file;
 	sector_t last_block;
+	sector_t last_block_in_file;
 	sector_t blocks[MAX_BUF_PER_PAGE];
 	unsigned page_block;
 	unsigned first_hole = blocks_per_page;
 	struct block_device *bdev = NULL;
-	struct buffer_head bh;
 	int length;
 	int fully_mapped = 1;
+	unsigned nblocks;
+	unsigned relative_block;
 
 	if (page_has_buffers(page))
 		goto confused;
 
 	block_in_file = (sector_t)page->index << (PAGE_CACHE_SHIFT - blkbits);
-	last_block = (i_size_read(inode) + blocksize - 1) >> blkbits;
+	last_block = block_in_file + nr_pages * blocks_per_page;
+	last_block_in_file = (i_size_read(inode) + blocksize - 1) >> blkbits;
+	if (last_block > last_block_in_file)
+		last_block = last_block_in_file;
+	page_block = 0;
+
+	/*
+	 * Map blocks using the result from the previous get_blocks call first.
+	 */
+	nblocks = map_bh->b_size >> blkbits;
+	if (buffer_mapped(map_bh) && block_in_file > *first_logical_block &&
+			block_in_file < (*first_logical_block + nblocks)) {
+		unsigned map_offset = block_in_file - *first_logical_block;
+		unsigned last = nblocks - map_offset;
+
+		for (relative_block = 0; ; relative_block++) {
+			if (relative_block == last) {
+				clear_buffer_mapped(map_bh);
+				break;
+			}
+			if (page_block == blocks_per_page)
+				break;
+			blocks[page_block] = map_bh->b_blocknr + map_offset +
+						relative_block;
+			page_block++;
+			block_in_file++;
+		}
+		bdev = map_bh->b_bdev;
+	}
+
+	/*
+	 * Then do more get_blocks calls until we are done with this page.
+	 */
+	map_bh->b_page = page;
+	while (page_block < blocks_per_page) {
+		map_bh->b_state = 0;
+		map_bh->b_size = 0;
 
-	bh.b_page = page;
-	for (page_block = 0; page_block < blocks_per_page;
-				page_block++, block_in_file++) {
-		bh.b_state = 0;
 		if (block_in_file < last_block) {
-			bh.b_size = blocksize;
-			if (get_block(inode, block_in_file, &bh, 0))
+			map_bh->b_size = (last_block-block_in_file) << blkbits;
+			if (get_block(inode, block_in_file, map_bh, 0))
 				goto confused;
+			*first_logical_block = block_in_file;
 		}
 
-		if (!buffer_mapped(&bh)) {
+		if (!buffer_mapped(map_bh)) {
 			fully_mapped = 0;
 			if (first_hole == blocks_per_page)
 				first_hole = page_block;
+			page_block++;
+			block_in_file++;
+			clear_buffer_mapped(map_bh);
 			continue;
 		}
 
@@ -210,8 +258,8 @@ do_mpage_readpage(struct bio *bio, struct page *page, unsigned nr_pages,
 		 * we just collected from get_block into the page's buffers
 		 * so readpage doesn't have to repeat the get_block call
 		 */
-		if (buffer_uptodate(&bh)) {
-			map_buffer_to_page(page, &bh, page_block);
+		if (buffer_uptodate(map_bh)) {
+			map_buffer_to_page(page, map_bh, page_block);
 			goto confused;
 		}
 
@@ -219,10 +267,20 @@ do_mpage_readpage(struct bio *bio, struct page *page, unsigned nr_pages,
 			goto confused;	/* hole -> non-hole */
 
 		/* Contiguous blocks? */
-		if (page_block && blocks[page_block-1] != bh.b_blocknr-1)
+		if (page_block && blocks[page_block-1] != map_bh->b_blocknr-1)
 			goto confused;
-		blocks[page_block] = bh.b_blocknr;
-		bdev = bh.b_bdev;
+		nblocks = map_bh->b_size >> blkbits;
+		for (relative_block = 0; ; relative_block++) {
+			if (relative_block == nblocks) {
+				clear_buffer_mapped(map_bh);
+				break;
+			} else if (page_block == blocks_per_page)
+				break;
+			blocks[page_block] = map_bh->b_blocknr+relative_block;
+			page_block++;
+			block_in_file++;
+		}
+		bdev = map_bh->b_bdev;
 	}
 
 	if (first_hole != blocks_per_page) {
@@ -261,7 +319,7 @@ alloc_new:
 		goto alloc_new;
 	}
 
-	if (buffer_boundary(&bh) || (first_hole != blocks_per_page))
+	if (buffer_boundary(map_bh) || (first_hole != blocks_per_page))
 		bio = mpage_bio_submit(READ, bio);
 	else
 		*last_block_in_bio = blocks[blocks_per_page - 1];
@@ -332,7 +390,10 @@ mpage_readpages(struct address_space *mapping, struct list_head *pages,
 	unsigned page_idx;
 	sector_t last_block_in_bio = 0;
 	struct pagevec lru_pvec;
+	struct buffer_head map_bh;
+	unsigned long first_logical_block = 0;
 
+	clear_buffer_mapped(&map_bh);
 	pagevec_init(&lru_pvec, 0);
 	for (page_idx = 0; page_idx < nr_pages; page_idx++) {
 		struct page *page = list_entry(pages->prev, struct page, lru);
@@ -343,7 +404,9 @@ mpage_readpages(struct address_space *mapping, struct list_head *pages,
 					page->index, GFP_KERNEL)) {
 			bio = do_mpage_readpage(bio, page,
 					nr_pages - page_idx,
-					&last_block_in_bio, get_block);
+					&last_block_in_bio, &map_bh,
+					&first_logical_block,
+					get_block);
 			if (!pagevec_add(&lru_pvec, page))
 				__pagevec_lru_add(&lru_pvec);
 		} else {
@@ -365,9 +428,12 @@ int mpage_readpage(struct page *page, get_block_t get_block)
 {
 	struct bio *bio = NULL;
 	sector_t last_block_in_bio = 0;
+	struct buffer_head map_bh;
+	unsigned long first_logical_block = 0;
 
-	bio = do_mpage_readpage(bio, page, 1,
-			&last_block_in_bio, get_block);
+	clear_buffer_mapped(&map_bh);
+	bio = do_mpage_readpage(bio, page, 1, &last_block_in_bio,
+			&map_bh, &first_logical_block, get_block);
 	if (bio)
 		mpage_bio_submit(READ, bio);
 	return 0;
diff --git a/fs/xfs/linux-2.6/xfs_aops.c b/fs/xfs/linux-2.6/xfs_aops.c
index 4f2476f188b0..a79b84f8b55c 100644
--- a/fs/xfs/linux-2.6/xfs_aops.c
+++ b/fs/xfs/linux-2.6/xfs_aops.c
@@ -1310,8 +1310,9 @@
 	struct buffer_head	*bh_result,
 	int			create)
 {
-	return __xfs_get_block(inode, iblock, 0, bh_result,
-					create, 0, BMAPI_WRITE);
+	return __xfs_get_block(inode, iblock,
+			       bh_result->b_size >> inode->i_blkbits,
+			       bh_result, create, 0, BMAPI_WRITE);
 }
 
 STATIC int