Diffstat (limited to 'fs/mpage.c')
-rw-r--r--	fs/mpage.c	92
1 file changed, 46 insertions(+), 46 deletions(-)
diff --git a/fs/mpage.c b/fs/mpage.c
index 3923facf94eb..32c7c8fcfce7 100644
--- a/fs/mpage.c
+++ b/fs/mpage.c
@@ -160,52 +160,6 @@ map_buffer_to_page(struct page *page, struct buffer_head *bh, int page_block)
 	} while (page_bh != head);
 }
 
-/**
- * mpage_readpages - populate an address space with some pages, and
- *			start reads against them.
- *
- * @mapping: the address_space
- * @pages: The address of a list_head which contains the target pages.  These
- *   pages have their ->index populated and are otherwise uninitialised.
- *
- * The page at @pages->prev has the lowest file offset, and reads should be
- * issued in @pages->prev to @pages->next order.
- *
- * @nr_pages: The number of pages at *@pages
- * @get_block: The filesystem's block mapper function.
- *
- * This function walks the pages and the blocks within each page, building and
- * emitting large BIOs.
- *
- * If anything unusual happens, such as:
- *
- * - encountering a page which has buffers
- * - encountering a page which has a non-hole after a hole
- * - encountering a page with non-contiguous blocks
- *
- * then this code just gives up and calls the buffer_head-based read function.
- * It does handle a page which has holes at the end - that is a common case:
- * the end-of-file on blocksize < PAGE_CACHE_SIZE setups.
- *
- * BH_Boundary explanation:
- *
- * There is a problem.  The mpage read code assembles several pages, gets all
- * their disk mappings, and then submits them all.  That's fine, but obtaining
- * the disk mappings may require I/O.  Reads of indirect blocks, for example.
- *
- * So an mpage read of the first 16 blocks of an ext2 file will cause I/O to be
- * submitted in the following order:
- *	12 0 1 2 3 4 5 6 7 8 9 10 11 13 14 15 16
- * because the indirect block has to be read to get the mappings of blocks
- * 13,14,15,16.  Obviously, this impacts performance.
- *
- * So what we do it to allow the filesystem's get_block() function to set
- * BH_Boundary when it maps block 11.  BH_Boundary says: mapping of the block
- * after this one will require I/O against a block which is probably close to
- * this one.  So you should push what I/O you have currently accumulated.
- *
- * This all causes the disk requests to be issued in the correct order.
- */
 static struct bio *
 do_mpage_readpage(struct bio *bio, struct page *page, unsigned nr_pages,
 		sector_t *last_block_in_bio, get_block_t get_block)
@@ -320,6 +274,52 @@ confused:
 	goto out;
 }
 
+/**
+ * mpage_readpages - populate an address space with some pages, and
+ *			start reads against them.
+ *
+ * @mapping: the address_space
+ * @pages: The address of a list_head which contains the target pages.  These
+ *   pages have their ->index populated and are otherwise uninitialised.
+ *
+ * The page at @pages->prev has the lowest file offset, and reads should be
+ * issued in @pages->prev to @pages->next order.
+ *
+ * @nr_pages: The number of pages at *@pages
+ * @get_block: The filesystem's block mapper function.
+ *
+ * This function walks the pages and the blocks within each page, building and
+ * emitting large BIOs.
+ *
+ * If anything unusual happens, such as:
+ *
+ * - encountering a page which has buffers
+ * - encountering a page which has a non-hole after a hole
+ * - encountering a page with non-contiguous blocks
+ *
+ * then this code just gives up and calls the buffer_head-based read function.
+ * It does handle a page which has holes at the end - that is a common case:
+ * the end-of-file on blocksize < PAGE_CACHE_SIZE setups.
+ *
+ * BH_Boundary explanation:
+ *
+ * There is a problem.  The mpage read code assembles several pages, gets all
+ * their disk mappings, and then submits them all.  That's fine, but obtaining
+ * the disk mappings may require I/O.  Reads of indirect blocks, for example.
+ *
+ * So an mpage read of the first 16 blocks of an ext2 file will cause I/O to be
+ * submitted in the following order:
+ *	12 0 1 2 3 4 5 6 7 8 9 10 11 13 14 15 16
+ * because the indirect block has to be read to get the mappings of blocks
+ * 13,14,15,16.  Obviously, this impacts performance.
+ *
+ * So what we do it to allow the filesystem's get_block() function to set
+ * BH_Boundary when it maps block 11.  BH_Boundary says: mapping of the block
+ * after this one will require I/O against a block which is probably close to
+ * this one.  So you should push what I/O you have currently accumulated.
+ *
+ * This all causes the disk requests to be issued in the correct order.
+ */
 int
 mpage_readpages(struct address_space *mapping, struct list_head *pages,
 		unsigned nr_pages, get_block_t get_block)
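
As background for the comment being moved: a filesystem opts into this behaviour by pointing its ->readpages address_space operation at mpage_readpages() and supplying a get_block() that sets BH_Boundary at the right moment. The sketch below is illustrative only, assuming a hypothetical "examplefs"; the block lookup and the boundary condition (iblock == 11, i.e. the last direct block of an ext2-like layout) are placeholders, not code taken from any real filesystem.

#include <linux/fs.h>
#include <linux/buffer_head.h>
#include <linux/mpage.h>

/*
 * Hypothetical block mapper.  A real filesystem would look iblock up in
 * its on-disk metadata; the lookup itself is elided here.
 */
static int examplefs_get_block(struct inode *inode, sector_t iblock,
			       struct buffer_head *bh_result, int create)
{
	sector_t disk_block = 0;	/* placeholder result of the lookup */

	map_bh(bh_result, inode->i_sb, disk_block);

	/*
	 * If mapping the *next* logical block would require reading an
	 * indirect/metadata block, hint that the BIO accumulated so far
	 * should be submitted now (see the BH_Boundary discussion above).
	 * The predicate is a placeholder for that condition.
	 */
	if (iblock == 11)
		set_buffer_boundary(bh_result);

	return 0;
}

/* ->readpages wired straight to the mpage helper. */
static int examplefs_readpages(struct file *file, struct address_space *mapping,
			       struct list_head *pages, unsigned nr_pages)
{
	return mpage_readpages(mapping, pages, nr_pages, examplefs_get_block);
}

static const struct address_space_operations examplefs_aops = {
	.readpages	= examplefs_readpages,
};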