author     Wu Fengguang <fengguang.wu@intel.com>	2009-06-16 18:31:33 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>	2009-06-16 22:47:30 -0400
commit     045a2529a3513faed2d45bd82f9013b124309d94
tree       99b7743b6dab54286afe94d4d7b8113a271661b5 /mm/readahead.c
parent     dc566127dd161b6c997466a2349ac179527ea89b
readahead: move the random read case to bottom
Split all readahead cases, and move the random one to the bottom.

No behavior changes.

This is to prepare for the introduction of context readahead, and to make
it easy to insert accounting/tracing points for each case.

Signed-off-by: Wu Fengguang <fengguang.wu@intel.com>
Cc: Vladislav Bolkhovitin <vst@vlnb.net>
Cc: Jens Axboe <jens.axboe@oracle.com>
Cc: Jeff Moyer <jmoyer@redhat.com>
Cc: Nick Piggin <nickpiggin@yahoo.com.au>
Cc: Ying Han <yinghan@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
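The value of the split is easiest to see in the resulting control flow: after
this patch, ondemand_readahead() handles each access pattern in one flat,
labeled case, so a per-case accounting or tracing hook can be dropped into any
branch without untangling combined conditions. The standalone C sketch below
models that post-patch case ordering; the struct, the trace_case() macro, and
the classify() helper are hypothetical illustrations, not code from this patch
(and prev_pos is kept in pages here for simplicity, where the kernel stores
bytes and shifts by PAGE_CACHE_SHIFT).

	#include <stdio.h>

	/* Hypothetical stand-ins for the kernel types/helpers involved. */
	struct file_ra_state { unsigned long start, size, async_size, prev_pos; };

	/* Hypothetical per-case hook of the kind this split enables. */
	#define trace_case(name) printf("readahead case: %s\n", name)

	/* Sketch of the post-patch case ordering in ondemand_readahead(). */
	static const char *classify(struct file_ra_state *ra, unsigned long offset,
	                            unsigned long req_size, unsigned long max,
	                            int hit_readahead_marker)
	{
		if (!offset) {                              /* start of file */
			trace_case("initial");
			return "initial_readahead";
		}
		if (offset == ra->start + ra->size - ra->async_size ||
		    offset == ra->start + ra->size) {       /* expected callback offset */
			trace_case("sequential");
			return "ramp up window";
		}
		if (hit_readahead_marker) {                 /* e.g. interleaved reads */
			trace_case("interleaved");
			return "context readahead";
		}
		if (req_size > max) {                       /* oversize read */
			trace_case("oversize");
			return "initial_readahead";
		}
		if (offset - ra->prev_pos <= 1UL) {         /* sequential cache miss */
			trace_case("cache miss");
			return "initial_readahead";
		}
		trace_case("random");    /* small random read: do not touch ra state */
		return "read as-is";
	}

	int main(void)
	{
		struct file_ra_state ra = { .start = 100, .size = 32,
		                            .async_size = 16, .prev_pos = 0 };
		/* offset 116 == start + size - async_size: the sequential branch */
		printf("-> %s\n", classify(&ra, 116, 8, 512, 0));
		/* isolated small read far from prev_pos: the random branch */
		printf("-> %s\n", classify(&ra, 5000, 8, 512, 0));
		return 0;
	}

Compiled and run, the two calls in main() exercise the sequential and random
branches respectively.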
Diffstat (limited to 'mm/readahead.c')
-rw-r--r--	mm/readahead.c	46
1 file changed, 25 insertions(+), 21 deletions(-)
diff --git a/mm/readahead.c b/mm/readahead.c
index a7f01fcce9e7..ceed7e4790bd 100644
--- a/mm/readahead.c
+++ b/mm/readahead.c
@@ -339,33 +339,25 @@ ondemand_readahead(struct address_space *mapping,
 		   unsigned long req_size)
 {
 	unsigned long max = max_sane_readahead(ra->ra_pages);
-	pgoff_t prev_offset;
-	int	sequential;
+
+	/*
+	 * start of file
+	 */
+	if (!offset)
+		goto initial_readahead;
 
 	/*
 	 * It's the expected callback offset, assume sequential access.
 	 * Ramp up sizes, and push forward the readahead window.
 	 */
-	if (offset && (offset == (ra->start + ra->size - ra->async_size) ||
-			offset == (ra->start + ra->size))) {
+	if ((offset == (ra->start + ra->size - ra->async_size) ||
+	     offset == (ra->start + ra->size))) {
 		ra->start += ra->size;
 		ra->size = get_next_ra_size(ra, max);
 		ra->async_size = ra->size;
 		goto readit;
 	}
 
-	prev_offset = ra->prev_pos >> PAGE_CACHE_SHIFT;
-	sequential = offset - prev_offset <= 1UL || req_size > max;
-
-	/*
-	 * Standalone, small read.
-	 * Read as is, and do not pollute the readahead state.
-	 */
-	if (!hit_readahead_marker && !sequential) {
-		return __do_page_cache_readahead(mapping, filp,
-						offset, req_size, 0);
-	}
-
 	/*
 	 * Hit a marked page without valid readahead state.
 	 * E.g. interleaved reads.
@@ -391,12 +383,24 @@ ondemand_readahead(struct address_space *mapping,
 	}
 
 	/*
-	 * It may be one of
-	 *  - first read on start of file
-	 *  - sequential cache miss
-	 *  - oversize random read
-	 * Start readahead for it.
+	 * oversize read
 	 */
+	if (req_size > max)
+		goto initial_readahead;
+
+	/*
+	 * sequential cache miss
+	 */
+	if (offset - (ra->prev_pos >> PAGE_CACHE_SHIFT) <= 1UL)
+		goto initial_readahead;
+
+	/*
+	 * standalone, small random read
+	 * Read as is, and do not pollute the readahead state.
+	 */
+	return __do_page_cache_readahead(mapping, filp, offset, req_size, 0);
+
+initial_readahead:
 	ra->start = offset;
 	ra->size = get_init_ra_size(req_size, max);
 	ra->async_size = ra->size > req_size ? ra->size - req_size : ra->size;
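For intuition about the sequential branch above ("Ramp up sizes, and push
forward the readahead window"), here is a small standalone C sketch simulating
how the window advances on each hit at the expected callback offset. The
growth policy in next_window_size() is an assumption loosely modeled on
get_next_ra_size() (grow fast while the window is small, then double, capped
at max); the real helper in mm/readahead.c is authoritative.

	#include <stdio.h>

	struct ra_window { unsigned long start, size, async_size; };

	/* Assumed growth policy modeled on get_next_ra_size():
	 * quadruple while the window is small, then double, capped at max. */
	static unsigned long next_window_size(unsigned long cur, unsigned long max)
	{
		unsigned long next = (cur < max / 16) ? 4 * cur : 2 * cur;
		return next < max ? next : max;
	}

	int main(void)
	{
		struct ra_window ra = { .start = 0, .size = 4, .async_size = 4 };
		const unsigned long max = 128;	/* cap, in pages */

		/* Each iteration mimics one hit on the expected callback offset:
		 * push the window forward and ramp its size (the "goto readit"
		 * path in the hunk above). */
		for (int hit = 1; hit <= 6; hit++) {
			ra.start += ra.size;
			ra.size = next_window_size(ra.size, max);
			ra.async_size = ra.size;
			printf("hit %d: window [%lu, %lu), async_size %lu\n",
			       hit, ra.start, ra.start + ra.size, ra.async_size);
		}
		return 0;
	}

With a 4-page initial window and a 128-page cap, the printed window sizes are
16, 32, 64, 128, 128, 128: the ramp is geometric until it saturates at max.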