author		Rusty Russell <rusty@rustcorp.com.au>	2007-07-19 04:48:08 -0400
committer	Linus Torvalds <torvalds@woody.linux-foundation.org>	2007-07-19 13:04:44 -0400
commit		cf914a7d656e62b9dd3e0dffe4f62b953ae6048d (patch)
tree		baf7e79de006ca80eac426d2d1be4c52f5f19624 /mm/readahead.c
parent		fe3cba17c49471e99d3421e675fc8b3deaaf0b70 (diff)
readahead: split ondemand readahead interface into two functions
Split ondemand readahead interface into two functions. I think this makes it
a little clearer for non-readahead experts (like Rusty).
Internally they both call ondemand_readahead(), but the page argument is
changed to an obvious boolean flag.
Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>
Signed-off-by: Fengguang Wu <wfg@mail.ustc.edu.cn>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm/readahead.c')
-rw-r--r--	mm/readahead.c	97
1 file changed, 59 insertions(+), 38 deletions(-)
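
For context before the diff: after this change, a read path uses the two entry points roughly as in the sketch below. This is a minimal illustration loosely modeled on the caller in mm/filemap.c (do_generic_mapping_read()); the locals mapping, ra, filp, index and last_index are assumed to exist in the caller and are illustrative, not part of this patch.

	struct page *page;

	page = find_get_page(mapping, index);
	if (!page) {
		/* cache miss: submit a synchronous readahead, then retry */
		page_cache_sync_readahead(mapping, ra, filp,
					  index, last_index - index);
		page = find_get_page(mapping, index);
	}
	if (page && PageReadahead(page))
		/* look-ahead marker hit: top up the window asynchronously */
		page_cache_async_readahead(mapping, ra, filp,
					   page, index, last_index - index);

Internally both calls funnel into ondemand_readahead(), with hit_readahead_marker false in the sync case and true in the async case, as the diff shows.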
diff --git a/mm/readahead.c b/mm/readahead.c
index 205a4a431516..3d262bb738a9 100644
--- a/mm/readahead.c
+++ b/mm/readahead.c
@@ -359,7 +359,7 @@ static unsigned long get_next_ra_size(struct file_ra_state *ra,
 static unsigned long
 ondemand_readahead(struct address_space *mapping,
 		   struct file_ra_state *ra, struct file *filp,
-		   struct page *page, pgoff_t offset,
+		   bool hit_readahead_marker, pgoff_t offset,
 		   unsigned long req_size)
 {
 	unsigned long max;	/* max readahead pages */
@@ -387,7 +387,7 @@ ondemand_readahead(struct address_space *mapping,
 	 * Standalone, small read.
 	 * Read as is, and do not pollute the readahead state.
 	 */
-	if (!page && !sequential) {
+	if (!hit_readahead_marker && !sequential) {
 		return __do_page_cache_readahead(mapping, filp,
 						offset, req_size, 0);
 	}
@@ -408,7 +408,7 @@ ondemand_readahead(struct address_space *mapping,
 	 * E.g. interleaved reads.
 	 * Not knowing its readahead pos/size, bet on the minimal possible one.
 	 */
-	if (page) {
+	if (hit_readahead_marker) {
 		ra_index++;
 		ra_size = min(4 * ra_size, max);
 	}
@@ -421,50 +421,71 @@ fill_ra:
 }
 
 /**
- * page_cache_readahead_ondemand - generic file readahead
+ * page_cache_sync_readahead - generic file readahead
  * @mapping: address_space which holds the pagecache and I/O vectors
  * @ra: file_ra_state which holds the readahead state
  * @filp: passed on to ->readpage() and ->readpages()
- * @page: the page at @offset, or NULL if non-present
- * @offset: start offset into @mapping, in PAGE_CACHE_SIZE units
+ * @offset: start offset into @mapping, in pagecache page-sized units
  * @req_size: hint: total size of the read which the caller is performing in
- *            PAGE_CACHE_SIZE units
+ *            pagecache pages
  *
- * page_cache_readahead_ondemand() is the entry point of readahead logic.
- * This function should be called when it is time to perform readahead:
- * 1) @page == NULL
- *    A cache miss happened, time for synchronous readahead.
- * 2) @page != NULL && PageReadahead(@page)
- *    A look-ahead hit occured, time for asynchronous readahead.
+ * page_cache_sync_readahead() should be called when a cache miss happened:
+ * it will submit the read.  The readahead logic may decide to piggyback more
+ * pages onto the read request if access patterns suggest it will improve
+ * performance.
  */
-unsigned long
-page_cache_readahead_ondemand(struct address_space *mapping,
-			struct file_ra_state *ra, struct file *filp,
-			struct page *page, pgoff_t offset,
-			unsigned long req_size)
+void page_cache_sync_readahead(struct address_space *mapping,
+			       struct file_ra_state *ra, struct file *filp,
+			       pgoff_t offset, unsigned long req_size)
 {
 	/* no read-ahead */
 	if (!ra->ra_pages)
-		return 0;
+		return;
 
-	if (page) {
-		/*
-		 * It can be PG_reclaim.
-		 */
-		if (PageWriteback(page))
-			return 0;
-
-		ClearPageReadahead(page);
-
-		/*
-		 * Defer asynchronous read-ahead on IO congestion.
-		 */
-		if (bdi_read_congested(mapping->backing_dev_info))
-			return 0;
-	}
+	/* do read-ahead */
+	ondemand_readahead(mapping, ra, filp, false, offset, req_size);
+}
+EXPORT_SYMBOL_GPL(page_cache_sync_readahead);
+
+/**
+ * page_cache_async_readahead - file readahead for marked pages
+ * @mapping: address_space which holds the pagecache and I/O vectors
+ * @ra: file_ra_state which holds the readahead state
+ * @filp: passed on to ->readpage() and ->readpages()
+ * @page: the page at @offset which has the PG_readahead flag set
+ * @offset: start offset into @mapping, in pagecache page-sized units
+ * @req_size: hint: total size of the read which the caller is performing in
+ *            pagecache pages
+ *
+ * page_cache_async_ondemand() should be called when a page is used which
+ * has the PG_readahead flag: this is a marker to suggest that the application
+ * has used up enough of the readahead window that we should start pulling in
+ * more pages. */
+void
+page_cache_async_readahead(struct address_space *mapping,
+			   struct file_ra_state *ra, struct file *filp,
+			   struct page *page, pgoff_t offset,
+			   unsigned long req_size)
+{
+	/* no read-ahead */
+	if (!ra->ra_pages)
+		return;
+
+	/*
+	 * Same bit is used for PG_readahead and PG_reclaim.
+	 */
+	if (PageWriteback(page))
+		return;
+
+	ClearPageReadahead(page);
+
+	/*
+	 * Defer asynchronous read-ahead on IO congestion.
+	 */
+	if (bdi_read_congested(mapping->backing_dev_info))
+		return;
 
 	/* do read-ahead */
-	return ondemand_readahead(mapping, ra, filp, page,
-				offset, req_size);
+	ondemand_readahead(mapping, ra, filp, true, offset, req_size);
 }
-EXPORT_SYMBOL_GPL(page_cache_readahead_ondemand);
+EXPORT_SYMBOL_GPL(page_cache_async_readahead);