Diffstat (limited to 'mm')
-rw-r--r--   mm/filemap.c   | 10
-rw-r--r--   mm/readahead.c | 97
2 files changed, 64 insertions, 43 deletions
diff --git a/mm/filemap.c b/mm/filemap.c
index 5eb0a6b9d607..49a6fe375d01 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -894,15 +894,15 @@ void do_generic_mapping_read(struct address_space *mapping,
 find_page:
 	page = find_get_page(mapping, index);
 	if (!page) {
-		page_cache_readahead_ondemand(mapping,
-				&ra, filp, page,
+		page_cache_sync_readahead(mapping,
+				&ra, filp,
 				index, last_index - index);
 		page = find_get_page(mapping, index);
 		if (unlikely(page == NULL))
 			goto no_cached_page;
 	}
 	if (PageReadahead(page)) {
-		page_cache_readahead_ondemand(mapping,
+		page_cache_async_readahead(mapping,
 				&ra, filp, page,
 				index, last_index - index);
 	}
@@ -1348,14 +1348,14 @@ retry_find:
 	 */
 	if (VM_SequentialReadHint(vma)) {
 		if (!page) {
-			page_cache_readahead_ondemand(mapping, ra, file, page,
+			page_cache_sync_readahead(mapping, ra, file,
 							   vmf->pgoff, 1);
 			page = find_lock_page(mapping, vmf->pgoff);
 			if (!page)
 				goto no_cached_page;
 		}
 		if (PageReadahead(page)) {
-			page_cache_readahead_ondemand(mapping, ra, file, page,
+			page_cache_async_readahead(mapping, ra, file, page,
 							   vmf->pgoff, 1);
 		}
 	}
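
The two filemap.c hunks above establish the calling convention for the split interface: the sync variant fires on a cache miss (no page exists, so none is passed), the async variant on a PG_readahead marker hit. The following is a minimal sketch of a caller following that convention; example_find_page() and its parameters are hypothetical and not part of this patch, while the two readahead calls are exactly the entry points introduced here:

#include <linux/mm.h>
#include <linux/pagemap.h>

static struct page *example_find_page(struct address_space *mapping,
				      struct file_ra_state *ra,
				      struct file *filp, pgoff_t index,
				      unsigned long nr_pages)
{
	struct page *page = find_get_page(mapping, index);

	if (!page) {
		/* Cache miss: submit a synchronous readahead, then retry. */
		page_cache_sync_readahead(mapping, ra, filp, index, nr_pages);
		page = find_get_page(mapping, index);
	} else if (PageReadahead(page)) {
		/* Look-ahead marker hit: extend the window asynchronously. */
		page_cache_async_readahead(mapping, ra, filp, page,
					   index, nr_pages);
	}

	return page;	/* NULL means the caller must allocate and read */
}
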
diff --git a/mm/readahead.c b/mm/readahead.c
index 205a4a431516..3d262bb738a9 100644
--- a/mm/readahead.c
+++ b/mm/readahead.c
@@ -359,7 +359,7 @@ static unsigned long get_next_ra_size(struct file_ra_state *ra,
 static unsigned long
 ondemand_readahead(struct address_space *mapping,
 		   struct file_ra_state *ra, struct file *filp,
-		   struct page *page, pgoff_t offset,
+		   bool hit_readahead_marker, pgoff_t offset,
 		   unsigned long req_size)
 {
 	unsigned long max;	/* max readahead pages */
@@ -387,7 +387,7 @@ ondemand_readahead(struct address_space *mapping,
 	 * Standalone, small read.
 	 * Read as is, and do not pollute the readahead state.
 	 */
-	if (!page && !sequential) {
+	if (!hit_readahead_marker && !sequential) {
 		return __do_page_cache_readahead(mapping, filp,
 						offset, req_size, 0);
 	}
@@ -408,7 +408,7 @@ ondemand_readahead(struct address_space *mapping,
 	 * E.g. interleaved reads.
 	 * Not knowing its readahead pos/size, bet on the minimal possible one.
 	 */
-	if (page) {
+	if (hit_readahead_marker) {
 		ra_index++;
 		ra_size = min(4 * ra_size, max);
 	}
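
In these hunks the old struct page * argument did double duty as an "is this a marker hit?" flag: within the shown code it is only ever tested, never dereferenced, so it becomes an explicit bool. When the marker is hit but the window state is unknown (e.g. interleaved readers sharing one file), the code bets on the minimal window and quadruples it, capped at the maximum. A toy user-space model of that growth policy, with an assumed 4-page starting window and 32-page cap (both values hypothetical, not from the patch):

#include <stdio.h>

static unsigned long min_ul(unsigned long a, unsigned long b)
{
	return a < b ? a : b;
}

int main(void)
{
	unsigned long max = 32;		/* assumed max readahead, in pages */
	unsigned long ra_size = 4;	/* assumed minimal starting window */

	/* Each marker hit quadruples the window until it saturates at max. */
	for (int i = 1; i <= 4; i++) {
		ra_size = min_ul(4 * ra_size, max);
		printf("after hit %d: window = %lu pages\n", i, ra_size);
	}
	return 0;
}
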
@@ -421,50 +421,71 @@ fill_ra:
 }
 
 /**
- * page_cache_readahead_ondemand - generic file readahead
+ * page_cache_sync_readahead - generic file readahead
  * @mapping: address_space which holds the pagecache and I/O vectors
  * @ra: file_ra_state which holds the readahead state
  * @filp: passed on to ->readpage() and ->readpages()
- * @page: the page at @offset, or NULL if non-present
- * @offset: start offset into @mapping, in PAGE_CACHE_SIZE units
+ * @offset: start offset into @mapping, in pagecache page-sized units
  * @req_size: hint: total size of the read which the caller is performing in
- *            PAGE_CACHE_SIZE units
+ *            pagecache pages
  *
- * page_cache_readahead_ondemand() is the entry point of readahead logic.
- * This function should be called when it is time to perform readahead:
- * 1) @page == NULL
- *    A cache miss happened, time for synchronous readahead.
- * 2) @page != NULL && PageReadahead(@page)
- *    A look-ahead hit occured, time for asynchronous readahead.
+ * page_cache_sync_readahead() should be called when a cache miss happened:
+ * it will submit the read.  The readahead logic may decide to piggyback more
+ * pages onto the read request if access patterns suggest it will improve
+ * performance.
  */
-unsigned long
-page_cache_readahead_ondemand(struct address_space *mapping,
-				struct file_ra_state *ra, struct file *filp,
-				struct page *page, pgoff_t offset,
-				unsigned long req_size)
+void page_cache_sync_readahead(struct address_space *mapping,
+			       struct file_ra_state *ra, struct file *filp,
+			       pgoff_t offset, unsigned long req_size)
 {
 	/* no read-ahead */
 	if (!ra->ra_pages)
-		return 0;
+		return;
 
-	if (page) {
-		/*
-		 * It can be PG_reclaim.
-		 */
-		if (PageWriteback(page))
-			return 0;
-
-		ClearPageReadahead(page);
-
-		/*
-		 * Defer asynchronous read-ahead on IO congestion.
-		 */
-		if (bdi_read_congested(mapping->backing_dev_info))
-			return 0;
-	}
+	/* do read-ahead */
+	ondemand_readahead(mapping, ra, filp, false, offset, req_size);
+}
+EXPORT_SYMBOL_GPL(page_cache_sync_readahead);
+
+/**
+ * page_cache_async_readahead - file readahead for marked pages
+ * @mapping: address_space which holds the pagecache and I/O vectors
+ * @ra: file_ra_state which holds the readahead state
+ * @filp: passed on to ->readpage() and ->readpages()
+ * @page: the page at @offset which has the PG_readahead flag set
+ * @offset: start offset into @mapping, in pagecache page-sized units
+ * @req_size: hint: total size of the read which the caller is performing in
+ *            pagecache pages
+ *
+ * page_cache_async_readahead() should be called when a page is used which
+ * has the PG_readahead flag: this is a marker to suggest that the application
+ * has used up enough of the readahead window that we should start pulling in
+ * more pages. */
+void
+page_cache_async_readahead(struct address_space *mapping,
+			   struct file_ra_state *ra, struct file *filp,
+			   struct page *page, pgoff_t offset,
+			   unsigned long req_size)
+{
+	/* no read-ahead */
+	if (!ra->ra_pages)
+		return;
+
+	/*
+	 * Same bit is used for PG_readahead and PG_reclaim.
+	 */
+	if (PageWriteback(page))
+		return;
+
+	ClearPageReadahead(page);
+
+	/*
+	 * Defer asynchronous read-ahead on IO congestion.
+	 */
+	if (bdi_read_congested(mapping->backing_dev_info))
+		return;
 
 	/* do read-ahead */
-	return ondemand_readahead(mapping, ra, filp, page,
-					offset, req_size);
+	ondemand_readahead(mapping, ra, filp, true, offset, req_size);
 }
-EXPORT_SYMBOL_GPL(page_cache_readahead_ondemand);
+EXPORT_SYMBOL_GPL(page_cache_async_readahead);
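
The async path's guard sequence deserves spelling out: PG_readahead shares its bit with PG_reclaim, so a page under writeback must be skipped (there the bit means something else), the marker is cleared so each hit fires only once, and a congested backing device defers the optional I/O entirely. Below is a hedged sketch of that marker life cycle, assuming the window-filling code sets the flag on one page of each new window; mark_lookahead() and should_start_async() are hypothetical names, while the page-flag helpers and bdi_read_congested() are the real kernel interfaces:

#include <linux/backing-dev.h>
#include <linux/mm.h>

/* Producer: while filling a readahead window, flag one page inside it. */
static void mark_lookahead(struct page *page)
{
	SetPageReadahead(page);		/* consumed by the async path later */
}

/* Consumer: mirrors the guard sequence in page_cache_async_readahead(). */
static int should_start_async(struct page *page,
			      struct backing_dev_info *bdi)
{
	if (PageWriteback(page))	/* bit doubles as PG_reclaim here */
		return 0;

	ClearPageReadahead(page);	/* one-shot: each marker fires once */

	return !bdi_read_congested(bdi);
}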