author     Huang Ying <ying.huang@intel.com>           2017-09-06 19:24:33 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>  2017-09-06 20:27:29 -0400
commit     c4fa63092f216737b60c789968371d9960a598e5 (patch)
tree       c68329a01a3925a41522ab8ea6b3ae437d97185c /mm/swap_state.c
parent     cbc65df240c104bf540af1ad58595bf1eaa5ee10 (diff)
mm, swap: fix swap readahead marking
In the original implementation, it is possible that existing pages in the swap cache (not newly read ahead) could be marked as readahead pages. This makes the swap readahead statistics wrong and influences the swap readahead algorithm too.

This is fixed by marking a page as a readahead page only if it is newly allocated and read from the disk.

When testing with linpack, the swap readahead hit rate increased from ~66% to ~86% after the fix.

Link: http://lkml.kernel.org/r/20170807054038.1843-3-ying.huang@intel.com
Signed-off-by: "Huang, Ying" <ying.huang@intel.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Minchan Kim <minchan@kernel.org>
Cc: Rik van Riel <riel@redhat.com>
Cc: Shaohua Li <shli@kernel.org>
Cc: Hugh Dickins <hughd@google.com>
Cc: Fengguang Wu <fengguang.wu@intel.com>
Cc: Tim Chen <tim.c.chen@intel.com>
Cc: Dave Hansen <dave.hansen@intel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
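For quick reference, here is the fixed readahead loop in condensed form, assembled from the hunk below. It is an illustrative excerpt, not a complete function: the surrounding declarations, the block plug, and the rest of swapin_readahead() are elided.

	for (offset = start_offset; offset <= end_offset; offset++) {
		/* Look up the swap cache; a page is allocated only on a cache miss */
		page = __read_swap_cache_async(swp_entry(swp_type(entry), offset),
					       gfp_mask, vma, addr, &page_allocated);
		if (!page)
			continue;
		if (page_allocated) {
			/* Newly allocated: actually read it and mark it as readahead */
			swap_readpage(page, false);
			if (offset != entry_offset &&
			    likely(!PageTransCompound(page))) {
				SetPageReadahead(page);
				count_vm_event(SWAP_RA);
			}
		}
		/* Pages that were already in the swap cache are left unmarked */
		put_page(page);
	}

The key point is that SetPageReadahead() and the SWAP_RA event are now reached only when page_allocated is true, i.e. when __read_swap_cache_async() missed in the swap cache and the page was actually read from disk, so cache hits no longer pollute the readahead statistics.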
Diffstat (limited to 'mm/swap_state.c')
 mm/swap_state.c | 18 +++++++++++-------
 1 file changed, 11 insertions(+), 7 deletions(-)
diff --git a/mm/swap_state.c b/mm/swap_state.c
index d1bdb31cab13..a901afe9da61 100644
--- a/mm/swap_state.c
+++ b/mm/swap_state.c
@@ -498,7 +498,7 @@ struct page *swapin_readahead(swp_entry_t entry, gfp_t gfp_mask,
 	unsigned long start_offset, end_offset;
 	unsigned long mask;
 	struct blk_plug plug;
-	bool do_poll = true;
+	bool do_poll = true, page_allocated;
 
 	mask = swapin_nr_pages(offset) - 1;
 	if (!mask)
@@ -514,14 +514,18 @@ struct page *swapin_readahead(swp_entry_t entry, gfp_t gfp_mask,
 	blk_start_plug(&plug);
 	for (offset = start_offset; offset <= end_offset ; offset++) {
 		/* Ok, do the async read-ahead now */
-		page = read_swap_cache_async(swp_entry(swp_type(entry), offset),
-						gfp_mask, vma, addr, false);
+		page = __read_swap_cache_async(
+			swp_entry(swp_type(entry), offset),
+			gfp_mask, vma, addr, &page_allocated);
 		if (!page)
 			continue;
-		if (offset != entry_offset &&
-		    likely(!PageTransCompound(page))) {
-			SetPageReadahead(page);
-			count_vm_event(SWAP_RA);
+		if (page_allocated) {
+			swap_readpage(page, false);
+			if (offset != entry_offset &&
+			    likely(!PageTransCompound(page))) {
+				SetPageReadahead(page);
+				count_vm_event(SWAP_RA);
+			}
 		}
 		put_page(page);
 	}