author    Minchan Kim <minchan@kernel.org>    2018-04-05 19:23:42 -0400
committer Linus Torvalds <torvalds@linux-foundation.org>    2018-04-06 00:36:25 -0400
commit    e9e9b7ecee4a139a6fbe2e15ef224ca6b6c47d57 (patch)
tree      179430c7efc50a0da618934d117b152258e82353 /mm/swap_state.c
parent    eaf649ebc3acfbb235ce31cebd06e4876d05758e (diff)
mm: swap: unify cluster-based and vma-based swap readahead
This patch makes do_swap_page() no longer need to be aware of two
different swap readahead algorithms: the cluster-based and vma-based
readahead paths are unified behind a single function call.

Link: http://lkml.kernel.org/r/1509520520-32367-3-git-send-email-minchan@kernel.org
Link: http://lkml.kernel.org/r/20180220085249.151400-3-minchan@kernel.org
Signed-off-by: Minchan Kim <minchan@kernel.org>
Reviewed-by: Andrew Morton <akpm@linux-foundation.org>
Cc: Hugh Dickins <hughd@google.com>
Cc: Huang Ying <ying.huang@intel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm/swap_state.c')
 mm/swap_state.c | 48 +++++++++++++++++++++++++++++++++++++++++-----------
 1 file changed, 37 insertions(+), 11 deletions(-)
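For context, the caller side in mm/memory.c (outside this diffstat) collapses
to a single call. The following is a sketch reconstructed from the commit
description and the function signatures in the hunks below, not the exact
mm/memory.c change:

        /* Before: do_swap_page() had to pick the readahead algorithm itself. */
        if (swap_use_vma_readahead())
                page = do_swap_page_readahead(entry, GFP_HIGHUSER_MOVABLE, vmf);
        else
                page = swapin_readahead(entry, GFP_HIGHUSER_MOVABLE,
                                        vma, vmf->address);

        /* After: one entry point hides the choice; see swapin_readahead() below. */
        page = swapin_readahead(entry, GFP_HIGHUSER_MOVABLE, vmf);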
diff --git a/mm/swap_state.c b/mm/swap_state.c
index db5da2baafb1..b97da97b6846 100644
--- a/mm/swap_state.c
+++ b/mm/swap_state.c
@@ -38,7 +38,7 @@ static const struct address_space_operations swap_aops = {
 
 struct address_space *swapper_spaces[MAX_SWAPFILES] __read_mostly;
 static unsigned int nr_swapper_spaces[MAX_SWAPFILES] __read_mostly;
-bool swap_vma_readahead __read_mostly = true;
+bool enable_vma_readahead __read_mostly = true;
 
 #define SWAP_RA_WIN_SHIFT	(PAGE_SHIFT / 2)
 #define SWAP_RA_HITS_MASK	((1UL << SWAP_RA_WIN_SHIFT) - 1)
@@ -322,6 +322,11 @@ void free_pages_and_swap_cache(struct page **pages, int nr)
 		release_pages(pagep, nr);
 }
 
+static inline bool swap_use_vma_readahead(void)
+{
+	return READ_ONCE(enable_vma_readahead) && !atomic_read(&nr_rotate_swap);
+}
+
 /*
  * Lookup a swap entry in the swap cache. A found page will be returned
  * unlocked and with its refcount incremented - we rely on the kernel
@@ -544,11 +549,10 @@ static unsigned long swapin_nr_pages(unsigned long offset)
 }
 
 /**
- * swapin_readahead - swap in pages in hope we need them soon
+ * swap_cluster_readahead - swap in pages in hope we need them soon
  * @entry: swap entry of this memory
  * @gfp_mask: memory allocation flags
- * @vma: user vma this address belongs to
- * @addr: target address for mempolicy
+ * @vmf: fault information
  *
  * Returns the struct page for entry and addr, after queueing swapin.
  *
@@ -560,10 +564,10 @@ static unsigned long swapin_nr_pages(unsigned long offset)
  * This has been extended to use the NUMA policies from the mm triggering
  * the readahead.
  *
- * Caller must hold down_read on the vma->vm_mm if vma is not NULL.
+ * Caller must hold down_read on the vma->vm_mm if vmf->vma is not NULL.
  */
-struct page *swapin_readahead(swp_entry_t entry, gfp_t gfp_mask,
-			struct vm_area_struct *vma, unsigned long addr)
+struct page *swap_cluster_readahead(swp_entry_t entry, gfp_t gfp_mask,
+				struct vm_fault *vmf)
 {
 	struct page *page;
 	unsigned long entry_offset = swp_offset(entry);
@@ -573,6 +577,8 @@ struct page *swapin_readahead(swp_entry_t entry, gfp_t gfp_mask,
 	struct swap_info_struct *si = swp_swap_info(entry);
 	struct blk_plug plug;
 	bool do_poll = true, page_allocated;
+	struct vm_area_struct *vma = vmf->vma;
+	unsigned long addr = vmf->address;
 
 	mask = swapin_nr_pages(offset) - 1;
 	if (!mask)
@@ -727,7 +733,7 @@ static void swap_ra_info(struct vm_fault *vmf,
 	pte_unmap(orig_pte);
 }
 
-struct page *do_swap_page_readahead(swp_entry_t fentry, gfp_t gfp_mask,
+struct page *swap_vma_readahead(swp_entry_t fentry, gfp_t gfp_mask,
 				struct vm_fault *vmf)
 {
 	struct blk_plug plug;
@@ -774,20 +780,40 @@ skip:
 					ra_info.win == 1);
 }
 
+/**
+ * swapin_readahead - swap in pages in hope we need them soon
+ * @entry: swap entry of this memory
+ * @gfp_mask: memory allocation flags
+ * @vmf: fault information
+ *
+ * Returns the struct page for entry and addr, after queueing swapin.
+ *
+ * It's a main entry function for swap readahead. By the configuration,
+ * it will read ahead blocks by cluster-based(ie, physical disk based)
+ * or vma-based(ie, virtual address based on faulty address) readahead.
+ */
+struct page *swapin_readahead(swp_entry_t entry, gfp_t gfp_mask,
+				struct vm_fault *vmf)
+{
+	return swap_use_vma_readahead() ?
+	       swap_vma_readahead(entry, gfp_mask, vmf) :
+	       swap_cluster_readahead(entry, gfp_mask, vmf);
+}
+
 #ifdef CONFIG_SYSFS
 static ssize_t vma_ra_enabled_show(struct kobject *kobj,
 				   struct kobj_attribute *attr, char *buf)
 {
-	return sprintf(buf, "%s\n", swap_vma_readahead ? "true" : "false");
+	return sprintf(buf, "%s\n", enable_vma_readahead ? "true" : "false");
 }
 static ssize_t vma_ra_enabled_store(struct kobject *kobj,
 				    struct kobj_attribute *attr,
 				    const char *buf, size_t count)
 {
 	if (!strncmp(buf, "true", 4) || !strncmp(buf, "1", 1))
-		swap_vma_readahead = true;
+		enable_vma_readahead = true;
 	else if (!strncmp(buf, "false", 5) || !strncmp(buf, "0", 1))
-		swap_vma_readahead = false;
+		enable_vma_readahead = false;
 	else
 		return -EINVAL;
 
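The vma_ra_enabled attribute touched above is a runtime knob; on kernels with
this patch it is exposed at /sys/kernel/mm/swap/vma_ra_enabled (path assumed
from the attribute name and the surrounding sysfs setup, which this hunk does
not show). A minimal user-space toggle, using only the "true"/"1"/"false"/"0"
strings that vma_ra_enabled_store() parses:

        #include <stdio.h>

        int main(void)
        {
                /* Path assumed: the "swap" kobject under /sys/kernel/mm. */
                FILE *f = fopen("/sys/kernel/mm/swap/vma_ra_enabled", "w");

                if (!f) {
                        perror("vma_ra_enabled");
                        return 1;
                }
                /* vma_ra_enabled_store() also accepts "true", "1" and "0". */
                fputs("false", f);
                fclose(f);
                return 0;
        }

Note that even with the knob set to true, swap_use_vma_readahead() still falls
back to cluster readahead while any rotational swap device is active, via the
nr_rotate_swap check in the helper added above.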