about summary refs log tree commit diff stats
diff options
context:
space:
mode:
authorHuang Ying <ying.huang@intel.com>2017-09-06 19:24:43 -0400
committerLinus Torvalds <torvalds@linux-foundation.org>2017-09-06 20:27:30 -0400
commit81a0298bdfab0203d360df7c9bf690d1d457f999 (patch)
treeb235dc190ff9239e11aec51f1b206a21b7c80a2e
parentd9bfcfdc41e8e5d80f7591f95a09ccce7cb8ad05 (diff)
mm, swap: don't use VMA based swap readahead if HDD is used as swap
VMA based swap readahead will readahead the virtual pages that are contiguous in the virtual address space, while the original swap readahead will readahead the swap slots that are contiguous in the swap device. Although VMA based swap readahead is more correct about which swap slots to readahead, it will trigger more small random reads, which may cause the performance of HDD (hard disk) to degrade heavily, and may finally exceed the benefit. To avoid this issue, in this patch, if an HDD is used as swap, the VMA based swap readahead will be disabled, and the original swap readahead will be used instead. Link: http://lkml.kernel.org/r/20170807054038.1843-6-ying.huang@intel.com Signed-off-by: "Huang, Ying" <ying.huang@intel.com> Cc: Johannes Weiner <hannes@cmpxchg.org> Cc: Minchan Kim <minchan@kernel.org> Cc: Rik van Riel <riel@redhat.com> Cc: Shaohua Li <shli@kernel.org> Cc: Hugh Dickins <hughd@google.com> Cc: Fengguang Wu <fengguang.wu@intel.com> Cc: Tim Chen <tim.c.chen@intel.com> Cc: Dave Hansen <dave.hansen@intel.com> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
-rw-r--r--include/linux/swap.h11
-rw-r--r--mm/swapfile.c8
2 files changed, 13 insertions, 6 deletions
diff --git a/include/linux/swap.h b/include/linux/swap.h
index 61d63379e956..9c4ae6f14eea 100644
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -400,16 +400,17 @@ extern struct page *do_swap_page_readahead(swp_entry_t fentry, gfp_t gfp_mask,
400 struct vm_fault *vmf, 400 struct vm_fault *vmf,
401 struct vma_swap_readahead *swap_ra); 401 struct vma_swap_readahead *swap_ra);
402 402
403static inline bool swap_use_vma_readahead(void)
404{
405 return READ_ONCE(swap_vma_readahead);
406}
407
408/* linux/mm/swapfile.c */ 403/* linux/mm/swapfile.c */
409extern atomic_long_t nr_swap_pages; 404extern atomic_long_t nr_swap_pages;
410extern long total_swap_pages; 405extern long total_swap_pages;
406extern atomic_t nr_rotate_swap;
411extern bool has_usable_swap(void); 407extern bool has_usable_swap(void);
412 408
409static inline bool swap_use_vma_readahead(void)
410{
411 return READ_ONCE(swap_vma_readahead) && !atomic_read(&nr_rotate_swap);
412}
413
413/* Swap 50% full? Release swapcache more aggressively.. */ 414/* Swap 50% full? Release swapcache more aggressively.. */
414static inline bool vm_swap_full(void) 415static inline bool vm_swap_full(void)
415{ 416{
diff --git a/mm/swapfile.c b/mm/swapfile.c
index 42eff9e4e972..4f8b3e08a547 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -96,6 +96,8 @@ static DECLARE_WAIT_QUEUE_HEAD(proc_poll_wait);
96/* Activity counter to indicate that a swapon or swapoff has occurred */ 96/* Activity counter to indicate that a swapon or swapoff has occurred */
97static atomic_t proc_poll_event = ATOMIC_INIT(0); 97static atomic_t proc_poll_event = ATOMIC_INIT(0);
98 98
99atomic_t nr_rotate_swap = ATOMIC_INIT(0);
100
99static inline unsigned char swap_count(unsigned char ent) 101static inline unsigned char swap_count(unsigned char ent)
100{ 102{
101 return ent & ~SWAP_HAS_CACHE; /* may include SWAP_HAS_CONT flag */ 103 return ent & ~SWAP_HAS_CACHE; /* may include SWAP_HAS_CONT flag */
@@ -2569,6 +2571,9 @@ SYSCALL_DEFINE1(swapoff, const char __user *, specialfile)
2569 if (p->flags & SWP_CONTINUED) 2571 if (p->flags & SWP_CONTINUED)
2570 free_swap_count_continuations(p); 2572 free_swap_count_continuations(p);
2571 2573
2574 if (!p->bdev || !blk_queue_nonrot(bdev_get_queue(p->bdev)))
2575 atomic_dec(&nr_rotate_swap);
2576
2572 mutex_lock(&swapon_mutex); 2577 mutex_lock(&swapon_mutex);
2573 spin_lock(&swap_lock); 2578 spin_lock(&swap_lock);
2574 spin_lock(&p->lock); 2579 spin_lock(&p->lock);
@@ -3145,7 +3150,8 @@ SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags)
3145 cluster = per_cpu_ptr(p->percpu_cluster, cpu); 3150 cluster = per_cpu_ptr(p->percpu_cluster, cpu);
3146 cluster_set_null(&cluster->index); 3151 cluster_set_null(&cluster->index);
3147 } 3152 }
3148 } 3153 } else
3154 atomic_inc(&nr_rotate_swap);
3149 3155
3150 error = swap_cgroup_swapon(p->type, maxpages); 3156 error = swap_cgroup_swapon(p->type, maxpages);
3151 if (error) 3157 if (error)