commit 64165b1affc5bc16231ac971e66aae7d68d57f2c
tree f076121056eafaf3790cbc23cc0be88b4bc85d79 /mm/swapfile.c
parent dd862deb151aad2548e72b077a82ad3aa91b715f
author Hugh Dickins <hughd@google.com> 2019-04-18 20:50:09 -0400
committer Linus Torvalds <torvalds@linux-foundation.org> 2019-04-19 12:46:04 -0400
mm: swapoff: take notice of completion sooner
The old try_to_unuse() implementation was driven by find_next_to_unuse(),
which terminated as soon as all the swap had been freed.

Add inuse_pages checks now (alongside signal_pending()) to stop scanning
mms and swap_map once finished.

The same ought to be done in shmem_unuse() too, but never was before,
and needs a different interface: so leave it as is for now.

Link: http://lkml.kernel.org/r/alpine.LSU.2.11.1904081258200.1523@eggly.anvils
Fixes: b56a2d8af914 ("mm: rid swapoff of quadratic complexity")
Signed-off-by: Hugh Dickins <hughd@google.com>
Cc: "Alex Xu (Hello71)" <alex_y_xu@yahoo.ca>
Cc: Huang Ying <ying.huang@intel.com>
Cc: Kelley Nielsen <kelleynnn@gmail.com>
Cc: Konstantin Khlebnikov <khlebnikov@yandex-team.ru>
Cc: Rik van Riel <riel@surriel.com>
Cc: Vineeth Pillai <vpillai@digitalocean.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
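The pattern at the heart of the patch is worth seeing in isolation: fold the completion and signal checks into the loop condition itself, so every iteration re-tests them before doing more work. Below is a minimal standalone C sketch of that shape; inuse_pages, signal_pending_now(), next_item() and unuse_item() are hypothetical stand-ins for the kernel's si->inuse_pages, signal_pending(current), find_next_to_unuse() and the per-entry work, not the kernel API itself.

#include <stdbool.h>

/* Hypothetical stand-ins, for illustration only. */
extern unsigned int inuse_pages;	/* like si->inuse_pages */
extern bool signal_pending_now(void);	/* like signal_pending(current) */
extern int next_item(int i);		/* like find_next_to_unuse() */
extern void unuse_item(int i);

/*
 * Stop scanning as soon as all pages are freed or a signal arrives,
 * rather than only noticing after a full pass completes.
 */
static void scan_until_done(void)
{
	int i = 0;

	while (inuse_pages &&
	       !signal_pending_now() &&
	       (i = next_item(i)) != 0)
		unuse_item(i);
}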
Diffstat (limited to 'mm/swapfile.c')
 mm/swapfile.c | 19 +++++++++++--------
 1 file changed, 11 insertions(+), 8 deletions(-)
diff --git a/mm/swapfile.c b/mm/swapfile.c
index bf4ef2e40f23..71383625a582 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -2051,11 +2051,9 @@ retry:
 
 	spin_lock(&mmlist_lock);
 	p = &init_mm.mmlist;
-	while ((p = p->next) != &init_mm.mmlist) {
-		if (signal_pending(current)) {
-			retval = -EINTR;
-			break;
-		}
+	while (si->inuse_pages &&
+	       !signal_pending(current) &&
+	       (p = p->next) != &init_mm.mmlist) {
 
 		mm = list_entry(p, struct mm_struct, mmlist);
 		if (!mmget_not_zero(mm))
@@ -2082,7 +2080,9 @@ retry:
 	mmput(prev_mm);
 
 	i = 0;
-	while ((i = find_next_to_unuse(si, i, frontswap)) != 0) {
+	while (si->inuse_pages &&
+	       !signal_pending(current) &&
+	       (i = find_next_to_unuse(si, i, frontswap)) != 0) {
 
 		entry = swp_entry(type, i);
 		page = find_get_page(swap_address_space(entry), i);
@@ -2123,8 +2123,11 @@ retry:
 	 * separate lists, and wait for those lists to be emptied; but it's
 	 * easier and more robust (though cpu-intensive) just to keep retrying.
 	 */
-	if (si->inuse_pages)
-		goto retry;
+	if (si->inuse_pages) {
+		if (!signal_pending(current))
+			goto retry;
+		retval = -EINTR;
+	}
 out:
 	return (retval == FRONTSWAP_PAGES_UNUSED) ? 0 : retval;
 }
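The exit path in the last hunk applies the same logic one level up: retry whole passes while pages remain in use, but report -EINTR if a signal is what cut the pass short. A minimal sketch, again with hypothetical helpers (scan_pass() stands in for the body of try_to_unuse(); inuse_pages and signal_pending_now() as above):

#include <stdbool.h>
#include <errno.h>

extern unsigned int inuse_pages;	/* like si->inuse_pages */
extern bool signal_pending_now(void);	/* like signal_pending(current) */
extern int scan_pass(void);		/* one pass over mms and swap_map */

static int unuse_all(void)
{
	int retval;
retry:
	retval = scan_pass();
	if (inuse_pages) {
		if (!signal_pending_now())
			goto retry;	/* work left, not interrupted: go again */
		retval = -EINTR;	/* interrupted with work left: tell caller */
	}
	return retval;
}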