author    Hugh Dickins <hughd@google.com>    2016-12-12 19:44:44 -0500
committer Linus Torvalds <torvalds@linux-foundation.org>    2016-12-12 21:55:08 -0500
commit    dc644a073769dd8949a75691eb4a5bdeb70a7d51 (patch)
tree      da576c3a8a1e38cbef5f00ce3974f45295832a63 /mm/swapfile.c
parent    a6de734bc002fe2027ccc074fbbd87d72957b7a4 (diff)
mm: add three more cond_resched() in swapoff
Add a cond_resched() in the unuse_pmd_range() loop (so as to call it even when pmd none or trans_huge, like zap_pmd_range() does); and in the unuse_mm() loop (since that might skip over many vmas). shmem_unuse() and radix_tree_locate_item() look good enough already.

Those were the obvious places, but in fact the stalls came from find_next_to_unuse(), which sometimes scans through many unused entries. Apply scan_swap_map()'s LATENCY_LIMIT of 256 there too; and only go off to test frontswap_map when a used entry is found.

Link: http://lkml.kernel.org/r/alpine.LSU.2.11.1612052155140.13021@eggly.anvils
Signed-off-by: Hugh Dickins <hughd@google.com>
Reported-by: Eric Dumazet <edumazet@google.com>
Cc: David Rientjes <rientjes@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm/swapfile.c')
-rw-r--r--  mm/swapfile.c  13
1 file changed, 6 insertions(+), 7 deletions(-)
diff --git a/mm/swapfile.c b/mm/swapfile.c
index f30438970cd1..1c6e0321205d 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -1234,6 +1234,7 @@ static inline int unuse_pmd_range(struct vm_area_struct *vma, pud_t *pud,
 
 	pmd = pmd_offset(pud, addr);
 	do {
+		cond_resched();
 		next = pmd_addr_end(addr, end);
 		if (pmd_none_or_trans_huge_or_clear_bad(pmd))
 			continue;
@@ -1313,6 +1314,7 @@ static int unuse_mm(struct mm_struct *mm,
 	for (vma = mm->mmap; vma; vma = vma->vm_next) {
 		if (vma->anon_vma && (ret = unuse_vma(vma, entry, page)))
 			break;
+		cond_resched();
 	}
 	up_read(&mm->mmap_sem);
 	return (ret < 0)? ret: 0;
@@ -1350,15 +1352,12 @@ static unsigned int find_next_to_unuse(struct swap_info_struct *si,
 			prev = 0;
 			i = 1;
 		}
-		if (frontswap) {
-			if (frontswap_test(si, i))
-				break;
-			else
-				continue;
-		}
 		count = READ_ONCE(si->swap_map[i]);
 		if (count && swap_count(count) != SWAP_MAP_BAD)
-			break;
+			if (!frontswap || frontswap_test(si, i))
+				break;
+		if ((i % LATENCY_LIMIT) == 0)
+			cond_resched();
 	}
 	return i;
 }
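
The last hunk applies a simple latency-limiting pattern: yield the CPU every LATENCY_LIMIT entries so that a long run of free swap slots cannot stall other tasks. Below is a minimal, self-contained C sketch of that idea, not the kernel code itself; the userspace cond_resched() stub, the find_next_used() helper and its swap_map argument are illustrative stand-ins, and 256 mirrors the LATENCY_LIMIT value mentioned in the commit message.

#include <sched.h>
#include <stddef.h>
#include <stdio.h>

#define LATENCY_LIMIT 256	/* same bound the commit borrows from scan_swap_map() */

/* Userspace stand-in for the kernel's cond_resched(). */
static void cond_resched(void)
{
	sched_yield();
}

/*
 * Sketch of the scanning pattern: walk a swap-map-like array looking
 * for the next in-use entry, but yield every LATENCY_LIMIT iterations
 * so a long stretch of free entries cannot hog the CPU.
 */
static size_t find_next_used(const unsigned char *swap_map, size_t max,
			     size_t start)
{
	size_t i;

	for (i = start; i < max; i++) {
		if (swap_map[i])		/* used entry found */
			break;
		if ((i % LATENCY_LIMIT) == 0)
			cond_resched();
	}
	return i;
}

int main(void)
{
	static unsigned char map[100000];	/* all free ... */

	map[70000] = 1;				/* ... except one entry */
	printf("next used entry: %zu\n", find_next_used(map, sizeof(map), 1));
	return 0;
}

The periodic (i % LATENCY_LIMIT) check bounds the time between voluntary reschedules without adding a yield to every iteration, which is the same trade-off the real find_next_to_unuse() makes after this patch.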