path: root/mm/swapfile.c
author	Hugh Dickins <hughd@google.com>	2016-01-15 19:57:34 -0500
committer	Linus Torvalds <torvalds@linux-foundation.org>	2016-01-15 20:56:32 -0500
commit	9f8bdb3f3dad3f8f20df3e8903316cd5bb1c408e (patch)
tree	5e1eb6e9a27377d4738ada576a2456629a310a27 /mm/swapfile.c
parent	88f306b68cbb36e500da4b9601b2e3d13dd683c4 (diff)
mm: make swapoff more robust against soft dirty
Both s390 and powerpc have hit the issue of swapoff hanging, when CONFIG_HAVE_ARCH_SOFT_DIRTY and CONFIG_MEM_SOFT_DIRTY ifdefs were not quite as x86_64 had them. I think it would be much clearer if HAVE_ARCH_SOFT_DIRTY was just a Kconfig option set by architectures to determine whether the MEM_SOFT_DIRTY option should be offered, and the actual code depend upon CONFIG_MEM_SOFT_DIRTY alone.

But won't embark on that change myself: instead make swapoff more robust, by using pte_swp_clear_soft_dirty() on each pte it encounters, without an explicit #ifdef CONFIG_MEM_SOFT_DIRTY. That being a no-op, whether the bit in question is defined as 0 or the asm-generic fallback is used, unless soft dirty is fully turned on.

Why "maybe" in maybe_same_pte()? Rename it pte_same_as_swp().

Signed-off-by: Hugh Dickins <hughd@google.com>
Reviewed-by: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
Acked-by: Cyrill Gorcunov <gorcunov@openvz.org>
Cc: Laurent Dufour <ldufour@linux.vnet.ibm.com>
Cc: Michael Ellerman <mpe@ellerman.id.au>
Cc: Martin Schwidefsky <schwidefsky@de.ibm.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
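To illustrate the reasoning, here is a minimal user-space sketch (a hypothetical pte_t representation and bit position, not the kernel's definitions) showing that the single comparison against a soft-dirty-cleared pte matches the old two-way check, given that a pte generated from a swap entry never carries the soft-dirty bit:

/*
 * User-space model only, NOT kernel code: pte_t is modelled as a
 * plain unsigned long and SWP_SOFT_DIRTY is an arbitrary bit chosen
 * just to demonstrate the comparison logic.
 */
#include <assert.h>
#include <stdio.h>

typedef unsigned long pte_t;

#define SWP_SOFT_DIRTY	(1UL << 7)	/* hypothetical bit position */

static int pte_same(pte_t a, pte_t b)
{
	return a == b;
}

static pte_t pte_swp_mksoft_dirty(pte_t pte)
{
	return pte | SWP_SOFT_DIRTY;
}

static pte_t pte_swp_clear_soft_dirty(pte_t pte)
{
	return pte & ~SWP_SOFT_DIRTY;
}

/* Old approach: compare against both the clean and the dirtied swap pte. */
static int maybe_same_pte(pte_t pte, pte_t swp_pte)
{
	pte_t swp_pte_dirty = pte_swp_mksoft_dirty(swp_pte);
	return pte_same(pte, swp_pte) || pte_same(pte, swp_pte_dirty);
}

/* New approach: strip the bit from the pte first, then compare once. */
static int pte_same_as_swp(pte_t pte, pte_t swp_pte)
{
	return pte_same(pte_swp_clear_soft_dirty(pte), swp_pte);
}

int main(void)
{
	pte_t swp_pte = 0x1234;	/* arbitrary swap pte, soft-dirty bit clear */

	/* Both checks agree whether or not the bit is set in the pte. */
	assert(maybe_same_pte(swp_pte, swp_pte) ==
	       pte_same_as_swp(swp_pte, swp_pte));
	assert(maybe_same_pte(swp_pte | SWP_SOFT_DIRTY, swp_pte) ==
	       pte_same_as_swp(swp_pte | SWP_SOFT_DIRTY, swp_pte));
	assert(!pte_same_as_swp(0x9999, swp_pte));

	printf("old and new checks agree\n");
	return 0;
}

If SWP_SOFT_DIRTY is defined as 0, the clear is a no-op and both checks reduce to a single pte_same(), which is why the new helper needs no #ifdef CONFIG_MEM_SOFT_DIRTY at all.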
Diffstat (limited to 'mm/swapfile.c')
-rw-r--r--	mm/swapfile.c	18
1 file changed, 4 insertions(+), 14 deletions(-)
diff --git a/mm/swapfile.c b/mm/swapfile.c
index 31dc94fb0f60..2bb30aa3a412 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -1111,19 +1111,9 @@ unsigned int count_swap_pages(int type, int free)
 }
 #endif /* CONFIG_HIBERNATION */
 
-static inline int maybe_same_pte(pte_t pte, pte_t swp_pte)
+static inline int pte_same_as_swp(pte_t pte, pte_t swp_pte)
 {
-#ifdef CONFIG_MEM_SOFT_DIRTY
-	/*
-	 * When pte keeps soft dirty bit the pte generated
-	 * from swap entry does not has it, still it's same
-	 * pte from logical point of view.
-	 */
-	pte_t swp_pte_dirty = pte_swp_mksoft_dirty(swp_pte);
-	return pte_same(pte, swp_pte) || pte_same(pte, swp_pte_dirty);
-#else
-	return pte_same(pte, swp_pte);
-#endif
+	return pte_same(pte_swp_clear_soft_dirty(pte), swp_pte);
 }
 
 /*
@@ -1152,7 +1142,7 @@ static int unuse_pte(struct vm_area_struct *vma, pmd_t *pmd,
 	}
 
 	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
-	if (unlikely(!maybe_same_pte(*pte, swp_entry_to_pte(entry)))) {
+	if (unlikely(!pte_same_as_swp(*pte, swp_entry_to_pte(entry)))) {
 		mem_cgroup_cancel_charge(page, memcg, false);
 		ret = 0;
 		goto out;
@@ -1210,7 +1200,7 @@ static int unuse_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
 	 * swapoff spends a _lot_ of time in this loop!
 	 * Test inline before going to call unuse_pte.
 	 */
-	if (unlikely(maybe_same_pte(*pte, swp_pte))) {
+	if (unlikely(pte_same_as_swp(*pte, swp_pte))) {
 		pte_unmap(pte);
 		ret = unuse_pte(vma, pmd, addr, entry, page);
 		if (ret)
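The robustness claim hinges on pte_swp_clear_soft_dirty() being a no-op unless soft dirty is fully turned on. As the commit message notes, the asm-generic fallback is used when an architecture provides no soft-dirty implementation; a sketch of its shape (paraphrased, not quoted from include/asm-generic/pgtable.h of that era) is simply:

static inline pte_t pte_swp_clear_soft_dirty(pte_t pte)
{
	/* no soft-dirty support configured: nothing to clear */
	return pte;
}

With that fallback, pte_same_as_swp() collapses to a plain pte_same(), so every configuration behaves correctly without per-architecture #ifdefs.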