 arch/x86/include/asm/pgtable.h       | 15 +++++++++++++++
 arch/x86/include/asm/pgtable_types.h | 13 +++++++++++++
 fs/proc/task_mmu.c                   | 21 +++++++++++++++------
 include/asm-generic/pgtable.h        | 15 +++++++++++++++
 include/linux/swapops.h              |  2 ++
 mm/memory.c                          |  2 ++
 mm/rmap.c                            |  6 +++++-
 mm/swapfile.c                        | 19 +++++++++++++++++--
 8 files changed, 84 insertions(+), 9 deletions(-)
diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h
index 7dc305a46058..bd0518a7f197 100644
--- a/arch/x86/include/asm/pgtable.h
+++ b/arch/x86/include/asm/pgtable.h
@@ -314,6 +314,21 @@ static inline pmd_t pmd_mksoft_dirty(pmd_t pmd)
 	return pmd_set_flags(pmd, _PAGE_SOFT_DIRTY);
 }
 
+static inline pte_t pte_swp_mksoft_dirty(pte_t pte)
+{
+	return pte_set_flags(pte, _PAGE_SWP_SOFT_DIRTY);
+}
+
+static inline int pte_swp_soft_dirty(pte_t pte)
+{
+	return pte_flags(pte) & _PAGE_SWP_SOFT_DIRTY;
+}
+
+static inline pte_t pte_swp_clear_soft_dirty(pte_t pte)
+{
+	return pte_clear_flags(pte, _PAGE_SWP_SOFT_DIRTY);
+}
+
 /*
  * Mask out unsupported bits in a present pgprot. Non-present pgprots
  * can use those bits for other purposes, so leave them be.
diff --git a/arch/x86/include/asm/pgtable_types.h b/arch/x86/include/asm/pgtable_types.h
index c98ac63aae48..5e8442f178f9 100644
--- a/arch/x86/include/asm/pgtable_types.h
+++ b/arch/x86/include/asm/pgtable_types.h
@@ -67,6 +67,19 @@
 #define _PAGE_SOFT_DIRTY	(_AT(pteval_t, 0))
 #endif
 
+/*
+ * Tracking the soft dirty bit when a page goes to swap is tricky.
+ * We need a bit which can be stored in the pte _and_ does not
+ * conflict with the swap entry format. On x86 bits 6 and 7 are
+ * *not* involved in swap entry computation, but bit 6 is used for
+ * nonlinear file mapping, so we borrow bit 7 for soft dirty tracking.
+ */
+#ifdef CONFIG_MEM_SOFT_DIRTY
+#define _PAGE_SWP_SOFT_DIRTY	_PAGE_PSE
+#else
+#define _PAGE_SWP_SOFT_DIRTY	(_AT(pteval_t, 0))
+#endif
+
 #if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
 #define _PAGE_NX	(_AT(pteval_t, 1) << _PAGE_BIT_NX)
 #else
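
The comment in the hunk above is the core idea of the patch: once a pte holds a swap entry rather than a hardware mapping, a hardware-defined bit (_PAGE_PSE, bit 7) is free to be reused, since on x86 bits 6 and 7 take no part in swap entry encoding. Below is a minimal userspace sketch of the flag pattern the new pte_swp_* helpers implement, with the pte reduced to a bare uint64_t; the kernel versions in the pgtable.h hunk operate on the arch pte_t via pte_set_flags()/pte_clear_flags().

	#include <assert.h>
	#include <stdint.h>

	/* Bit 7 doubles as _PAGE_PSE for present ptes; swap ptes borrow it. */
	#define SWP_SOFT_DIRTY	(UINT64_C(1) << 7)

	static uint64_t swp_mksoft_dirty(uint64_t pte)     { return pte | SWP_SOFT_DIRTY; }
	static int      swp_soft_dirty(uint64_t pte)       { return (pte & SWP_SOFT_DIRTY) != 0; }
	static uint64_t swp_clear_soft_dirty(uint64_t pte) { return pte & ~SWP_SOFT_DIRTY; }

	int main(void)
	{
		uint64_t pte = 0x1234;	/* hypothetical swap pte value */

		pte = swp_mksoft_dirty(pte);
		assert(swp_soft_dirty(pte));
		assert(swp_clear_soft_dirty(pte) == 0x1234);	/* clears back to the original */
		return 0;
	}
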
diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
index dbf61f6174f0..e2d9bdce5e7e 100644
--- a/fs/proc/task_mmu.c
+++ b/fs/proc/task_mmu.c
@@ -730,8 +730,14 @@ static inline void clear_soft_dirty(struct vm_area_struct *vma,
 	 * of how soft-dirty works.
 	 */
 	pte_t ptent = *pte;
-	ptent = pte_wrprotect(ptent);
-	ptent = pte_clear_flags(ptent, _PAGE_SOFT_DIRTY);
+
+	if (pte_present(ptent)) {
+		ptent = pte_wrprotect(ptent);
+		ptent = pte_clear_flags(ptent, _PAGE_SOFT_DIRTY);
+	} else if (is_swap_pte(ptent)) {
+		ptent = pte_swp_clear_soft_dirty(ptent);
+	}
+
 	set_pte_at(vma->vm_mm, addr, pte, ptent);
 #endif
 }
@@ -752,14 +758,15 @@ static int clear_refs_pte_range(pmd_t *pmd, unsigned long addr,
 	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
 	for (; addr != end; pte++, addr += PAGE_SIZE) {
 		ptent = *pte;
-		if (!pte_present(ptent))
-			continue;
 
 		if (cp->type == CLEAR_REFS_SOFT_DIRTY) {
 			clear_soft_dirty(vma, addr, pte);
 			continue;
 		}
 
+		if (!pte_present(ptent))
+			continue;
+
 		page = vm_normal_page(vma, addr, ptent);
 		if (!page)
 			continue;
@@ -930,8 +937,10 @@ static void pte_to_pagemap_entry(pagemap_entry_t *pme, struct pagemapread *pm,
 		flags = PM_PRESENT;
 		page = vm_normal_page(vma, addr, pte);
 	} else if (is_swap_pte(pte)) {
-		swp_entry_t entry = pte_to_swp_entry(pte);
-
+		swp_entry_t entry;
+		if (pte_swp_soft_dirty(pte))
+			flags2 |= __PM_SOFT_DIRTY;
+		entry = pte_to_swp_entry(pte);
 		frame = swp_type(entry) |
 			(swp_offset(entry) << MAX_SWAPFILES_SHIFT);
 		flags = PM_SWAP;
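
For a swapped page, pagemap reports the swap type and offset packed into the frame field; the hunk above records __PM_SOFT_DIRTY in flags2 before converting, since pte_to_swp_entry() discards the bit (see the include/linux/swapops.h hunk below). A sketch of the frame packing used here, assuming MAX_SWAPFILES_SHIFT is 5 as defined in include/linux/swap.h:

	#include <assert.h>
	#include <stdint.h>

	#define MAX_SWAPFILES_SHIFT	5	/* value from include/linux/swap.h */

	/* Pack a swap entry the way pte_to_pagemap_entry() builds 'frame'. */
	static uint64_t swap_frame(unsigned int type, uint64_t offset)
	{
		return type | (offset << MAX_SWAPFILES_SHIFT);
	}

	int main(void)
	{
		uint64_t frame = swap_frame(2, 0x1000);	/* swap area 2, offset 0x1000 */

		/* Both fields can be recovered from the packed value. */
		assert((frame & ((1u << MAX_SWAPFILES_SHIFT) - 1)) == 2);
		assert((frame >> MAX_SWAPFILES_SHIFT) == 0x1000);
		return 0;
	}
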
diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h
index 2f47ade1b567..2a7e0d10ad9a 100644
--- a/include/asm-generic/pgtable.h
+++ b/include/asm-generic/pgtable.h
@@ -417,6 +417,21 @@ static inline pmd_t pmd_mksoft_dirty(pmd_t pmd)
 {
 	return pmd;
 }
+
+static inline pte_t pte_swp_mksoft_dirty(pte_t pte)
+{
+	return pte;
+}
+
+static inline int pte_swp_soft_dirty(pte_t pte)
+{
+	return 0;
+}
+
+static inline pte_t pte_swp_clear_soft_dirty(pte_t pte)
+{
+	return pte;
+}
 #endif
 
 #ifndef __HAVE_PFNMAP_TRACKING
diff --git a/include/linux/swapops.h b/include/linux/swapops.h
index c5fd30d2a415..8d4fa82bfb91 100644
--- a/include/linux/swapops.h
+++ b/include/linux/swapops.h
@@ -67,6 +67,8 @@ static inline swp_entry_t pte_to_swp_entry(pte_t pte)
 	swp_entry_t arch_entry;
 
 	BUG_ON(pte_file(pte));
+	if (pte_swp_soft_dirty(pte))
+		pte = pte_swp_clear_soft_dirty(pte);
 	arch_entry = __pte_to_swp_entry(pte);
 	return swp_entry(__swp_type(arch_entry), __swp_offset(arch_entry));
 }
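
Stripping the soft-dirty bit before __pte_to_swp_entry() keeps the conversion canonical: two swap ptes naming the same swap slot must decode to the same swp_entry_t whether or not the bit is set. A sketch of that invariant over a simplified u64 pte (the slot layout here is invented for illustration; the real encoding is per-arch):

	#include <assert.h>
	#include <stdint.h>

	#define SWP_SOFT_DIRTY	(UINT64_C(1) << 7)	/* the borrowed bit, as on x86 */

	/* Mimics pte_to_swp_entry(): drop the software bit, then decode. */
	static uint64_t slot_of(uint64_t swap_pte)
	{
		return swap_pte & ~SWP_SOFT_DIRTY;
	}

	int main(void)
	{
		uint64_t clean = 0x4200;		/* hypothetical swap pte */
		uint64_t dirty = clean | SWP_SOFT_DIRTY;

		assert(slot_of(clean) == slot_of(dirty));	/* same slot either way */
		return 0;
	}
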
diff --git a/mm/memory.c b/mm/memory.c
index 1ce2e2a734fc..e98ecad2b9c8 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -3115,6 +3115,8 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
 		exclusive = 1;
 	}
 	flush_icache_page(vma, page);
+	if (pte_swp_soft_dirty(orig_pte))
+		pte = pte_mksoft_dirty(pte);
 	set_pte_at(mm, address, page_table, pte);
 	if (page == swapcache)
 		do_page_add_anon_rmap(page, vma, address, exclusive);
diff --git a/mm/rmap.c b/mm/rmap.c
index cd356df4f71a..83325b80142b 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -1236,6 +1236,7 @@ int try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
 			   swp_entry_to_pte(make_hwpoison_entry(page)));
 	} else if (PageAnon(page)) {
 		swp_entry_t entry = { .val = page_private(page) };
+		pte_t swp_pte;
 
 		if (PageSwapCache(page)) {
 			/*
@@ -1264,7 +1265,10 @@ int try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
 			BUG_ON(TTU_ACTION(flags) != TTU_MIGRATION);
 			entry = make_migration_entry(page, pte_write(pteval));
 		}
-		set_pte_at(mm, address, pte, swp_entry_to_pte(entry));
+		swp_pte = swp_entry_to_pte(entry);
+		if (pte_soft_dirty(pteval))
+			swp_pte = pte_swp_mksoft_dirty(swp_pte);
+		set_pte_at(mm, address, pte, swp_pte);
 		BUG_ON(pte_file(*pte));
 	} else if (IS_ENABLED(CONFIG_MIGRATION) &&
 		   (TTU_ACTION(flags) == TTU_MIGRATION)) {
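
Together with the do_swap_page() hunk in mm/memory.c above, this completes the round trip: try_to_unmap_one() copies a present pte's soft-dirty state into the swap pte on swap-out, and the fault path copies it back into the new present pte on swap-in. A sketch of that cycle over simplified u64 ptes (both bit positions are illustrative stand-ins for the real arch definitions):

	#include <assert.h>
	#include <stdint.h>

	#define PAGE_SOFT_DIRTY		(UINT64_C(1) << 11)	/* present-pte bit (stand-in) */
	#define PAGE_SWP_SOFT_DIRTY	(UINT64_C(1) << 7)	/* swap-pte bit (stand-in) */

	/* Swap-out: carry soft-dirty from the present pte into the swap pte. */
	static uint64_t swap_out(uint64_t pteval, uint64_t swp_pte)
	{
		if (pteval & PAGE_SOFT_DIRTY)
			swp_pte |= PAGE_SWP_SOFT_DIRTY;	/* pte_swp_mksoft_dirty() */
		return swp_pte;
	}

	/* Swap-in: carry it back into the freshly built present pte. */
	static uint64_t swap_in(uint64_t orig_pte, uint64_t pte)
	{
		if (orig_pte & PAGE_SWP_SOFT_DIRTY)
			pte |= PAGE_SOFT_DIRTY;		/* pte_mksoft_dirty() */
		return pte;
	}

	int main(void)
	{
		uint64_t present = PAGE_SOFT_DIRTY;	/* page written since last clear */
		uint64_t swp = swap_out(present, 0);
		uint64_t back = swap_in(swp, 0);

		assert(back & PAGE_SOFT_DIRTY);	/* state survived the swap cycle */
		return 0;
	}
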
diff --git a/mm/swapfile.c b/mm/swapfile.c
index 36af6eeaa67e..6cf2e60983b7 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -866,6 +866,21 @@ unsigned int count_swap_pages(int type, int free)
 }
 #endif /* CONFIG_HIBERNATION */
 
+static inline int maybe_same_pte(pte_t pte, pte_t swp_pte)
+{
+#ifdef CONFIG_MEM_SOFT_DIRTY
+	/*
+	 * When the pte keeps the soft dirty bit, the pte generated
+	 * from the swap entry does not have it; it is still the same
+	 * pte from a logical point of view.
+	 */
+	pte_t swp_pte_dirty = pte_swp_mksoft_dirty(swp_pte);
+	return pte_same(pte, swp_pte) || pte_same(pte, swp_pte_dirty);
+#else
+	return pte_same(pte, swp_pte);
+#endif
+}
+
 /*
  * No need to decide whether this PTE shares the swap entry with others,
  * just let do_wp_page work it out if a write is requested later - to
@@ -892,7 +907,7 @@ static int unuse_pte(struct vm_area_struct *vma, pmd_t *pmd,
 	}
 
 	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
-	if (unlikely(!pte_same(*pte, swp_entry_to_pte(entry)))) {
+	if (unlikely(!maybe_same_pte(*pte, swp_entry_to_pte(entry)))) {
 		mem_cgroup_cancel_charge_swapin(memcg);
 		ret = 0;
 		goto out;
@@ -947,7 +962,7 @@ static int unuse_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
 		 * swapoff spends a _lot_ of time in this loop!
 		 * Test inline before going to call unuse_pte.
 		 */
-		if (unlikely(pte_same(*pte, swp_pte))) {
+		if (unlikely(maybe_same_pte(*pte, swp_pte))) {
 			pte_unmap(pte);
 			ret = unuse_pte(vma, pmd, addr, entry, page);
 			if (ret)
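
maybe_same_pte() is needed because swapoff rebuilds the expected pte from the swap entry alone, and such a pte never carries the soft-dirty bit, while the pte actually installed in the page table may. A sketch of the comparison over simplified u64 ptes (a model, not the kernel types):

	#include <assert.h>
	#include <stdint.h>

	#define PAGE_SWP_SOFT_DIRTY	(UINT64_C(1) << 7)	/* stand-in for the x86 bit */

	/* Accept both the pristine swap pte and its soft-dirty variant. */
	static int maybe_same_pte(uint64_t pte, uint64_t swp_pte)
	{
		return pte == swp_pte || pte == (swp_pte | PAGE_SWP_SOFT_DIRTY);
	}

	int main(void)
	{
		uint64_t expected = 0x4200;	/* pte rebuilt from the swap entry */

		assert(maybe_same_pte(0x4200, expected));
		assert(maybe_same_pte(0x4200 | PAGE_SWP_SOFT_DIRTY, expected));
		assert(!maybe_same_pte(0x4300, expected));
		return 0;
	}
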