| author | Cyrill Gorcunov <gorcunov@gmail.com> | 2013-08-13 19:00:49 -0400 |
|---|---|---|
| committer | Linus Torvalds <torvalds@linux-foundation.org> | 2013-08-13 20:57:47 -0400 |
| commit | 179ef71cbc085252e3fe6b8159263a7ed1d88ea4 (patch) | |
| tree | 423b58912b0bf0dc9697fff3d2205b8ca653968e /fs | |
| parent | 3e6b11df245180949938734bc192eaf32f3a06b3 (diff) | |
mm: save soft-dirty bits on swapped pages
Andy Lutomirski reported that if a page with the _PAGE_SOFT_DIRTY bit
set gets swapped out, the bit is lost and no longer available when the
pte is read back.
To resolve this we introduce the _PAGE_SWP_SOFT_DIRTY bit, which is
saved in the pte entry for a page being swapped out. When such a page
is read back from the swap cache, we check for the bit's presence and,
if it is set, clear it and restore the former _PAGE_SOFT_DIRTY bit.
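The swap-pte accessors this relies on are added on the arch side of the commit, which the fs-limited diffstat on this page does not show. As a rough sketch of the x86 helpers, assuming the usual pte_set_flags()/pte_clear_flags() accessors (pte_swp_soft_dirty() and pte_swp_clear_soft_dirty() appear in the fs/proc hunks below):

```c
/*
 * Sketch (not part of the fs/ diff below) of the helpers this commit
 * introduces in arch/x86/include/asm/pgtable.h; the exact bodies in
 * the tree may differ slightly.
 */
static inline pte_t pte_swp_mksoft_dirty(pte_t pte)
{
	/* save the soft-dirty state into the swap pte on swap-out */
	return pte_set_flags(pte, _PAGE_SWP_SOFT_DIRTY);
}

static inline int pte_swp_soft_dirty(pte_t pte)
{
	/* test whether a swap pte carries the saved soft-dirty bit */
	return pte_flags(pte) & _PAGE_SWP_SOFT_DIRTY;
}

static inline pte_t pte_swp_clear_soft_dirty(pte_t pte)
{
	/* drop the saved bit, e.g. after restoring _PAGE_SOFT_DIRTY */
	return pte_clear_flags(pte, _PAGE_SWP_SOFT_DIRTY);
}
```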
One of the problems was finding a place in the pte entry where the
_PAGE_SWP_SOFT_DIRTY bit can be saved while the page is in swap. The
_PAGE_PSE bit was chosen because it does not intersect with the swap
entry format stored in the pte.
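On x86 that aliasing comes down to a single define. A sketch of the pgtable_types.h side, again outside the fs-limited view shown here, so treat the exact form as illustrative:

```c
/*
 * Sketch of the arch/x86/include/asm/pgtable_types.h side: bit 7
 * (_PAGE_PSE) is not used by the x86 swap entry encoding, so it can
 * carry the soft-dirty state while the page is swapped out.
 */
#ifdef CONFIG_MEM_SOFT_DIRTY
#define _PAGE_SWP_SOFT_DIRTY	_PAGE_PSE
#else
#define _PAGE_SWP_SOFT_DIRTY	(_AT(pteval_t, 0))
#endif
```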
Reported-by: Andy Lutomirski <luto@amacapital.net>
Signed-off-by: Cyrill Gorcunov <gorcunov@openvz.org>
Acked-by: Pavel Emelyanov <xemul@parallels.com>
Cc: Matt Mackall <mpm@selenic.com>
Cc: Xiao Guangrong <xiaoguangrong@linux.vnet.ibm.com>
Cc: Marcelo Tosatti <mtosatti@redhat.com>
Cc: KOSAKI Motohiro <kosaki.motohiro@gmail.com>
Cc: Stephen Rothwell <sfr@canb.auug.org.au>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: "Aneesh Kumar K.V" <aneesh.kumar@linux.vnet.ibm.com>
Reviewed-by: Minchan Kim <minchan@kernel.org>
Reviewed-by: Wanpeng Li <liwanp@linux.vnet.ibm.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'fs')
-rw-r--r-- | fs/proc/task_mmu.c | 21
1 file changed, 15 insertions(+), 6 deletions(-)
```diff
diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
index dbf61f6174f0..e2d9bdce5e7e 100644
--- a/fs/proc/task_mmu.c
+++ b/fs/proc/task_mmu.c
@@ -730,8 +730,14 @@ static inline void clear_soft_dirty(struct vm_area_struct *vma,
 	 * of how soft-dirty works.
 	 */
 	pte_t ptent = *pte;
-	ptent = pte_wrprotect(ptent);
-	ptent = pte_clear_flags(ptent, _PAGE_SOFT_DIRTY);
+
+	if (pte_present(ptent)) {
+		ptent = pte_wrprotect(ptent);
+		ptent = pte_clear_flags(ptent, _PAGE_SOFT_DIRTY);
+	} else if (is_swap_pte(ptent)) {
+		ptent = pte_swp_clear_soft_dirty(ptent);
+	}
+
 	set_pte_at(vma->vm_mm, addr, pte, ptent);
 #endif
 }
@@ -752,14 +758,15 @@ static int clear_refs_pte_range(pmd_t *pmd, unsigned long addr,
 	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
 	for (; addr != end; pte++, addr += PAGE_SIZE) {
 		ptent = *pte;
-		if (!pte_present(ptent))
-			continue;
 
 		if (cp->type == CLEAR_REFS_SOFT_DIRTY) {
 			clear_soft_dirty(vma, addr, pte);
 			continue;
 		}
 
+		if (!pte_present(ptent))
+			continue;
+
 		page = vm_normal_page(vma, addr, ptent);
 		if (!page)
 			continue;
@@ -930,8 +937,10 @@ static void pte_to_pagemap_entry(pagemap_entry_t *pme, struct pagemapread *pm,
 		flags = PM_PRESENT;
 		page = vm_normal_page(vma, addr, pte);
 	} else if (is_swap_pte(pte)) {
-		swp_entry_t entry = pte_to_swp_entry(pte);
-
+		swp_entry_t entry;
+		if (pte_swp_soft_dirty(pte))
+			flags2 |= __PM_SOFT_DIRTY;
+		entry = pte_to_swp_entry(pte);
 		frame = swp_type(entry) |
 			(swp_offset(entry) << MAX_SWAPFILES_SHIFT);
 		flags = PM_SWAP;
```
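Note that the second hunk moves the !pte_present() check below the CLEAR_REFS_SOFT_DIRTY handling so that swap ptes also reach clear_soft_dirty(). For context on what the pagemap hunk enables: userspace clears soft-dirty state by writing 4 to /proc/pid/clear_refs and reads it back as bit 55 of each 64-bit /proc/pid/pagemap entry, which with this patch is reported for swapped-out pages too. A minimal, hypothetical userspace sketch of that workflow (assumes CONFIG_MEM_SOFT_DIRTY; newer kernels may require CAP_SYS_ADMIN to read pagemap):

```c
/* Hypothetical demo, not part of this commit: exercise the soft-dirty
 * ABI that the pagemap hunk above feeds.
 */
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

#define PAGEMAP_SOFT_DIRTY (1ULL << 55)	/* bit 55: pte is soft-dirty */

/* Read the 64-bit pagemap entry for the page containing addr. */
static uint64_t pagemap_entry(uintptr_t addr)
{
	uint64_t ent = 0;
	long psize = sysconf(_SC_PAGESIZE);
	off_t off = (off_t)(addr / psize) * sizeof(ent);
	int fd = open("/proc/self/pagemap", O_RDONLY);

	if (fd >= 0) {
		if (pread(fd, &ent, sizeof(ent), off) != sizeof(ent))
			ent = 0;
		close(fd);
	}
	return ent;
}

int main(void)
{
	volatile char *buf = malloc(4096);
	int fd = open("/proc/self/clear_refs", O_WRONLY);

	if (!buf || fd < 0)
		return 1;

	buf[0] = 1;			/* fault the page in and dirty it */
	write(fd, "4", 1);		/* "4" clears soft-dirty bits */
	close(fd);
	printf("after clear: soft-dirty=%d\n",
	       !!(pagemap_entry((uintptr_t)buf) & PAGEMAP_SOFT_DIRTY));

	buf[0] = 2;			/* write again: bit should come back */
	printf("after write: soft-dirty=%d\n",
	       !!(pagemap_entry((uintptr_t)buf) & PAGEMAP_SOFT_DIRTY));
	return 0;
}
```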