author		Peter Feiner <pfeiner@google.com>	2014-09-25 19:05:18 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2014-09-26 11:10:35 -0400
commit		87e6d49a000fa6148bd539f632706b02c3485c5d
tree		9d848bb2f91295314dfb423485879cbffb4ad101 /fs/proc/task_mmu.c
parent		5760a97c7143c208fa3a8f8cad0ed7dd672ebd28
mm: softdirty: addresses before VMAs in PTE holes aren't softdirty
In PTE holes that contain VM_SOFTDIRTY VMAs, unmapped addresses before
VM_SOFTDIRTY VMAs are reported as softdirty by /proc/pid/pagemap.  This
bug was introduced in commit 68b5a6524856 ("mm: softdirty: respect
VM_SOFTDIRTY in PTE holes").  That commit made /proc/pid/pagemap look at
VM_SOFTDIRTY in PTE holes but neglected to observe the start of VMAs
returned by find_vma.

Tested: Wrote a selftest that creates a PMD-sized VMA then unmaps the
first page and asserts that the page is not softdirty.  I'm going to send
the pagemap selftest in a later commit.

Signed-off-by: Peter Feiner <pfeiner@google.com>
Cc: Cyrill Gorcunov <gorcunov@openvz.org>
Cc: Pavel Emelyanov <xemul@parallels.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: Naoya Horiguchi <n-horiguchi@ah.jp.nec.com>
Cc: "Kirill A. Shutemov" <kirill@shutemov.name>
Cc: Jamie Liu <jamieliu@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
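The commit message describes its selftest only in prose; below is a minimal,
hypothetical userspace sketch of that style of test, not the selftest later
sent upstream.  It assumes x86-64 (4 KiB pages, 2 MiB PMD), a kernel built
with CONFIG_MEM_SOFT_DIRTY, and the documented /proc/pid/pagemap format in
which each page has a 64-bit entry whose bit 55 is the soft-dirty flag.  It
maps a PMD-sized anonymous region without touching it, unmaps the first page,
and checks that the now-unmapped address is not reported as soft-dirty.

/*
 * Hypothetical sketch only -- not the selftest referenced above.
 * Build: gcc -O2 -o softdirty-hole softdirty-hole.c
 * Requires CONFIG_MEM_SOFT_DIRTY; soft-dirty is bit 55 of a pagemap entry.
 */
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

#define PMD_SIZE	(2UL << 20)	/* assumes a 2 MiB PMD (x86-64) */
#define PM_SOFT_DIRTY	(1ULL << 55)

int main(void)
{
	long page_size = sysconf(_SC_PAGESIZE);
	uint64_t entry;
	char *buf;
	off_t off;
	int fd;

	/*
	 * PMD-sized anonymous VMA.  It is deliberately never touched, so no
	 * page tables are allocated and the pagemap walk takes the PTE-hole
	 * path this patch fixes.  New mappings carry VM_SOFTDIRTY.
	 */
	buf = mmap(NULL, PMD_SIZE, PROT_READ | PROT_WRITE,
		   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (buf == MAP_FAILED) {
		perror("mmap");
		return 1;
	}

	/*
	 * Unmap the first page: its address now lies just before the
	 * remaining VM_SOFTDIRTY VMA.
	 */
	if (munmap(buf, page_size)) {
		perror("munmap");
		return 1;
	}

	fd = open("/proc/self/pagemap", O_RDONLY);
	if (fd < 0) {
		perror("open");
		return 1;
	}
	/* One 64-bit pagemap entry per page, indexed by virtual page number. */
	off = (off_t)((uintptr_t)buf / page_size) * sizeof(entry);
	if (pread(fd, &entry, sizeof(entry), off) != (ssize_t)sizeof(entry)) {
		perror("pread");
		return 1;
	}
	close(fd);

	/* Buggy kernels report the unmapped page as soft-dirty here. */
	if (entry & PM_SOFT_DIRTY) {
		fprintf(stderr, "FAIL: unmapped page reported as soft-dirty\n");
		return 1;
	}
	printf("PASS\n");
	return 0;
}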
Diffstat (limited to 'fs/proc/task_mmu.c')
-rw-r--r--	fs/proc/task_mmu.c	27
1 file changed, 18 insertions(+), 9 deletions(-)
diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
index dfc791c42d64..c34156888d70 100644
--- a/fs/proc/task_mmu.c
+++ b/fs/proc/task_mmu.c
@@ -931,23 +931,32 @@ static int pagemap_pte_hole(unsigned long start, unsigned long end,
 	while (addr < end) {
 		struct vm_area_struct *vma = find_vma(walk->mm, addr);
 		pagemap_entry_t pme = make_pme(PM_NOT_PRESENT(pm->v2));
-		unsigned long vm_end;
+		/* End of address space hole, which we mark as non-present. */
+		unsigned long hole_end;
 
-		if (!vma) {
-			vm_end = end;
-		} else {
-			vm_end = min(end, vma->vm_end);
-			if (vma->vm_flags & VM_SOFTDIRTY)
-				pme.pme |= PM_STATUS2(pm->v2, __PM_SOFT_DIRTY);
+		if (vma)
+			hole_end = min(end, vma->vm_start);
+		else
+			hole_end = end;
+
+		for (; addr < hole_end; addr += PAGE_SIZE) {
+			err = add_to_pagemap(addr, &pme, pm);
+			if (err)
+				goto out;
 		}
 
-		for (; addr < vm_end; addr += PAGE_SIZE) {
+		if (!vma)
+			break;
+
+		/* Addresses in the VMA. */
+		if (vma->vm_flags & VM_SOFTDIRTY)
+			pme.pme |= PM_STATUS2(pm->v2, __PM_SOFT_DIRTY);
+		for (; addr < min(end, vma->vm_end); addr += PAGE_SIZE) {
 			err = add_to_pagemap(addr, &pme, pm);
 			if (err)
 				goto out;
 		}
 	}
-
 out:
 	return err;
 }