author      Peter Feiner <pfeiner@google.com>               2014-08-06 19:08:09 -0400
committer   Linus Torvalds <torvalds@linux-foundation.org>  2014-08-06 21:01:22 -0400
commit      68b5a652485682f67eacdee3deae640fb7845b63 (patch)
tree        727c13d01ca9c377d5936984c826c902a38b32be /fs/proc
parent      3a91053aebb23205caf67927be00c54cef6424b3 (diff)
mm: softdirty: respect VM_SOFTDIRTY in PTE holes
After a VMA is created with the VM_SOFTDIRTY flag set, /proc/pid/pagemap
should report that the VMA's virtual pages are soft-dirty until
VM_SOFTDIRTY is cleared (i.e., by the next write of "4" to
/proc/pid/clear_refs). However, pagemap ignores the VM_SOFTDIRTY flag
for virtual addresses that fall in PTE holes (i.e., virtual addresses
that don't have a PMD, PUD, or PGD allocated yet).
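For context, each /proc/pid/pagemap entry is 64 bits wide and, per Documentation/vm/soft-dirty.txt, bit 55 carries the soft-dirty state. A minimal user-space check might look like the sketch below; the helper name is illustrative, not part of the kernel or this patch.

	#include <stdint.h>
	#include <sys/types.h>
	#include <unistd.h>

	/* Read the 64-bit pagemap entry covering one virtual address and
	 * test bit 55, the soft-dirty bit. Illustrative helper only. */
	static int page_is_soft_dirty(int pagemap_fd, unsigned long vaddr)
	{
		uint64_t entry;
		off_t off = (off_t)(vaddr / sysconf(_SC_PAGESIZE)) * sizeof(entry);

		if (pread(pagemap_fd, &entry, sizeof(entry), off) != sizeof(entry))
			return -1;
		return (int)((entry >> 55) & 1);
	}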
To observe this bug, use mmap to create a VMA large enough such that
there's a good chance that the VMA will occupy an unused PMD, then test
the soft-dirty bit on its pages. In practice, I found that a VMA that
covered a PMD's worth of address space was big enough.
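A repro along those lines might look like the following sketch (assumptions: x86-64 with 4 KiB pages, where 2 MiB is one PMD's worth of address space, and the page_is_soft_dirty() helper from the sketch above). On an affected kernel it prints 0 where 1 is expected.

	#include <fcntl.h>
	#include <stdio.h>
	#include <sys/mman.h>
	#include <unistd.h>

	int main(void)
	{
		/* A fresh anonymous mapping gets VM_SOFTDIRTY; leave the pages
		 * untouched so no page tables are allocated (a PTE hole). */
		size_t len = 2UL << 20;	/* one PMD's span on x86-64 */
		char *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
			       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
		int fd = open("/proc/self/pagemap", O_RDONLY);

		if (p == MAP_FAILED || fd < 0)
			return 1;
		/* Expect 1 (soft-dirty); the buggy kernel reports 0 here. */
		printf("soft-dirty: %d\n",
		       page_is_soft_dirty(fd, (unsigned long)p));
		return 0;
	}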
This patch adds the necessary VMA lookup to the PTE hole callback in
/proc/pid/pagemap's page walk and reports pages as soft-dirty according
to each VMA's VM_SOFTDIRTY flag.
Signed-off-by: Peter Feiner <pfeiner@google.com>
Acked-by: Cyrill Gorcunov <gorcunov@openvz.org>
Cc: Pavel Emelyanov <xemul@parallels.com>
Cc: Hugh Dickins <hughd@google.com>
Acked-by: Naoya Horiguchi <n-horiguchi@ah.jp.nec.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'fs/proc')
-rw-r--r--  fs/proc/task_mmu.c | 27 +++++++++++++++++++++------
1 file changed, 21 insertions(+), 6 deletions(-)
diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
index cfa63ee92c96..dfc791c42d64 100644
--- a/fs/proc/task_mmu.c
+++ b/fs/proc/task_mmu.c
@@ -925,15 +925,30 @@ static int pagemap_pte_hole(unsigned long start, unsigned long end,
 			    struct mm_walk *walk)
 {
 	struct pagemapread *pm = walk->private;
-	unsigned long addr;
+	unsigned long addr = start;
 	int err = 0;
-	pagemap_entry_t pme = make_pme(PM_NOT_PRESENT(pm->v2));
 
-	for (addr = start; addr < end; addr += PAGE_SIZE) {
-		err = add_to_pagemap(addr, &pme, pm);
-		if (err)
-			break;
+	while (addr < end) {
+		struct vm_area_struct *vma = find_vma(walk->mm, addr);
+		pagemap_entry_t pme = make_pme(PM_NOT_PRESENT(pm->v2));
+		unsigned long vm_end;
+
+		if (!vma) {
+			vm_end = end;
+		} else {
+			vm_end = min(end, vma->vm_end);
+			if (vma->vm_flags & VM_SOFTDIRTY)
+				pme.pme |= PM_STATUS2(pm->v2, __PM_SOFT_DIRTY);
+		}
+
+		for (; addr < vm_end; addr += PAGE_SIZE) {
+			err = add_to_pagemap(addr, &pme, pm);
+			if (err)
+				goto out;
+		}
 	}
+
+out:
 	return err;
 }
 