diff options
author | Peter Feiner <pfeiner@google.com> | 2014-10-13 18:55:46 -0400 |
---|---|---|
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2014-10-13 20:18:28 -0400 |
commit | 64e455079e1bd7787cc47be30b7f601ce682a5f6 (patch) | |
tree | 05193bd91be3ffc0d33ddd3ffb654ef4c23778f9 /mm/mprotect.c | |
parent | 63a12d9d01831208a47f5c0fbbf93f503d1fb162 (diff) |
mm: softdirty: enable write notifications on VMAs after VM_SOFTDIRTY cleared
For VMAs that don't want write notifications, PTEs created for read faults
have their write bit set. If the read fault happens after VM_SOFTDIRTY is
cleared, then the PTE's softdirty bit will remain clear after subsequent
writes.
Here's a simple code snippet to demonstrate the bug:
char* m = mmap(NULL, getpagesize(), PROT_READ | PROT_WRITE,
MAP_ANONYMOUS | MAP_SHARED, -1, 0);
system("echo 4 > /proc/$PPID/clear_refs"); /* clear VM_SOFTDIRTY */
assert(*m == '\0'); /* new PTE allows write access */
assert(!soft_dirty(m)); /* page not yet soft-dirty */
*m = 'x'; /* should dirty the page */
assert(soft_dirty(m)); /* fails */
With this patch, write notifications are enabled when VM_SOFTDIRTY is
cleared. Furthermore, to avoid unnecessary faults, write notifications
are disabled when VM_SOFTDIRTY is set.
As a side effect of enabling and disabling write notifications with
care, this patch fixes a bug in mprotect where vm_page_prot bits set by
drivers were zapped on mprotect. An analogous bug was fixed in mmap by
commit c9d0bf241451 ("mm: uncached vma support with writenotify").
Signed-off-by: Peter Feiner <pfeiner@google.com>
Reported-by: Peter Feiner <pfeiner@google.com>
Suggested-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Cc: Cyrill Gorcunov <gorcunov@openvz.org>
Cc: Pavel Emelyanov <xemul@parallels.com>
Cc: Jamie Liu <jamieliu@google.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: Naoya Horiguchi <n-horiguchi@ah.jp.nec.com>
Cc: Bjorn Helgaas <bhelgaas@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm/mprotect.c')
-rw-r--r-- | mm/mprotect.c | 20 |
1 file changed, 5 insertions(+), 15 deletions(-)
diff --git a/mm/mprotect.c b/mm/mprotect.c index c43d557941f8..ace93454ce8e 100644 --- a/mm/mprotect.c +++ b/mm/mprotect.c | |||
@@ -29,13 +29,6 @@ | |||
29 | #include <asm/cacheflush.h> | 29 | #include <asm/cacheflush.h> |
30 | #include <asm/tlbflush.h> | 30 | #include <asm/tlbflush.h> |
31 | 31 | ||
32 | #ifndef pgprot_modify | ||
33 | static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot) | ||
34 | { | ||
35 | return newprot; | ||
36 | } | ||
37 | #endif | ||
38 | |||
39 | /* | 32 | /* |
40 | * For a prot_numa update we only hold mmap_sem for read so there is a | 33 | * For a prot_numa update we only hold mmap_sem for read so there is a |
41 | * potential race with faulting where a pmd was temporarily none. This | 34 | * potential race with faulting where a pmd was temporarily none. This |
@@ -93,7 +86,9 @@ static unsigned long change_pte_range(struct vm_area_struct *vma, pmd_t *pmd, | |||
93 | * Avoid taking write faults for pages we | 86 | * Avoid taking write faults for pages we |
94 | * know to be dirty. | 87 | * know to be dirty. |
95 | */ | 88 | */ |
96 | if (dirty_accountable && pte_dirty(ptent)) | 89 | if (dirty_accountable && pte_dirty(ptent) && |
90 | (pte_soft_dirty(ptent) || | ||
91 | !(vma->vm_flags & VM_SOFTDIRTY))) | ||
97 | ptent = pte_mkwrite(ptent); | 92 | ptent = pte_mkwrite(ptent); |
98 | ptep_modify_prot_commit(mm, addr, pte, ptent); | 93 | ptep_modify_prot_commit(mm, addr, pte, ptent); |
99 | updated = true; | 94 | updated = true; |
@@ -320,13 +315,8 @@ success: | |||
320 | * held in write mode. | 315 | * held in write mode. |
321 | */ | 316 | */ |
322 | vma->vm_flags = newflags; | 317 | vma->vm_flags = newflags; |
323 | vma->vm_page_prot = pgprot_modify(vma->vm_page_prot, | 318 | dirty_accountable = vma_wants_writenotify(vma); |
324 | vm_get_page_prot(newflags)); | 319 | vma_set_page_prot(vma); |
325 | |||
326 | if (vma_wants_writenotify(vma)) { | ||
327 | vma->vm_page_prot = vm_get_page_prot(newflags & ~VM_SHARED); | ||
328 | dirty_accountable = 1; | ||
329 | } | ||
330 | 320 | ||
331 | change_protection(vma, start, end, vma->vm_page_prot, | 321 | change_protection(vma, start, end, vma->vm_page_prot, |
332 | dirty_accountable, 0); | 322 | dirty_accountable, 0); |