aboutsummaryrefslogtreecommitdiffstats
path: root/mm
diff options
context:
space:
mode:
authorPeter Feiner <pfeiner@google.com>2014-10-13 18:55:46 -0400
committerLinus Torvalds <torvalds@linux-foundation.org>2014-10-13 20:18:28 -0400
commit64e455079e1bd7787cc47be30b7f601ce682a5f6 (patch)
tree05193bd91be3ffc0d33ddd3ffb654ef4c23778f9 /mm
parent63a12d9d01831208a47f5c0fbbf93f503d1fb162 (diff)
mm: softdirty: enable write notifications on VMAs after VM_SOFTDIRTY cleared
For VMAs that don't want write notifications, PTEs created for read faults
have their write bit set. If the read fault happens after VM_SOFTDIRTY is
cleared, then the PTE's softdirty bit will remain clear after subsequent
writes.

Here's a simple code snippet to demonstrate the bug:

  char* m = mmap(NULL, getpagesize(), PROT_READ | PROT_WRITE,
                 MAP_ANONYMOUS | MAP_SHARED, -1, 0);
  system("echo 4 > /proc/$PPID/clear_refs"); /* clear VM_SOFTDIRTY */
  assert(*m == '\0');     /* new PTE allows write access */
  assert(!soft_dirty(m));
  *m = 'x';               /* should dirty the page */
  assert(soft_dirty(m));  /* fails */

With this patch, write notifications are enabled when VM_SOFTDIRTY is
cleared. Furthermore, to avoid unnecessary faults, write notifications are
disabled when VM_SOFTDIRTY is set.

As a side effect of enabling and disabling write notifications with care,
this patch fixes a bug in mprotect where vm_page_prot bits set by drivers
were zapped on mprotect. An analogous bug was fixed in mmap by commit
c9d0bf241451 ("mm: uncached vma support with writenotify").

Signed-off-by: Peter Feiner <pfeiner@google.com>
Reported-by: Peter Feiner <pfeiner@google.com>
Suggested-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Cc: Cyrill Gorcunov <gorcunov@openvz.org>
Cc: Pavel Emelyanov <xemul@parallels.com>
Cc: Jamie Liu <jamieliu@google.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: Naoya Horiguchi <n-horiguchi@ah.jp.nec.com>
Cc: Bjorn Helgaas <bhelgaas@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm')
-rw-r--r--mm/memory.c3
-rw-r--r--mm/mmap.c45
-rw-r--r--mm/mprotect.c20
3 files changed, 35 insertions, 33 deletions
diff --git a/mm/memory.c b/mm/memory.c
index e229970e4223..1cc6bfbd872e 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -2053,7 +2053,8 @@ static int do_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
 	old_page = vm_normal_page(vma, address, orig_pte);
 	if (!old_page) {
 		/*
-		 * VM_MIXEDMAP !pfn_valid() case
+		 * VM_MIXEDMAP !pfn_valid() case, or VM_SOFTDIRTY clear on a
+		 * VM_PFNMAP VMA.
 		 *
 		 * We should not cow pages in a shared writeable mapping.
 		 * Just mark the pages writable as we can't do any dirty
diff --git a/mm/mmap.c b/mm/mmap.c
index 93d28c7e5420..7f855206e7fb 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -89,6 +89,25 @@ pgprot_t vm_get_page_prot(unsigned long vm_flags)
 }
 EXPORT_SYMBOL(vm_get_page_prot);
 
+static pgprot_t vm_pgprot_modify(pgprot_t oldprot, unsigned long vm_flags)
+{
+	return pgprot_modify(oldprot, vm_get_page_prot(vm_flags));
+}
+
+/* Update vma->vm_page_prot to reflect vma->vm_flags. */
+void vma_set_page_prot(struct vm_area_struct *vma)
+{
+	unsigned long vm_flags = vma->vm_flags;
+
+	vma->vm_page_prot = vm_pgprot_modify(vma->vm_page_prot, vm_flags);
+	if (vma_wants_writenotify(vma)) {
+		vm_flags &= ~VM_SHARED;
+		vma->vm_page_prot = vm_pgprot_modify(vma->vm_page_prot,
+						     vm_flags);
+	}
+}
+
+
 int sysctl_overcommit_memory __read_mostly = OVERCOMMIT_GUESS;  /* heuristic overcommit */
 int sysctl_overcommit_ratio __read_mostly = 50;	/* default is 50% */
 unsigned long sysctl_overcommit_kbytes __read_mostly;
@@ -1475,11 +1494,16 @@ int vma_wants_writenotify(struct vm_area_struct *vma)
 	if (vma->vm_ops && vma->vm_ops->page_mkwrite)
 		return 1;
 
-	/* The open routine did something to the protections already? */
+	/* The open routine did something to the protections that pgprot_modify
+	 * won't preserve? */
 	if (pgprot_val(vma->vm_page_prot) !=
-	    pgprot_val(vm_get_page_prot(vm_flags)))
+	    pgprot_val(vm_pgprot_modify(vma->vm_page_prot, vm_flags)))
 		return 0;
 
+	/* Do we need to track softdirty? */
+	if (IS_ENABLED(CONFIG_MEM_SOFT_DIRTY) && !(vm_flags & VM_SOFTDIRTY))
+		return 1;
+
 	/* Specialty mapping? */
 	if (vm_flags & VM_PFNMAP)
 		return 0;
@@ -1615,21 +1639,6 @@ munmap_back:
 			goto free_vma;
 	}
 
-	if (vma_wants_writenotify(vma)) {
-		pgprot_t pprot = vma->vm_page_prot;
-
-		/* Can vma->vm_page_prot have changed??
-		 *
-		 * Answer: Yes, drivers may have changed it in their
-		 * f_op->mmap method.
-		 *
-		 * Ensures that vmas marked as uncached stay that way.
-		 */
-		vma->vm_page_prot = vm_get_page_prot(vm_flags & ~VM_SHARED);
-		if (pgprot_val(pprot) == pgprot_val(pgprot_noncached(pprot)))
-			vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
-	}
-
 	vma_link(mm, vma, prev, rb_link, rb_parent);
 	/* Once vma denies write, undo our temporary denial count */
 	if (file) {
@@ -1663,6 +1672,8 @@ out:
 	 */
 	vma->vm_flags |= VM_SOFTDIRTY;
 
+	vma_set_page_prot(vma);
+
 	return addr;
 
 unmap_and_free_vma:
diff --git a/mm/mprotect.c b/mm/mprotect.c
index c43d557941f8..ace93454ce8e 100644
--- a/mm/mprotect.c
+++ b/mm/mprotect.c
@@ -29,13 +29,6 @@
 #include <asm/cacheflush.h>
 #include <asm/tlbflush.h>
 
-#ifndef pgprot_modify
-static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot)
-{
-	return newprot;
-}
-#endif
-
 /*
  * For a prot_numa update we only hold mmap_sem for read so there is a
  * potential race with faulting where a pmd was temporarily none. This
@@ -93,7 +86,9 @@ static unsigned long change_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
 			 * Avoid taking write faults for pages we
 			 * know to be dirty.
 			 */
-			if (dirty_accountable && pte_dirty(ptent))
+			if (dirty_accountable && pte_dirty(ptent) &&
+			    (pte_soft_dirty(ptent) ||
+			     !(vma->vm_flags & VM_SOFTDIRTY)))
 				ptent = pte_mkwrite(ptent);
 			ptep_modify_prot_commit(mm, addr, pte, ptent);
 			updated = true;
@@ -320,13 +315,8 @@ success:
 	 * held in write mode.
 	 */
 	vma->vm_flags = newflags;
-	vma->vm_page_prot = pgprot_modify(vma->vm_page_prot,
-					  vm_get_page_prot(newflags));
-
-	if (vma_wants_writenotify(vma)) {
-		vma->vm_page_prot = vm_get_page_prot(newflags & ~VM_SHARED);
-		dirty_accountable = 1;
-	}
+	dirty_accountable = vma_wants_writenotify(vma);
+	vma_set_page_prot(vma);
 
 	change_protection(vma, start, end, vma->vm_page_prot,
 			  dirty_accountable, 0);