Diffstat (limited to 'mm/mprotect.c')
 mm/mprotect.c | 51 ++++++++++++++++++++++++++++++---------------------
 1 file changed, 30 insertions(+), 21 deletions(-)
diff --git a/mm/mprotect.c b/mm/mprotect.c
index 638edabaff71..955f9d0e38aa 100644
--- a/mm/mprotect.c
+++ b/mm/mprotect.c
@@ -27,7 +27,8 @@
 #include <asm/tlbflush.h>
 
 static void change_pte_range(struct mm_struct *mm, pmd_t *pmd,
-		unsigned long addr, unsigned long end, pgprot_t newprot)
+		unsigned long addr, unsigned long end, pgprot_t newprot,
+		int dirty_accountable)
 {
 	pte_t *pte, oldpte;
 	spinlock_t *ptl;
@@ -42,7 +43,14 @@ static void change_pte_range(struct mm_struct *mm, pmd_t *pmd,
 			 * bits by wiping the pte and then setting the new pte
 			 * into place.
 			 */
-			ptent = pte_modify(ptep_get_and_clear(mm, addr, pte), newprot);
+			ptent = ptep_get_and_clear(mm, addr, pte);
+			ptent = pte_modify(ptent, newprot);
+			/*
+			 * Avoid taking write faults for pages we know to be
+			 * dirty.
+			 */
+			if (dirty_accountable && pte_dirty(ptent))
+				ptent = pte_mkwrite(ptent);
 			set_pte_at(mm, addr, pte, ptent);
 			lazy_mmu_prot_update(ptent);
 #ifdef CONFIG_MIGRATION
@@ -66,7 +74,8 @@ static void change_pte_range(struct mm_struct *mm, pmd_t *pmd,
 }
 
 static inline void change_pmd_range(struct mm_struct *mm, pud_t *pud,
-		unsigned long addr, unsigned long end, pgprot_t newprot)
+		unsigned long addr, unsigned long end, pgprot_t newprot,
+		int dirty_accountable)
 {
 	pmd_t *pmd;
 	unsigned long next;
@@ -76,12 +85,13 @@ static inline void change_pmd_range(struct mm_struct *mm, pud_t *pud,
 		next = pmd_addr_end(addr, end);
 		if (pmd_none_or_clear_bad(pmd))
 			continue;
-		change_pte_range(mm, pmd, addr, next, newprot);
+		change_pte_range(mm, pmd, addr, next, newprot, dirty_accountable);
 	} while (pmd++, addr = next, addr != end);
 }
 
 static inline void change_pud_range(struct mm_struct *mm, pgd_t *pgd,
-		unsigned long addr, unsigned long end, pgprot_t newprot)
+		unsigned long addr, unsigned long end, pgprot_t newprot,
+		int dirty_accountable)
 {
 	pud_t *pud;
 	unsigned long next;
@@ -91,12 +101,13 @@ static inline void change_pud_range(struct mm_struct *mm, pgd_t *pgd,
 		next = pud_addr_end(addr, end);
 		if (pud_none_or_clear_bad(pud))
 			continue;
-		change_pmd_range(mm, pud, addr, next, newprot);
+		change_pmd_range(mm, pud, addr, next, newprot, dirty_accountable);
 	} while (pud++, addr = next, addr != end);
 }
 
 static void change_protection(struct vm_area_struct *vma,
-		unsigned long addr, unsigned long end, pgprot_t newprot)
+		unsigned long addr, unsigned long end, pgprot_t newprot,
+		int dirty_accountable)
 {
 	struct mm_struct *mm = vma->vm_mm;
 	pgd_t *pgd;
@@ -110,7 +121,7 @@ static void change_protection(struct vm_area_struct *vma,
 		next = pgd_addr_end(addr, end);
 		if (pgd_none_or_clear_bad(pgd))
 			continue;
-		change_pud_range(mm, pgd, addr, next, newprot);
+		change_pud_range(mm, pgd, addr, next, newprot, dirty_accountable);
 	} while (pgd++, addr = next, addr != end);
 	flush_tlb_range(vma, start, end);
 }
@@ -123,10 +134,9 @@ mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
 	unsigned long oldflags = vma->vm_flags;
 	long nrpages = (end - start) >> PAGE_SHIFT;
 	unsigned long charged = 0;
-	unsigned int mask;
-	pgprot_t newprot;
 	pgoff_t pgoff;
 	int error;
+	int dirty_accountable = 0;
 
 	if (newflags == oldflags) {
 		*pprev = vma;
@@ -176,24 +186,23 @@ mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
 	}
 
 success:
-	/* Don't make the VMA automatically writable if it's shared, but the
-	 * backer wishes to know when pages are first written to */
-	mask = VM_READ|VM_WRITE|VM_EXEC|VM_SHARED;
-	if (vma->vm_ops && vma->vm_ops->page_mkwrite)
-		mask &= ~VM_SHARED;
-
-	newprot = protection_map[newflags & mask];
-
 	/*
 	 * vm_flags and vm_page_prot are protected by the mmap_sem
 	 * held in write mode.
 	 */
 	vma->vm_flags = newflags;
-	vma->vm_page_prot = newprot;
+	vma->vm_page_prot = protection_map[newflags &
+		(VM_READ|VM_WRITE|VM_EXEC|VM_SHARED)];
+	if (vma_wants_writenotify(vma)) {
+		vma->vm_page_prot = protection_map[newflags &
+			(VM_READ|VM_WRITE|VM_EXEC)];
+		dirty_accountable = 1;
+	}
+
 	if (is_vm_hugetlb_page(vma))
-		hugetlb_change_protection(vma, start, end, newprot);
+		hugetlb_change_protection(vma, start, end, vma->vm_page_prot);
 	else
-		change_protection(vma, start, end, newprot);
+		change_protection(vma, start, end, vma->vm_page_prot, dirty_accountable);
 	vm_stat_account(mm, oldflags, vma->vm_file, -nrpages);
 	vm_stat_account(mm, newflags, vma->vm_file, nrpages);
 	return 0;
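
The hunks above thread a new dirty_accountable flag from mprotect_fixup() down to change_pte_range(): when vma_wants_writenotify() reports that the mapping's backing store wants to catch first writes (the helper is introduced elsewhere in this series; judging from the code removed above, a shared mapping whose vm_ops provide page_mkwrite is the canonical case), write permission is dropped from vm_page_prot, and only PTEs already known dirty get the write bit back via pte_mkwrite(). From userspace the change is invisible except for an extra minor fault on the first store to each page. The program below is a minimal illustrative sketch, not part of the patch; the file path is arbitrary, and the write-notify fault it provokes is handled transparently by the kernel.

/* Demonstrates the path this patch affects: mprotect() re-enables
 * PROT_WRITE on a shared file mapping, but with write-notify the PTE
 * may stay read-only until the first actual write, which then takes a
 * minor fault so the kernel can account the page as dirty. */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	/* Arbitrary scratch file; any dirty-accountable filesystem works. */
	int fd = open("/tmp/writenotify-demo", O_RDWR | O_CREAT | O_TRUNC, 0600);
	if (fd < 0 || ftruncate(fd, 4096) < 0) {
		perror("setup");
		return 1;
	}

	/* Start read-only so the later mprotect() exercises mprotect_fixup(). */
	char *p = mmap(NULL, 4096, PROT_READ, MAP_SHARED, fd, 0);
	if (p == MAP_FAILED) {
		perror("mmap");
		return 1;
	}

	/* Re-enable write access; with vma_wants_writenotify() true, the
	 * PTE is left read-only here rather than made writable. */
	if (mprotect(p, 4096, PROT_READ | PROT_WRITE) < 0) {
		perror("mprotect");
		return 1;
	}

	/* First store: takes a write-notify fault, then succeeds. */
	strcpy(p, "first write faults once for dirty accounting\n");

	munmap(p, 4096);
	close(fd);
	return 0;
}

At the API level this program behaves identically before and after the patch; the difference is only that the first store now faults once, giving the kernel a hook to mark the page dirty instead of blindly granting write access at mprotect() time.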