Diffstat (limited to 'mm/mprotect.c')
-rw-r--r--	mm/mprotect.c	30
1 file changed, 16 insertions(+), 14 deletions(-)
diff --git a/mm/mprotect.c b/mm/mprotect.c
index 3dca970367db..94722a4d6b43 100644
--- a/mm/mprotect.c
+++ b/mm/mprotect.c
@@ -114,7 +114,7 @@ static unsigned long change_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
 
 #ifdef CONFIG_NUMA_BALANCING
 static inline void change_pmd_protnuma(struct mm_struct *mm, unsigned long addr,
 				       pmd_t *pmd)
 {
 	spin_lock(&mm->page_table_lock);
 	set_pmd_at(mm, addr & PMD_MASK, pmd, pmd_mknuma(*pmd));
@@ -122,15 +122,15 @@ static inline void change_pmd_protnuma(struct mm_struct *mm, unsigned long addr,
 }
 #else
 static inline void change_pmd_protnuma(struct mm_struct *mm, unsigned long addr,
 				       pmd_t *pmd)
 {
 	BUG();
 }
 #endif /* CONFIG_NUMA_BALANCING */
 
-static inline unsigned long change_pmd_range(struct vm_area_struct *vma, pud_t *pud,
-		unsigned long addr, unsigned long end, pgprot_t newprot,
-		int dirty_accountable, int prot_numa)
+static inline unsigned long change_pmd_range(struct vm_area_struct *vma,
+		pud_t *pud, unsigned long addr, unsigned long end,
+		pgprot_t newprot, int dirty_accountable, int prot_numa)
 {
 	pmd_t *pmd;
 	unsigned long next;
@@ -143,7 +143,8 @@ static inline unsigned long change_pmd_range(struct vm_area_struct *vma, pud_t *
 		if (pmd_trans_huge(*pmd)) {
 			if (next - addr != HPAGE_PMD_SIZE)
 				split_huge_page_pmd(vma, addr, pmd);
-			else if (change_huge_pmd(vma, pmd, addr, newprot, prot_numa)) {
+			else if (change_huge_pmd(vma, pmd, addr, newprot,
+						 prot_numa)) {
 				pages += HPAGE_PMD_NR;
 				continue;
 			}
@@ -167,9 +168,9 @@ static inline unsigned long change_pmd_range(struct vm_area_struct *vma, pud_t *
 	return pages;
 }
 
-static inline unsigned long change_pud_range(struct vm_area_struct *vma, pgd_t *pgd,
-		unsigned long addr, unsigned long end, pgprot_t newprot,
-		int dirty_accountable, int prot_numa)
+static inline unsigned long change_pud_range(struct vm_area_struct *vma,
+		pgd_t *pgd, unsigned long addr, unsigned long end,
+		pgprot_t newprot, int dirty_accountable, int prot_numa)
 {
 	pud_t *pud;
 	unsigned long next;
@@ -304,7 +305,8 @@ success:
 		dirty_accountable = 1;
 	}
 
-	change_protection(vma, start, end, vma->vm_page_prot, dirty_accountable, 0);
+	change_protection(vma, start, end, vma->vm_page_prot,
+			  dirty_accountable, 0);
 
 	vm_stat_account(mm, oldflags, vma->vm_file, -nrpages);
 	vm_stat_account(mm, newflags, vma->vm_file, nrpages);
@@ -361,8 +363,7 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
 		error = -EINVAL;
 		if (!(vma->vm_flags & VM_GROWSDOWN))
 			goto out;
-	}
-	else {
+	} else {
 		if (vma->vm_start > start)
 			goto out;
 		if (unlikely(grows & PROT_GROWSUP)) {
@@ -378,9 +379,10 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
 	for (nstart = start ; ; ) {
 		unsigned long newflags;
 
 		/* Here we know that vma->vm_start <= nstart < vma->vm_end. */
 
-		newflags = vm_flags | (vma->vm_flags & ~(VM_READ | VM_WRITE | VM_EXEC));
+		newflags = vm_flags;
+		newflags |= (vma->vm_flags & ~(VM_READ | VM_WRITE | VM_EXEC));
 
 		/* newflags >> 4 shift VM_MAY% in place of VM_% */
 		if ((newflags & ~(newflags >> 4)) & (VM_READ | VM_WRITE | VM_EXEC)) {