author    Linus Torvalds <torvalds@ppc970.osdl.org>  2005-04-16 18:20:36 -0400
committer Linus Torvalds <torvalds@ppc970.osdl.org>  2005-04-16 18:20:36 -0400
commit    1da177e4c3f41524e886b7f1b8a0c1fc7321cac2 (patch)
tree      0bba044c4ce775e45a88a51686b5d9f90697ea9d /mm/mprotect.c
Linux-2.6.12-rc2
Initial git repository build. I'm not bothering with the full history,
even though we have it. We can create a separate "historical" git
archive of that later if we want to, and in the meantime it's about
3.2GB when imported into git - space that would just make the early
git days unnecessarily complicated, when we don't have a lot of good
infrastructure for it.
Let it rip!
Diffstat (limited to 'mm/mprotect.c')
-rw-r--r--  mm/mprotect.c  282
1 file changed, 282 insertions, 0 deletions
diff --git a/mm/mprotect.c b/mm/mprotect.c
new file mode 100644
index 00000000000..e9fbd013ad9
--- /dev/null
+++ b/mm/mprotect.c
@@ -0,0 +1,282 @@
/*
 * mm/mprotect.c
 *
 * (C) Copyright 1994 Linus Torvalds
 * (C) Copyright 2002 Christoph Hellwig
 *
 * Address space accounting code <alan@redhat.com>
 * (C) Copyright 2002 Red Hat Inc, All Rights Reserved
 */

#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/slab.h>
#include <linux/shm.h>
#include <linux/mman.h>
#include <linux/fs.h>
#include <linux/highmem.h>
#include <linux/security.h>
#include <linux/mempolicy.h>
#include <linux/personality.h>
#include <linux/syscalls.h>

#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>

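/*
 * Change the protection on all present ptes covered by one pmd entry.
 * Called with mm->page_table_lock held.
 */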
static void change_pte_range(struct mm_struct *mm, pmd_t *pmd,
		unsigned long addr, unsigned long end, pgprot_t newprot)
{
	pte_t *pte;

	pte = pte_offset_map(pmd, addr);
	do {
		if (pte_present(*pte)) {
			pte_t ptent;

			/* Avoid an SMP race with hardware updated dirty/clean
			 * bits by wiping the pte and then setting the new pte
			 * into place.
			 */
			ptent = pte_modify(ptep_get_and_clear(mm, addr, pte), newprot);
			set_pte_at(mm, addr, pte, ptent);
			lazy_mmu_prot_update(ptent);
		}
	} while (pte++, addr += PAGE_SIZE, addr != end);
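	/*
	 * The loop leaves pte one past the last entry processed: undo
	 * the pte_offset_map() mapping through that last entry.
	 */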
	pte_unmap(pte - 1);
}

static inline void change_pmd_range(struct mm_struct *mm, pud_t *pud,
		unsigned long addr, unsigned long end, pgprot_t newprot)
{
	pmd_t *pmd;
	unsigned long next;

	pmd = pmd_offset(pud, addr);
	do {
		next = pmd_addr_end(addr, end);
		if (pmd_none_or_clear_bad(pmd))
			continue;
		change_pte_range(mm, pmd, addr, next, newprot);
	} while (pmd++, addr = next, addr != end);
}

static inline void change_pud_range(struct mm_struct *mm, pgd_t *pgd,
		unsigned long addr, unsigned long end, pgprot_t newprot)
{
	pud_t *pud;
	unsigned long next;

	pud = pud_offset(pgd, addr);
	do {
		next = pud_addr_end(addr, end);
		if (pud_none_or_clear_bad(pud))
			continue;
		change_pmd_range(mm, pud, addr, next, newprot);
	} while (pud++, addr = next, addr != end);
}

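/*
 * Walk the whole pgd/pud/pmd/pte hierarchy for [addr, end) and apply
 * newprot to every present pte: flush caches before the rewrite and
 * the TLB afterwards.
 */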
static void change_protection(struct vm_area_struct *vma,
		unsigned long addr, unsigned long end, pgprot_t newprot)
{
	struct mm_struct *mm = vma->vm_mm;
	pgd_t *pgd;
	unsigned long next;
	unsigned long start = addr;

	BUG_ON(addr >= end);
	pgd = pgd_offset(mm, addr);
	flush_cache_range(vma, addr, end);
	spin_lock(&mm->page_table_lock);
	do {
		next = pgd_addr_end(addr, end);
		if (pgd_none_or_clear_bad(pgd))
			continue;
		change_pud_range(mm, pgd, addr, next, newprot);
	} while (pgd++, addr = next, addr != end);
	flush_tlb_range(vma, start, end);
	spin_unlock(&mm->page_table_lock);
}

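/*
 * Apply newflags to the [start, end) slice of vma: merge with an
 * adjacent vma where possible, otherwise split off the affected
 * range, then rewrite the page protections.
 */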
static int
mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
	unsigned long start, unsigned long end, unsigned long newflags)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned long oldflags = vma->vm_flags;
	long nrpages = (end - start) >> PAGE_SHIFT;
	unsigned long charged = 0;
	pgprot_t newprot;
	pgoff_t pgoff;
	int error;

	if (newflags == oldflags) {
		*pprev = vma;
		return 0;
	}

	/*
	 * If we make a private mapping writable we increase our commit;
	 * but (without finer accounting) cannot reduce our commit if we
	 * make it unwritable again.
	 *
	 * FIXME? We haven't defined a VM_NORESERVE flag, so mprotecting
	 * a MAP_NORESERVE private mapping to writable will now reserve.
	 */
	if (newflags & VM_WRITE) {
		if (!(oldflags & (VM_ACCOUNT|VM_WRITE|VM_SHARED|VM_HUGETLB))) {
			charged = nrpages;
			if (security_vm_enough_memory(charged))
				return -ENOMEM;
			newflags |= VM_ACCOUNT;
		}
	}

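	/*
	 * The low four flag bits (VM_READ, VM_WRITE, VM_EXEC, VM_SHARED)
	 * index the architecture's protection_map to yield the pgprot bits.
	 */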
	newprot = protection_map[newflags & 0xf];

	/*
	 * First try to merge with previous and/or next vma.
	 */
	pgoff = vma->vm_pgoff + ((start - vma->vm_start) >> PAGE_SHIFT);
	*pprev = vma_merge(mm, *pprev, start, end, newflags,
			vma->anon_vma, vma->vm_file, pgoff, vma_policy(vma));
	if (*pprev) {
		vma = *pprev;
		goto success;
	}

	*pprev = vma;

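	/*
	 * No merge was possible: split the vma so that only [start, end)
	 * carries the new flags.
	 */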
	if (start != vma->vm_start) {
		error = split_vma(mm, vma, start, 1);
		if (error)
			goto fail;
	}

	if (end != vma->vm_end) {
		error = split_vma(mm, vma, end, 0);
		if (error)
			goto fail;
	}

success:
	/*
	 * vm_flags and vm_page_prot are protected by the mmap_sem
	 * held in write mode.
	 */
	vma->vm_flags = newflags;
	vma->vm_page_prot = newprot;
	change_protection(vma, start, end, newprot);
	__vm_stat_account(mm, oldflags, vma->vm_file, -nrpages);
	__vm_stat_account(mm, newflags, vma->vm_file, nrpages);
	return 0;

fail:
	vm_unacct_memory(charged);
	return error;
}

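/*
 * mprotect(2): change the access protection of [start, start + len).
 * start must be page aligned; len is rounded up to a whole number of
 * pages.
 */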
asmlinkage long
sys_mprotect(unsigned long start, size_t len, unsigned long prot)
{
	unsigned long vm_flags, nstart, end, tmp, reqprot;
	struct vm_area_struct *vma, *prev;
	int error = -EINVAL;
	const int grows = prot & (PROT_GROWSDOWN|PROT_GROWSUP);
	prot &= ~(PROT_GROWSDOWN|PROT_GROWSUP);
	if (grows == (PROT_GROWSDOWN|PROT_GROWSUP)) /* can't be both */
		return -EINVAL;

	if (start & ~PAGE_MASK)
		return -EINVAL;
	if (!len)
		return 0;
	len = PAGE_ALIGN(len);
	end = start + len;
	if (end <= start)
		return -ENOMEM;
	if (prot & ~(PROT_READ | PROT_WRITE | PROT_EXEC | PROT_SEM))
		return -EINVAL;

	reqprot = prot;
	/*
	 * Does the application expect PROT_READ to imply PROT_EXEC?
	 */
	if (unlikely((prot & PROT_READ) &&
			(current->personality & READ_IMPLIES_EXEC)))
		prot |= PROT_EXEC;

	vm_flags = calc_vm_prot_bits(prot);

	down_write(&current->mm->mmap_sem);

	vma = find_vma_prev(current->mm, start, &prev);
	error = -ENOMEM;
	if (!vma)
		goto out;
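	/*
	 * PROT_GROWSDOWN extends the change down to the start of a
	 * stack vma; PROT_GROWSUP extends it up to the vma's end.
	 */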
	if (unlikely(grows & PROT_GROWSDOWN)) {
		if (vma->vm_start >= end)
			goto out;
		start = vma->vm_start;
		error = -EINVAL;
		if (!(vma->vm_flags & VM_GROWSDOWN))
			goto out;
	} else {
		if (vma->vm_start > start)
			goto out;
		if (unlikely(grows & PROT_GROWSUP)) {
			end = vma->vm_end;
			error = -EINVAL;
			if (!(vma->vm_flags & VM_GROWSUP))
				goto out;
		}
	}
	if (start > vma->vm_start)
		prev = vma;

	for (nstart = start ; ; ) {
		unsigned long newflags;

		/* Here we know that vma->vm_start <= nstart < vma->vm_end. */

		if (is_vm_hugetlb_page(vma)) {
			error = -EACCES;
			goto out;
		}

		newflags = vm_flags | (vma->vm_flags & ~(VM_READ | VM_WRITE | VM_EXEC));

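		/*
		 * newflags >> 4 shifts the VM_MAY% bits down over the
		 * VM_% bits: reject any requested permission that is not
		 * backed by the corresponding VM_MAY* right.
		 */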
		if ((newflags & ~(newflags >> 4)) & 0xf) {
			error = -EACCES;
			goto out;
		}

		error = security_file_mprotect(vma, reqprot, prot);
		if (error)
			goto out;

		tmp = vma->vm_end;
		if (tmp > end)
			tmp = end;
		error = mprotect_fixup(vma, &prev, nstart, tmp, newflags);
		if (error)
			goto out;
		nstart = tmp;

		if (nstart < prev->vm_end)
			nstart = prev->vm_end;
		if (nstart >= end)
			goto out;

		vma = prev->vm_next;
		if (!vma || vma->vm_start != nstart) {
			error = -ENOMEM;
			goto out;
		}
	}
out:
	up_write(&current->mm->mmap_sem);
	return error;
}
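
For context, here is a minimal userspace sketch (not part of this commit) of how the syscall above is typically exercised; it assumes only the standard POSIX mmap()/mprotect() interfaces.

/* Userspace sketch: map a page read-write, then revoke write access.
 * The mprotect() call below is what drives the pte rewrite in the
 * kernel code above. */
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	long page = sysconf(_SC_PAGESIZE);
	char *p = mmap(NULL, page, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (p == MAP_FAILED)
		return 1;

	strcpy(p, "hello");

	/* Drop write permission on the page. */
	if (mprotect(p, page, PROT_READ))
		return 1;

	printf("%s\n", p);	/* reading is still allowed */
	/* *p = 'x'; would now fault with SIGSEGV */

	munmap(p, page);
	return 0;
}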