Diffstat (limited to 'mm/mremap.c')
-rw-r--r--	mm/mremap.c	88
1 file changed, 55 insertions, 33 deletions
diff --git a/mm/mremap.c b/mm/mremap.c
index 97bff2547719..67761361c469 100644
--- a/mm/mremap.c
+++ b/mm/mremap.c
@@ -261,6 +261,58 @@ static unsigned long move_vma(struct vm_area_struct *vma,
 	return new_addr;
 }
 
+static struct vm_area_struct *vma_to_resize(unsigned long addr,
+	unsigned long old_len, unsigned long new_len, unsigned long *p)
+{
+	struct mm_struct *mm = current->mm;
+	struct vm_area_struct *vma = find_vma(mm, addr);
+
+	if (!vma || vma->vm_start > addr)
+		goto Efault;
+
+	if (is_vm_hugetlb_page(vma))
+		goto Einval;
+
+	/* We can't remap across vm area boundaries */
+	if (old_len > vma->vm_end - addr)
+		goto Efault;
+
+	if (vma->vm_flags & (VM_DONTEXPAND | VM_PFNMAP)) {
+		if (new_len > old_len)
+			goto Efault;
+	}
+
+	if (vma->vm_flags & VM_LOCKED) {
+		unsigned long locked, lock_limit;
+		locked = mm->locked_vm << PAGE_SHIFT;
+		lock_limit = current->signal->rlim[RLIMIT_MEMLOCK].rlim_cur;
+		locked += new_len - old_len;
+		if (locked > lock_limit && !capable(CAP_IPC_LOCK))
+			goto Eagain;
+	}
+
+	if (!may_expand_vm(mm, (new_len - old_len) >> PAGE_SHIFT))
+		goto Enomem;
+
+	if (vma->vm_flags & VM_ACCOUNT) {
+		unsigned long charged = (new_len - old_len) >> PAGE_SHIFT;
+		if (security_vm_enough_memory(charged))
+			goto Efault;
+		*p = charged;
+	}
+
+	return vma;
+
+Efault:	/* very odd choice for most of the cases, but... */
+	return ERR_PTR(-EFAULT);
+Einval:
+	return ERR_PTR(-EINVAL);
+Enomem:
+	return ERR_PTR(-ENOMEM);
+Eagain:
+	return ERR_PTR(-EAGAIN);
+}
+
 /*
  * Expand (or shrink) an existing mapping, potentially moving it at the
  * same time (controlled by the MREMAP_MAYMOVE flag and available VM space)
@@ -340,41 +392,12 @@ unsigned long do_mremap(unsigned long addr,
 	/*
 	 * Ok, we need to grow.. or relocate.
 	 */
-	ret = -EFAULT;
-	vma = find_vma(mm, addr);
-	if (!vma || vma->vm_start > addr)
-		goto out;
-	if (is_vm_hugetlb_page(vma)) {
-		ret = -EINVAL;
-		goto out;
-	}
-	/* We can't remap across vm area boundaries */
-	if (old_len > vma->vm_end - addr)
-		goto out;
-	if (vma->vm_flags & (VM_DONTEXPAND | VM_PFNMAP)) {
-		if (new_len > old_len)
-			goto out;
-	}
-	if (vma->vm_flags & VM_LOCKED) {
-		unsigned long locked, lock_limit;
-		locked = mm->locked_vm << PAGE_SHIFT;
-		lock_limit = current->signal->rlim[RLIMIT_MEMLOCK].rlim_cur;
-		locked += new_len - old_len;
-		ret = -EAGAIN;
-		if (locked > lock_limit && !capable(CAP_IPC_LOCK))
-			goto out;
-	}
-	if (!may_expand_vm(mm, (new_len - old_len) >> PAGE_SHIFT)) {
-		ret = -ENOMEM;
+	vma = vma_to_resize(addr, old_len, new_len, &charged);
+	if (IS_ERR(vma)) {
+		ret = PTR_ERR(vma);
 		goto out;
 	}
 
-	if (vma->vm_flags & VM_ACCOUNT) {
-		charged = (new_len - old_len) >> PAGE_SHIFT;
-		if (security_vm_enough_memory(charged))
-			goto out_nc;
-	}
-
 	/* old_len exactly to the end of the area..
 	 * And we're not relocating the area.
 	 */
@@ -430,7 +453,6 @@ unsigned long do_mremap(unsigned long addr,
 out:
 	if (ret & ~PAGE_MASK)
 		vm_unacct_memory(charged);
-out_nc:
 	return ret;
 }
 
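The helper introduced above reports failure through the kernel's error-pointer convention: vma_to_resize() encodes a negative errno directly in the returned pointer with ERR_PTR(), and the rewritten call site in do_mremap() unwinds it with IS_ERR() and PTR_ERR(). The standalone sketch below only illustrates that convention; the minimal ERR_PTR/IS_ERR/PTR_ERR copies and the toy lookup() routine are hypothetical userspace stand-ins, not the kernel's definitions from <linux/err.h>.

/*
 * Userspace sketch of the error-pointer idiom used by vma_to_resize():
 * a function returns either a valid pointer or a small negative errno
 * encoded in the pointer itself.  These helpers mimic <linux/err.h>
 * for illustration only.
 */
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

#define MAX_ERRNO	4095

static inline void *ERR_PTR(long error)      { return (void *)error; }
static inline long PTR_ERR(const void *ptr)  { return (long)ptr; }
static inline int IS_ERR(const void *ptr)
{
	/* Error pointers live in the top MAX_ERRNO values of the address space. */
	return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

struct vma { unsigned long start, end; };

/* Toy stand-in for vma_to_resize(): fail with -EFAULT if addr is unmapped. */
static struct vma *lookup(unsigned long addr)
{
	static struct vma v = { 0x1000, 0x2000 };

	if (addr < v.start || addr >= v.end)
		return ERR_PTR(-EFAULT);
	return &v;
}

int main(void)
{
	struct vma *vma = lookup(0x5000);

	if (IS_ERR(vma)) {
		/* Mirrors the new do_mremap() call site: ret = PTR_ERR(vma). */
		printf("lookup failed: %ld\n", PTR_ERR(vma));
		return EXIT_FAILURE;
	}
	printf("vma [%#lx, %#lx)\n", vma->start, vma->end);
	return EXIT_SUCCESS;
}

Folding the error code into the returned pointer is what lets the second hunk collapse the old sequence of per-check "ret = -E...; goto out;" branches into a single "ret = PTR_ERR(vma); goto out;" path, which in turn makes the out_nc label unnecessary.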