Diffstat (limited to 'mm/mlock.c')
 -rw-r--r--  mm/mlock.c | 73
 1 file changed, 21 insertions(+), 52 deletions(-)
diff --git a/mm/mlock.c b/mm/mlock.c
index cbe9e0581b75..45eb650b9654 100644
--- a/mm/mlock.c
+++ b/mm/mlock.c
@@ -31,7 +31,6 @@ int can_do_mlock(void)
 }
 EXPORT_SYMBOL(can_do_mlock);
 
-#ifdef CONFIG_UNEVICTABLE_LRU
 /*
  * Mlocked pages are marked with PageMlocked() flag for efficient testing
  * in vmscan and, possibly, the fault path; and to support semi-accurate
@@ -261,27 +260,6 @@ static int __mlock_posix_error_return(long retval)
 	return retval;
 }
 
-#else /* CONFIG_UNEVICTABLE_LRU */
-
-/*
- * Just make pages present if VM_LOCKED.  No-op if unlocking.
- */
-static long __mlock_vma_pages_range(struct vm_area_struct *vma,
-			unsigned long start, unsigned long end,
-			int mlock)
-{
-	if (mlock && (vma->vm_flags & VM_LOCKED))
-		return make_pages_present(start, end);
-	return 0;
-}
-
-static inline int __mlock_posix_error_return(long retval)
-{
-	return 0;
-}
-
-#endif /* CONFIG_UNEVICTABLE_LRU */
-
 /**
  * mlock_vma_pages_range() - mlock pages in specified vma range.
  * @vma - the vma containing the specfied address range
@@ -629,52 +607,43 @@ void user_shm_unlock(size_t size, struct user_struct *user)
 	free_uid(user);
 }
 
-void *alloc_locked_buffer(size_t size)
+int account_locked_memory(struct mm_struct *mm, struct rlimit *rlim,
+			  size_t size)
 {
-	unsigned long rlim, vm, pgsz;
-	void *buffer = NULL;
+	unsigned long lim, vm, pgsz;
+	int error = -ENOMEM;
 
 	pgsz = PAGE_ALIGN(size) >> PAGE_SHIFT;
 
-	down_write(&current->mm->mmap_sem);
+	down_write(&mm->mmap_sem);
 
-	rlim = current->signal->rlim[RLIMIT_AS].rlim_cur >> PAGE_SHIFT;
-	vm = current->mm->total_vm + pgsz;
-	if (rlim < vm)
+	lim = rlim[RLIMIT_AS].rlim_cur >> PAGE_SHIFT;
+	vm = mm->total_vm + pgsz;
+	if (lim < vm)
 		goto out;
 
-	rlim = current->signal->rlim[RLIMIT_MEMLOCK].rlim_cur >> PAGE_SHIFT;
-	vm = current->mm->locked_vm + pgsz;
-	if (rlim < vm)
+	lim = rlim[RLIMIT_MEMLOCK].rlim_cur >> PAGE_SHIFT;
+	vm = mm->locked_vm + pgsz;
+	if (lim < vm)
 		goto out;
 
-	buffer = kzalloc(size, GFP_KERNEL);
-	if (!buffer)
-		goto out;
-
-	current->mm->total_vm += pgsz;
-	current->mm->locked_vm += pgsz;
+	mm->total_vm += pgsz;
+	mm->locked_vm += pgsz;
 
+	error = 0;
  out:
-	up_write(&current->mm->mmap_sem);
-	return buffer;
+	up_write(&mm->mmap_sem);
+	return error;
 }
 
-void release_locked_buffer(void *buffer, size_t size)
+void refund_locked_memory(struct mm_struct *mm, size_t size)
 {
 	unsigned long pgsz = PAGE_ALIGN(size) >> PAGE_SHIFT;
 
-	down_write(&current->mm->mmap_sem);
-
-	current->mm->total_vm -= pgsz;
-	current->mm->locked_vm -= pgsz;
-
-	up_write(&current->mm->mmap_sem);
-}
+	down_write(&mm->mmap_sem);
 
-void free_locked_buffer(void *buffer, size_t size)
-{
-	release_locked_buffer(buffer, size);
+	mm->total_vm -= pgsz;
+	mm->locked_vm -= pgsz;
 
-	kfree(buffer);
+	up_write(&mm->mmap_sem);
 }
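
For context, a minimal sketch of how a caller might pair the two new helpers. It is not part of the commit above: account_locked_memory() now only charges mm->total_vm and mm->locked_vm against the supplied rlimits, so the buffer allocation that alloc_locked_buffer() used to do becomes the caller's job. The wrapper name alloc_accounted_buffer() is hypothetical.

#include <linux/mm.h>
#include <linux/slab.h>

/*
 * Hypothetical caller: charge the locked-memory accounting first, then
 * allocate, and refund the charge if the allocation fails.
 */
static void *alloc_accounted_buffer(struct mm_struct *mm,
				    struct rlimit *rlim, size_t size)
{
	void *buffer;

	if (account_locked_memory(mm, rlim, size))	/* returns -ENOMEM on failure */
		return NULL;

	buffer = kzalloc(size, GFP_KERNEL);
	if (!buffer)
		refund_locked_memory(mm, size);		/* undo the charge */

	return buffer;
}

Freeing would mirror this: kfree() the buffer, then refund_locked_memory() for the same size.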