author	Christoph Lameter <cl@linux.com>	2011-10-31 20:07:30 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2011-10-31 20:30:46 -0400
commit	bc3e53f682d93df677dbd5006a404722b3adfe18 (patch)
tree	f386c29f13626e2b7d98d5a52525a78a9b59e447 /kernel
parent	f11c0ca501af89fc07b0d9f17531ba3b68a4ef39 (diff)
mm: distinguish between mlocked and pinned pages
Some kernel components pin user space memory (infiniband and perf) (by increasing the page count) and account that memory as "mlocked".

The difference between mlocking and pinning is:

A. mlocked pages are marked with PG_mlocked and are exempt from
   swapping. Page migration may move them around though.
   They are kept on a special LRU list.

B. Pinned pages cannot be moved because something needs to
   directly access physical memory. They may not be on any
   LRU list.

I recently saw an mlockall()ed process where mm->locked_vm became bigger than the virtual size of the process (!) because some memory was accounted for twice: once when the page was mlocked and once when the Infiniband layer increased the refcount because it needed to pin the RDMA memory.

This patch introduces a separate counter for pinned pages and accounts them separately.

Signed-off-by: Christoph Lameter <cl@linux.com>
Cc: Mike Marciniszyn <infinipath@qlogic.com>
Cc: Roland Dreier <roland@kernel.org>
Cc: Sean Hefty <sean.hefty@intel.com>
Cc: Hugh Dickins <hughd@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
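To make the double-accounting concrete, here is a minimal user-space C sketch (not kernel code; struct mm_accounting, account_before, and account_after are hypothetical stand-ins for the mm_struct counters this patch splits). It shows how charging both an mlock and a pin of the same pages against a single locked_vm counter inflates it past the process's virtual size, and how a separate pinned_vm counter keeps the two accounts distinct:

#include <stdio.h>

/* Hypothetical stand-in for the mm_struct accounting fields. */
struct mm_accounting {
	unsigned long total_vm;  /* pages mapped in the address space   */
	unsigned long locked_vm; /* pages mlock()ed (PG_mlocked)        */
	unsigned long pinned_vm; /* pages pinned (elevated refcount)    */
};

/* Before the patch: both paths charged locked_vm. */
static void account_before(struct mm_accounting *mm, unsigned long pages)
{
	mm->locked_vm += pages;  /* mlock() path            */
	mm->locked_vm += pages;  /* RDMA pin of same pages  */
}

/* After the patch: pinning is charged to its own counter. */
static void account_after(struct mm_accounting *mm, unsigned long pages)
{
	mm->locked_vm += pages;  /* mlock() path            */
	mm->pinned_vm += pages;  /* RDMA pin of same pages  */
}

int main(void)
{
	struct mm_accounting before = { .total_vm = 100 };
	struct mm_accounting after  = { .total_vm = 100 };

	/* mlockall()ed process whose 100 pages are also pinned for RDMA. */
	account_before(&before, 100);
	account_after(&after, 100);

	printf("before: locked_vm=%lu > total_vm=%lu (!)\n",
	       before.locked_vm, before.total_vm);
	printf("after:  locked_vm=%lu, pinned_vm=%lu, total_vm=%lu\n",
	       after.locked_vm, after.pinned_vm, after.total_vm);
	return 0;
}

Printed against a 100-page address space, the "before" counter reads 200 locked pages, which is exactly the impossible VmLck-greater-than-VmSize symptom described above; the "after" split leaves each counter at 100.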
Diffstat (limited to 'kernel')
 kernel/events/core.c | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)
diff --git a/kernel/events/core.c b/kernel/events/core.c
index d1a1bee35228..12a0287e0358 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -3544,7 +3544,7 @@ static void perf_mmap_close(struct vm_area_struct *vma)
 	struct ring_buffer *rb = event->rb;
 
 	atomic_long_sub((size >> PAGE_SHIFT) + 1, &user->locked_vm);
-	vma->vm_mm->locked_vm -= event->mmap_locked;
+	vma->vm_mm->pinned_vm -= event->mmap_locked;
 	rcu_assign_pointer(event->rb, NULL);
 	mutex_unlock(&event->mmap_mutex);
 
@@ -3625,7 +3625,7 @@ static int perf_mmap(struct file *file, struct vm_area_struct *vma)
 
 	lock_limit = rlimit(RLIMIT_MEMLOCK);
 	lock_limit >>= PAGE_SHIFT;
-	locked = vma->vm_mm->locked_vm + extra;
+	locked = vma->vm_mm->pinned_vm + extra;
 
 	if ((locked > lock_limit) && perf_paranoid_tracepoint_raw() &&
 		!capable(CAP_IPC_LOCK)) {
@@ -3651,7 +3651,7 @@ static int perf_mmap(struct file *file, struct vm_area_struct *vma)
 	atomic_long_add(user_extra, &user->locked_vm);
 	event->mmap_locked = extra;
 	event->mmap_user = get_current_user();
-	vma->vm_mm->locked_vm += event->mmap_locked;
+	vma->vm_mm->pinned_vm += event->mmap_locked;
 
 unlock:
 	if (!ret)