author	Peter Zijlstra <a.p.zijlstra@chello.nl>	2009-05-15 09:19:27 -0400
committer	Ingo Molnar <mingo@elte.hu>	2009-05-15 09:26:56 -0400
commit	789f90fcf6b0b54e655740e9396c954378542c79 (patch)
tree	dccfe1ffac1202729238385923e74a8b5ebab979 /kernel
parent	548e1ddf255b4ebfb4ef20c08936fd8d4deb3bd9 (diff)
perf_counter: per user mlock gift
Instead of a per-process mlock gift for perf-counters, use a per-user
gift so that there is less of a DoS potential.

[ Impact: allow less worst-case unprivileged memory consumption ]

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Corey Ashford <cjashfor@linux.vnet.ibm.com>
Cc: Arnaldo Carvalho de Melo <acme@redhat.com>
LKML-Reference: <20090515132018.496182835@chello.nl>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
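[ Annotation: the accounting scheme this patch introduces can be shown in
isolation. Below is a minimal user-space sketch, assuming 4 KiB pages and a
hypothetical helper may_mmap(); the kernel tracks these counts in
user_struct::locked_vm and mm_struct::locked_vm rather than plain longs. ]

#include <stdbool.h>
#include <stdio.h>

/* Assumed constants mirroring the patch defaults (not kernel API). */
#define PAGE_SHIFT    12   /* 4 KiB pages; architecture-dependent */
#define MLOCK_GIFT_KB 512  /* new sysctl_perf_counter_mlock default */

/*
 * Sketch of the perf_mmap() accounting after this patch: the first
 * user_lock_limit pages a user pins are a "free" gift tracked in
 * user->locked_vm; only the overflow ("extra") is charged against the
 * process's RLIMIT_MEMLOCK via mm->locked_vm.
 */
static bool may_mmap(long user_locked_vm, /* pages this user already pinned */
		     long nr_pages,       /* data pages + 1 control page */
		     long mm_locked_vm,   /* pages charged to this mm */
		     long rlimit_pages,   /* RLIMIT_MEMLOCK in pages */
		     bool cap_ipc_lock, long *extra)
{
	long user_lock_limit = MLOCK_GIFT_KB >> (PAGE_SHIFT - 10); /* KB -> pages */
	long user_locked = user_locked_vm + nr_pages;

	*extra = 0;
	if (user_locked > user_lock_limit)          /* gift exhausted: */
		*extra = user_locked - user_lock_limit; /* charge the remainder */

	/* Same check as the patch: only the overflow counts against the rlimit. */
	return mm_locked_vm + *extra <= rlimit_pages || cap_ipc_lock;
}

int main(void)
{
	long extra;

	/* 16+1 pages fit entirely inside the 128-page per-user gift. */
	printf("ok=%d extra=%ld\n", may_mmap(0, 17, 0, 16, false, &extra), extra);

	/* 120 pages already pinned: 9 of the next 17 spill past the gift
	 * and are charged to the mm, still within the 16-page rlimit. */
	printf("ok=%d extra=%ld\n", may_mmap(120, 17, 0, 16, false, &extra), extra);
	return 0;
}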
Diffstat (limited to 'kernel')
-rw-r--r--	kernel/perf_counter.c	22	+++++++++++++++-------
 1 file changed, 15 insertions(+), 7 deletions(-)
diff --git a/kernel/perf_counter.c b/kernel/perf_counter.c
index 0173738dd548..93f4a0e4b873 100644
--- a/kernel/perf_counter.c
+++ b/kernel/perf_counter.c
@@ -45,7 +45,7 @@ static atomic_t nr_munmap_tracking __read_mostly;
 static atomic_t nr_comm_tracking __read_mostly;
 
 int sysctl_perf_counter_priv __read_mostly; /* do we need to be privileged */
-int sysctl_perf_counter_mlock __read_mostly = 128; /* 'free' kb per counter */
+int sysctl_perf_counter_mlock __read_mostly = 512; /* 'free' kb per user */
 
 /*
  * Lock for (sysadmin-configurable) counter reservations:
@@ -1522,6 +1522,9 @@ static void perf_mmap_close(struct vm_area_struct *vma)
 
 	if (atomic_dec_and_mutex_lock(&counter->mmap_count,
 				      &counter->mmap_mutex)) {
+		struct user_struct *user = current_user();
+
+		atomic_long_sub(counter->data->nr_pages + 1, &user->locked_vm);
 		vma->vm_mm->locked_vm -= counter->data->nr_locked;
 		perf_mmap_data_free(counter);
 		mutex_unlock(&counter->mmap_mutex);
@@ -1537,11 +1540,13 @@ static struct vm_operations_struct perf_mmap_vmops = {
 static int perf_mmap(struct file *file, struct vm_area_struct *vma)
 {
 	struct perf_counter *counter = file->private_data;
+	struct user_struct *user = current_user();
 	unsigned long vma_size;
 	unsigned long nr_pages;
+	unsigned long user_locked, user_lock_limit;
 	unsigned long locked, lock_limit;
+	long user_extra, extra;
 	int ret = 0;
-	long extra;
 
 	if (!(vma->vm_flags & VM_SHARED) || (vma->vm_flags & VM_WRITE))
 		return -EINVAL;
@@ -1569,15 +1574,17 @@ static int perf_mmap(struct file *file, struct vm_area_struct *vma)
 		goto unlock;
 	}
 
-	extra = nr_pages /* + 1 only account the data pages */;
-	extra -= sysctl_perf_counter_mlock >> (PAGE_SHIFT - 10);
-	if (extra < 0)
-		extra = 0;
+	user_extra = nr_pages + 1;
+	user_lock_limit = sysctl_perf_counter_mlock >> (PAGE_SHIFT - 10);
+	user_locked = atomic_long_read(&user->locked_vm) + user_extra;
 
-	locked = vma->vm_mm->locked_vm + extra;
+	extra = 0;
+	if (user_locked > user_lock_limit)
+		extra = user_locked - user_lock_limit;
 
 	lock_limit = current->signal->rlim[RLIMIT_MEMLOCK].rlim_cur;
 	lock_limit >>= PAGE_SHIFT;
+	locked = vma->vm_mm->locked_vm + extra;
 
 	if ((locked > lock_limit) && !capable(CAP_IPC_LOCK)) {
 		ret = -EPERM;
@@ -1590,6 +1597,7 @@ static int perf_mmap(struct file *file, struct vm_area_struct *vma)
 		goto unlock;
 
 	atomic_set(&counter->mmap_count, 1);
+	atomic_long_add(user_extra, &user->locked_vm);
 	vma->vm_mm->locked_vm += extra;
 	counter->data->nr_locked = extra;
 unlock:
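[ Annotation: a note on the unit conversion in the new user_lock_limit line.
sysctl_perf_counter_mlock is expressed in kilobytes, and shifting right by
(PAGE_SHIFT - 10) divides by the page size in KB. Assuming 4 KiB pages
(PAGE_SHIFT = 12; the value is architecture-dependent), the new default gift
is 512 >> 2 = 128 pages shared per user, where the old default gave each
counter 128 >> 2 = 32 pages. The "+ 1" in "user_extra = nr_pages + 1" also
accounts for the control page that the old code deliberately left out
("only account the data pages"). ]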