aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
-rw-r--r--include/linux/sched.h4
-rw-r--r--kernel/perf_counter.c22
2 files changed, 19 insertions, 7 deletions
diff --git a/include/linux/sched.h b/include/linux/sched.h
index d1857580a132..ff59d1231519 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -674,6 +674,10 @@ struct user_struct {
 	struct work_struct work;
 #endif
 #endif
+
+#ifdef CONFIG_PERF_COUNTERS
+	atomic_long_t locked_vm;
+#endif
 };
 
 extern int uids_sysfs_init(void);
diff --git a/kernel/perf_counter.c b/kernel/perf_counter.c
index 0173738dd548..93f4a0e4b873 100644
--- a/kernel/perf_counter.c
+++ b/kernel/perf_counter.c
@@ -45,7 +45,7 @@ static atomic_t nr_munmap_tracking __read_mostly;
 static atomic_t nr_comm_tracking __read_mostly;
 
 int sysctl_perf_counter_priv __read_mostly; /* do we need to be privileged */
-int sysctl_perf_counter_mlock __read_mostly = 128; /* 'free' kb per counter */
+int sysctl_perf_counter_mlock __read_mostly = 512; /* 'free' kb per user */
 
 /*
  * Lock for (sysadmin-configurable) counter reservations:
@@ -1522,6 +1522,9 @@ static void perf_mmap_close(struct vm_area_struct *vma)
 
 	if (atomic_dec_and_mutex_lock(&counter->mmap_count,
 				      &counter->mmap_mutex)) {
+		struct user_struct *user = current_user();
+
+		atomic_long_sub(counter->data->nr_pages + 1, &user->locked_vm);
 		vma->vm_mm->locked_vm -= counter->data->nr_locked;
 		perf_mmap_data_free(counter);
 		mutex_unlock(&counter->mmap_mutex);
@@ -1537,11 +1540,13 @@ static struct vm_operations_struct perf_mmap_vmops = {
 static int perf_mmap(struct file *file, struct vm_area_struct *vma)
 {
 	struct perf_counter *counter = file->private_data;
+	struct user_struct *user = current_user();
 	unsigned long vma_size;
 	unsigned long nr_pages;
+	unsigned long user_locked, user_lock_limit;
 	unsigned long locked, lock_limit;
+	long user_extra, extra;
 	int ret = 0;
-	long extra;
 
 	if (!(vma->vm_flags & VM_SHARED) || (vma->vm_flags & VM_WRITE))
 		return -EINVAL;
@@ -1569,15 +1574,17 @@ static int perf_mmap(struct file *file, struct vm_area_struct *vma)
 		goto unlock;
 	}
 
-	extra = nr_pages /* + 1 only account the data pages */;
-	extra -= sysctl_perf_counter_mlock >> (PAGE_SHIFT - 10);
-	if (extra < 0)
-		extra = 0;
+	user_extra = nr_pages + 1;
+	user_lock_limit = sysctl_perf_counter_mlock >> (PAGE_SHIFT - 10);
+	user_locked = atomic_long_read(&user->locked_vm) + user_extra;
 
-	locked = vma->vm_mm->locked_vm + extra;
+	extra = 0;
+	if (user_locked > user_lock_limit)
+		extra = user_locked - user_lock_limit;
 
 	lock_limit = current->signal->rlim[RLIMIT_MEMLOCK].rlim_cur;
 	lock_limit >>= PAGE_SHIFT;
+	locked = vma->vm_mm->locked_vm + extra;
 
 	if ((locked > lock_limit) && !capable(CAP_IPC_LOCK)) {
 		ret = -EPERM;
@@ -1590,6 +1597,7 @@ static int perf_mmap(struct file *file, struct vm_area_struct *vma)
 		goto unlock;
 
 	atomic_set(&counter->mmap_count, 1);
+	atomic_long_add(user_extra, &user->locked_vm);
 	vma->vm_mm->locked_vm += extra;
 	counter->data->nr_locked = extra;
 unlock: