author    Peter Zijlstra <a.p.zijlstra@chello.nl>	2009-05-05 11:50:24 -0400
committer Ingo Molnar <mingo@elte.hu>	2009-05-05 14:18:32 -0400
commit    c5078f78b455fbf67ea71442c7e7ca8acf9ff095
tree      bdd3b699919f2b23faf13177954e82b570653711
parent    6de6a7b95705b859b61430fa3afa1403034eb3e6
perf_counter: provide an mlock threshold
Provide a threshold to relax the mlock accounting, increasing usability.

Each counter gets perf_counter_mlock_kb for free.

[ Impact: allow more mmap buffering ]

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Corey Ashford <cjashfor@linux.vnet.ibm.com>
LKML-Reference: <20090505155437.112113632@chello.nl>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
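To make the "free kb" arithmetic concrete, here is a minimal user-space sketch of the computation the patch adds to perf_mmap(), assuming 4 KiB pages (PAGE_SHIFT == 12); charged_pages() is a hypothetical helper introduced only for illustration, the kernel performs the same calculation inline:

/*
 * Minimal user-space sketch (not kernel code) of the threshold
 * accounting, under the assumption of 4 KiB pages (PAGE_SHIFT == 12).
 * charged_pages() is a hypothetical helper mirroring the inline
 * computation in perf_mmap().
 */
#include <stdio.h>

#define PAGE_SHIFT 12				/* assumed: 4 KiB pages */

static int sysctl_perf_counter_mlock = 128;	/* 'free' kb per counter */

static long charged_pages(unsigned long nr_pages)
{
	long extra = (long)nr_pages;

	/* kb -> pages: 128 kb >> (12 - 10) == 32 pages go uncharged */
	extra -= sysctl_perf_counter_mlock >> (PAGE_SHIFT - 10);
	if (extra < 0)
		extra = 0;
	return extra;
}

int main(void)
{
	/* 16 data pages fit entirely inside the free threshold ...  */
	printf("16 pages  -> %ld charged\n", charged_pages(16));  /* 0  */
	/* ... a 128-page buffer is charged only for the excess.    */
	printf("128 pages -> %ld charged\n", charged_pages(128)); /* 96 */
	return 0;
}

With the default threshold of 128 kb and 4 KiB pages, 128 >> (12 - 10) == 32 pages per counter escape the RLIMIT_MEMLOCK charge.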
Diffstat (limited to 'kernel/perf_counter.c')
 kernel/perf_counter.c | 15 +++++++++++----
 1 file changed, 11 insertions(+), 4 deletions(-)
diff --git a/kernel/perf_counter.c b/kernel/perf_counter.c
index 6e6834e0587e..2d1342738305 100644
--- a/kernel/perf_counter.c
+++ b/kernel/perf_counter.c
@@ -44,6 +44,7 @@ static atomic_t nr_munmap_tracking __read_mostly;
 static atomic_t nr_comm_tracking __read_mostly;
 
 int sysctl_perf_counter_priv __read_mostly; /* do we need to be privileged */
+int sysctl_perf_counter_mlock __read_mostly = 128; /* 'free' kb per counter */
 
 /*
  * Lock for (sysadmin-configurable) counter reservations:
@@ -1461,7 +1462,7 @@ static void perf_mmap_close(struct vm_area_struct *vma)
 
 	if (atomic_dec_and_mutex_lock(&counter->mmap_count,
 				      &counter->mmap_mutex)) {
-		vma->vm_mm->locked_vm -= counter->data->nr_pages + 1;
+		vma->vm_mm->locked_vm -= counter->data->nr_locked;
 		perf_mmap_data_free(counter);
 		mutex_unlock(&counter->mmap_mutex);
 	}
@@ -1480,6 +1481,7 @@ static int perf_mmap(struct file *file, struct vm_area_struct *vma)
 	unsigned long nr_pages;
 	unsigned long locked, lock_limit;
 	int ret = 0;
+	long extra;
 
 	if (!(vma->vm_flags & VM_SHARED) || (vma->vm_flags & VM_WRITE))
 		return -EINVAL;
@@ -1507,8 +1509,12 @@ static int perf_mmap(struct file *file, struct vm_area_struct *vma)
 		goto unlock;
 	}
 
-	locked = vma->vm_mm->locked_vm;
-	locked += nr_pages + 1;
+	extra = nr_pages /* + 1 only account the data pages */;
+	extra -= sysctl_perf_counter_mlock >> (PAGE_SHIFT - 10);
+	if (extra < 0)
+		extra = 0;
+
+	locked = vma->vm_mm->locked_vm + extra;
 
 	lock_limit = current->signal->rlim[RLIMIT_MEMLOCK].rlim_cur;
 	lock_limit >>= PAGE_SHIFT;
@@ -1524,7 +1530,8 @@ static int perf_mmap(struct file *file, struct vm_area_struct *vma)
 		goto unlock;
 
 	atomic_set(&counter->mmap_count, 1);
-	vma->vm_mm->locked_vm += nr_pages + 1;
+	vma->vm_mm->locked_vm += extra;
+	counter->data->nr_locked = extra;
 unlock:
 	mutex_unlock(&counter->mmap_mutex);
 