about summary refs log tree commit diff stats
diff options
context:
space:
mode:
authorPeter Zijlstra <a.p.zijlstra@chello.nl>2009-04-09 04:53:44 -0400
committerIngo Molnar <mingo@elte.hu>2009-04-09 05:50:43 -0400
commit9ee318a7825929bc3734110b83ae8e20e53d9de3 (patch)
tree4823b4e82bb99971559d3569c7f4d710ac7f232d
parentb3828ebb3901adfe989d8d4157ed28247aeec132 (diff)
perf_counter: optimize mmap/comm tracking
Impact: performance optimization

The mmap/comm tracking code does quite a lot of work before it discovers
there's no interest in it; avoid that by keeping a counter.

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Corey Ashford <cjashfor@linux.vnet.ibm.com>
LKML-Reference: <20090409085524.427173196@chello.nl>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
-rw-r--r--kernel/perf_counter.c39
1 file changed, 36 insertions, 3 deletions
diff --git a/kernel/perf_counter.c b/kernel/perf_counter.c
index b07195bbd228..76376ecb23b5 100644
--- a/kernel/perf_counter.c
+++ b/kernel/perf_counter.c
@@ -38,6 +38,10 @@ int perf_max_counters __read_mostly = 1;
38static int perf_reserved_percpu __read_mostly; 38static int perf_reserved_percpu __read_mostly;
39static int perf_overcommit __read_mostly = 1; 39static int perf_overcommit __read_mostly = 1;
40 40
41static atomic_t nr_mmap_tracking __read_mostly;
42static atomic_t nr_munmap_tracking __read_mostly;
43static atomic_t nr_comm_tracking __read_mostly;
44
41/* 45/*
42 * Mutex for (sysadmin-configurable) counter reservations: 46 * Mutex for (sysadmin-configurable) counter reservations:
43 */ 47 */
@@ -1186,6 +1190,13 @@ static void free_counter(struct perf_counter *counter)
1186{ 1190{
1187 perf_pending_sync(counter); 1191 perf_pending_sync(counter);
1188 1192
1193 if (counter->hw_event.mmap)
1194 atomic_dec(&nr_mmap_tracking);
1195 if (counter->hw_event.munmap)
1196 atomic_dec(&nr_munmap_tracking);
1197 if (counter->hw_event.comm)
1198 atomic_dec(&nr_comm_tracking);
1199
1189 if (counter->destroy) 1200 if (counter->destroy)
1190 counter->destroy(counter); 1201 counter->destroy(counter);
1191 1202
@@ -2005,7 +2016,12 @@ static void perf_counter_comm_event(struct perf_comm_event *comm_event)
2005 2016
2006void perf_counter_comm(struct task_struct *task) 2017void perf_counter_comm(struct task_struct *task)
2007{ 2018{
2008 struct perf_comm_event comm_event = { 2019 struct perf_comm_event comm_event;
2020
2021 if (!atomic_read(&nr_comm_tracking))
2022 return;
2023
2024 comm_event = (struct perf_comm_event){
2009 .task = task, 2025 .task = task,
2010 .event = { 2026 .event = {
2011 .header = { .type = PERF_EVENT_COMM, }, 2027 .header = { .type = PERF_EVENT_COMM, },
@@ -2128,7 +2144,12 @@ got_name:
2128void perf_counter_mmap(unsigned long addr, unsigned long len, 2144void perf_counter_mmap(unsigned long addr, unsigned long len,
2129 unsigned long pgoff, struct file *file) 2145 unsigned long pgoff, struct file *file)
2130{ 2146{
2131 struct perf_mmap_event mmap_event = { 2147 struct perf_mmap_event mmap_event;
2148
2149 if (!atomic_read(&nr_mmap_tracking))
2150 return;
2151
2152 mmap_event = (struct perf_mmap_event){
2132 .file = file, 2153 .file = file,
2133 .event = { 2154 .event = {
2134 .header = { .type = PERF_EVENT_MMAP, }, 2155 .header = { .type = PERF_EVENT_MMAP, },
@@ -2146,7 +2167,12 @@ void perf_counter_mmap(unsigned long addr, unsigned long len,
2146void perf_counter_munmap(unsigned long addr, unsigned long len, 2167void perf_counter_munmap(unsigned long addr, unsigned long len,
2147 unsigned long pgoff, struct file *file) 2168 unsigned long pgoff, struct file *file)
2148{ 2169{
2149 struct perf_mmap_event mmap_event = { 2170 struct perf_mmap_event mmap_event;
2171
2172 if (!atomic_read(&nr_munmap_tracking))
2173 return;
2174
2175 mmap_event = (struct perf_mmap_event){
2150 .file = file, 2176 .file = file,
2151 .event = { 2177 .event = {
2152 .header = { .type = PERF_EVENT_MUNMAP, }, 2178 .header = { .type = PERF_EVENT_MUNMAP, },
@@ -2725,6 +2751,13 @@ done:
2725 2751
2726 counter->hw_ops = hw_ops; 2752 counter->hw_ops = hw_ops;
2727 2753
2754 if (counter->hw_event.mmap)
2755 atomic_inc(&nr_mmap_tracking);
2756 if (counter->hw_event.munmap)
2757 atomic_inc(&nr_munmap_tracking);
2758 if (counter->hw_event.comm)
2759 atomic_inc(&nr_comm_tracking);
2760
2728 return counter; 2761 return counter;
2729} 2762}
2730 2763