author		Frederic Weisbecker <fweisbec@gmail.com>	2010-08-14 14:45:13 -0400
committer	Frederic Weisbecker <fweisbec@gmail.com>	2010-08-18 19:32:53 -0400
commit		7ae07ea3a48d30689ee037cb136bc21f0b37d8ae
tree		2cb895a0794bcb2e45a4f48ef7e93302c1f6332c /kernel
parent		927c7a9e92c4f69097a6e9e086d11fc2f8a5b40b
perf: Humanize the number of contexts
Instead of hardcoding the number of contexts for the recursion
barriers, define a cpp constant to make the code more
self-explanatory.

Signed-off-by: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Ingo Molnar <mingo@elte.hu>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Arnaldo Carvalho de Melo <acme@redhat.com>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Stephane Eranian <eranian@google.com>
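The diffstat below is limited to kernel/, so the hunk that introduces the
constant itself is not shown here. Judging from the four recursion slots
being replaced (task, softirq, hardirq, NMI), the definition presumably
lands in a shared header such as include/linux/perf_event.h, roughly along
these lines (a sketch, not the verbatim hunk):

	/*
	 * Number of contexts in which an event can trigger:
	 * task, softirq, hardirq, NMI.
	 */
	#define PERF_NR_CONTEXTS	4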
Diffstat (limited to 'kernel')
 kernel/perf_event.c             | 4 ++--
 kernel/trace/trace_event_perf.c | 8 ++++----
 2 files changed, 6 insertions(+), 6 deletions(-)
diff --git a/kernel/perf_event.c b/kernel/perf_event.c
index 75ab8a2df6b2..f416aef242c3 100644
--- a/kernel/perf_event.c
+++ b/kernel/perf_event.c
@@ -1772,7 +1772,7 @@ struct callchain_cpus_entries {
 	struct perf_callchain_entry	*cpu_entries[0];
 };
 
-static DEFINE_PER_CPU(int, callchain_recursion[4]);
+static DEFINE_PER_CPU(int, callchain_recursion[PERF_NR_CONTEXTS]);
 static atomic_t nr_callchain_events;
 static DEFINE_MUTEX(callchain_mutex);
 struct callchain_cpus_entries *callchain_cpus_entries;
@@ -1828,7 +1828,7 @@ static int alloc_callchain_buffers(void)
 	if (!entries)
 		return -ENOMEM;
 
-	size = sizeof(struct perf_callchain_entry) * 4;
+	size = sizeof(struct perf_callchain_entry) * PERF_NR_CONTEXTS;
 
 	for_each_possible_cpu(cpu) {
 		entries->cpu_entries[cpu] = kmalloc_node(size, GFP_KERNEL,
diff --git a/kernel/trace/trace_event_perf.c b/kernel/trace/trace_event_perf.c
index 000e6e85b445..db2eae2efcf2 100644
--- a/kernel/trace/trace_event_perf.c
+++ b/kernel/trace/trace_event_perf.c
@@ -9,7 +9,7 @@
 #include <linux/kprobes.h>
 #include "trace.h"
 
-static char *perf_trace_buf[4];
+static char *perf_trace_buf[PERF_NR_CONTEXTS];
 
 /*
  * Force it to be aligned to unsigned long to avoid misaligned accesses
@@ -45,7 +45,7 @@ static int perf_trace_event_init(struct ftrace_event_call *tp_event,
 	char *buf;
 	int i;
 
-	for (i = 0; i < 4; i++) {
+	for (i = 0; i < PERF_NR_CONTEXTS; i++) {
 		buf = (char *)alloc_percpu(perf_trace_t);
 		if (!buf)
 			goto fail;
@@ -65,7 +65,7 @@ fail:
 	if (!total_ref_count) {
 		int i;
 
-		for (i = 0; i < 4; i++) {
+		for (i = 0; i < PERF_NR_CONTEXTS; i++) {
 			free_percpu(perf_trace_buf[i]);
 			perf_trace_buf[i] = NULL;
 		}
@@ -140,7 +140,7 @@ void perf_trace_destroy(struct perf_event *p_event)
 	tp_event->perf_events = NULL;
 
 	if (!--total_ref_count) {
-		for (i = 0; i < 4; i++) {
+		for (i = 0; i < PERF_NR_CONTEXTS; i++) {
 			free_percpu(perf_trace_buf[i]);
 			perf_trace_buf[i] = NULL;
 		}
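Why four contexts: perf keeps one recursion counter and one buffer per
execution context so that, for example, an NMI firing in the middle of an
IRQ-context event cannot clobber the IRQ buffer. A minimal sketch of how
such a per-context index is chosen, assuming the in_nmi()/in_irq()/
in_softirq() predicates from <linux/hardirq.h> (simplified; not the
verbatim kernel helper):

	/*
	 * Pick one recursion slot per execution context
	 * (task = 0, softirq = 1, hardirq = 2, NMI = 3),
	 * matching the PERF_NR_CONTEXTS-sized arrays above.
	 */
	static int get_recursion_context(int *recursion)
	{
		int rctx;

		if (in_nmi())
			rctx = 3;
		else if (in_irq())
			rctx = 2;
		else if (in_softirq())
			rctx = 1;
		else
			rctx = 0;

		/* Already inside an event in this context: refuse to recurse. */
		if (recursion[rctx])
			return -1;

		recursion[rctx]++;
		barrier();
		return rctx;
	}

The caller decrements recursion[rctx] when the event is done, which is what
keeps callchain_recursion and perf_trace_buf safe without locks.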