diff options

-rw-r--r--  include/linux/perf_event.h      | 14 ++++++++------
-rw-r--r--  kernel/perf_event.c             |  4 ++--
-rw-r--r--  kernel/trace/trace_event_perf.c |  8 ++++----

3 files changed, 14 insertions(+), 12 deletions(-)
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index d7e8ea690864..ae6fa6050925 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -808,6 +808,12 @@ struct perf_event_context {
 	struct rcu_head			rcu_head;
 };
 
+/*
+ * Number of contexts where an event can trigger:
+ *	task, softirq, hardirq, nmi.
+ */
+#define PERF_NR_CONTEXTS	4
+
 /**
  * struct perf_event_cpu_context - per cpu event context structure
  */
@@ -821,12 +827,8 @@ struct perf_cpu_context {
 	struct mutex			hlist_mutex;
 	int				hlist_refcount;
 
-	/*
-	 * Recursion avoidance:
-	 *
-	 * task, softirq, irq, nmi context
-	 */
-	int				recursion[4];
+	/* Recursion avoidance in each contexts */
+	int				recursion[PERF_NR_CONTEXTS];
 };
 
 struct perf_output_handle {
diff --git a/kernel/perf_event.c b/kernel/perf_event.c
index 75ab8a2df6b2..f416aef242c3 100644
--- a/kernel/perf_event.c
+++ b/kernel/perf_event.c
@@ -1772,7 +1772,7 @@ struct callchain_cpus_entries {
 	struct perf_callchain_entry	*cpu_entries[0];
 };
 
-static DEFINE_PER_CPU(int, callchain_recursion[4]);
+static DEFINE_PER_CPU(int, callchain_recursion[PERF_NR_CONTEXTS]);
 static atomic_t nr_callchain_events;
 static DEFINE_MUTEX(callchain_mutex);
 struct callchain_cpus_entries *callchain_cpus_entries;
@@ -1828,7 +1828,7 @@ static int alloc_callchain_buffers(void)
 	if (!entries)
 		return -ENOMEM;
 
-	size = sizeof(struct perf_callchain_entry) * 4;
+	size = sizeof(struct perf_callchain_entry) * PERF_NR_CONTEXTS;
 
 	for_each_possible_cpu(cpu) {
 		entries->cpu_entries[cpu] = kmalloc_node(size, GFP_KERNEL,
diff --git a/kernel/trace/trace_event_perf.c b/kernel/trace/trace_event_perf.c
index 000e6e85b445..db2eae2efcf2 100644
--- a/kernel/trace/trace_event_perf.c
+++ b/kernel/trace/trace_event_perf.c
@@ -9,7 +9,7 @@
 #include <linux/kprobes.h>
 #include "trace.h"
 
-static char *perf_trace_buf[4];
+static char *perf_trace_buf[PERF_NR_CONTEXTS];
 
 /*
  * Force it to be aligned to unsigned long to avoid misaligned accesses
@@ -45,7 +45,7 @@ static int perf_trace_event_init(struct ftrace_event_call *tp_event,
 	char *buf;
 	int i;
 
-	for (i = 0; i < 4; i++) {
+	for (i = 0; i < PERF_NR_CONTEXTS; i++) {
 		buf = (char *)alloc_percpu(perf_trace_t);
 		if (!buf)
 			goto fail;
@@ -65,7 +65,7 @@ fail:
 	if (!total_ref_count) {
 		int i;
 
-		for (i = 0; i < 4; i++) {
+		for (i = 0; i < PERF_NR_CONTEXTS; i++) {
 			free_percpu(perf_trace_buf[i]);
 			perf_trace_buf[i] = NULL;
 		}
@@ -140,7 +140,7 @@ void perf_trace_destroy(struct perf_event *p_event)
 	tp_event->perf_events = NULL;
 
 	if (!--total_ref_count) {
-		for (i = 0; i < 4; i++) {
+		for (i = 0; i < PERF_NR_CONTEXTS; i++) {
 			free_percpu(perf_trace_buf[i]);
 			perf_trace_buf[i] = NULL;
 		}