author     Steven Rostedt <srostedt@redhat.com>    2010-05-21 11:49:57 -0400
committer  Steven Rostedt <rostedt@goodmis.org>    2010-05-21 11:49:57 -0400
commit     ff5f149b6aec8edbfa3698721667acd043009a33
tree       d052553eb296dfee3f01b1cb2b717cb7ccf3127a /kernel/trace/trace_event_perf.c
parent     f0218b3e9974f06014b61be8987159f4a20e011e
parent     580d607cd666dfabfc1c7b0fb08c8ac690c7c87f
Merge branch 'perf/core' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip into trace/tip/tracing/core-7
Conflicts:
include/linux/ftrace_event.h
include/trace/ftrace.h
kernel/trace/trace_event_perf.c
kernel/trace/trace_kprobe.c
kernel/trace/trace_syscalls.c
Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
Diffstat (limited to 'kernel/trace/trace_event_perf.c')
 -rw-r--r--  kernel/trace/trace_event_perf.c | 190
 1 file changed, 94 insertions(+), 96 deletions(-)
diff --git a/kernel/trace/trace_event_perf.c b/kernel/trace/trace_event_perf.c
index 0a47e8d6b491..26b8607a0abc 100644
--- a/kernel/trace/trace_event_perf.c
+++ b/kernel/trace/trace_event_perf.c
@@ -9,13 +9,9 @@
 #include <linux/kprobes.h>
 #include "trace.h"
 
-DEFINE_PER_CPU(struct pt_regs, perf_trace_regs);
-EXPORT_PER_CPU_SYMBOL_GPL(perf_trace_regs);
-
 EXPORT_SYMBOL_GPL(perf_arch_fetch_caller_regs);
 
-static char *perf_trace_buf;
-static char *perf_trace_buf_nmi;
+static char *perf_trace_buf[4];
 
 /*
  * Force it to be aligned to unsigned long to avoid misaligned accesses
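The `4` above is one buffer per perf recursion context, replacing the old normal/NMI pair. The index is the value handed back by perf_swevent_get_recursion_context(); roughly, perf core picks it like this (a simplified illustrative model, not code from this patch; the real helper also marks the context busy so a recursive software event bails out instead of corrupting the buffer):

	/* simplified model of perf's recursion-context indexing;
	 * the predicates are the usual <linux/hardirq.h> tests */
	static int recursion_index(void)
	{
		if (in_nmi())		/* 3: NMI */
			return 3;
		if (in_irq())		/* 2: hardirq */
			return 2;
		if (in_softirq())	/* 1: softirq */
			return 1;
		return 0;		/* 0: process context */
	}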
@@ -27,63 +23,82 @@ typedef typeof(unsigned long [PERF_MAX_TRACE_SIZE / sizeof(unsigned long)])
 /* Count the events in use (per event id, not per instance) */
 static int total_ref_count;
 
-static int perf_trace_event_enable(struct ftrace_event_call *event)
+static int perf_trace_event_init(struct ftrace_event_call *tp_event,
+				 struct perf_event *p_event)
 {
-	char *buf;
+	struct hlist_head *list;
 	int ret = -ENOMEM;
+	int cpu;
 
-	if (event->perf_refcount++ > 0)
+	p_event->tp_event = tp_event;
+	if (tp_event->perf_refcount++ > 0)
 		return 0;
 
-	if (!total_ref_count) {
-		buf = (char *)alloc_percpu(perf_trace_t);
-		if (!buf)
-			goto fail_buf;
+	list = alloc_percpu(struct hlist_head);
+	if (!list)
+		goto fail;
+
+	for_each_possible_cpu(cpu)
+		INIT_HLIST_HEAD(per_cpu_ptr(list, cpu));
 
-		rcu_assign_pointer(perf_trace_buf, buf);
+	tp_event->perf_events = list;
+
+	if (!total_ref_count) {
+		char *buf;
+		int i;
 
-		buf = (char *)alloc_percpu(perf_trace_t);
-		if (!buf)
-			goto fail_buf_nmi;
+		for (i = 0; i < 4; i++) {
+			buf = (char *)alloc_percpu(perf_trace_t);
+			if (!buf)
+				goto fail;
 
-		rcu_assign_pointer(perf_trace_buf_nmi, buf);
+			perf_trace_buf[i] = buf;
+		}
 	}
 
-	if (event->class->reg)
-		ret = event->class->reg(event, TRACE_REG_PERF_REGISTER);
+	if (tp_event->class->reg)
+		ret = tp_event->class->reg(tp_event, TRACE_REG_PERF_REGISTER);
 	else
-		ret = tracepoint_probe_register(event->name,
-						event->class->perf_probe,
-						event);
-	if (!ret) {
-		total_ref_count++;
-		return 0;
-	}
+		ret = tracepoint_probe_register(tp_event->name,
+						tp_event->class->perf_probe,
+						tp_event);
+
+	if (ret)
+		goto fail;
 
-fail_buf_nmi:
+	total_ref_count++;
+	return 0;
+
+fail:
 	if (!total_ref_count) {
-		free_percpu(perf_trace_buf_nmi);
-		free_percpu(perf_trace_buf);
-		perf_trace_buf_nmi = NULL;
-		perf_trace_buf = NULL;
+		int i;
+
+		for (i = 0; i < 4; i++) {
+			free_percpu(perf_trace_buf[i]);
+			perf_trace_buf[i] = NULL;
+		}
+	}
+
+	if (!--tp_event->perf_refcount) {
+		free_percpu(tp_event->perf_events);
+		tp_event->perf_events = NULL;
 	}
-fail_buf:
-	event->perf_refcount--;
 
 	return ret;
 }
 
-int perf_trace_enable(int event_id)
+int perf_trace_init(struct perf_event *p_event)
 {
-	struct ftrace_event_call *event;
+	struct ftrace_event_call *tp_event;
+	int event_id = p_event->attr.config;
 	int ret = -EINVAL;
 
 	mutex_lock(&event_mutex);
-	list_for_each_entry(event, &ftrace_events, list) {
-		if (event->event.type == event_id &&
-		    event->class && event->class->perf_probe &&
-		    try_module_get(event->mod)) {
-			ret = perf_trace_event_enable(event);
+	list_for_each_entry(tp_event, &ftrace_events, list) {
+		if (tp_event->event.type == event_id &&
+		    tp_event->class && tp_event->class->perf_probe &&
+		    try_module_get(tp_event->mod)) {
+			ret = perf_trace_event_init(tp_event, p_event);
 			break;
 		}
 	}
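Together with perf_trace_enable()/perf_trace_disable()/perf_trace_destroy() in the next hunk, perf_trace_init() gives perf core a per-event lifecycle instead of the old per-event-id enable/disable. A hedged sketch of a hypothetical caller (tp_event_open() is an illustrative name, not a function from this patch):

	/* hypothetical caller: pairing the new entry points */
	static int tp_event_open(struct perf_event *p_event)
	{
		int err;

		/* resolve p_event->attr.config to a tracepoint, take refs */
		err = perf_trace_init(p_event);
		if (err)
			return err;

		/* link the event into this CPU's tp_event->perf_events hlist */
		err = perf_trace_enable(p_event);
		if (err) {
			perf_trace_destroy(p_event);
			return err;
		}

		/* ... samples are delivered while the event is on the list ... */

		perf_trace_disable(p_event);	/* unlink from the hlist */
		perf_trace_destroy(p_event);	/* drop refs; free buffers at zero */
		return 0;
	}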
@@ -92,93 +107,76 @@ int perf_trace_enable(int event_id)
 	return ret;
 }
 
-static void perf_trace_event_disable(struct ftrace_event_call *event)
+int perf_trace_enable(struct perf_event *p_event)
 {
-	char *buf, *nmi_buf;
-
-	if (--event->perf_refcount > 0)
-		return;
-
-	if (event->class->reg)
-		event->class->reg(event, TRACE_REG_PERF_UNREGISTER);
-	else
-		tracepoint_probe_unregister(event->name, event->class->perf_probe, event);
+	struct ftrace_event_call *tp_event = p_event->tp_event;
+	struct hlist_head *list;
 
-	if (!--total_ref_count) {
-		buf = perf_trace_buf;
-		rcu_assign_pointer(perf_trace_buf, NULL);
+	list = tp_event->perf_events;
+	if (WARN_ON_ONCE(!list))
+		return -EINVAL;
 
-		nmi_buf = perf_trace_buf_nmi;
-		rcu_assign_pointer(perf_trace_buf_nmi, NULL);
+	list = per_cpu_ptr(list, smp_processor_id());
+	hlist_add_head_rcu(&p_event->hlist_entry, list);
 
-		/*
-		 * Ensure every events in profiling have finished before
-		 * releasing the buffers
-		 */
-		synchronize_sched();
+	return 0;
+}
 
-		free_percpu(buf);
-		free_percpu(nmi_buf);
-	}
+void perf_trace_disable(struct perf_event *p_event)
+{
+	hlist_del_rcu(&p_event->hlist_entry);
 }
 
-void perf_trace_disable(int event_id)
+void perf_trace_destroy(struct perf_event *p_event)
 {
-	struct ftrace_event_call *event;
+	struct ftrace_event_call *tp_event = p_event->tp_event;
+	int i;
 
-	mutex_lock(&event_mutex);
-	list_for_each_entry(event, &ftrace_events, list) {
-		if (event->event.type == event_id) {
-			perf_trace_event_disable(event);
-			module_put(event->mod);
-			break;
+	if (--tp_event->perf_refcount > 0)
+		return;
+
+	if (tp_event->class->reg)
+		tp_event->class->reg(tp_event, TRACE_REG_PERF_UNREGISTER);
+	else
+		tracepoint_probe_unregister(tp_event->name,
+					    tp_event->class->perf_probe,
+					    tp_event);
+
+	free_percpu(tp_event->perf_events);
+	tp_event->perf_events = NULL;
+
+	if (!--total_ref_count) {
+		for (i = 0; i < 4; i++) {
+			free_percpu(perf_trace_buf[i]);
+			perf_trace_buf[i] = NULL;
 		}
 	}
-	mutex_unlock(&event_mutex);
 }
 
 __kprobes void *perf_trace_buf_prepare(int size, unsigned short type,
-				       int *rctxp, unsigned long *irq_flags)
+				       struct pt_regs *regs, int *rctxp)
 {
 	struct trace_entry *entry;
-	char *trace_buf, *raw_data;
-	int pc, cpu;
+	char *raw_data;
+	int pc;
 
 	BUILD_BUG_ON(PERF_MAX_TRACE_SIZE % sizeof(unsigned long));
 
 	pc = preempt_count();
 
-	/* Protect the per cpu buffer, begin the rcu read side */
-	local_irq_save(*irq_flags);
-
 	*rctxp = perf_swevent_get_recursion_context();
 	if (*rctxp < 0)
-		goto err_recursion;
-
-	cpu = smp_processor_id();
-
-	if (in_nmi())
-		trace_buf = rcu_dereference_sched(perf_trace_buf_nmi);
-	else
-		trace_buf = rcu_dereference_sched(perf_trace_buf);
-
-	if (!trace_buf)
-		goto err;
+		return NULL;
 
-	raw_data = per_cpu_ptr(trace_buf, cpu);
+	raw_data = per_cpu_ptr(perf_trace_buf[*rctxp], smp_processor_id());
 
 	/* zero the dead bytes from align to not leak stack to user */
 	memset(&raw_data[size - sizeof(u64)], 0, sizeof(u64));
 
 	entry = (struct trace_entry *)raw_data;
-	tracing_generic_entry_update(entry, *irq_flags, pc);
+	tracing_generic_entry_update(entry, regs->flags, pc);
 	entry->type = type;
 
 	return raw_data;
-err:
-	perf_swevent_put_recursion_context(*rctxp);
-err_recursion:
-	local_irq_restore(*irq_flags);
-	return NULL;
 }
 EXPORT_SYMBOL_GPL(perf_trace_buf_prepare);
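On the producer side, a probe now pairs perf_trace_buf_prepare() with the per-CPU perf_events list allocated in perf_trace_event_init(). A rough sketch, loosely modeled on what the event macros generate after this merge (payload layout elided; treat the exact perf_trace_buf_submit() signature as illustrative):

	/* illustrative probe body under the new API */
	static void my_probe(void *__data, struct pt_regs *regs)
	{
		struct ftrace_event_call *tp_event = __data;
		struct hlist_head *head;
		struct trace_entry *entry;
		int rctx;
		int size = ALIGN(sizeof(*entry) /* + payload */, sizeof(u64));

		/* grab this context's per-cpu buffer; fails on recursion */
		entry = perf_trace_buf_prepare(size, tp_event->event.type,
					       regs, &rctx);
		if (!entry)
			return;

		/* fill in the event payload after the trace_entry header ... */

		/* deliver to every perf_event attached on this CPU, then
		 * release the recursion context via the submit helper */
		head = per_cpu_ptr(tp_event->perf_events, smp_processor_id());
		perf_trace_buf_submit(entry, size, rctx, 0, 1, regs, head);
	}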