Diffstat (limited to 'kernel/trace/trace_event_perf.c')
-rw-r--r--   kernel/trace/trace_event_perf.c | 190
1 file changed, 104 insertions(+), 86 deletions(-)
diff --git a/kernel/trace/trace_event_perf.c b/kernel/trace/trace_event_perf.c
index 0565bb42566f..e6f65887842c 100644
--- a/kernel/trace/trace_event_perf.c
+++ b/kernel/trace/trace_event_perf.c
@@ -9,13 +9,9 @@
 #include <linux/kprobes.h>
 #include "trace.h"
 
-DEFINE_PER_CPU(struct pt_regs, perf_trace_regs);
-EXPORT_PER_CPU_SYMBOL_GPL(perf_trace_regs);
-
 EXPORT_SYMBOL_GPL(perf_arch_fetch_caller_regs);
 
-static char *perf_trace_buf;
-static char *perf_trace_buf_nmi;
+static char *perf_trace_buf[4];
 
 /*
  * Force it to be aligned to unsigned long to avoid misaligned accesses
@@ -27,57 +23,82 @@ typedef typeof(unsigned long [PERF_MAX_TRACE_SIZE / sizeof(unsigned long)])
 /* Count the events in use (per event id, not per instance) */
 static int total_ref_count;
 
-static int perf_trace_event_enable(struct ftrace_event_call *event)
+static int perf_trace_event_init(struct ftrace_event_call *tp_event,
+				 struct perf_event *p_event)
 {
-	char *buf;
+	struct hlist_head *list;
 	int ret = -ENOMEM;
+	int cpu;
 
-	if (event->perf_refcount++ > 0)
+	p_event->tp_event = tp_event;
+	if (tp_event->perf_refcount++ > 0)
 		return 0;
 
-	if (!total_ref_count) {
-		buf = (char *)alloc_percpu(perf_trace_t);
-		if (!buf)
-			goto fail_buf;
+	list = alloc_percpu(struct hlist_head);
+	if (!list)
+		goto fail;
 
-		rcu_assign_pointer(perf_trace_buf, buf);
+	for_each_possible_cpu(cpu)
+		INIT_HLIST_HEAD(per_cpu_ptr(list, cpu));
 
-		buf = (char *)alloc_percpu(perf_trace_t);
-		if (!buf)
-			goto fail_buf_nmi;
+	tp_event->perf_events = list;
 
-		rcu_assign_pointer(perf_trace_buf_nmi, buf);
-	}
+	if (!total_ref_count) {
+		char *buf;
+		int i;
 
-	ret = event->perf_event_enable(event);
-	if (!ret) {
-		total_ref_count++;
-		return 0;
+		for (i = 0; i < 4; i++) {
+			buf = (char *)alloc_percpu(perf_trace_t);
+			if (!buf)
+				goto fail;
+
+			perf_trace_buf[i] = buf;
+		}
 	}
 
-fail_buf_nmi:
+	if (tp_event->class->reg)
+		ret = tp_event->class->reg(tp_event, TRACE_REG_PERF_REGISTER);
+	else
+		ret = tracepoint_probe_register(tp_event->name,
+						tp_event->class->perf_probe,
+						tp_event);
+
+	if (ret)
+		goto fail;
+
+	total_ref_count++;
+	return 0;
+
+fail:
 	if (!total_ref_count) {
-		free_percpu(perf_trace_buf_nmi);
-		free_percpu(perf_trace_buf);
-		perf_trace_buf_nmi = NULL;
-		perf_trace_buf = NULL;
+		int i;
+
+		for (i = 0; i < 4; i++) {
+			free_percpu(perf_trace_buf[i]);
+			perf_trace_buf[i] = NULL;
+		}
+	}
+
+	if (!--tp_event->perf_refcount) {
+		free_percpu(tp_event->perf_events);
+		tp_event->perf_events = NULL;
 	}
-fail_buf:
-	event->perf_refcount--;
 
 	return ret;
 }
 
-int perf_trace_enable(int event_id)
+int perf_trace_init(struct perf_event *p_event)
 {
-	struct ftrace_event_call *event;
+	struct ftrace_event_call *tp_event;
+	int event_id = p_event->attr.config;
 	int ret = -EINVAL;
 
 	mutex_lock(&event_mutex);
-	list_for_each_entry(event, &ftrace_events, list) {
-		if (event->id == event_id && event->perf_event_enable &&
-		    try_module_get(event->mod)) {
-			ret = perf_trace_event_enable(event);
+	list_for_each_entry(tp_event, &ftrace_events, list) {
+		if (tp_event->event.type == event_id &&
+		    tp_event->class && tp_event->class->perf_probe &&
+		    try_module_get(tp_event->mod)) {
+			ret = perf_trace_event_init(tp_event, p_event);
 			break;
 		}
 	}
@@ -86,90 +107,87 @@ int perf_trace_enable(int event_id)
 	return ret;
 }
 
-static void perf_trace_event_disable(struct ftrace_event_call *event)
+int perf_trace_enable(struct perf_event *p_event)
 {
-	char *buf, *nmi_buf;
+	struct ftrace_event_call *tp_event = p_event->tp_event;
+	struct hlist_head *list;
 
-	if (--event->perf_refcount > 0)
-		return;
+	list = tp_event->perf_events;
+	if (WARN_ON_ONCE(!list))
+		return -EINVAL;
 
-	event->perf_event_disable(event);
-
-	if (!--total_ref_count) {
-		buf = perf_trace_buf;
-		rcu_assign_pointer(perf_trace_buf, NULL);
+	list = this_cpu_ptr(list);
+	hlist_add_head_rcu(&p_event->hlist_entry, list);
 
-		nmi_buf = perf_trace_buf_nmi;
-		rcu_assign_pointer(perf_trace_buf_nmi, NULL);
-
-		/*
-		 * Ensure every events in profiling have finished before
-		 * releasing the buffers
-		 */
-		synchronize_sched();
+	return 0;
+}
 
-		free_percpu(buf);
-		free_percpu(nmi_buf);
-	}
+void perf_trace_disable(struct perf_event *p_event)
+{
+	hlist_del_rcu(&p_event->hlist_entry);
 }
 
-void perf_trace_disable(int event_id)
+void perf_trace_destroy(struct perf_event *p_event)
 {
-	struct ftrace_event_call *event;
+	struct ftrace_event_call *tp_event = p_event->tp_event;
+	int i;
 
 	mutex_lock(&event_mutex);
-	list_for_each_entry(event, &ftrace_events, list) {
-		if (event->id == event_id) {
-			perf_trace_event_disable(event);
-			module_put(event->mod);
-			break;
+	if (--tp_event->perf_refcount > 0)
+		goto out;
+
+	if (tp_event->class->reg)
+		tp_event->class->reg(tp_event, TRACE_REG_PERF_UNREGISTER);
+	else
+		tracepoint_probe_unregister(tp_event->name,
+					    tp_event->class->perf_probe,
+					    tp_event);
+
+	/*
+	 * Ensure our callback won't be called anymore. See
+	 * tracepoint_probe_unregister() and __DO_TRACE().
+	 */
+	synchronize_sched();
+
+	free_percpu(tp_event->perf_events);
+	tp_event->perf_events = NULL;
+
+	if (!--total_ref_count) {
+		for (i = 0; i < 4; i++) {
+			free_percpu(perf_trace_buf[i]);
+			perf_trace_buf[i] = NULL;
 		}
 	}
+out:
 	mutex_unlock(&event_mutex);
 }
 
 __kprobes void *perf_trace_buf_prepare(int size, unsigned short type,
-					int *rctxp, unsigned long *irq_flags)
+				       struct pt_regs *regs, int *rctxp)
 {
 	struct trace_entry *entry;
-	char *trace_buf, *raw_data;
-	int pc, cpu;
+	unsigned long flags;
+	char *raw_data;
+	int pc;
 
 	BUILD_BUG_ON(PERF_MAX_TRACE_SIZE % sizeof(unsigned long));
 
 	pc = preempt_count();
 
-	/* Protect the per cpu buffer, begin the rcu read side */
-	local_irq_save(*irq_flags);
-
 	*rctxp = perf_swevent_get_recursion_context();
 	if (*rctxp < 0)
-		goto err_recursion;
-
-	cpu = smp_processor_id();
-
-	if (in_nmi())
-		trace_buf = rcu_dereference_sched(perf_trace_buf_nmi);
-	else
-		trace_buf = rcu_dereference_sched(perf_trace_buf);
-
-	if (!trace_buf)
-		goto err;
+		return NULL;
 
-	raw_data = per_cpu_ptr(trace_buf, cpu);
+	raw_data = this_cpu_ptr(perf_trace_buf[*rctxp]);
 
 	/* zero the dead bytes from align to not leak stack to user */
 	memset(&raw_data[size - sizeof(u64)], 0, sizeof(u64));
 
 	entry = (struct trace_entry *)raw_data;
-	tracing_generic_entry_update(entry, *irq_flags, pc);
+	local_save_flags(flags);
+	tracing_generic_entry_update(entry, flags, pc);
 	entry->type = type;
 
 	return raw_data;
-err:
-	perf_swevent_put_recursion_context(*rctxp);
-err_recursion:
-	local_irq_restore(*irq_flags);
-	return NULL;
 }
 EXPORT_SYMBOL_GPL(perf_trace_buf_prepare);
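For context, a minimal sketch (not part of this commit) of how a tracepoint's perf probe is expected to drive the reworked helpers: perf_trace_buf_prepare() now returns one of the four per-recursion-context buffers, selected by the rctx it hands back, and delivery walks the per-CPU hlist of perf_events that perf_trace_enable() hooked onto tp_event->perf_events. The submit helper name and argument order (perf_trace_buf_submit), the entry layout, and how regs is obtained are assumptions about the surrounding tree, not something this diff defines.

#include <linux/kernel.h>
#include <linux/percpu.h>
#include <linux/ftrace_event.h>
#include <linux/perf_event.h>

/*
 * Illustrative sketch only: roughly what a generated perf_trace_<call>()
 * probe does after this change.  Real probes capture @regs themselves
 * (e.g. via perf_fetch_caller_regs()) and fill the entry from TP_ARGS.
 */
static void perf_trace_example(struct ftrace_event_call *tp_event,
			       struct pt_regs *regs, long arg)
{
	struct {
		struct trace_entry	ent;
		long			arg;
	} *entry;
	struct hlist_head *head;
	int size = ALIGN(sizeof(*entry), sizeof(u64));
	int rctx;

	/* One of the four recursion-context buffers; NULL under recursion. */
	entry = perf_trace_buf_prepare(size, tp_event->event.type, regs, &rctx);
	if (!entry)
		return;

	entry->arg = arg;

	/* Hand the record to every perf_event hooked on this CPU's list. */
	head = this_cpu_ptr(tp_event->perf_events);
	perf_trace_buf_submit(entry, size, rctx, 0, 1, regs, head);
}

The four buffers correspond to the recursion contexts perf distinguishes (task, softirq, hardirq, NMI), which is why the separate NMI buffer and the irq_flags save/restore in perf_trace_buf_prepare() can be dropped in this patch.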