author		Peter Zijlstra <peterz@infradead.org>	2013-11-14 10:23:04 -0500
committer	Ingo Molnar <mingo@kernel.org>		2013-11-19 10:57:40 -0500
commit		d5b5f391d434c5cc8bcb1ab2d759738797b85f52
tree		9be9680fd08dd943cac38b278dde12d83b4a9856 /include/linux/ftrace_event.h
parent		801a76050bcf8d4e500eb8d048ff6265f37a61c8
ftrace, perf: Avoid infinite event generation loop
Vince's perf-trinity fuzzer found yet another 'interesting' problem.
When we sample the irq_work_exit tracepoint with period==1 (or
PERF_SAMPLE_PERIOD) and we add an fasync SIGNAL handler we create an
infinite event generation loop:
  ,-> <IPI>
  |     irq_work_exit() ->
  |       trace_irq_work_exit() ->
  |         ...
  |           __perf_event_overflow() -> (due to fasync)
  |             irq_work_queue() -> (irq_work_list must be empty)
  '--------- arch_irq_work_raise()
Similar things can happen due to regular poll() wakeups if we exceed
the ring-buffer wakeup watermark, or have an event_limit.
To avoid this, disallow sampling this particular tracepoint.
In order to achieve this, create a special perf_perm function pointer
for each event and call this (when set) on trying to create a
tracepoint perf event.
[ rostedt: use expr... to allow for ',' in your expression ]
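The check itself lives on the perf side and is outside this diffstat-limited
view; a minimal sketch of how event creation might consult the new hook
(modeled on the permission step in kernel/trace/trace_event_perf.c; the body
here is illustrative, not the verbatim patch):

	/* Sketch: consult the per-event permission hook, if one is set.
	 * A non-zero return (e.g. -EPERM) vetoes creation of the
	 * tracepoint perf event before any buffers are allocated.
	 */
	static int perf_trace_event_perm(struct ftrace_event_call *tp_event,
					 struct perf_event *p_event)
	{
		if (tp_event->perf_perm) {
			int ret = tp_event->perf_perm(tp_event, p_event);
			if (ret)
				return ret;
		}
		return 0;
	}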
Reported-by: Vince Weaver <vincent.weaver@maine.edu>
Tested-by: Vince Weaver <vincent.weaver@maine.edu>
Signed-off-by: Peter Zijlstra <peterz@infradead.org>
Cc: Steven Rostedt <rostedt@goodmis.org>
Cc: Dave Jones <davej@redhat.com>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
Link: http://lkml.kernel.org/r/20131114152304.GC5364@laptop.programming.kicks-ass.net
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'include/linux/ftrace_event.h')
-rw-r--r--	include/linux/ftrace_event.h	16
1 file changed, 16 insertions, 0 deletions
diff --git a/include/linux/ftrace_event.h b/include/linux/ftrace_event.h
index 9abbe630c456..8c9b7a1c4138 100644
--- a/include/linux/ftrace_event.h
+++ b/include/linux/ftrace_event.h
@@ -248,6 +248,9 @@ struct ftrace_event_call {
 #ifdef CONFIG_PERF_EVENTS
 	int				perf_refcount;
 	struct hlist_head __percpu	*perf_events;
+
+	int	(*perf_perm)(struct ftrace_event_call *,
+			     struct perf_event *);
 #endif
 };
 
@@ -317,6 +320,19 @@ struct ftrace_event_file {
 	}							\
 	early_initcall(trace_init_flags_##name);
 
+#define __TRACE_EVENT_PERF_PERM(name, expr...)			\
+	static int perf_perm_##name(struct ftrace_event_call *tp_event, \
+				    struct perf_event *p_event)	\
+	{							\
+		return ({ expr; });				\
+	}							\
+	static int __init trace_init_perf_perm_##name(void)	\
+	{							\
+		event_##name.perf_perm = &perf_perm_##name;	\
+		return 0;					\
+	}							\
+	early_initcall(trace_init_perf_perm_##name);
+
 #define PERF_MAX_TRACE_SIZE	2048
 
 #define MAX_FILTER_STR_VAL	256	/* Should handle KSYM_SYMBOL_LEN */
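The public wrapper that tracepoints use to install the hook
(TRACE_EVENT_PERF_PERM, defined alongside the other TRACE_EVENT macros) is
not part of this file's diff; the following usage is a sketch of how the
offending tracepoint could veto sampling, assuming the is_sampling_event()
predicate from include/linux/perf_event.h:

	/* Sketch: refuse sampling (period-driven) perf events on
	 * irq_work_exit, since an overflow there re-arms irq_work and
	 * would close the event generation loop described above.
	 */
	TRACE_EVENT_PERF_PERM(irq_work_exit,
		is_sampling_event(p_event) ? -EPERM : 0);

The expr... variadic parameter is what lets such an expression carry commas
without extra parentheses, per the bracketed note in the changelog.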