author	Peter Zijlstra <peterz@infradead.org>	2013-11-14 10:23:04 -0500
committer	Ingo Molnar <mingo@kernel.org>	2013-11-19 10:57:40 -0500
commit	d5b5f391d434c5cc8bcb1ab2d759738797b85f52 (patch)
tree	9be9680fd08dd943cac38b278dde12d83b4a9856
parent	801a76050bcf8d4e500eb8d048ff6265f37a61c8 (diff)
ftrace, perf: Avoid infinite event generation loop
Vince's perf-trinity fuzzer found yet another 'interesting' problem.

When we sample the irq_work_exit tracepoint with period==1 (or
PERF_SAMPLE_PERIOD) and we add an fasync SIGNAL handler we create an
infinite event generation loop:

  ,-> <IPI>
  |     irq_work_exit() ->
  |       trace_irq_work_exit() ->
  |         ...
  |           __perf_event_overflow() -> (due to fasync)
  |             irq_work_queue() -> (irq_work_list must be empty)
  '---------      arch_irq_work_raise()

Similar things can happen due to regular poll() wakeups if we exceed
the ring-buffer wakeup watermark, or have an event_limit.

To avoid this, dis-allow sampling this particular tracepoint.

In order to achieve this, create a special perf_perm function pointer
for each event and call this (when set) on trying to create a
tracepoint perf event.

[ roasted: use expr... to allow for ',' in your expression ]

Reported-by: Vince Weaver <vincent.weaver@maine.edu>
Tested-by: Vince Weaver <vincent.weaver@maine.edu>
Signed-off-by: Peter Zijlstra <peterz@infradead.org>
Cc: Steven Rostedt <rostedt@goodmis.org>
Cc: Dave Jones <davej@redhat.com>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
Link: http://lkml.kernel.org/r/20131114152304.GC5364@laptop.programming.kicks-ass.net
Signed-off-by: Ingo Molnar <mingo@kernel.org>
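For illustration, a minimal sketch of how a trace header can use the new
macro; the my_subsys_exit event is hypothetical, while TRACE_EVENT_PERF_PERM,
the p_event parameter name, and is_sampling_event() all come from this patch
and the existing perf code:

	/* hypothetical event; any TRACE_EVENT() definition works the same way */
	TRACE_EVENT(my_subsys_exit,
		TP_PROTO(int ret),
		TP_ARGS(ret),
		TP_STRUCT__entry(__field(int, ret)),
		TP_fast_assign(__entry->ret = ret;),
		TP_printk("ret=%d", __entry->ret)
	);

	/*
	 * Refuse perf *sampling* of this event; pure counting events
	 * (sample_period == 0) are still permitted.  The expression is
	 * pasted into a generated function that receives tp_event and
	 * p_event as arguments.
	 */
	TRACE_EVENT_PERF_PERM(my_subsys_exit,
		is_sampling_event(p_event) ? -EPERM : 0);
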
-rw-r--r--	arch/x86/include/asm/trace/irq_vectors.h	11
-rw-r--r--	include/linux/ftrace_event.h	16
-rw-r--r--	include/linux/tracepoint.h	4
-rw-r--r--	include/trace/ftrace.h	7
-rw-r--r--	kernel/trace/trace_event_perf.c	6
5 files changed, 44 insertions(+), 0 deletions(-)
diff --git a/arch/x86/include/asm/trace/irq_vectors.h b/arch/x86/include/asm/trace/irq_vectors.h
index 2874df24e7a4..4cab890007a7 100644
--- a/arch/x86/include/asm/trace/irq_vectors.h
+++ b/arch/x86/include/asm/trace/irq_vectors.h
@@ -72,6 +72,17 @@ DEFINE_IRQ_VECTOR_EVENT(x86_platform_ipi);
 DEFINE_IRQ_VECTOR_EVENT(irq_work);
 
 /*
+ * We must dis-allow sampling irq_work_exit() because perf event sampling
+ * itself can cause irq_work, which would lead to an infinite loop;
+ *
+ * 1) irq_work_exit happens
+ * 2) generates perf sample
+ * 3) generates irq_work
+ * 4) goto 1
+ */
+TRACE_EVENT_PERF_PERM(irq_work_exit, is_sampling_event(p_event) ? -EPERM : 0);
+
+/*
  * call_function - called when entering/exiting a call function interrupt
  * vector handler
  */
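
The permission expression above leans on is_sampling_event(); for reference,
in this vintage of the tree it is (as far as I know) just the check below,
which is why counting-only events on irq_work_exit keep working:

	/* include/linux/perf_event.h, paraphrased from memory */
	static inline bool is_sampling_event(struct perf_event *event)
	{
		return event->attr.sample_period != 0;
	}
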
diff --git a/include/linux/ftrace_event.h b/include/linux/ftrace_event.h
index 9abbe630c456..8c9b7a1c4138 100644
--- a/include/linux/ftrace_event.h
+++ b/include/linux/ftrace_event.h
@@ -248,6 +248,9 @@ struct ftrace_event_call {
 #ifdef CONFIG_PERF_EVENTS
 	int				perf_refcount;
 	struct hlist_head __percpu	*perf_events;
+
+	int	(*perf_perm)(struct ftrace_event_call *,
+			     struct perf_event *);
 #endif
 };
 
@@ -317,6 +320,19 @@ struct ftrace_event_file {
 	}								\
 	early_initcall(trace_init_flags_##name);
 
+#define __TRACE_EVENT_PERF_PERM(name, expr...)				\
+	static int perf_perm_##name(struct ftrace_event_call *tp_event, \
+				    struct perf_event *p_event)		\
+	{								\
+		return ({ expr; });					\
+	}								\
+	static int __init trace_init_perf_perm_##name(void)		\
+	{								\
+		event_##name.perf_perm = &perf_perm_##name;		\
+		return 0;						\
+	}								\
+	early_initcall(trace_init_perf_perm_##name);
+
 #define PERF_MAX_TRACE_SIZE	2048
 
 #define MAX_FILTER_STR_VAL	256	/* Should handle KSYM_SYMBOL_LEN */
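
To make the macro above concrete, this is roughly what
__TRACE_EVENT_PERF_PERM(irq_work_exit, is_sampling_event(p_event) ? -EPERM : 0)
expands to after preprocessing; event_irq_work_exit is the ftrace_event_call
instance that the TRACE_EVENT machinery already emits, and the expr... variadic
(see the 'roasted' note in the changelog) is what lets the expression contain
commas:

	static int perf_perm_irq_work_exit(struct ftrace_event_call *tp_event,
					   struct perf_event *p_event)
	{
		return ({ is_sampling_event(p_event) ? -EPERM : 0; });
	}
	static int __init trace_init_perf_perm_irq_work_exit(void)
	{
		event_irq_work_exit.perf_perm = &perf_perm_irq_work_exit;
		return 0;
	}
	early_initcall(trace_init_perf_perm_irq_work_exit);
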
diff --git a/include/linux/tracepoint.h b/include/linux/tracepoint.h
index ebeab360d851..f16dc0a40049 100644
--- a/include/linux/tracepoint.h
+++ b/include/linux/tracepoint.h
@@ -267,6 +267,8 @@ static inline void tracepoint_synchronize_unregister(void)
 
 #define TRACE_EVENT_FLAGS(event, flag)
 
+#define TRACE_EVENT_PERF_PERM(event, expr...)
+
 #endif /* DECLARE_TRACE */
 
 #ifndef TRACE_EVENT
@@ -399,4 +401,6 @@ static inline void tracepoint_synchronize_unregister(void)
 
 #define TRACE_EVENT_FLAGS(event, flag)
 
+#define TRACE_EVENT_PERF_PERM(event, expr...)
+
 #endif /* ifdef TRACE_EVENT (see note above) */
diff --git a/include/trace/ftrace.h b/include/trace/ftrace.h
index 52594b20179e..6b852f60f8ae 100644
--- a/include/trace/ftrace.h
+++ b/include/trace/ftrace.h
@@ -90,6 +90,10 @@
 #define TRACE_EVENT_FLAGS(name, value)					\
 	__TRACE_EVENT_FLAGS(name, value)
 
+#undef TRACE_EVENT_PERF_PERM
+#define TRACE_EVENT_PERF_PERM(name, expr...)				\
+	__TRACE_EVENT_PERF_PERM(name, expr)
+
 #include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
 
 
@@ -140,6 +144,9 @@
 #undef TRACE_EVENT_FLAGS
 #define TRACE_EVENT_FLAGS(event, flag)
 
+#undef TRACE_EVENT_PERF_PERM
+#define TRACE_EVENT_PERF_PERM(event, expr...)
+
 #include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
 
 /*
diff --git a/kernel/trace/trace_event_perf.c b/kernel/trace/trace_event_perf.c
index 78e27e3b52ac..630889f68b1d 100644
--- a/kernel/trace/trace_event_perf.c
+++ b/kernel/trace/trace_event_perf.c
@@ -24,6 +24,12 @@ static int total_ref_count;
 static int perf_trace_event_perm(struct ftrace_event_call *tp_event,
 				 struct perf_event *p_event)
 {
+	if (tp_event->perf_perm) {
+		int ret = tp_event->perf_perm(tp_event, p_event);
+		if (ret)
+			return ret;
+	}
+
 	/* The ftrace function trace is allowed only for root. */
 	if (ftrace_event_is_function(tp_event) &&
 	    perf_paranoid_tracepoint_raw() && !capable(CAP_SYS_ADMIN))
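
From userspace the new hook surfaces as an -EPERM from perf_event_open().
A hedged sketch of an attribute setup that this patch now rejects; the
tp_id value is illustrative and would normally be read from
/sys/kernel/debug/tracing/events/irq_vectors/irq_work_exit/id:

	#include <string.h>
	#include <unistd.h>
	#include <sys/syscall.h>
	#include <linux/perf_event.h>

	static int open_irq_work_exit_sampler(unsigned long long tp_id)
	{
		struct perf_event_attr attr;

		memset(&attr, 0, sizeof(attr));
		attr.size = sizeof(attr);
		attr.type = PERF_TYPE_TRACEPOINT;
		attr.config = tp_id;	/* tracepoint id of irq_work_exit */
		attr.sample_period = 1;	/* sampling => perf_perm returns -EPERM */

		/* With this patch applied the syscall fails, errno == EPERM. */
		return syscall(SYS_perf_event_open, &attr, 0, -1, -1, 0);
	}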