author    Steven Rostedt (Red Hat) <rostedt@goodmis.org> 2013-07-02 14:48:23 -0400
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org> 2013-07-25 17:07:43 -0400
commit    68cebd265c91873277cf100e7ac1d047c6598ddf (patch)
tree      40e9d76980fa3df077908b1401fec8e4d97e7cec /kernel
parent    6492334c86dfb441af456337dc3217c2a430f141 (diff)
tracing: Fix race between deleting buffer and setting events
commit 2a6c24afab70dbcfee49f4c76e1511eec1a3298b upstream.

While analyzing the code, I discovered that there's a potential race between
deleting a trace instance and setting events.

There are a few races that can occur if events are being traced as the buffer
is being deleted. Mostly the problem comes with freeing the descriptor used by
the trace event callback. To prevent problems like this, the events are
disabled before the buffer is deleted.

The problem with the current solution is that the event_mutex is let go between
disabling the events and freeing the files, which means that the events could
be enabled again while the freeing takes place.

Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
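The fix, visible in the diff below, uses a common locking pattern: the body of
__ftrace_set_clr_event() moves into a __ftrace_set_clr_event_nolock() helper
that expects event_mutex to already be held, and the original function becomes
a thin wrapper that takes the mutex around the helper. event_trace_del_tracer()
can then take event_mutex itself, call the _nolock variant, and keep the mutex
held across the file teardown, so nothing can re-enable events in between.
A minimal userspace sketch of that pattern follows; it uses pthreads and
illustrative names (set_events_nolock, delete_instance) that are not part of
the kernel code.

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t event_mutex = PTHREAD_MUTEX_INITIALIZER;
static int events_enabled;

/* Core logic: caller must already hold event_mutex. */
static int set_events_nolock(int enable)
{
	events_enabled = enable;
	return 0;
}

/* Public entry point: wraps the _nolock helper in the mutex. */
static int set_events(int enable)
{
	int ret;

	pthread_mutex_lock(&event_mutex);
	ret = set_events_nolock(enable);
	pthread_mutex_unlock(&event_mutex);

	return ret;
}

/*
 * Teardown disables events and frees state under one critical section,
 * so a concurrent set_events() cannot slip in between the disable and
 * the free -- the race the patch closes.
 */
static void delete_instance(void)
{
	pthread_mutex_lock(&event_mutex);

	set_events_nolock(0);	/* disable while holding the lock */
	/* ... free per-event state here, still under event_mutex ... */

	pthread_mutex_unlock(&event_mutex);
}

int main(void)
{
	set_events(1);
	delete_instance();
	printf("events_enabled = %d\n", events_enabled);
	return 0;
}

Calling the locked set_events() from delete_instance() would either deadlock on
the non-recursive mutex or, if the lock were dropped first, reopen the window
between disabling and freeing; that is why the function is split in two.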
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/trace/trace_events.c | 23
1 file changed, 17 insertions(+), 6 deletions(-)
diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
index f82d92dbd614..32b9895af239 100644
--- a/kernel/trace/trace_events.c
+++ b/kernel/trace/trace_events.c
@@ -415,14 +415,14 @@ static void put_system(struct ftrace_subsystem_dir *dir)
 /*
  * __ftrace_set_clr_event(NULL, NULL, NULL, set) will set/unset all events.
  */
-static int __ftrace_set_clr_event(struct trace_array *tr, const char *match,
-				  const char *sub, const char *event, int set)
+static int
+__ftrace_set_clr_event_nolock(struct trace_array *tr, const char *match,
+			      const char *sub, const char *event, int set)
 {
 	struct ftrace_event_file *file;
 	struct ftrace_event_call *call;
 	int ret = -EINVAL;
 
-	mutex_lock(&event_mutex);
 	list_for_each_entry(file, &tr->events, list) {
 
 		call = file->event_call;
@@ -448,6 +448,17 @@ static int __ftrace_set_clr_event(struct trace_array *tr, const char *match,
 
 		ret = 0;
 	}
+
+	return ret;
+}
+
+static int __ftrace_set_clr_event(struct trace_array *tr, const char *match,
+				  const char *sub, const char *event, int set)
+{
+	int ret;
+
+	mutex_lock(&event_mutex);
+	ret = __ftrace_set_clr_event_nolock(tr, match, sub, event, set);
 	mutex_unlock(&event_mutex);
 
 	return ret;
@@ -2367,11 +2378,11 @@ early_event_add_tracer(struct dentry *parent, struct trace_array *tr)
 
 int event_trace_del_tracer(struct trace_array *tr)
 {
-	/* Disable any running events */
-	__ftrace_set_clr_event(tr, NULL, NULL, NULL, 0);
-
 	mutex_lock(&event_mutex);
 
+	/* Disable any running events */
+	__ftrace_set_clr_event_nolock(tr, NULL, NULL, NULL, 0);
+
 	down_write(&trace_event_sem);
 	__trace_remove_event_dirs(tr);
 	debugfs_remove_recursive(tr->event_dir);