author     Steven Rostedt (Red Hat) <rostedt@goodmis.org>    2013-07-02 14:48:23 -0400
committer  Steven Rostedt <rostedt@goodmis.org>              2013-07-02 20:42:25 -0400
commit     2a6c24afab70dbcfee49f4c76e1511eec1a3298b (patch)
tree       ea1f139f0f1192f4494a68166eb0ba36322a23de /kernel
parent     8e2e2fa47129532a30cff6c25a47078dc97d9260 (diff)
tracing: Fix race between deleting buffer and setting events
While analyzing the code, I discovered a potential race between deleting a
trace instance and setting events. Several races can occur if events are
being traced while the buffer is being deleted. Mostly the problem comes
from freeing the descriptor used by the trace event callback. To prevent
problems like this, the events are disabled before the buffer is deleted.
The problem with the current approach is that the event_mutex is released
between disabling the events and freeing the files, which means the events
could be re-enabled while the freeing takes place.
Cc: stable@vger.kernel.org # 3.10
Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
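The fix follows a common kernel pattern: the body of __ftrace_set_clr_event() moves into a variant that assumes event_mutex is already held, and the original name becomes a thin wrapper that takes the lock. event_trace_del_tracer() can then disable the events and tear the files down inside a single critical section. Below is a minimal user-space sketch of that pattern using pthreads; it is an illustration only, not kernel code, and the names (set_events_nolock(), set_events(), delete_instance()) are made up for the example.

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

/* Stand-ins for the per-instance state that event_mutex protects. */
static pthread_mutex_t event_mutex = PTHREAD_MUTEX_INITIALIZER;
static bool events_enabled;
static bool files_freed;

/* Worker that assumes event_mutex is already held (the "_nolock" variant). */
static int set_events_nolock(bool enable)
{
	if (files_freed)
		return -1;	/* the event files are already gone */
	events_enabled = enable;
	return 0;
}

/* Public entry point: take the lock, then call the unlocked worker. */
static int set_events(bool enable)
{
	int ret;

	pthread_mutex_lock(&event_mutex);
	ret = set_events_nolock(enable);
	pthread_mutex_unlock(&event_mutex);
	return ret;
}

/*
 * Instance deletion. Before the patch, the disable step took and released
 * the mutex on its own, leaving a window where set_events(true) could run
 * against files that were about to be freed. Doing both steps inside one
 * critical section, calling the _nolock worker, closes that window.
 */
static void delete_instance(void)
{
	pthread_mutex_lock(&event_mutex);
	set_events_nolock(false);
	files_freed = true;	/* stands in for freeing the event files */
	pthread_mutex_unlock(&event_mutex);
}

int main(void)
{
	set_events(true);
	delete_instance();
	printf("setting events after delete: %d\n", set_events(true));
	return 0;
}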
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/trace/trace_events.c  23
1 file changed, 17 insertions, 6 deletions
diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
index 920e08fb53b3..7d854290bf81 100644
--- a/kernel/trace/trace_events.c
+++ b/kernel/trace/trace_events.c
@@ -441,14 +441,14 @@ static int tracing_release_generic_file(struct inode *inode, struct file *filp)
 /*
  * __ftrace_set_clr_event(NULL, NULL, NULL, set) will set/unset all events.
  */
-static int __ftrace_set_clr_event(struct trace_array *tr, const char *match,
-				  const char *sub, const char *event, int set)
+static int
+__ftrace_set_clr_event_nolock(struct trace_array *tr, const char *match,
+			      const char *sub, const char *event, int set)
 {
 	struct ftrace_event_file *file;
 	struct ftrace_event_call *call;
 	int ret = -EINVAL;
 
-	mutex_lock(&event_mutex);
 	list_for_each_entry(file, &tr->events, list) {
 
 		call = file->event_call;
@@ -474,6 +474,17 @@ static int __ftrace_set_clr_event(struct trace_array *tr, const char *match,
 
 		ret = 0;
 	}
+
+	return ret;
+}
+
+static int __ftrace_set_clr_event(struct trace_array *tr, const char *match,
+				  const char *sub, const char *event, int set)
+{
+	int ret;
+
+	mutex_lock(&event_mutex);
+	ret = __ftrace_set_clr_event_nolock(tr, match, sub, event, set);
 	mutex_unlock(&event_mutex);
 
 	return ret;
@@ -2408,11 +2419,11 @@ early_event_add_tracer(struct dentry *parent, struct trace_array *tr)
 
 int event_trace_del_tracer(struct trace_array *tr)
 {
-	/* Disable any running events */
-	__ftrace_set_clr_event(tr, NULL, NULL, NULL, 0);
-
 	mutex_lock(&event_mutex);
 
+	/* Disable any running events */
+	__ftrace_set_clr_event_nolock(tr, NULL, NULL, NULL, 0);
+
 	down_write(&trace_event_sem);
 	__trace_remove_event_dirs(tr);
 	debugfs_remove_recursive(tr->event_dir);