diff options
author | Li Zefan <lizf@cn.fujitsu.com> | 2009-05-05 22:33:45 -0400 |
---|---|---|
committer | Ingo Molnar <mingo@elte.hu> | 2009-05-06 04:38:19 -0400 |
commit | 20c8928abe70e204bd077ab6cfe23002d7788983 (patch) | |
tree | e161656f99c814ebdd69df8b5a79dab58f80065e | |
parent | 2df75e415709ad12862028916c772c1f377f6a7c (diff) |
tracing/events: fix concurrent access to ftrace_events list
A module will add/remove its trace events when it gets loaded/unloaded, so
the ftrace_events list is not "const", and concurrent access needs to be
protected.
This patch thus fixes races between loading/unloading modules and reading
'available_events' or reading/writing 'set_event', etc.
Below shows how to reproduce the race:
# for ((; ;)) { cat /mnt/tracing/available_events; } > /dev/null &
# for ((; ;)) { insmod trace-events-sample.ko; rmmod sample; } &
After a while:
BUG: unable to handle kernel paging request at 0010011c
IP: [<c1080f27>] t_next+0x1b/0x2d
...
Call Trace:
[<c10c90e6>] ? seq_read+0x217/0x30d
[<c10c8ecf>] ? seq_read+0x0/0x30d
[<c10b4c19>] ? vfs_read+0x8f/0x136
[<c10b4fc3>] ? sys_read+0x40/0x65
[<c1002a68>] ? sysenter_do_call+0x12/0x36
[ Impact: fix races when concurrently accessing the ftrace_events list ]
Signed-off-by: Li Zefan <lizf@cn.fujitsu.com>
Acked-by: Steven Rostedt <rostedt@goodmis.org>
Acked-by: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Tom Zanussi <tzanussi@gmail.com>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
LKML-Reference: <4A00F709.3080800@cn.fujitsu.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
-rw-r--r-- | kernel/trace/trace.h | 1 | ||||
-rw-r--r-- | kernel/trace/trace_event_profile.c | 19 | ||||
-rw-r--r-- | kernel/trace/trace_events.c | 20 | ||||
-rw-r--r-- | kernel/trace/trace_events_filter.c | 10 |
4 files changed, 33 insertions, 17 deletions
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h index 7736fe8c1b76..777c6c3a0cde 100644 --- a/kernel/trace/trace.h +++ b/kernel/trace/trace.h | |||
@@ -825,6 +825,7 @@ static int filter_pred_##size(struct filter_pred *pred, void *event, \ | |||
825 | return match; \ | 825 | return match; \ |
826 | } | 826 | } |
827 | 827 | ||
828 | extern struct mutex event_mutex; | ||
828 | extern struct list_head ftrace_events; | 829 | extern struct list_head ftrace_events; |
829 | 830 | ||
830 | extern const char *__start___trace_bprintk_fmt[]; | 831 | extern const char *__start___trace_bprintk_fmt[]; |
diff --git a/kernel/trace/trace_event_profile.c b/kernel/trace/trace_event_profile.c index 7bf2ad65eee5..5b5895afecfe 100644 --- a/kernel/trace/trace_event_profile.c +++ b/kernel/trace/trace_event_profile.c | |||
@@ -10,21 +10,30 @@ | |||
10 | int ftrace_profile_enable(int event_id) | 10 | int ftrace_profile_enable(int event_id) |
11 | { | 11 | { |
12 | struct ftrace_event_call *event; | 12 | struct ftrace_event_call *event; |
13 | int ret = -EINVAL; | ||
13 | 14 | ||
15 | mutex_lock(&event_mutex); | ||
14 | list_for_each_entry(event, &ftrace_events, list) { | 16 | list_for_each_entry(event, &ftrace_events, list) { |
15 | if (event->id == event_id) | 17 | if (event->id == event_id) { |
16 | return event->profile_enable(event); | 18 | ret = event->profile_enable(event); |
19 | break; | ||
20 | } | ||
17 | } | 21 | } |
22 | mutex_unlock(&event_mutex); | ||
18 | 23 | ||
19 | return -EINVAL; | 24 | return ret; |
20 | } | 25 | } |
21 | 26 | ||
22 | void ftrace_profile_disable(int event_id) | 27 | void ftrace_profile_disable(int event_id) |
23 | { | 28 | { |
24 | struct ftrace_event_call *event; | 29 | struct ftrace_event_call *event; |
25 | 30 | ||
31 | mutex_lock(&event_mutex); | ||
26 | list_for_each_entry(event, &ftrace_events, list) { | 32 | list_for_each_entry(event, &ftrace_events, list) { |
27 | if (event->id == event_id) | 33 | if (event->id == event_id) { |
28 | return event->profile_disable(event); | 34 | event->profile_disable(event); |
35 | break; | ||
36 | } | ||
29 | } | 37 | } |
38 | mutex_unlock(&event_mutex); | ||
30 | } | 39 | } |
diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c index f251a150e75e..8d579ff23610 100644 --- a/kernel/trace/trace_events.c +++ b/kernel/trace/trace_events.c | |||
@@ -21,7 +21,7 @@ | |||
21 | 21 | ||
22 | #define TRACE_SYSTEM "TRACE_SYSTEM" | 22 | #define TRACE_SYSTEM "TRACE_SYSTEM" |
23 | 23 | ||
24 | static DEFINE_MUTEX(event_mutex); | 24 | DEFINE_MUTEX(event_mutex); |
25 | 25 | ||
26 | LIST_HEAD(ftrace_events); | 26 | LIST_HEAD(ftrace_events); |
27 | 27 | ||
@@ -80,6 +80,7 @@ static void ftrace_clear_events(void) | |||
80 | { | 80 | { |
81 | struct ftrace_event_call *call; | 81 | struct ftrace_event_call *call; |
82 | 82 | ||
83 | mutex_lock(&event_mutex); | ||
83 | list_for_each_entry(call, &ftrace_events, list) { | 84 | list_for_each_entry(call, &ftrace_events, list) { |
84 | 85 | ||
85 | if (call->enabled) { | 86 | if (call->enabled) { |
@@ -87,6 +88,7 @@ static void ftrace_clear_events(void) | |||
87 | call->unregfunc(); | 88 | call->unregfunc(); |
88 | } | 89 | } |
89 | } | 90 | } |
91 | mutex_unlock(&event_mutex); | ||
90 | } | 92 | } |
91 | 93 | ||
92 | static void ftrace_event_enable_disable(struct ftrace_event_call *call, | 94 | static void ftrace_event_enable_disable(struct ftrace_event_call *call, |
@@ -274,6 +276,9 @@ t_next(struct seq_file *m, void *v, loff_t *pos) | |||
274 | 276 | ||
275 | static void *t_start(struct seq_file *m, loff_t *pos) | 277 | static void *t_start(struct seq_file *m, loff_t *pos) |
276 | { | 278 | { |
279 | mutex_lock(&event_mutex); | ||
280 | if (*pos == 0) | ||
281 | m->private = ftrace_events.next; | ||
277 | return t_next(m, NULL, pos); | 282 | return t_next(m, NULL, pos); |
278 | } | 283 | } |
279 | 284 | ||
@@ -303,6 +308,9 @@ s_next(struct seq_file *m, void *v, loff_t *pos) | |||
303 | 308 | ||
304 | static void *s_start(struct seq_file *m, loff_t *pos) | 309 | static void *s_start(struct seq_file *m, loff_t *pos) |
305 | { | 310 | { |
311 | mutex_lock(&event_mutex); | ||
312 | if (*pos == 0) | ||
313 | m->private = ftrace_events.next; | ||
306 | return s_next(m, NULL, pos); | 314 | return s_next(m, NULL, pos); |
307 | } | 315 | } |
308 | 316 | ||
@@ -319,12 +327,12 @@ static int t_show(struct seq_file *m, void *v) | |||
319 | 327 | ||
320 | static void t_stop(struct seq_file *m, void *p) | 328 | static void t_stop(struct seq_file *m, void *p) |
321 | { | 329 | { |
330 | mutex_unlock(&event_mutex); | ||
322 | } | 331 | } |
323 | 332 | ||
324 | static int | 333 | static int |
325 | ftrace_event_seq_open(struct inode *inode, struct file *file) | 334 | ftrace_event_seq_open(struct inode *inode, struct file *file) |
326 | { | 335 | { |
327 | int ret; | ||
328 | const struct seq_operations *seq_ops; | 336 | const struct seq_operations *seq_ops; |
329 | 337 | ||
330 | if ((file->f_mode & FMODE_WRITE) && | 338 | if ((file->f_mode & FMODE_WRITE) && |
@@ -332,13 +340,7 @@ ftrace_event_seq_open(struct inode *inode, struct file *file) | |||
332 | ftrace_clear_events(); | 340 | ftrace_clear_events(); |
333 | 341 | ||
334 | seq_ops = inode->i_private; | 342 | seq_ops = inode->i_private; |
335 | ret = seq_open(file, seq_ops); | 343 | return seq_open(file, seq_ops); |
336 | if (!ret) { | ||
337 | struct seq_file *m = file->private_data; | ||
338 | |||
339 | m->private = ftrace_events.next; | ||
340 | } | ||
341 | return ret; | ||
342 | } | 344 | } |
343 | 345 | ||
344 | static ssize_t | 346 | static ssize_t |
diff --git a/kernel/trace/trace_events_filter.c b/kernel/trace/trace_events_filter.c index ce07b8186710..7ac691085276 100644 --- a/kernel/trace/trace_events_filter.c +++ b/kernel/trace/trace_events_filter.c | |||
@@ -408,6 +408,7 @@ static void filter_free_subsystem_preds(struct event_subsystem *system) | |||
408 | filter->n_preds = 0; | 408 | filter->n_preds = 0; |
409 | } | 409 | } |
410 | 410 | ||
411 | mutex_lock(&event_mutex); | ||
411 | list_for_each_entry(call, &ftrace_events, list) { | 412 | list_for_each_entry(call, &ftrace_events, list) { |
412 | if (!call->define_fields) | 413 | if (!call->define_fields) |
413 | continue; | 414 | continue; |
@@ -417,6 +418,7 @@ static void filter_free_subsystem_preds(struct event_subsystem *system) | |||
417 | remove_filter_string(call->filter); | 418 | remove_filter_string(call->filter); |
418 | } | 419 | } |
419 | } | 420 | } |
421 | mutex_unlock(&event_mutex); | ||
420 | } | 422 | } |
421 | 423 | ||
422 | static int filter_add_pred_fn(struct filter_parse_state *ps, | 424 | static int filter_add_pred_fn(struct filter_parse_state *ps, |
@@ -567,6 +569,7 @@ static int filter_add_subsystem_pred(struct filter_parse_state *ps, | |||
567 | { | 569 | { |
568 | struct event_filter *filter = system->filter; | 570 | struct event_filter *filter = system->filter; |
569 | struct ftrace_event_call *call; | 571 | struct ftrace_event_call *call; |
572 | int err = 0; | ||
570 | 573 | ||
571 | if (!filter->preds) { | 574 | if (!filter->preds) { |
572 | filter->preds = kzalloc(MAX_FILTER_PRED * sizeof(pred), | 575 | filter->preds = kzalloc(MAX_FILTER_PRED * sizeof(pred), |
@@ -584,8 +587,8 @@ static int filter_add_subsystem_pred(struct filter_parse_state *ps, | |||
584 | filter->preds[filter->n_preds] = pred; | 587 | filter->preds[filter->n_preds] = pred; |
585 | filter->n_preds++; | 588 | filter->n_preds++; |
586 | 589 | ||
590 | mutex_lock(&event_mutex); | ||
587 | list_for_each_entry(call, &ftrace_events, list) { | 591 | list_for_each_entry(call, &ftrace_events, list) { |
588 | int err; | ||
589 | 592 | ||
590 | if (!call->define_fields) | 593 | if (!call->define_fields) |
591 | continue; | 594 | continue; |
@@ -597,12 +600,13 @@ static int filter_add_subsystem_pred(struct filter_parse_state *ps, | |||
597 | if (err) { | 600 | if (err) { |
598 | filter_free_subsystem_preds(system); | 601 | filter_free_subsystem_preds(system); |
599 | parse_error(ps, FILT_ERR_BAD_SUBSYS_FILTER, 0); | 602 | parse_error(ps, FILT_ERR_BAD_SUBSYS_FILTER, 0); |
600 | return err; | 603 | break; |
601 | } | 604 | } |
602 | replace_filter_string(call->filter, filter_string); | 605 | replace_filter_string(call->filter, filter_string); |
603 | } | 606 | } |
607 | mutex_unlock(&event_mutex); | ||
604 | 608 | ||
605 | return 0; | 609 | return err; |
606 | } | 610 | } |
607 | 611 | ||
608 | static void parse_init(struct filter_parse_state *ps, | 612 | static void parse_init(struct filter_parse_state *ps, |