diff options
author | Steven Rostedt <srostedt@redhat.com> | 2009-05-06 21:54:09 -0400 |
---|---|---|
committer | Steven Rostedt <rostedt@goodmis.org> | 2009-05-06 23:11:41 -0400 |
commit | 9456f0fa6d3cb944d3b9fc31c9a244e0362c26ea (patch) | |
tree | 9e6e97199f63c786a79966a5938873a98591e10c /kernel/trace | |
parent | 71e1c8ac42ae4038ddb1367cce7097ab868dc532 (diff) |
tracing: reset ring buffer when removing modules with events
Li Zefan found that there's a race using the event ids of events and
modules. When a module is loaded, an event id is incremented. We only
have 16 bits for event ids (65536) and there is a possible (but highly
unlikely) race that we could load and unload a module that registers
events so many times that the event id counter overflows.
When it overflows, it then restarts and goes looking for available
ids. An id is available if it was added by a module and released.
The race is if you have one module add an id, and then that module is removed.
Another module loaded can use that same event id. But if the old module
still had events in the ring buffer, the new module's callback would
get bogus data. At best (and most likely) the output would just be
garbage. But if the module for some reason used pointers (not recommended)
then this could potentially crash.
The safest thing to do is just reset the ring buffer if a module that
registered events is removed.
[ Impact: prevent unpredictable results of event id overflows ]
Reported-by: Li Zefan <lizf@cn.fujitsu.com>
LKML-Reference: <49FEAFD0.30106@cn.fujitsu.com>
Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
Diffstat (limited to 'kernel/trace')
-rw-r--r-- | kernel/trace/trace.c | 10 | ||||
-rw-r--r-- | kernel/trace/trace.h | 2 | ||||
-rw-r--r-- | kernel/trace/trace_events.c | 9 |
3 files changed, 21 insertions, 0 deletions
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c index 4164a344e72a..dd40d2320346 100644 --- a/kernel/trace/trace.c +++ b/kernel/trace/trace.c | |||
@@ -639,6 +639,16 @@ void tracing_reset_online_cpus(struct trace_array *tr) | |||
639 | tracing_reset(tr, cpu); | 639 | tracing_reset(tr, cpu); |
640 | } | 640 | } |
641 | 641 | ||
642 | void tracing_reset_current(int cpu) | ||
643 | { | ||
644 | tracing_reset(&global_trace, cpu); | ||
645 | } | ||
646 | |||
647 | void tracing_reset_current_online_cpus(void) | ||
648 | { | ||
649 | tracing_reset_online_cpus(&global_trace); | ||
650 | } | ||
651 | |||
642 | #define SAVED_CMDLINES 128 | 652 | #define SAVED_CMDLINES 128 |
643 | #define NO_CMDLINE_MAP UINT_MAX | 653 | #define NO_CMDLINE_MAP UINT_MAX |
644 | static unsigned map_pid_to_cmdline[PID_MAX_DEFAULT+1]; | 654 | static unsigned map_pid_to_cmdline[PID_MAX_DEFAULT+1]; |
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h index 777c6c3a0cde..ba25793ffe67 100644 --- a/kernel/trace/trace.h +++ b/kernel/trace/trace.h | |||
@@ -409,6 +409,8 @@ int tracing_is_enabled(void); | |||
409 | void trace_wake_up(void); | 409 | void trace_wake_up(void); |
410 | void tracing_reset(struct trace_array *tr, int cpu); | 410 | void tracing_reset(struct trace_array *tr, int cpu); |
411 | void tracing_reset_online_cpus(struct trace_array *tr); | 411 | void tracing_reset_online_cpus(struct trace_array *tr); |
412 | void tracing_reset_current(int cpu); | ||
413 | void tracing_reset_current_online_cpus(void); | ||
412 | int tracing_open_generic(struct inode *inode, struct file *filp); | 414 | int tracing_open_generic(struct inode *inode, struct file *filp); |
413 | struct dentry *trace_create_file(const char *name, | 415 | struct dentry *trace_create_file(const char *name, |
414 | mode_t mode, | 416 | mode_t mode, |
diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c index 8d579ff23610..6d2c842a0248 100644 --- a/kernel/trace/trace_events.c +++ b/kernel/trace/trace_events.c | |||
@@ -932,9 +932,11 @@ static void trace_module_remove_events(struct module *mod) | |||
932 | { | 932 | { |
933 | struct ftrace_module_file_ops *file_ops; | 933 | struct ftrace_module_file_ops *file_ops; |
934 | struct ftrace_event_call *call, *p; | 934 | struct ftrace_event_call *call, *p; |
935 | bool found = false; | ||
935 | 936 | ||
936 | list_for_each_entry_safe(call, p, &ftrace_events, list) { | 937 | list_for_each_entry_safe(call, p, &ftrace_events, list) { |
937 | if (call->mod == mod) { | 938 | if (call->mod == mod) { |
939 | found = true; | ||
938 | if (call->enabled) { | 940 | if (call->enabled) { |
939 | call->enabled = 0; | 941 | call->enabled = 0; |
940 | call->unregfunc(); | 942 | call->unregfunc(); |
@@ -957,6 +959,13 @@ static void trace_module_remove_events(struct module *mod) | |||
957 | list_del(&file_ops->list); | 959 | list_del(&file_ops->list); |
958 | kfree(file_ops); | 960 | kfree(file_ops); |
959 | } | 961 | } |
962 | |||
963 | /* | ||
964 | * It is safest to reset the ring buffer if the module being unloaded | ||
965 | * registered any events. | ||
966 | */ | ||
967 | if (found) | ||
968 | tracing_reset_current_online_cpus(); | ||
960 | } | 969 | } |
961 | 970 | ||
962 | static int trace_module_notify(struct notifier_block *self, | 971 | static int trace_module_notify(struct notifier_block *self, |