author	Steven Rostedt <srostedt@redhat.com>	2009-05-06 21:54:09 -0400
committer	Steven Rostedt <rostedt@goodmis.org>	2009-05-06 23:11:41 -0400
commit	9456f0fa6d3cb944d3b9fc31c9a244e0362c26ea (patch)
tree	9e6e97199f63c786a79966a5938873a98591e10c /kernel/trace/trace_events.c
parent	71e1c8ac42ae4038ddb1367cce7097ab868dc532 (diff)
tracing: reset ring buffer when removing modules with events
Li Zefan found that there is a race involving the event ids used by events and modules. When a module is loaded, an event id is incremented for each event it registers. We only have 16 bits for event ids (65536), so there is a possible (but highly unlikely) race: a module that registers events could be loaded and unloaded so many times that the event id counter overflows.

When the counter overflows, it restarts and goes looking for available ids. An id is available if it was added by a module and later released.

The race occurs when one module adds an id and is then removed, and another module loaded afterwards reuses that same event id. If the old module still had events in the ring buffer, the new module's callback would be handed bogus data. At best (and most likely) the output would just be garbage, but if the module for some reason stored pointers (not recommended), this could potentially crash.

The safest thing to do is simply reset the ring buffer whenever a module that registered events is removed.

[ Impact: prevent unpredictable results of event id overflows ]

Reported-by: Li Zefan <lizf@cn.fujitsu.com>
LKML-Reference: <49FEAFD0.30106@cn.fujitsu.com>
Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
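To make the overflow scenario concrete, here is a minimal userspace C sketch (not kernel code, and not how the tracer allocates ids internally) of a 16-bit counter wrapping around and handing out an id that an earlier registration already received; the names next_event_id and alloc_event_id are hypothetical and exist only to illustrate the reuse hazard the patch guards against:

/*
 * Hypothetical stand-in for a 16-bit event id counter; not kernel code.
 */
#include <stdio.h>
#include <stdint.h>

static uint16_t next_event_id = 1;	/* wraps back to 0 after 65535 */

static uint16_t alloc_event_id(void)
{
	/*
	 * After a wrap, an id can be handed out again even though records
	 * stamped with it by a since-unloaded module still sit in the ring
	 * buffer; the new owner's callback would then decode stale data.
	 */
	return next_event_id++;
}

int main(void)
{
	uint16_t first = alloc_event_id();

	/* Simulate enough register/unregister cycles to wrap the counter. */
	for (unsigned long i = 0; i < 65535; i++)
		alloc_event_id();

	printf("first id: %u, id after wrap: %u (collision)\n",
	       (unsigned int)first, (unsigned int)alloc_event_id());
	return 0;
}

Run as a normal C program, it prints the same id twice; that is exactly the collision that would let a newly loaded module's callback be invoked on an older module's buffered events, which is why the patch simply resets the buffer on module removal.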
Diffstat (limited to 'kernel/trace/trace_events.c')
-rw-r--r--	kernel/trace/trace_events.c	9
1 file changed, 9 insertions(+), 0 deletions(-)
diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
index 8d579ff23610..6d2c842a0248 100644
--- a/kernel/trace/trace_events.c
+++ b/kernel/trace/trace_events.c
@@ -932,9 +932,11 @@ static void trace_module_remove_events(struct module *mod)
 {
 	struct ftrace_module_file_ops *file_ops;
 	struct ftrace_event_call *call, *p;
+	bool found = false;
 
 	list_for_each_entry_safe(call, p, &ftrace_events, list) {
 		if (call->mod == mod) {
+			found = true;
 			if (call->enabled) {
 				call->enabled = 0;
 				call->unregfunc();
@@ -957,6 +959,13 @@ static void trace_module_remove_events(struct module *mod)
 		list_del(&file_ops->list);
 		kfree(file_ops);
 	}
+
+	/*
+	 * It is safest to reset the ring buffer if the module being unloaded
+	 * registered any events.
+	 */
+	if (found)
+		tracing_reset_current_online_cpus();
 }
 
 static int trace_module_notify(struct notifier_block *self,