author     Steven Rostedt (Red Hat) <srostedt@redhat.com>  2013-03-04 23:05:12 -0500
committer  Steven Rostedt <rostedt@goodmis.org>            2013-03-15 00:34:56 -0400
commit     575380da8b46969a2c6a7e14a51056a63b30fe2e
tree       97eb88b78d064e2a3cfae555383f213083d29f45  /kernel/trace/trace_events.c
parent     2a30c11f6a037e2475f3c651bc57e697e79fa963
tracing: Only clear trace buffer on module unload if event was traced
Currently, when a module with events is unloaded, the trace buffer is cleared. This is just a safety net in case the module might have some strange callback when its event is output. But there's no reason to reset the buffer if the module didn't have any of its events traced.

Add a flag to the event "call" structure called WAS_ENABLED that is set whenever the event is enabled and is never cleared. When a module gets unloaded, the trace buffer is cleared only if any of its events have this flag set.

Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
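As an illustration of the sticky-flag idea used by the patch, here is a minimal userspace C sketch; the names (my_event, my_event_enable, my_event_disable, my_module_unload and the EVENT_FL_* bits) are made up for the example and are not the kernel's ftrace API. The "was enabled" bit is set on enable, is never cleared on disable, and is the only thing the unload path checks before deciding whether a reset is needed.

#include <stdbool.h>
#include <stdio.h>

#define EVENT_FL_ENABLED      (1 << 0)
#define EVENT_FL_WAS_ENABLED  (1 << 1)   /* set once, never cleared */

struct my_event {
	const char *name;
	unsigned int flags;
};

static void my_event_enable(struct my_event *ev)
{
	ev->flags |= EVENT_FL_ENABLED;
	/* Sticky: records that the event was traced at least once. */
	ev->flags |= EVENT_FL_WAS_ENABLED;
}

static void my_event_disable(struct my_event *ev)
{
	/* Only ENABLED is cleared; WAS_ENABLED stays set. */
	ev->flags &= ~EVENT_FL_ENABLED;
}

/* Decide whether the (pretend) trace buffer must be reset on unload. */
static bool my_module_unload(struct my_event *events, int nr)
{
	bool clear_trace = false;
	int i;

	for (i = 0; i < nr; i++) {
		if (events[i].flags & EVENT_FL_WAS_ENABLED)
			clear_trace = true;
	}
	return clear_trace;
}

int main(void)
{
	struct my_event events[] = {
		{ "mod_event_a", 0 },
		{ "mod_event_b", 0 },
	};

	/* Unloading without ever tracing an event: no reset needed. */
	printf("reset needed: %d\n", my_module_unload(events, 2));

	/* Enable then disable one event: the sticky flag remains set. */
	my_event_enable(&events[0]);
	my_event_disable(&events[0]);
	printf("reset needed: %d\n", my_module_unload(events, 2));

	return 0;
}

Running the sketch prints 0 for the never-traced case and 1 once any event has been enabled, mirroring when the patch actually clears the real trace buffer.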
Diffstat (limited to 'kernel/trace/trace_events.c')
-rw-r--r--  kernel/trace/trace_events.c  12
1 file changed, 8 insertions(+), 4 deletions(-)
diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
index 0f1307a29fcf..9a7dc4bf1171 100644
--- a/kernel/trace/trace_events.c
+++ b/kernel/trace/trace_events.c
@@ -245,6 +245,9 @@ static int ftrace_event_enable_disable(struct ftrace_event_file *file,
 				break;
 			}
 			file->flags |= FTRACE_EVENT_FL_ENABLED;
+
+			/* WAS_ENABLED gets set but never cleared. */
+			call->flags |= TRACE_EVENT_FL_WAS_ENABLED;
 		}
 		break;
 	}
@@ -1626,12 +1629,13 @@ static void trace_module_remove_events(struct module *mod)
 {
 	struct ftrace_module_file_ops *file_ops;
 	struct ftrace_event_call *call, *p;
-	bool found = false;
+	bool clear_trace = false;
 
 	down_write(&trace_event_mutex);
 	list_for_each_entry_safe(call, p, &ftrace_events, list) {
 		if (call->mod == mod) {
-			found = true;
+			if (call->flags & TRACE_EVENT_FL_WAS_ENABLED)
+				clear_trace = true;
 			__trace_remove_event_call(call);
 		}
 	}
@@ -1648,9 +1652,9 @@ static void trace_module_remove_events(struct module *mod)
 
 	/*
 	 * It is safest to reset the ring buffer if the module being unloaded
-	 * registered any events.
+	 * registered any events that were used.
 	 */
-	if (found)
+	if (clear_trace)
 		tracing_reset_current_online_cpus();
 	up_write(&trace_event_mutex);
 }