path: root/kernel/trace
author	Masami Hiramatsu <masami.hiramatsu.pt@hitachi.com>	2013-05-09 01:44:29 -0400
committer	Steven Rostedt <rostedt@goodmis.org>	2013-05-09 20:14:25 -0400
commit	1cf4c0732db3cd3c49cadbc60ff6bda08604e6fa (patch)
tree	74133737a3ecea18847788c10fae7bb474f1aaa8 /kernel/trace
parent	30052170dcc256c18a43fb3e76577a67394543f8 (diff)
tracing: Modify soft-mode only if there's no other referrer
Modify the soft-mode flag only if there is no other soft-mode referrer (currently only the ftrace triggers), by using a reference counter in each ftrace_event_file. Without this fix, adding and removing several different enable/disable_event triggers on the same event can clear the soft-mode bit from the ftrace_event_file.

This also happens with a typo of the glob when setting triggers. e.g.

 # echo vfs_symlink:enable_event:net:netif_rx > set_ftrace_filter
 # cat events/net/netif_rx/enable
 0*
 # echo typo_func:enable_event:net:netif_rx > set_ftrace_filter
 # cat events/net/netif_rx/enable
 0
 # cat set_ftrace_filter
 #### all functions enabled ####
 vfs_symlink:enable_event:net:netif_rx:unlimited

As above, we still have a trigger, but soft-mode is gone.

Link: http://lkml.kernel.org/r/20130509054429.30398.7464.stgit@mhiramat-M0-7522

Cc: Srikar Dronamraju <srikar@linux.vnet.ibm.com>
Cc: Oleg Nesterov <oleg@redhat.com>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: David Sharp <dhsharp@google.com>
Cc: Hiraku Toyooka <hiraku.toyooka.gu@hitachi.com>
Cc: Tom Zanussi <tom.zanussi@intel.com>
Signed-off-by: Masami Hiramatsu <masami.hiramatsu.pt@hitachi.com>
Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
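For illustration only (not part of the commit), here is a minimal user-space sketch of the reference-counting idea the patch applies: the soft-mode flag is set only on the 0 -> 1 transition of the counter and cleared only on the 1 -> 0 transition, so one trigger being removed cannot disturb another. The struct, function names, and stand-in flag below are assumptions for the sketch and are not the kernel's ftrace_event_file API.

/* Sketch: reference-counted soft-mode toggling (user-space C11, not kernel code). */
#include <stdio.h>
#include <stdbool.h>
#include <stdatomic.h>

struct event_file {
	atomic_int sm_ref;	/* number of soft-mode referrers (triggers) */
	bool soft_mode;		/* stand-in for FTRACE_EVENT_FL_SOFT_MODE */
};

/* A trigger requests soft mode: set the flag only on the 0 -> 1 transition. */
static void soft_mode_get(struct event_file *file)
{
	if (atomic_fetch_add(&file->sm_ref, 1) > 0)
		return;		/* another referrer already holds soft mode */
	file->soft_mode = true;
}

/* A trigger releases soft mode: clear the flag only when the last referrer leaves. */
static void soft_mode_put(struct event_file *file)
{
	if (atomic_fetch_sub(&file->sm_ref, 1) > 1)
		return;		/* other referrers remain, keep soft mode */
	file->soft_mode = false;
}

int main(void)
{
	struct event_file file = { .sm_ref = 0, .soft_mode = false };

	soft_mode_get(&file);	/* first trigger on the event */
	soft_mode_get(&file);	/* second trigger (e.g. a mistyped glob) */
	soft_mode_put(&file);	/* removing one trigger must not clear the flag */
	printf("soft_mode after one put: %d\n", file.soft_mode);	/* prints 1 */
	soft_mode_put(&file);	/* last referrer gone */
	printf("soft_mode after last put: %d\n", file.soft_mode);	/* prints 0 */
	return 0;
}

Without the counter, the second removal path would clear the flag unconditionally, which is exactly the failure shown in the example above.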
Diffstat (limited to 'kernel/trace')
-rw-r--r--	kernel/trace/trace_events.c	12
1 file changed, 10 insertions(+), 2 deletions(-)
diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
index 915c136d7bd1..8be1224046f8 100644
--- a/kernel/trace/trace_events.c
+++ b/kernel/trace/trace_events.c
@@ -251,7 +251,8 @@ static int __ftrace_event_enable_disable(struct ftrace_event_file *file,
 	switch (enable) {
 	case 0:
 		/*
-		 * When soft_disable is set and enable is cleared, we want
+		 * When soft_disable is set and enable is cleared, the sm_ref
+		 * reference counter is decremented. If it reaches 0, we want
 		 * to clear the SOFT_DISABLED flag but leave the event in the
 		 * state that it was. That is, if the event was enabled and
 		 * SOFT_DISABLED isn't set, then do nothing. But if SOFT_DISABLED
@@ -263,6 +264,8 @@ static int __ftrace_event_enable_disable(struct ftrace_event_file *file,
 		 * "soft enable"s (clearing the SOFT_DISABLED bit) wont work.
 		 */
 		if (soft_disable) {
+			if (atomic_dec_return(&file->sm_ref) > 0)
+				break;
 			disable = file->flags & FTRACE_EVENT_FL_SOFT_DISABLED;
 			clear_bit(FTRACE_EVENT_FL_SOFT_MODE_BIT, &file->flags);
 		} else
@@ -291,8 +294,11 @@ static int __ftrace_event_enable_disable(struct ftrace_event_file *file,
 		 */
 		if (!soft_disable)
 			clear_bit(FTRACE_EVENT_FL_SOFT_DISABLED_BIT, &file->flags);
-		else
+		else {
+			if (atomic_inc_return(&file->sm_ref) > 1)
+				break;
 			set_bit(FTRACE_EVENT_FL_SOFT_MODE_BIT, &file->flags);
+		}
 
 		if (!(file->flags & FTRACE_EVENT_FL_ENABLED)) {
 
@@ -1540,6 +1546,7 @@ __trace_add_new_event(struct ftrace_event_call *call,
 
 	file->event_call = call;
 	file->tr = tr;
+	atomic_set(&file->sm_ref, 0);
 	list_add(&file->list, &tr->events);
 
 	return event_create_dir(tr->event_dir, file, id, enable, filter, format);
@@ -1562,6 +1569,7 @@ __trace_early_add_new_event(struct ftrace_event_call *call,
 
 	file->event_call = call;
 	file->tr = tr;
+	atomic_set(&file->sm_ref, 0);
 	list_add(&file->list, &tr->events);
 
 	return 0;