author	Tom Zanussi <tzanussi@gmail.com>	2009-04-13 04:17:50 -0400
committer	Ingo Molnar <mingo@elte.hu>	2009-04-13 18:03:55 -0400
commit	0a19e53c1514ad8e9c3cbab40c6c3f52c86f403d (patch)
tree	1089246a64f65b2b3c2ba29182ed4f9ce9ad375e /kernel/trace/trace_events.c
parent	b5c851a88a369854c04e511cefb84ea2d0cfa209 (diff)
tracing/filters: allow on-the-fly filter switching
This patch allows event filters to be safely removed or switched on-the-fly while avoiding the use of rcu or the suspension of tracing of previous versions.

It does this by adding a new filter_pred_none() predicate function which does nothing, and by never deallocating either the predicates or any of the filter_pred members used in matching; the predicate lists are allocated and initialized during ftrace_event_calls initialization.

Whenever a filter is removed or replaced, the filter_pred_* functions currently in use by the affected ftrace_event_call are immediately switched over to the filter_pred_none() function, while the rest of the filter_pred members are left intact, allowing any currently executing filter_pred_* functions to finish up, using the values they're currently using.

In the case of filter replacement, the new predicate values are copied into the old predicates after the above step, and the filter_pred_none() functions are replaced by the filter_pred_* functions for the new filter. In this case, it is possible though very unlikely that a previous filter_pred_* is still running even after the filter_pred_none() switch and the switch to the new filter_pred_*. In that case, however, because nothing has been deallocated in the filter_pred, the worst that can happen is that the old filter_pred_* function sees the new values and as a result produces either a false positive or a false negative, depending on the values it finds.

So one downside to this method is that, rarely, it can produce a bad match during the filter switch, but it should be possible to live with that, IMHO.

The other downside is that, at least in this patch, the predicate lists are always pre-allocated, taking up memory from the start. They could probably be allocated on first use and de-allocated when tracing is completely stopped - if this patch makes sense, I could create another one to do that later on.

It also places a restriction on the size of __arrays in events, currently set to 128, since they can't be larger than the now-embedded str_val arrays in the filter_pred struct.

Signed-off-by: Tom Zanussi <tzanussi@gmail.com>
Acked-by: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Steven Rostedt <rostedt@goodmis.org>
Cc: paulmck@linux.vnet.ibm.com
LKML-Reference: <1239610670.6660.49.camel@tropicana>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
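The predicate functions named above live in kernel/trace/trace_events_filter.c rather than in the file changed below. As a rough sketch of the removal path described in the message (the struct layout and exact signatures are assumptions here, not part of this diff), the idea boils down to:

static int filter_pred_none(struct filter_pred *pred, void *event)
{
	/* no-op predicate: does nothing, never matches */
	return 0;
}

void filter_disable_preds(struct ftrace_event_call *call)
{
	int i;

	/* mark the filter as empty ... */
	call->n_preds = 0;

	/* ... and point every pre-allocated predicate at the no-op
	 * function, leaving the rest of each filter_pred intact so
	 * that any predicate call still in flight keeps reading
	 * valid (if possibly stale) values. */
	for (i = 0; i < MAX_FILTER_PRED; i++)
		call->preds[i]->fn = filter_pred_none;
}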
Diffstat (limited to 'kernel/trace/trace_events.c')
-rw-r--r--  kernel/trace/trace_events.c | 9
1 file changed, 6 insertions(+), 3 deletions(-)
diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
index 789e14eb09a5..ead68ac99191 100644
--- a/kernel/trace/trace_events.c
+++ b/kernel/trace/trace_events.c
@@ -481,7 +481,7 @@ event_filter_read(struct file *filp, char __user *ubuf, size_t cnt,
 
 	trace_seq_init(s);
 
-	filter_print_preds(call->preds, s);
+	filter_print_preds(call->preds, call->n_preds, s);
 	r = simple_read_from_buffer(ubuf, cnt, ppos, s->buffer, s->len);
 
 	kfree(s);
@@ -516,7 +516,7 @@ event_filter_write(struct file *filp, const char __user *ubuf, size_t cnt,
 	}
 
 	if (pred->clear) {
-		filter_free_preds(call);
+		filter_disable_preds(call);
 		filter_free_pred(pred);
 		return cnt;
 	}
@@ -527,6 +527,8 @@ event_filter_write(struct file *filp, const char __user *ubuf, size_t cnt,
 		return err;
 	}
 
+	filter_free_pred(pred);
+
 	*ppos += cnt;
 
 	return cnt;
@@ -549,7 +551,7 @@ subsystem_filter_read(struct file *filp, char __user *ubuf, size_t cnt,
 
 	trace_seq_init(s);
 
-	filter_print_preds(system->preds, s);
+	filter_print_preds(system->preds, system->n_preds, s);
 	r = simple_read_from_buffer(ubuf, cnt, ppos, s->buffer, s->len);
 
 	kfree(s);
@@ -712,6 +714,7 @@ event_subsystem_dir(const char *name, struct dentry *d_events)
 	list_add(&system->list, &event_subsystems);
 
 	system->preds = NULL;
+	system->n_preds = 0;
 
 	entry = debugfs_create_file("filter", 0644, system->entry, system,
 				    &ftrace_subsystem_filter_fops);
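The unconditional filter_free_pred(pred) added to event_filter_write() above follows from the copy-based scheme in the commit message: adding a predicate now copies its values into one of the pre-allocated slots instead of taking ownership of the caller's temporary filter_pred, so the temporary can always be freed. A hedged sketch of that replacement step (the helper name and fields are assumptions, not shown in this diff):

static int filter_set_pred(struct filter_pred *dest,
			   struct filter_pred *src,
			   int (*fn)(struct filter_pred *pred, void *event))
{
	/* copy the new values into the already-allocated predicate */
	*dest = *src;
	if (src->field_name) {
		/* the source string is owned by the caller, so duplicate it */
		dest->field_name = kstrdup(src->field_name, GFP_KERNEL);
		if (!dest->field_name)
			return -ENOMEM;
	}
	/* only after the values are in place is the real predicate
	 * function installed, replacing filter_pred_none() */
	dest->fn = fn;

	return 0;
}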