author		Tom Zanussi <tom.zanussi@linux.intel.com>	2013-10-24 09:59:29 -0400
committer	Steven Rostedt <rostedt@goodmis.org>	2013-12-21 22:02:17 -0500
commit		bac5fb97a173aeef8296b3efdb552e3489d55179
tree		2acb18186a608cca2eda53f6e110e792c1b6edbe
parent		2875a08b2d1da7bae58fc01badb9b0ef1e8fc1a4
tracing: Add and use generic set_trigger_filter() implementation
Add a generic event_command.set_trigger_filter() op implementation and
have the current set of trigger commands use it - this essentially
gives them all support for filters.
Syntactically, filters are supported by adding 'if <filter>' just
after the command, in which case only events matching the filter will
invoke the trigger. For example, to add a filter to an
enable/disable_event command:
echo 'enable_event:system:event if common_pid == 999' > \
.../othersys/otherevent/trigger
The above command will only enable the system:event event if the
common_pid field in the othersys:otherevent event is 999.
As another example, to add a filter to a stacktrace command:
echo 'stacktrace if common_pid == 999' > \
.../somesys/someevent/trigger
The above command will only trigger a stacktrace if the common_pid
field in the event is 999.
The filter syntax is the same as that described in the 'Event
filtering' section of Documentation/trace/events.txt.
Because triggers can now use filters, the trigger-invoking logic needs
to be moved in those cases - e.g. for ftrace_raw_event_calls, if a
trigger has a filter associated with it, the trigger invocation now
needs to happen after the { assign; } part of the call, in order for
the trigger condition to be tested.
There's still a SOFT_DISABLED-only check at the top of e.g. the
ftrace_raw_events function, so when an event is soft disabled but not
because of the presence of a trigger, the original SOFT_DISABLED
behavior remains unchanged.
There's also a bit of trickiness in that some triggers need to avoid
being invoked while an event is currently in the process of being
logged, since the trigger may itself log data into the trace buffer.
Thus we make sure the current event is committed before invoking those
triggers. To do that, we split the trigger invocation in two - the
first part (event_triggers_call()) checks the filter using the current
trace record; if a command has the post_trigger flag set, it sets a
bit for itself in the return value, otherwise it directly invokes the
trigger. Once every command has either been invoked or has set its
return flag, event_triggers_call() returns. The current record is
then either committed or discarded; if any commands have deferred
their triggers, those commands are finally invoked following the close
of the current event by event_triggers_post_call().
To simplify the above and make it more efficient, the TRIGGER_COND bit
is introduced, which is set only if a soft-disabled trigger needs to
use the log record for filter testing or needs to wait until the
current log record is closed.
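To make the two-phase flow above concrete, here is a small stand-alone
C model. It is illustrative only: the fixed-size trigger array, the
filter callback, the ETT_* values and the sample functions are
simplified stand-ins for the kernel's RCU trigger list, event_filter
and event_command structures.

#include <stdbool.h>
#include <stdio.h>

/* Simplified stand-ins for the kernel structures; only what the sketch needs. */
enum event_trigger_type { ETT_NONE = 0, ETT_TRACE_ONOFF = 1, ETT_STACKTRACE = 4 };

struct event_trigger_data {
	enum event_trigger_type type;
	bool post_trigger;			/* must wait for the record to be closed */
	bool (*filter)(const void *rec);	/* NULL means no 'if' clause */
	void (*func)(struct event_trigger_data *data);
};

/* Phase one: called with the current record; defers any post_trigger command. */
static enum event_trigger_type
event_triggers_call(struct event_trigger_data *triggers, int n, const void *rec)
{
	enum event_trigger_type tt = ETT_NONE;
	int i;

	for (i = 0; i < n; i++) {
		struct event_trigger_data *data = &triggers[i];

		if (rec && data->filter && !data->filter(rec))
			continue;		/* record doesn't match the filter */
		if (rec && data->post_trigger) {
			tt |= data->type;	/* remember it for phase two */
			continue;
		}
		data->func(data);		/* safe to invoke right away */
	}
	return tt;
}

/* Phase two: run the deferred triggers once the record is committed/discarded. */
static void
event_triggers_post_call(struct event_trigger_data *triggers, int n,
			 enum event_trigger_type tt)
{
	int i;

	for (i = 0; i < n; i++)
		if (triggers[i].type & tt)
			triggers[i].func(&triggers[i]);
}

static bool pid_is_999(const void *rec) { return *(const int *)rec == 999; }
static void do_stacktrace(struct event_trigger_data *d) { (void)d; puts("stacktrace"); }
static void do_traceoff(struct event_trigger_data *d)   { (void)d; puts("traceoff"); }

int main(void)
{
	struct event_trigger_data triggers[] = {
		{ ETT_STACKTRACE,  true,  pid_is_999, do_stacktrace },
		{ ETT_TRACE_ONOFF, false, NULL,       do_traceoff },
	};
	int common_pid = 999;	/* stands in for the record's common_pid field */
	enum event_trigger_type tt;

	tt = event_triggers_call(triggers, 2, &common_pid);
	/* ...the trace record would be committed or discarded here... */
	event_triggers_post_call(triggers, 2, tt);
	return 0;
}

In the real handlers the record is committed or discarded between the
two calls, as the ftrace.h and trace_syscalls.c hunks below show.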
The syscall event invocation code is also changed in analogous ways.
Because event triggers need to be able to create and free filters,
this also adds a couple of external wrappers (create_event_filter()
and free_event_filter()) for the existing create_filter and
free_filter functions, which are too generic to be made extern
functions themselves.
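The wrappers themselves are trivial; a minimal sketch of the pattern
(illustrative only, with a placeholder helper body) looks like this:

struct event_filter;	/* opaque outside the filter code */

/* Too generic to export directly, so it stays static in trace_events_filter.c. */
static void __free_filter(struct event_filter *filter)
{
	/* placeholder: the real helper frees the filter's predicates and strings */
	(void)filter;
}

/* The narrow, exported name that the trigger code calls. */
void free_event_filter(struct event_filter *filter)
{
	__free_filter(filter);
}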
Link: http://lkml.kernel.org/r/7164930759d8719ef460357f143d995406e4eead.1382622043.git.tom.zanussi@linux.intel.com
Signed-off-by: Tom Zanussi <tom.zanussi@linux.intel.com>
Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
 include/linux/ftrace_event.h        |   9
 include/trace/ftrace.h              |  48
 kernel/trace/trace.h                |   5
 kernel/trace/trace_events_filter.c  |  12
 kernel/trace/trace_events_trigger.c | 170
 kernel/trace/trace_syscalls.c       |  46
 6 files changed, 263 insertions(+), 27 deletions(-)
diff --git a/include/linux/ftrace_event.h b/include/linux/ftrace_event.h
index 2f73c3988fc7..03d2db22ad0d 100644
--- a/include/linux/ftrace_event.h
+++ b/include/linux/ftrace_event.h
@@ -1,3 +1,4 @@
+
 #ifndef _LINUX_FTRACE_EVENT_H
 #define _LINUX_FTRACE_EVENT_H
 
@@ -265,6 +266,7 @@ enum {
 	FTRACE_EVENT_FL_SOFT_MODE_BIT,
 	FTRACE_EVENT_FL_SOFT_DISABLED_BIT,
 	FTRACE_EVENT_FL_TRIGGER_MODE_BIT,
+	FTRACE_EVENT_FL_TRIGGER_COND_BIT,
 };
 
 /*
@@ -277,6 +279,7 @@ enum {
  * SOFT_DISABLED - When set, do not trace the event (even though its
  *                 tracepoint may be enabled)
  * TRIGGER_MODE  - When set, invoke the triggers associated with the event
+ * TRIGGER_COND  - When set, one or more triggers has an associated filter
  */
 enum {
 	FTRACE_EVENT_FL_ENABLED = (1 << FTRACE_EVENT_FL_ENABLED_BIT),
@@ -286,6 +289,7 @@ enum {
 	FTRACE_EVENT_FL_SOFT_MODE = (1 << FTRACE_EVENT_FL_SOFT_MODE_BIT),
 	FTRACE_EVENT_FL_SOFT_DISABLED = (1 << FTRACE_EVENT_FL_SOFT_DISABLED_BIT),
 	FTRACE_EVENT_FL_TRIGGER_MODE = (1 << FTRACE_EVENT_FL_TRIGGER_MODE_BIT),
+	FTRACE_EVENT_FL_TRIGGER_COND = (1 << FTRACE_EVENT_FL_TRIGGER_COND_BIT),
 };
 
 struct ftrace_event_file {
@@ -361,7 +365,10 @@ extern int filter_check_discard(struct ftrace_event_file *file, void *rec,
 extern int call_filter_check_discard(struct ftrace_event_call *call, void *rec,
 				     struct ring_buffer *buffer,
 				     struct ring_buffer_event *event);
-extern void event_triggers_call(struct ftrace_event_file *file);
+extern enum event_trigger_type event_triggers_call(struct ftrace_event_file *file,
+						   void *rec);
+extern void event_triggers_post_call(struct ftrace_event_file *file,
+				     enum event_trigger_type tt);
 
 enum {
 	FILTER_OTHER = 0,
diff --git a/include/trace/ftrace.h b/include/trace/ftrace.h
index 0a48bff964bd..0962968b8b37 100644
--- a/include/trace/ftrace.h
+++ b/include/trace/ftrace.h
@@ -418,6 +418,8 @@ static inline notrace int ftrace_get_offsets_##call( \
  *	struct ftrace_event_file *ftrace_file = __data;
  *	struct ftrace_event_call *event_call = ftrace_file->event_call;
  *	struct ftrace_data_offsets_<call> __maybe_unused __data_offsets;
+ *	unsigned long eflags = ftrace_file->flags;
+ *	enum event_trigger_type __tt = ETT_NONE;
  *	struct ring_buffer_event *event;
  *	struct ftrace_raw_<call> *entry; <-- defined in stage 1
  *	struct ring_buffer *buffer;
@@ -425,9 +427,12 @@ static inline notrace int ftrace_get_offsets_##call( \
  *	int __data_size;
  *	int pc;
  *
- *	if (test_bit(FTRACE_EVENT_FL_SOFT_DISABLED_BIT,
- *		     &ftrace_file->flags))
- *		return;
+ *	if (!(eflags & FTRACE_EVENT_FL_TRIGGER_COND)) {
+ *		if (eflags & FTRACE_EVENT_FL_TRIGGER_MODE)
+ *			event_triggers_call(ftrace_file, NULL);
+ *		if (eflags & FTRACE_EVENT_FL_SOFT_DISABLED)
+ *			return;
+ *	}
  *
  *	local_save_flags(irq_flags);
  *	pc = preempt_count();
@@ -445,8 +450,17 @@ static inline notrace int ftrace_get_offsets_##call( \
  *	{ <assign>; } <-- Here we assign the entries by the __field and
  *			  __array macros.
  *
- *	if (!filter_check_discard(ftrace_file, entry, buffer, event))
+ *	if (eflags & FTRACE_EVENT_FL_TRIGGER_COND)
+ *		__tt = event_triggers_call(ftrace_file, entry);
+ *
+ *	if (test_bit(FTRACE_EVENT_FL_SOFT_DISABLED_BIT,
+ *		     &ftrace_file->flags))
+ *		ring_buffer_discard_commit(buffer, event);
+ *	else if (!filter_check_discard(ftrace_file, entry, buffer, event))
  *		trace_buffer_unlock_commit(buffer, event, irq_flags, pc);
+ *
+ *	if (__tt)
+ *		event_triggers_post_call(ftrace_file, __tt);
  * }
  *
  * static struct trace_event ftrace_event_type_<call> = {
@@ -532,6 +546,8 @@ ftrace_raw_event_##call(void *__data, proto) \
 	struct ftrace_event_file *ftrace_file = __data; \
 	struct ftrace_event_call *event_call = ftrace_file->event_call; \
 	struct ftrace_data_offsets_##call __maybe_unused __data_offsets;\
+	unsigned long eflags = ftrace_file->flags; \
+	enum event_trigger_type __tt = ETT_NONE; \
 	struct ring_buffer_event *event; \
 	struct ftrace_raw_##call *entry; \
 	struct ring_buffer *buffer; \
@@ -539,13 +555,12 @@ ftrace_raw_event_##call(void *__data, proto) \
 	int __data_size; \
 	int pc; \
 	 \
-	if (test_bit(FTRACE_EVENT_FL_TRIGGER_MODE_BIT, \
-		     &ftrace_file->flags)) \
-		event_triggers_call(ftrace_file); \
-	 \
-	if (test_bit(FTRACE_EVENT_FL_SOFT_DISABLED_BIT, \
-		     &ftrace_file->flags)) \
-		return; \
+	if (!(eflags & FTRACE_EVENT_FL_TRIGGER_COND)) { \
+		if (eflags & FTRACE_EVENT_FL_TRIGGER_MODE) \
+			event_triggers_call(ftrace_file, NULL); \
+		if (eflags & FTRACE_EVENT_FL_SOFT_DISABLED) \
+			return; \
+	} \
 	 \
 	local_save_flags(irq_flags); \
 	pc = preempt_count(); \
@@ -564,8 +579,17 @@ ftrace_raw_event_##call(void *__data, proto) \
 	 \
 	{ assign; } \
 	 \
-	if (!filter_check_discard(ftrace_file, entry, buffer, event)) \
+	if (eflags & FTRACE_EVENT_FL_TRIGGER_COND) \
+		__tt = event_triggers_call(ftrace_file, entry); \
+	 \
+	if (test_bit(FTRACE_EVENT_FL_SOFT_DISABLED_BIT, \
+		     &ftrace_file->flags)) \
+		ring_buffer_discard_commit(buffer, event); \
+	else if (!filter_check_discard(ftrace_file, entry, buffer, event)) \
 		trace_buffer_unlock_commit(buffer, event, irq_flags, pc); \
+	 \
+	if (__tt) \
+		event_triggers_post_call(ftrace_file, __tt); \
 }
 /*
  * The ftrace_test_probe is compiled out, it is only here as a build time check
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
index ccbd8104cf99..433bfc5dd576 100644
--- a/kernel/trace/trace.h
+++ b/kernel/trace/trace.h
@@ -1,3 +1,4 @@
+
 #ifndef _LINUX_KERNEL_TRACE_H
 #define _LINUX_KERNEL_TRACE_H
 
@@ -1020,6 +1021,10 @@ extern int apply_subsystem_event_filter(struct ftrace_subsystem_dir *dir,
 extern void print_subsystem_event_filter(struct event_subsystem *system,
 					 struct trace_seq *s);
 extern int filter_assign_type(const char *type);
+extern int create_event_filter(struct ftrace_event_call *call,
+			       char *filter_str, bool set_str,
+			       struct event_filter **filterp);
+extern void free_event_filter(struct event_filter *filter);
 
 struct ftrace_event_field *
 trace_find_event_field(struct ftrace_event_call *call, char *name);
diff --git a/kernel/trace/trace_events_filter.c b/kernel/trace/trace_events_filter.c
index 2468f56dc5db..8a8631926a07 100644
--- a/kernel/trace/trace_events_filter.c
+++ b/kernel/trace/trace_events_filter.c
@@ -799,6 +799,11 @@ static void __free_filter(struct event_filter *filter)
 	kfree(filter);
 }
 
+void free_event_filter(struct event_filter *filter)
+{
+	__free_filter(filter);
+}
+
 void destroy_call_preds(struct ftrace_event_call *call)
 {
 	__free_filter(call->filter);
@@ -1938,6 +1943,13 @@ static int create_filter(struct ftrace_event_call *call,
 	return err;
 }
 
+int create_event_filter(struct ftrace_event_call *call,
+			char *filter_str, bool set_str,
+			struct event_filter **filterp)
+{
+	return create_filter(call, filter_str, set_str, filterp);
+}
+
 /**
  * create_system_filter - create a filter for an event_subsystem
  * @system: event_subsystem to create a filter for
diff --git a/kernel/trace/trace_events_trigger.c b/kernel/trace/trace_events_trigger.c
index 45e48b109d51..f5b3f780fda4 100644
--- a/kernel/trace/trace_events_trigger.c
+++ b/kernel/trace/trace_events_trigger.c
@@ -31,6 +31,9 @@ static DEFINE_MUTEX(trigger_cmd_mutex);
 static void
 trigger_data_free(struct event_trigger_data *data)
 {
+	if (data->cmd_ops->set_filter)
+		data->cmd_ops->set_filter(NULL, data, NULL);
+
 	synchronize_sched(); /* make sure current triggers exit before free */
 	kfree(data);
 }
@@ -38,27 +41,78 @@ trigger_data_free(struct event_trigger_data *data)
 /**
  * event_triggers_call - Call triggers associated with a trace event
  * @file: The ftrace_event_file associated with the event
+ * @rec: The trace entry for the event, NULL for unconditional invocation
  *
  * For each trigger associated with an event, invoke the trigger
- * function registered with the associated trigger command.
+ * function registered with the associated trigger command.  If rec is
+ * non-NULL, it means that the trigger requires further processing and
+ * shouldn't be unconditionally invoked.  If rec is non-NULL and the
+ * trigger has a filter associated with it, rec will be checked against
+ * the filter and if the record matches the trigger will be invoked.
+ * If the trigger is a 'post_trigger', meaning it shouldn't be invoked
+ * in any case until the current event is written, the trigger
+ * function isn't invoked but the bit associated with the deferred
+ * trigger is set in the return value.
+ *
+ * Returns an enum event_trigger_type value containing a set bit for
+ * any trigger that should be deferred, ETT_NONE if nothing to defer.
  *
  * Called from tracepoint handlers (with rcu_read_lock_sched() held).
  *
  * Return: an enum event_trigger_type value containing a set bit for
  *	any trigger that should be deferred, ETT_NONE if nothing to defer.
  */
-void event_triggers_call(struct ftrace_event_file *file)
+enum event_trigger_type
+event_triggers_call(struct ftrace_event_file *file, void *rec)
 {
 	struct event_trigger_data *data;
+	enum event_trigger_type tt = ETT_NONE;
 
 	if (list_empty(&file->triggers))
-		return;
+		return tt;
 
-	list_for_each_entry_rcu(data, &file->triggers, list)
+	list_for_each_entry_rcu(data, &file->triggers, list) {
+		if (!rec) {
+			data->ops->func(data);
+			continue;
+		}
+		if (data->filter && !filter_match_preds(data->filter, rec))
+			continue;
+		if (data->cmd_ops->post_trigger) {
+			tt |= data->cmd_ops->trigger_type;
+			continue;
+		}
 		data->ops->func(data);
+	}
+	return tt;
 }
 EXPORT_SYMBOL_GPL(event_triggers_call);
 
+/**
+ * event_triggers_post_call - Call 'post_triggers' for a trace event
+ * @file: The ftrace_event_file associated with the event
+ * @tt: enum event_trigger_type containing a set bit for each trigger to invoke
+ *
+ * For each trigger associated with an event, invoke the trigger
+ * function registered with the associated trigger command, if the
+ * corresponding bit is set in the tt enum passed into this function.
+ * See @event_triggers_call for details on how those bits are set.
+ *
+ * Called from tracepoint handlers (with rcu_read_lock_sched() held).
+ */
+void
+event_triggers_post_call(struct ftrace_event_file *file,
+			 enum event_trigger_type tt)
+{
+	struct event_trigger_data *data;
+
+	list_for_each_entry_rcu(data, &file->triggers, list) {
+		if (data->cmd_ops->trigger_type & tt)
+			data->ops->func(data);
+	}
+}
+EXPORT_SYMBOL_GPL(event_triggers_post_call);
+
 static void *trigger_next(struct seq_file *m, void *t, loff_t *pos)
 {
 	struct ftrace_event_file *event_file = event_file_data(m->private);
@@ -403,6 +457,34 @@ clear_event_triggers(struct trace_array *tr)
 }
 
 /**
+ * update_cond_flag - Set or reset the TRIGGER_COND bit
+ * @file: The ftrace_event_file associated with the event
+ *
+ * If an event has triggers and any of those triggers has a filter or
+ * a post_trigger, trigger invocation needs to be deferred until after
+ * the current event has logged its data, and the event should have
+ * its TRIGGER_COND bit set, otherwise the TRIGGER_COND bit should be
+ * cleared.
+ */
+static void update_cond_flag(struct ftrace_event_file *file)
+{
+	struct event_trigger_data *data;
+	bool set_cond = false;
+
+	list_for_each_entry_rcu(data, &file->triggers, list) {
+		if (data->filter || data->cmd_ops->post_trigger) {
+			set_cond = true;
+			break;
+		}
+	}
+
+	if (set_cond)
+		set_bit(FTRACE_EVENT_FL_TRIGGER_COND_BIT, &file->flags);
+	else
+		clear_bit(FTRACE_EVENT_FL_TRIGGER_COND_BIT, &file->flags);
+}
+
+/**
  * register_trigger - Generic event_command @reg implementation
  * @glob: The raw string used to register the trigger
  * @ops: The trigger ops associated with the trigger
@@ -443,6 +525,7 @@ static int register_trigger(char *glob, struct event_trigger_ops *ops,
 		list_del_rcu(&data->list);
 		ret--;
 	}
+	update_cond_flag(file);
 out:
 	return ret;
 }
@@ -470,6 +553,7 @@ static void unregister_trigger(char *glob, struct event_trigger_ops *ops,
 		if (data->cmd_ops->trigger_type == test->cmd_ops->trigger_type) {
 			unregistered = true;
 			list_del_rcu(&data->list);
+			update_cond_flag(file);
 			trace_event_trigger_enable_disable(file, 0);
 			break;
 		}
@@ -572,10 +656,78 @@ event_trigger_callback(struct event_command *cmd_ops,
 	return ret;
 
  out_free:
+	if (cmd_ops->set_filter)
+		cmd_ops->set_filter(NULL, trigger_data, NULL);
 	kfree(trigger_data);
 	goto out;
 }
 
+/**
+ * set_trigger_filter - Generic event_command @set_filter implementation
+ * @filter_str: The filter string for the trigger, NULL to remove filter
+ * @trigger_data: Trigger-specific data
+ * @file: The ftrace_event_file associated with the event
+ *
+ * Common implementation for event command filter parsing and filter
+ * instantiation.
+ *
+ * Usually used directly as the @set_filter method in event command
+ * implementations.
+ *
+ * Also used to remove a filter (if filter_str = NULL).
+ *
+ * Return: 0 on success, errno otherwise
+ */
+static int set_trigger_filter(char *filter_str,
+			      struct event_trigger_data *trigger_data,
+			      struct ftrace_event_file *file)
+{
+	struct event_trigger_data *data = trigger_data;
+	struct event_filter *filter = NULL, *tmp;
+	int ret = -EINVAL;
+	char *s;
+
+	if (!filter_str) /* clear the current filter */
+		goto assign;
+
+	s = strsep(&filter_str, " \t");
+
+	if (!strlen(s) || strcmp(s, "if") != 0)
+		goto out;
+
+	if (!filter_str)
+		goto out;
+
+	/* The filter is for the 'trigger' event, not the triggered event */
+	ret = create_event_filter(file->event_call, filter_str, false, &filter);
+	if (ret)
+		goto out;
+ assign:
+	tmp = data->filter;
+
+	rcu_assign_pointer(data->filter, filter);
+
+	if (tmp) {
+		/* Make sure the call is done with the filter */
+		synchronize_sched();
+		free_event_filter(tmp);
+	}
+
+	kfree(data->filter_str);
+	data->filter_str = NULL;
+
+	if (filter_str) {
+		data->filter_str = kstrdup(filter_str, GFP_KERNEL);
+		if (!data->filter_str) {
+			free_event_filter(data->filter);
+			data->filter = NULL;
+			ret = -ENOMEM;
+		}
+	}
+ out:
+	return ret;
+}
+
 static void
 traceon_trigger(struct event_trigger_data *data)
 {
@@ -685,6 +837,7 @@ static struct event_command trigger_traceon_cmd = {
 	.reg = register_trigger,
 	.unreg = unregister_trigger,
 	.get_trigger_ops = onoff_get_trigger_ops,
+	.set_filter = set_trigger_filter,
 };
 
 static struct event_command trigger_traceoff_cmd = {
@@ -694,6 +847,7 @@ static struct event_command trigger_traceoff_cmd = {
 	.reg = register_trigger,
 	.unreg = unregister_trigger,
 	.get_trigger_ops = onoff_get_trigger_ops,
+	.set_filter = set_trigger_filter,
 };
 
 #ifdef CONFIG_TRACER_SNAPSHOT
@@ -765,6 +919,7 @@ static struct event_command trigger_snapshot_cmd = {
 	.reg = register_snapshot_trigger,
 	.unreg = unregister_trigger,
 	.get_trigger_ops = snapshot_get_trigger_ops,
+	.set_filter = set_trigger_filter,
 };
 
 static __init int register_trigger_snapshot_cmd(void)
@@ -843,6 +998,7 @@ static struct event_command trigger_stacktrace_cmd = {
 	.reg = register_trigger,
 	.unreg = unregister_trigger,
 	.get_trigger_ops = stacktrace_get_trigger_ops,
+	.set_filter = set_trigger_filter,
 };
 
 static __init int register_trigger_stacktrace_cmd(void)
@@ -1100,6 +1256,8 @@ event_enable_trigger_func(struct event_command *cmd_ops,
  out_put:
 	module_put(event_enable_file->event_call->mod);
  out_free:
+	if (cmd_ops->set_filter)
+		cmd_ops->set_filter(NULL, trigger_data, NULL);
 	kfree(trigger_data);
 	kfree(enable_data);
 	goto out;
@@ -1137,6 +1295,7 @@ static int event_enable_register_trigger(char *glob,
 		list_del_rcu(&data->list);
 		ret--;
 	}
+	update_cond_flag(file);
 out:
 	return ret;
 }
@@ -1157,6 +1316,7 @@ static void event_enable_unregister_trigger(char *glob,
 		    (enable_data->file == test_enable_data->file)) {
 			unregistered = true;
 			list_del_rcu(&data->list);
+			update_cond_flag(file);
 			trace_event_trigger_enable_disable(file, 0);
 			break;
 		}
@@ -1191,6 +1351,7 @@ static struct event_command trigger_enable_cmd = {
 	.reg = event_enable_register_trigger,
 	.unreg = event_enable_unregister_trigger,
 	.get_trigger_ops = event_enable_get_trigger_ops,
+	.set_filter = set_trigger_filter,
 };
 
 static struct event_command trigger_disable_cmd = {
@@ -1200,6 +1361,7 @@ static struct event_command trigger_disable_cmd = {
 	.reg = event_enable_register_trigger,
 	.unreg = event_enable_unregister_trigger,
 	.get_trigger_ops = event_enable_get_trigger_ops,
+	.set_filter = set_trigger_filter,
 };
 
 static __init void unregister_trigger_enable_disable_cmds(void)
diff --git a/kernel/trace/trace_syscalls.c b/kernel/trace/trace_syscalls.c
index 936ec3960335..fdd955f2f1aa 100644
--- a/kernel/trace/trace_syscalls.c
+++ b/kernel/trace/trace_syscalls.c
@@ -306,8 +306,10 @@ static void ftrace_syscall_enter(void *data, struct pt_regs *regs, long id)
 	struct syscall_trace_enter *entry;
 	struct syscall_metadata *sys_data;
 	struct ring_buffer_event *event;
+	enum event_trigger_type __tt = ETT_NONE;
 	struct ring_buffer *buffer;
 	unsigned long irq_flags;
+	unsigned long eflags;
 	int pc;
 	int syscall_nr;
 	int size;
@@ -321,10 +323,14 @@ static void ftrace_syscall_enter(void *data, struct pt_regs *regs, long id)
 	if (!ftrace_file)
 		return;
 
-	if (test_bit(FTRACE_EVENT_FL_TRIGGER_MODE_BIT, &ftrace_file->flags))
-		event_triggers_call(ftrace_file);
-	if (test_bit(FTRACE_EVENT_FL_SOFT_DISABLED_BIT, &ftrace_file->flags))
-		return;
+	eflags = ftrace_file->flags;
+
+	if (!(eflags & FTRACE_EVENT_FL_TRIGGER_COND)) {
+		if (eflags & FTRACE_EVENT_FL_TRIGGER_MODE)
+			event_triggers_call(ftrace_file, NULL);
+		if (eflags & FTRACE_EVENT_FL_SOFT_DISABLED)
+			return;
+	}
 
 	sys_data = syscall_nr_to_meta(syscall_nr);
 	if (!sys_data)
@@ -345,9 +351,16 @@ static void ftrace_syscall_enter(void *data, struct pt_regs *regs, long id)
 	entry->nr = syscall_nr;
 	syscall_get_arguments(current, regs, 0, sys_data->nb_args, entry->args);
 
-	if (!filter_check_discard(ftrace_file, entry, buffer, event))
+	if (eflags & FTRACE_EVENT_FL_TRIGGER_COND)
+		__tt = event_triggers_call(ftrace_file, entry);
+
+	if (test_bit(FTRACE_EVENT_FL_SOFT_DISABLED_BIT, &ftrace_file->flags))
+		ring_buffer_discard_commit(buffer, event);
+	else if (!filter_check_discard(ftrace_file, entry, buffer, event))
 		trace_current_buffer_unlock_commit(buffer, event,
 						   irq_flags, pc);
+	if (__tt)
+		event_triggers_post_call(ftrace_file, __tt);
 }
 
 static void ftrace_syscall_exit(void *data, struct pt_regs *regs, long ret)
@@ -357,8 +370,10 @@ static void ftrace_syscall_exit(void *data, struct pt_regs *regs, long ret)
 	struct syscall_trace_exit *entry;
 	struct syscall_metadata *sys_data;
 	struct ring_buffer_event *event;
+	enum event_trigger_type __tt = ETT_NONE;
 	struct ring_buffer *buffer;
 	unsigned long irq_flags;
+	unsigned long eflags;
 	int pc;
 	int syscall_nr;
 
@@ -371,10 +386,14 @@ static void ftrace_syscall_exit(void *data, struct pt_regs *regs, long ret)
 	if (!ftrace_file)
 		return;
 
-	if (test_bit(FTRACE_EVENT_FL_TRIGGER_MODE_BIT, &ftrace_file->flags))
-		event_triggers_call(ftrace_file);
-	if (test_bit(FTRACE_EVENT_FL_SOFT_DISABLED_BIT, &ftrace_file->flags))
-		return;
+	eflags = ftrace_file->flags;
+
+	if (!(eflags & FTRACE_EVENT_FL_TRIGGER_COND)) {
+		if (eflags & FTRACE_EVENT_FL_TRIGGER_MODE)
+			event_triggers_call(ftrace_file, NULL);
+		if (eflags & FTRACE_EVENT_FL_SOFT_DISABLED)
+			return;
+	}
 
 	sys_data = syscall_nr_to_meta(syscall_nr);
 	if (!sys_data)
@@ -394,9 +413,16 @@ static void ftrace_syscall_exit(void *data, struct pt_regs *regs, long ret)
 	entry->nr = syscall_nr;
 	entry->ret = syscall_get_return_value(current, regs);
 
-	if (!filter_check_discard(ftrace_file, entry, buffer, event))
+	if (eflags & FTRACE_EVENT_FL_TRIGGER_COND)
+		__tt = event_triggers_call(ftrace_file, entry);
+
+	if (test_bit(FTRACE_EVENT_FL_SOFT_DISABLED_BIT, &ftrace_file->flags))
+		ring_buffer_discard_commit(buffer, event);
+	else if (!filter_check_discard(ftrace_file, entry, buffer, event))
 		trace_current_buffer_unlock_commit(buffer, event,
 						   irq_flags, pc);
+	if (__tt)
+		event_triggers_post_call(ftrace_file, __tt);
 }
 
 static int reg_event_syscall_enter(struct ftrace_event_file *file,