path: root/include/trace/ftrace.h
author		Tom Zanussi <tom.zanussi@linux.intel.com>	2013-10-24 09:59:29 -0400
committer	Steven Rostedt <rostedt@goodmis.org>		2013-12-21 22:02:17 -0500
commit		bac5fb97a173aeef8296b3efdb552e3489d55179 (patch)
tree		2acb18186a608cca2eda53f6e110e792c1b6edbe /include/trace/ftrace.h
parent		2875a08b2d1da7bae58fc01badb9b0ef1e8fc1a4 (diff)
tracing: Add and use generic set_trigger_filter() implementation
Add a generic event_command.set_trigger_filter() op implementation and
have the current set of trigger commands use it - this essentially
gives them all support for filters.

Syntactically, filters are supported by adding 'if <filter>' just
after the command, in which case only events matching the filter will
invoke the trigger.  For example, to add a filter to an
enable/disable_event command:

    echo 'enable_event:system:event if common_pid == 999' > \
              .../othersys/otherevent/trigger

The above command will only enable the system:event event if the
common_pid field in the othersys:otherevent event is 999.

As another example, to add a filter to a stacktrace command:

    echo 'stacktrace if common_pid == 999' > \
              .../somesys/someevent/trigger

The above command will only trigger a stacktrace if the common_pid
field in the event is 999.

The filter syntax is the same as that described in the 'Event
filtering' section of Documentation/trace/events.txt.

Because triggers can now use filters, the trigger-invoking logic needs
to be moved in those cases - e.g. for ftrace_raw_event_calls, if a
trigger has a filter associated with it, the trigger invocation now
needs to happen after the { assign; } part of the call, in order for
the trigger condition to be tested.

There's still a SOFT_DISABLED-only check at the top of e.g. the
ftrace_raw_events function, so when an event is soft disabled but not
because of the presence of a trigger, the original SOFT_DISABLED
behavior remains unchanged.

There's also a bit of trickiness in that some triggers need to avoid
being invoked while an event is currently in the process of being
logged, since the trigger may itself log data into the trace buffer.
Thus we make sure the current event is committed before invoking those
triggers.

To do that, we split the trigger invocation in two - the first part
(event_triggers_call()) checks the filter using the current trace
record; if a command has the post_trigger flag set, it sets a bit for
itself in the return value, otherwise it directly invokes the trigger.
Once all commands have been either invoked or set their return flag,
event_triggers_call() returns.  The current record is then either
committed or discarded; if any commands have deferred their triggers,
those commands are finally invoked following the close of the current
event by event_triggers_post_call().

To simplify the above and make it more efficient, the TRIGGER_COND bit
is introduced, which is set only if a soft-disabled trigger needs to
use the log record for filter testing or needs to wait until the
current log record is closed.

The syscall event invocation code is also changed in analogous ways.

Because event triggers need to be able to create and free filters,
this also adds a couple external wrappers for the existing
create_filter and free_filter functions, which are too generic to be
made extern functions themselves.

Link: http://lkml.kernel.org/r/7164930759d8719ef460357f143d995406e4eead.1382622043.git.tom.zanussi@linux.intel.com

Signed-off-by: Tom Zanussi <tom.zanussi@linux.intel.com>
Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
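To make the two-phase dispatch described above concrete, here is a
minimal user-space sketch of the pattern; it is not the kernel
implementation, and every name in it (demo_*, struct demo_command,
the post_trigger flag layout) is hypothetical:

#include <stdbool.h>
#include <stdio.h>

enum demo_trigger_type {		/* one bit per command type */
	DEMO_ETT_NONE		= 0,
	DEMO_ETT_STACKTRACE	= (1 << 0),
	DEMO_ETT_SNAPSHOT	= (1 << 1),
};

struct demo_record {			/* stand-in for a trace record */
	int common_pid;
};

struct demo_command {
	enum demo_trigger_type type;
	bool post_trigger;		/* defer until after the commit */
	bool (*filter)(struct demo_record *rec); /* NULL: unconditional */
	void (*func)(void);
};

/*
 * Phase one: runs while the current record is still open.  Each
 * command's filter is tested against that record; commands marked
 * post_trigger only set their bit in the return value (they may log
 * into the buffer themselves, so they must wait), the rest fire
 * immediately.
 */
static enum demo_trigger_type
demo_triggers_call(struct demo_command *cmds, int n, struct demo_record *rec)
{
	enum demo_trigger_type tt = DEMO_ETT_NONE;
	int i;

	for (i = 0; i < n; i++) {
		if (cmds[i].filter && (!rec || !cmds[i].filter(rec)))
			continue;
		if (cmds[i].post_trigger)
			tt |= cmds[i].type;
		else
			cmds[i].func();
	}
	return tt;
}

/* Phase two: runs after the record has been committed or discarded. */
static void demo_triggers_post_call(struct demo_command *cmds, int n,
				    enum demo_trigger_type tt)
{
	int i;

	for (i = 0; i < n; i++)
		if (tt & cmds[i].type)
			cmds[i].func();
}

static bool pid_is_999(struct demo_record *rec)
{
	return rec->common_pid == 999;
}

static void do_snapshot(void)
{
	puts("snapshot fires only after the record is closed");
}

int main(void)
{
	struct demo_command cmds[] = {
		{ DEMO_ETT_SNAPSHOT, true, pid_is_999, do_snapshot },
	};
	struct demo_record rec = { .common_pid = 999 };
	enum demo_trigger_type tt;

	tt = demo_triggers_call(cmds, 1, &rec);
	/* ... the record would be committed or discarded here ... */
	demo_triggers_post_call(cmds, 1, tt);
	return 0;
}

The same shape appears in the ftrace_raw_event_##call() hunks below:
event_triggers_call() runs while the record is open, the record is
committed or discarded, and event_triggers_post_call() then invokes
any command that set its bit.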
Diffstat (limited to 'include/trace/ftrace.h')
-rw-r--r--	include/trace/ftrace.h	48
1 file changed, 36 insertions(+), 12 deletions(-)
diff --git a/include/trace/ftrace.h b/include/trace/ftrace.h
index 0a48bff964bd..0962968b8b37 100644
--- a/include/trace/ftrace.h
+++ b/include/trace/ftrace.h
@@ -418,6 +418,8 @@ static inline notrace int ftrace_get_offsets_##call( \
  *	struct ftrace_event_file *ftrace_file = __data;
  *	struct ftrace_event_call *event_call = ftrace_file->event_call;
  *	struct ftrace_data_offsets_<call> __maybe_unused __data_offsets;
+ *	unsigned long eflags = ftrace_file->flags;
+ *	enum event_trigger_type __tt = ETT_NONE;
  *	struct ring_buffer_event *event;
  *	struct ftrace_raw_<call> *entry; <-- defined in stage 1
  *	struct ring_buffer *buffer;
@@ -425,9 +427,12 @@ static inline notrace int ftrace_get_offsets_##call( \
  *	int __data_size;
  *	int pc;
  *
- *	if (test_bit(FTRACE_EVENT_FL_SOFT_DISABLED_BIT,
- *		     &ftrace_file->flags))
- *		return;
+ *	if (!(eflags & FTRACE_EVENT_FL_TRIGGER_COND)) {
+ *		if (eflags & FTRACE_EVENT_FL_TRIGGER_MODE)
+ *			event_triggers_call(ftrace_file, NULL);
+ *		if (eflags & FTRACE_EVENT_FL_SOFT_DISABLED)
+ *			return;
+ *	}
  *
  *	local_save_flags(irq_flags);
  *	pc = preempt_count();
@@ -445,8 +450,17 @@ static inline notrace int ftrace_get_offsets_##call( \
  *	{ <assign>; }  <-- Here we assign the entries by the __field and
  *			   __array macros.
  *
- *	if (!filter_check_discard(ftrace_file, entry, buffer, event))
+ *	if (eflags & FTRACE_EVENT_FL_TRIGGER_COND)
+ *		__tt = event_triggers_call(ftrace_file, entry);
+ *
+ *	if (test_bit(FTRACE_EVENT_FL_SOFT_DISABLED_BIT,
+ *		     &ftrace_file->flags))
+ *		ring_buffer_discard_commit(buffer, event);
+ *	else if (!filter_check_discard(ftrace_file, entry, buffer, event))
  *		trace_buffer_unlock_commit(buffer, event, irq_flags, pc);
+ *
+ *	if (__tt)
+ *		event_triggers_post_call(ftrace_file, __tt);
  * }
  *
  * static struct trace_event ftrace_event_type_<call> = {
@@ -532,6 +546,8 @@ ftrace_raw_event_##call(void *__data, proto) \
 	struct ftrace_event_file *ftrace_file = __data;		\
 	struct ftrace_event_call *event_call = ftrace_file->event_call; \
 	struct ftrace_data_offsets_##call __maybe_unused __data_offsets;\
+	unsigned long eflags = ftrace_file->flags;			\
+	enum event_trigger_type __tt = ETT_NONE;			\
 	struct ring_buffer_event *event;				\
 	struct ftrace_raw_##call *entry;				\
 	struct ring_buffer *buffer;					\
@@ -539,13 +555,12 @@ ftrace_raw_event_##call(void *__data, proto) \
 	int __data_size;						\
 	int pc;								\
 									\
-	if (test_bit(FTRACE_EVENT_FL_TRIGGER_MODE_BIT,			\
-		     &ftrace_file->flags))				\
-		event_triggers_call(ftrace_file);			\
-									\
-	if (test_bit(FTRACE_EVENT_FL_SOFT_DISABLED_BIT,			\
-		     &ftrace_file->flags))				\
-		return;							\
+	if (!(eflags & FTRACE_EVENT_FL_TRIGGER_COND)) {			\
+		if (eflags & FTRACE_EVENT_FL_TRIGGER_MODE)		\
+			event_triggers_call(ftrace_file, NULL);		\
+		if (eflags & FTRACE_EVENT_FL_SOFT_DISABLED)		\
+			return;						\
+	}								\
 									\
 	local_save_flags(irq_flags);					\
 	pc = preempt_count();						\
@@ -564,8 +579,17 @@ ftrace_raw_event_##call(void *__data, proto) \
 									\
 	{ assign; }							\
 									\
-	if (!filter_check_discard(ftrace_file, entry, buffer, event))	\
+	if (eflags & FTRACE_EVENT_FL_TRIGGER_COND)			\
+		__tt = event_triggers_call(ftrace_file, entry);		\
+									\
+	if (test_bit(FTRACE_EVENT_FL_SOFT_DISABLED_BIT,			\
+		     &ftrace_file->flags))				\
+		ring_buffer_discard_commit(buffer, event);		\
+	else if (!filter_check_discard(ftrace_file, entry, buffer, event)) \
 		trace_buffer_unlock_commit(buffer, event, irq_flags, pc); \
+									\
+	if (__tt)							\
+		event_triggers_post_call(ftrace_file, __tt);		\
 }
 /*
  * The ftrace_test_probe is compiled out, it is only here as a build time check
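A closing note on the fast path the hunks above add:
ftrace_file->flags is read once into eflags, and when TRIGGER_COND is
clear the old cheap behavior is kept; triggers without filters fire
with a NULL record, and a soft-disabled event returns before any
ring-buffer space is reserved.  A standalone sketch of just that
decision, with hypothetical DEMO_* names standing in for the
FTRACE_EVENT_FL_* bits:

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-ins for the FTRACE_EVENT_FL_* flag bits. */
#define DEMO_FL_SOFT_DISABLED	(1UL << 0)	/* event is soft disabled   */
#define DEMO_FL_TRIGGER_MODE	(1UL << 1)	/* at least one trigger set */
#define DEMO_FL_TRIGGER_COND	(1UL << 2)	/* a trigger needs the record */

/*
 * Returns true when the caller may skip logging entirely: with
 * TRIGGER_COND clear, no trigger needs the trace record, so
 * unconditional triggers fire immediately (no record passed) and a
 * soft-disabled event bails out before reserving buffer space.
 */
static bool demo_fast_path(unsigned long eflags, void (*triggers)(void))
{
	if (!(eflags & DEMO_FL_TRIGGER_COND)) {
		if (eflags & DEMO_FL_TRIGGER_MODE)
			triggers();
		if (eflags & DEMO_FL_SOFT_DISABLED)
			return true;
	}
	return false;
}

static void fire(void)
{
	puts("trigger fired, no record needed");
}

int main(void)
{
	/* Soft disabled because a trigger exists; no filter involved. */
	unsigned long eflags = DEMO_FL_SOFT_DISABLED | DEMO_FL_TRIGGER_MODE;

	if (demo_fast_path(eflags, fire))
		puts("record skipped");
	return 0;
}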