author     Tom Zanussi <tom.zanussi@linux.intel.com>   2014-01-06 14:44:19 -0500
committer  Steven Rostedt <rostedt@goodmis.org>        2014-01-06 15:21:43 -0500
commit     0641d368f206f2fe7725c9fa5f1461229ef8010f (patch)
tree       aac0db92d00b238236cbe64d562ff83f8978bb18 /kernel/trace
parent     e0d18fe063464cb3f1a6d1939e4fcf47d92d8386 (diff)
tracing/kprobes: Add trace event trigger invocations
Add code to the kprobe/kretprobe event functions that will invoke any
event triggers associated with a probe's ftrace_event_file.
The code to do this is very similar to the invocation code already
used to invoke the triggers associated with static events and
essentially replaces the existing soft-disable checks with a superset
that preserves the original behavior but adds the bits needed to
support event triggers.
Link: http://lkml.kernel.org/r/f2d49f157b608070045fdb26c9564d5a05a5a7d0.1389036657.git.tom.zanussi@linux.intel.com
Acked-by: Masami Hiramatsu <masami.hiramatsu.pt@hitachi.com>
Signed-off-by: Tom Zanussi <tom.zanussi@linux.intel.com>
Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
Diffstat (limited to 'kernel/trace')
-rw-r--r--   kernel/trace/trace_kprobe.c   42
1 file changed, 36 insertions, 6 deletions
diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c
index ce0ed8afb77e..3afa716d6268 100644
--- a/kernel/trace/trace_kprobe.c
+++ b/kernel/trace/trace_kprobe.c
@@ -929,12 +929,20 @@ __kprobe_trace_func(struct trace_kprobe *tk, struct pt_regs *regs,
 	struct ring_buffer *buffer;
 	int size, dsize, pc;
 	unsigned long irq_flags;
+	unsigned long eflags;
+	enum event_trigger_type tt = ETT_NONE;
 	struct ftrace_event_call *call = &tk->tp.call;
 
 	WARN_ON(call != ftrace_file->event_call);
 
-	if (test_bit(FTRACE_EVENT_FL_SOFT_DISABLED_BIT, &ftrace_file->flags))
-		return;
+	eflags = ftrace_file->flags;
+
+	if (!(eflags & FTRACE_EVENT_FL_TRIGGER_COND)) {
+		if (eflags & FTRACE_EVENT_FL_TRIGGER_MODE)
+			event_triggers_call(ftrace_file, NULL);
+		if (eflags & FTRACE_EVENT_FL_SOFT_DISABLED)
+			return;
+	}
 
 	local_save_flags(irq_flags);
 	pc = preempt_count();
@@ -952,9 +960,16 @@ __kprobe_trace_func(struct trace_kprobe *tk, struct pt_regs *regs,
 	entry->ip = (unsigned long)tk->rp.kp.addr;
 	store_trace_args(sizeof(*entry), &tk->tp, regs, (u8 *)&entry[1], dsize);
 
-	if (!filter_check_discard(ftrace_file, entry, buffer, event))
+	if (eflags & FTRACE_EVENT_FL_TRIGGER_COND)
+		tt = event_triggers_call(ftrace_file, entry);
+
+	if (test_bit(FTRACE_EVENT_FL_SOFT_DISABLED_BIT, &ftrace_file->flags))
+		ring_buffer_discard_commit(buffer, event);
+	else if (!filter_check_discard(ftrace_file, entry, buffer, event))
 		trace_buffer_unlock_commit_regs(buffer, event,
 						irq_flags, pc, regs);
+	if (tt)
+		event_triggers_post_call(ftrace_file, tt);
 }
 
 static __kprobes void
@@ -977,12 +992,20 @@ __kretprobe_trace_func(struct trace_kprobe *tk, struct kretprobe_instance *ri,
 	struct ring_buffer *buffer;
 	int size, pc, dsize;
 	unsigned long irq_flags;
+	unsigned long eflags;
+	enum event_trigger_type tt = ETT_NONE;
 	struct ftrace_event_call *call = &tk->tp.call;
 
 	WARN_ON(call != ftrace_file->event_call);
 
-	if (test_bit(FTRACE_EVENT_FL_SOFT_DISABLED_BIT, &ftrace_file->flags))
-		return;
+	eflags = ftrace_file->flags;
+
+	if (!(eflags & FTRACE_EVENT_FL_TRIGGER_COND)) {
+		if (eflags & FTRACE_EVENT_FL_TRIGGER_MODE)
+			event_triggers_call(ftrace_file, NULL);
+		if (eflags & FTRACE_EVENT_FL_SOFT_DISABLED)
+			return;
+	}
 
 	local_save_flags(irq_flags);
 	pc = preempt_count();
@@ -1001,9 +1024,16 @@ __kretprobe_trace_func(struct trace_kprobe *tk, struct kretprobe_instance *ri,
 	entry->ret_ip = (unsigned long)ri->ret_addr;
 	store_trace_args(sizeof(*entry), &tk->tp, regs, (u8 *)&entry[1], dsize);
 
-	if (!filter_check_discard(ftrace_file, entry, buffer, event))
+	if (eflags & FTRACE_EVENT_FL_TRIGGER_COND)
+		tt = event_triggers_call(ftrace_file, entry);
+
+	if (test_bit(FTRACE_EVENT_FL_SOFT_DISABLED_BIT, &ftrace_file->flags))
+		ring_buffer_discard_commit(buffer, event);
+	else if (!filter_check_discard(ftrace_file, entry, buffer, event))
 		trace_buffer_unlock_commit_regs(buffer, event,
 						irq_flags, pc, regs);
+	if (tt)
+		event_triggers_post_call(ftrace_file, tt);
 }
 
 static __kprobes void
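
For readers following the control flow rather than the patch itself, below is a small userspace model of the check sequence both probe handlers now share. It is a sketch only: the FL_* flag values, the triggers_call() helper, and the printf bookkeeping are invented for illustration and are not the kernel's FTRACE_EVENT_FL_* definitions or trigger API; only the ordering of the checks mirrors the diff above.

/*
 * Standalone model of the trigger/soft-disable decisions made in
 * __kprobe_trace_func()/__kretprobe_trace_func() after this patch.
 * Flag values and helper names are illustrative, not kernel code.
 */
#include <stdio.h>

#define FL_SOFT_DISABLED  (1u << 0)  /* event is soft-disabled */
#define FL_TRIGGER_MODE   (1u << 1)  /* at least one trigger is attached */
#define FL_TRIGGER_COND   (1u << 2)  /* some trigger has a filter/condition */

/* Stand-in for event_triggers_call(); returns nonzero if post-call work is needed. */
static unsigned int triggers_call(const void *entry)
{
	printf("  triggers called (%s entry)\n", entry ? "with" : "without");
	return entry ? 1u : 0u;
}

static void probe_event(unsigned int eflags)
{
	unsigned int tt = 0;
	int entry = 0;		/* stands in for the recorded trace entry */

	/* Unconditional triggers can run before any record is written. */
	if (!(eflags & FL_TRIGGER_COND)) {
		if (eflags & FL_TRIGGER_MODE)
			triggers_call(NULL);
		if (eflags & FL_SOFT_DISABLED)
			return;		/* original soft-disable behavior kept */
	}

	printf("  record written\n");	/* reserve + fill the trace entry */

	/* Conditional triggers need the entry to evaluate their filter. */
	if (eflags & FL_TRIGGER_COND)
		tt = triggers_call(&entry);

	if (eflags & FL_SOFT_DISABLED)
		printf("  record discarded (soft-disabled)\n");
	else
		printf("  record committed\n");

	if (tt)
		printf("  post-call trigger work\n");
}

int main(void)
{
	printf("soft-disabled, unconditional trigger:\n");
	probe_event(FL_SOFT_DISABLED | FL_TRIGGER_MODE);

	printf("soft-disabled, conditional trigger:\n");
	probe_event(FL_SOFT_DISABLED | FL_TRIGGER_MODE | FL_TRIGGER_COND);
	return 0;
}

The second scenario shows why the early return is no longer enough once triggers exist: a conditional trigger can only evaluate its filter against a filled-in entry, so a soft-disabled event now writes the record, lets the trigger see it, and then discards the commit instead of bailing out up front.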