author     Ingo Molnar <mingo@kernel.org>    2012-08-21 05:23:40 -0400
committer  Ingo Molnar <mingo@kernel.org>    2012-08-21 05:23:40 -0400
commit     26198c21d1b286a084fe5d514a30bc7e6c712a34 (patch)
tree       364e3279f089e4042ccae52c2204b8dee2a979b8 /kernel/trace/trace_functions.c
parent     194f8dcbe9629d8e9346cf96345a9c0bbf0e67ae (diff)
parent     e52538965119319447c0800c534da73142c27be2 (diff)
Merge branch 'tip/perf/core' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-trace into perf/core
Pull ftrace updates from Steve Rostedt:

 "This patch series extends the ftrace function tracing utility to be
  more dynamic for its users. It allows for data passing to the callback
  functions, as well as reading regs as if a breakpoint were to trigger
  at function entry.

  The main goal of this patch series was to allow kprobes to use ftrace
  as an optimized probe point when a probe is placed on an ftrace nop.
  With lots of help from Masami Hiramatsu, and going through lots of
  iterations, we finally came up with a good solution."

Signed-off-by: Ingo Molnar <mingo@kernel.org>
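The visible change in the diff below is the widened ftrace callback prototype: every callback now also receives its own struct ftrace_ops (through which per-user data can be passed) and a struct pt_regs. A minimal sketch of what a callback and its ftrace_ops look like after this series — my_trace_callback, my_ops, and the module boilerplate are hypothetical illustration, not part of this commit:

	#include <linux/ftrace.h>
	#include <linux/module.h>

	/*
	 * Hypothetical callback using the widened prototype this series
	 * introduces: @op lets the callback reach its own ftrace_ops (and
	 * any private data hung off it); @pt_regs carries register state
	 * when the caller provides it, as if a breakpoint had fired at
	 * function entry.
	 */
	static void my_trace_callback(unsigned long ip, unsigned long parent_ip,
				      struct ftrace_ops *op, struct pt_regs *pt_regs)
	{
		if (pt_regs)
			pr_debug("traced %pS with register state\n", (void *)ip);
	}

	static struct ftrace_ops my_ops __read_mostly = {
		.func	= my_trace_callback,
		/*
		 * RECURSION_SAFE (added to trace_ops below) declares that the
		 * callback guards against recursion itself, so ftrace need not
		 * wrap it; see the guard sketch after the diff.
		 */
		.flags	= FTRACE_OPS_FL_RECURSION_SAFE,
	};

	static int __init my_tracer_init(void)
	{
		return register_ftrace_function(&my_ops);
	}

	static void __exit my_tracer_exit(void)
	{
		unregister_ftrace_function(&my_ops);
	}

	module_init(my_tracer_init);
	module_exit(my_tracer_exit);
	MODULE_LICENSE("GPL");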
Diffstat (limited to 'kernel/trace/trace_functions.c')

 kernel/trace/trace_functions.c | 14 +++++++++-----
 1 file changed, 9 insertions(+), 5 deletions(-)
diff --git a/kernel/trace/trace_functions.c b/kernel/trace/trace_functions.c
index c7b0c6a7db09..fdff65dff1bb 100644
--- a/kernel/trace/trace_functions.c
+++ b/kernel/trace/trace_functions.c
@@ -48,7 +48,8 @@ static void function_trace_start(struct trace_array *tr)
 }
 
 static void
-function_trace_call_preempt_only(unsigned long ip, unsigned long parent_ip)
+function_trace_call_preempt_only(unsigned long ip, unsigned long parent_ip,
+				 struct ftrace_ops *op, struct pt_regs *pt_regs)
 {
 	struct trace_array *tr = func_trace;
 	struct trace_array_cpu *data;
@@ -75,7 +76,9 @@ function_trace_call_preempt_only(unsigned long ip, unsigned long parent_ip)
 }
 
 static void
-function_trace_call(unsigned long ip, unsigned long parent_ip)
+function_trace_call(unsigned long ip, unsigned long parent_ip,
+		    struct ftrace_ops *op, struct pt_regs *pt_regs)
+
 {
 	struct trace_array *tr = func_trace;
 	struct trace_array_cpu *data;
@@ -106,7 +109,8 @@ function_trace_call(unsigned long ip, unsigned long parent_ip)
 }
 
 static void
-function_stack_trace_call(unsigned long ip, unsigned long parent_ip)
+function_stack_trace_call(unsigned long ip, unsigned long parent_ip,
+			  struct ftrace_ops *op, struct pt_regs *pt_regs)
 {
 	struct trace_array *tr = func_trace;
 	struct trace_array_cpu *data;
@@ -149,13 +153,13 @@ function_stack_trace_call(unsigned long ip, unsigned long parent_ip)
 static struct ftrace_ops trace_ops __read_mostly =
 {
 	.func = function_trace_call,
-	.flags = FTRACE_OPS_FL_GLOBAL,
+	.flags = FTRACE_OPS_FL_GLOBAL | FTRACE_OPS_FL_RECURSION_SAFE,
 };
 
 static struct ftrace_ops trace_stack_ops __read_mostly =
 {
 	.func = function_stack_trace_call,
-	.flags = FTRACE_OPS_FL_GLOBAL,
+	.flags = FTRACE_OPS_FL_GLOBAL | FTRACE_OPS_FL_RECURSION_SAFE,
 };
 
 /* Our two options */
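The other change in these hunks is FTRACE_OPS_FL_RECURSION_SAFE on both ops: it tells ftrace that the callback protects itself against recursion (a traced function called from inside the callback re-triggering it), so the core does not have to wrap the call in a protective helper. A sketch of the kind of guard this flag implies, loosely modeled on the per-CPU disable counting already used in this file — my_trace_active and my_safe_callback are hypothetical names, not part of this commit:

	#include <linux/ftrace.h>
	#include <linux/percpu.h>
	#include <linux/preempt.h>

	static DEFINE_PER_CPU(int, my_trace_active);

	static void my_safe_callback(unsigned long ip, unsigned long parent_ip,
				     struct ftrace_ops *op, struct pt_regs *pt_regs)
	{
		preempt_disable_notrace();

		/* A count above 1 means we re-entered on this CPU; bail out. */
		if (this_cpu_inc_return(my_trace_active) != 1)
			goto out;

		/* ... the actual tracing work goes here ... */

	out:
		this_cpu_dec(my_trace_active);
		preempt_enable_notrace();
	}

With the guard in the callback itself and the flag set on its ftrace_ops, ftrace can call the function directly, which is what makes the optimized-kprobe use case described in the merge message cheap.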