author	Steven Rostedt <srostedt@redhat.com>	2011-08-08 16:57:47 -0400
committer	Steven Rostedt <rostedt@goodmis.org>	2012-07-19 13:17:35 -0400
commit	2f5f6ad9390c1ebbf738d130dbfe80b60eaa167e (patch)
tree	1a88c37725d3efbcde8fa0a3cf19490d793877f0
parent	6e0f17be0361444862637e8986c8c1a3b3f8dcf8 (diff)
ftrace: Pass ftrace_ops as third parameter to function trace callback
Currently the function trace callback receives only the ip and parent_ip
of the function that it traced. It would be more powerful to also pass in
the ftrace_ops that registered the function. This allows the same callback
to act differently depending on which ftrace_ops registered it.

Link: http://lkml.kernel.org/r/20120612225424.267254552@goodmis.org

Reviewed-by: Masami Hiramatsu <masami.hiramatsu.pt@hitachi.com>
Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
-rw-r--r--	arch/x86/include/asm/ftrace.h		4
-rw-r--r--	arch/x86/kernel/entry_64.S		1
-rw-r--r--	include/linux/ftrace.h			16
-rw-r--r--	kernel/trace/ftrace.c			101
-rw-r--r--	kernel/trace/trace_event_perf.c		3
-rw-r--r--	kernel/trace/trace_events.c		3
-rw-r--r--	kernel/trace/trace_functions.c		9
-rw-r--r--	kernel/trace/trace_irqsoff.c		3
-rw-r--r--	kernel/trace/trace_sched_wakeup.c	2
-rw-r--r--	kernel/trace/trace_selftest.c		15
-rw-r--r--	kernel/trace/trace_stack.c		2
11 files changed, 113 insertions, 46 deletions
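
Illustrative sketch (not part of this commit; the names my_tracer, my_callback, and my_tracer_init are hypothetical): with the extra parameter, one callback can serve several registrations and recover per-registration state from the ftrace_ops that fired, e.g. by embedding the ops in a larger structure and using container_of().

    #include <linux/ftrace.h>
    #include <linux/module.h>

    /* Hypothetical per-instance state embedding an ftrace_ops. */
    struct my_tracer {
            struct ftrace_ops	ops;
            unsigned long		hits;	/* racy counter; fine for a sketch */
    };

    /*
     * One callback shared by any number of registrations: the new third
     * parameter names the ftrace_ops that registered it, so the callback
     * can recover its own instance with container_of().
     */
    static void notrace my_callback(unsigned long ip, unsigned long parent_ip,
                                    struct ftrace_ops *op)
    {
            struct my_tracer *t = container_of(op, struct my_tracer, ops);

            t->hits++;
    }

    static struct my_tracer my_tracer = {
            .ops = { .func = my_callback },
    };

    static int __init my_tracer_init(void)
    {
            /* register_ftrace_function() hooks the callback into mcount */
            return register_ftrace_function(&my_tracer.ops);
    }

Note that on architectures without ARCH_SUPPORTS_FTRACE_OPS the op argument is still supplied to each callback, only by the C list function (__ftrace_ops_list_func below) rather than by the mcount trampoline itself.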
diff --git a/arch/x86/include/asm/ftrace.h b/arch/x86/include/asm/ftrace.h
index b0767bc08740..783b107eacbc 100644
--- a/arch/x86/include/asm/ftrace.h
+++ b/arch/x86/include/asm/ftrace.h
@@ -32,6 +32,10 @@
 #define MCOUNT_ADDR		((long)(mcount))
 #define MCOUNT_INSN_SIZE	5 /* sizeof mcount call */
 
+#if defined(CONFIG_DYNAMIC_FTRACE) && defined(CONFIG_X86_64)
+#define ARCH_SUPPORTS_FTRACE_OPS 1
+#endif
+
 #ifndef __ASSEMBLY__
 extern void mcount(void);
 extern atomic_t modifying_ftrace_code;
diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S
index 7d65133b51be..2b4f94c5dc60 100644
--- a/arch/x86/kernel/entry_64.S
+++ b/arch/x86/kernel/entry_64.S
@@ -79,6 +79,7 @@ ENTRY(ftrace_caller)
 
 	MCOUNT_SAVE_FRAME
 
+	leaq function_trace_op, %rdx
 	movq 0x38(%rsp), %rdi
 	movq 8(%rbp), %rsi
 	subq $MCOUNT_INSN_SIZE, %rdi
diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h
index 55e6d63d46d0..2d5964119885 100644
--- a/include/linux/ftrace.h
+++ b/include/linux/ftrace.h
@@ -18,6 +18,15 @@
 
 #include <asm/ftrace.h>
 
+/*
+ * If the arch supports passing the variable contents of
+ * function_trace_op as the third parameter back from the
+ * mcount call, then the arch should define this as 1.
+ */
+#ifndef ARCH_SUPPORTS_FTRACE_OPS
+#define ARCH_SUPPORTS_FTRACE_OPS 0
+#endif
+
 struct module;
 struct ftrace_hash;
 
@@ -29,7 +38,10 @@ ftrace_enable_sysctl(struct ctl_table *table, int write,
 		     void __user *buffer, size_t *lenp,
 		     loff_t *ppos);
 
-typedef void (*ftrace_func_t)(unsigned long ip, unsigned long parent_ip);
+struct ftrace_ops;
+
+typedef void (*ftrace_func_t)(unsigned long ip, unsigned long parent_ip,
+			      struct ftrace_ops *op);
 
 /*
  * FTRACE_OPS_FL_* bits denote the state of ftrace_ops struct and are
@@ -163,7 +175,7 @@ static inline int ftrace_function_local_disabled(struct ftrace_ops *ops)
 	return *this_cpu_ptr(ops->disabled);
 }
 
-extern void ftrace_stub(unsigned long a0, unsigned long a1);
+extern void ftrace_stub(unsigned long a0, unsigned long a1, struct ftrace_ops *op);
 
 #else /* !CONFIG_FUNCTION_TRACER */
 /*
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index b4f20fba09fc..4f2ab9352a68 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -64,12 +64,19 @@
 
 #define FL_GLOBAL_CONTROL_MASK (FTRACE_OPS_FL_GLOBAL | FTRACE_OPS_FL_CONTROL)
 
+static struct ftrace_ops ftrace_list_end __read_mostly = {
+	.func		= ftrace_stub,
+};
+
 /* ftrace_enabled is a method to turn ftrace on or off */
 int ftrace_enabled __read_mostly;
 static int last_ftrace_enabled;
 
 /* Quick disabling of function tracer. */
-int function_trace_stop;
+int function_trace_stop __read_mostly;
+
+/* Current function tracing op */
+struct ftrace_ops *function_trace_op __read_mostly = &ftrace_list_end;
 
 /* List for set_ftrace_pid's pids. */
 LIST_HEAD(ftrace_pids);
@@ -86,10 +93,6 @@ static int ftrace_disabled __read_mostly;
 
 static DEFINE_MUTEX(ftrace_lock);
 
-static struct ftrace_ops ftrace_list_end __read_mostly = {
-	.func		= ftrace_stub,
-};
-
 static struct ftrace_ops *ftrace_global_list __read_mostly = &ftrace_list_end;
 static struct ftrace_ops *ftrace_control_list __read_mostly = &ftrace_list_end;
 static struct ftrace_ops *ftrace_ops_list __read_mostly = &ftrace_list_end;
@@ -100,8 +103,14 @@ ftrace_func_t ftrace_pid_function __read_mostly = ftrace_stub;
 static struct ftrace_ops global_ops;
 static struct ftrace_ops control_ops;
 
-static void
-ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip);
+#if ARCH_SUPPORTS_FTRACE_OPS
+static void ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip,
+				 struct ftrace_ops *op);
+#else
+/* See comment below, where ftrace_ops_list_func is defined */
+static void ftrace_ops_no_ops(unsigned long ip, unsigned long parent_ip);
+#define ftrace_ops_list_func ((ftrace_func_t)ftrace_ops_no_ops)
+#endif
 
 /*
  * Traverse the ftrace_global_list, invoking all entries. The reason that we
@@ -112,29 +121,29 @@ ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip);
  *
  * Silly Alpha and silly pointer-speculation compiler optimizations!
  */
-static void ftrace_global_list_func(unsigned long ip,
-				    unsigned long parent_ip)
+static void
+ftrace_global_list_func(unsigned long ip, unsigned long parent_ip,
+			struct ftrace_ops *op)
 {
-	struct ftrace_ops *op;
-
 	if (unlikely(trace_recursion_test(TRACE_GLOBAL_BIT)))
 		return;
 
 	trace_recursion_set(TRACE_GLOBAL_BIT);
 	op = rcu_dereference_raw(ftrace_global_list); /*see above*/
 	while (op != &ftrace_list_end) {
-		op->func(ip, parent_ip);
+		op->func(ip, parent_ip, op);
 		op = rcu_dereference_raw(op->next); /*see above*/
 	};
 	trace_recursion_clear(TRACE_GLOBAL_BIT);
 }
 
-static void ftrace_pid_func(unsigned long ip, unsigned long parent_ip)
+static void ftrace_pid_func(unsigned long ip, unsigned long parent_ip,
+			    struct ftrace_ops *op)
 {
 	if (!test_tsk_trace_trace(current))
 		return;
 
-	ftrace_pid_function(ip, parent_ip);
+	ftrace_pid_function(ip, parent_ip, op);
 }
 
 static void set_ftrace_pid_function(ftrace_func_t func)
@@ -163,12 +172,13 @@ void clear_ftrace_function(void)
  * For those archs that do not test ftrace_trace_stop in their
  * mcount call site, we need to do it from C.
  */
-static void ftrace_test_stop_func(unsigned long ip, unsigned long parent_ip)
+static void ftrace_test_stop_func(unsigned long ip, unsigned long parent_ip,
+				  struct ftrace_ops *op)
 {
 	if (function_trace_stop)
 		return;
 
-	__ftrace_trace_function(ip, parent_ip);
+	__ftrace_trace_function(ip, parent_ip, op);
 }
 #endif
 
@@ -230,15 +240,24 @@ static void update_ftrace_function(void)
 
 	/*
 	 * If we are at the end of the list and this ops is
-	 * not dynamic, then have the mcount trampoline call
-	 * the function directly
+	 * not dynamic and the arch supports passing ops, then have the
+	 * mcount trampoline call the function directly.
 	 */
 	if (ftrace_ops_list == &ftrace_list_end ||
 	    (ftrace_ops_list->next == &ftrace_list_end &&
-	     !(ftrace_ops_list->flags & FTRACE_OPS_FL_DYNAMIC)))
+	     !(ftrace_ops_list->flags & FTRACE_OPS_FL_DYNAMIC) &&
+	     ARCH_SUPPORTS_FTRACE_OPS)) {
+		/* Set the ftrace_ops that the arch callback uses */
+		if (ftrace_ops_list == &global_ops)
+			function_trace_op = ftrace_global_list;
+		else
+			function_trace_op = ftrace_ops_list;
 		func = ftrace_ops_list->func;
-	else
+	} else {
+		/* Just use the default ftrace_ops */
+		function_trace_op = &ftrace_list_end;
 		func = ftrace_ops_list_func;
+	}
 
 #ifdef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
 	ftrace_trace_function = func;
@@ -773,7 +792,8 @@ ftrace_profile_alloc(struct ftrace_profile_stat *stat, unsigned long ip)
 }
 
 static void
-function_profile_call(unsigned long ip, unsigned long parent_ip)
+function_profile_call(unsigned long ip, unsigned long parent_ip,
+		      struct ftrace_ops *ops)
 {
 	struct ftrace_profile_stat *stat;
 	struct ftrace_profile *rec;
@@ -803,7 +823,7 @@ function_profile_call(unsigned long ip, unsigned long parent_ip)
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
 static int profile_graph_entry(struct ftrace_graph_ent *trace)
 {
-	function_profile_call(trace->func, 0);
+	function_profile_call(trace->func, 0, NULL);
 	return 1;
 }
 
@@ -2790,8 +2810,8 @@ static int __init ftrace_mod_cmd_init(void)
 }
 device_initcall(ftrace_mod_cmd_init);
 
-static void
-function_trace_probe_call(unsigned long ip, unsigned long parent_ip)
+static void function_trace_probe_call(unsigned long ip, unsigned long parent_ip,
+				      struct ftrace_ops *op)
 {
 	struct ftrace_func_probe *entry;
 	struct hlist_head *hhd;
@@ -3942,10 +3962,9 @@ ftrace_ops_test(struct ftrace_ops *ops, unsigned long ip)
 #endif /* CONFIG_DYNAMIC_FTRACE */
 
 static void
-ftrace_ops_control_func(unsigned long ip, unsigned long parent_ip)
+ftrace_ops_control_func(unsigned long ip, unsigned long parent_ip,
+			struct ftrace_ops *op)
 {
-	struct ftrace_ops *op;
-
 	if (unlikely(trace_recursion_test(TRACE_CONTROL_BIT)))
 		return;
 
@@ -3959,7 +3978,7 @@ ftrace_ops_control_func(unsigned long ip, unsigned long parent_ip)
 	while (op != &ftrace_list_end) {
 		if (!ftrace_function_local_disabled(op) &&
 		    ftrace_ops_test(op, ip))
-			op->func(ip, parent_ip);
+			op->func(ip, parent_ip, op);
 
 		op = rcu_dereference_raw(op->next);
 	};
@@ -3971,8 +3990,9 @@ static struct ftrace_ops control_ops = {
 	.func = ftrace_ops_control_func,
 };
 
-static void
-ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip)
+static inline void
+__ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip,
+		       struct ftrace_ops *ignored)
 {
 	struct ftrace_ops *op;
 
@@ -3988,13 +4008,32 @@ ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip)
 	op = rcu_dereference_raw(ftrace_ops_list);
 	while (op != &ftrace_list_end) {
 		if (ftrace_ops_test(op, ip))
-			op->func(ip, parent_ip);
+			op->func(ip, parent_ip, op);
 		op = rcu_dereference_raw(op->next);
 	};
 	preempt_enable_notrace();
 	trace_recursion_clear(TRACE_INTERNAL_BIT);
 }
 
+/*
+ * Some archs only support passing ip and parent_ip. Even though
+ * the list function ignores the op parameter, we do not want any
+ * C side effects, where a function is called without the caller
+ * sending a third parameter.
+ */
+#if ARCH_SUPPORTS_FTRACE_OPS
+static void ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip,
+				 struct ftrace_ops *op)
+{
+	__ftrace_ops_list_func(ip, parent_ip, NULL);
+}
+#else
+static void ftrace_ops_no_ops(unsigned long ip, unsigned long parent_ip)
+{
+	__ftrace_ops_list_func(ip, parent_ip, NULL);
+}
+#endif
+
 static void clear_ftrace_swapper(void)
 {
 	struct task_struct *p;
diff --git a/kernel/trace/trace_event_perf.c b/kernel/trace/trace_event_perf.c
index fee3752ae8f6..a872a9a298a0 100644
--- a/kernel/trace/trace_event_perf.c
+++ b/kernel/trace/trace_event_perf.c
@@ -258,7 +258,8 @@ EXPORT_SYMBOL_GPL(perf_trace_buf_prepare);
 
 #ifdef CONFIG_FUNCTION_TRACER
 static void
-perf_ftrace_function_call(unsigned long ip, unsigned long parent_ip)
+perf_ftrace_function_call(unsigned long ip, unsigned long parent_ip,
+			  struct ftrace_ops *ops)
 {
 	struct ftrace_entry *entry;
 	struct hlist_head *head;
diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
index 29111da1d100..88daa5177bf4 100644
--- a/kernel/trace/trace_events.c
+++ b/kernel/trace/trace_events.c
@@ -1681,7 +1681,8 @@ static __init void event_trace_self_tests(void)
 static DEFINE_PER_CPU(atomic_t, ftrace_test_event_disable);
 
 static void
-function_test_events_call(unsigned long ip, unsigned long parent_ip)
+function_test_events_call(unsigned long ip, unsigned long parent_ip,
+			  struct ftrace_ops *op)
 {
 	struct ring_buffer_event *event;
 	struct ring_buffer *buffer;
diff --git a/kernel/trace/trace_functions.c b/kernel/trace/trace_functions.c
index c7b0c6a7db09..fceb7a9aa06d 100644
--- a/kernel/trace/trace_functions.c
+++ b/kernel/trace/trace_functions.c
@@ -48,7 +48,8 @@ static void function_trace_start(struct trace_array *tr)
 }
 
 static void
-function_trace_call_preempt_only(unsigned long ip, unsigned long parent_ip)
+function_trace_call_preempt_only(unsigned long ip, unsigned long parent_ip,
+				 struct ftrace_ops *op)
 {
 	struct trace_array *tr = func_trace;
 	struct trace_array_cpu *data;
@@ -75,7 +76,8 @@ function_trace_call_preempt_only(unsigned long ip, unsigned long parent_ip)
 }
 
 static void
-function_trace_call(unsigned long ip, unsigned long parent_ip)
+function_trace_call(unsigned long ip, unsigned long parent_ip,
+		    struct ftrace_ops *op)
 {
 	struct trace_array *tr = func_trace;
 	struct trace_array_cpu *data;
@@ -106,7 +108,8 @@ function_trace_call(unsigned long ip, unsigned long parent_ip)
 }
 
 static void
-function_stack_trace_call(unsigned long ip, unsigned long parent_ip)
+function_stack_trace_call(unsigned long ip, unsigned long parent_ip,
+			  struct ftrace_ops *op)
 {
 	struct trace_array *tr = func_trace;
 	struct trace_array_cpu *data;
diff --git a/kernel/trace/trace_irqsoff.c b/kernel/trace/trace_irqsoff.c
index 99d20e920368..2862c77f95d9 100644
--- a/kernel/trace/trace_irqsoff.c
+++ b/kernel/trace/trace_irqsoff.c
@@ -136,7 +136,8 @@ static int func_prolog_dec(struct trace_array *tr,
  * irqsoff uses its own tracer function to keep the overhead down:
  */
 static void
-irqsoff_tracer_call(unsigned long ip, unsigned long parent_ip)
+irqsoff_tracer_call(unsigned long ip, unsigned long parent_ip,
+		    struct ftrace_ops *op)
 {
 	struct trace_array *tr = irqsoff_trace;
 	struct trace_array_cpu *data;
diff --git a/kernel/trace/trace_sched_wakeup.c b/kernel/trace/trace_sched_wakeup.c
index ff791ea48b57..0caf4f5da569 100644
--- a/kernel/trace/trace_sched_wakeup.c
+++ b/kernel/trace/trace_sched_wakeup.c
@@ -108,7 +108,7 @@ out_enable:
  * wakeup uses its own tracer function to keep the overhead down:
  */
 static void
-wakeup_tracer_call(unsigned long ip, unsigned long parent_ip)
+wakeup_tracer_call(unsigned long ip, unsigned long parent_ip, struct ftrace_ops *op)
 {
 	struct trace_array *tr = wakeup_trace;
 	struct trace_array_cpu *data;
diff --git a/kernel/trace/trace_selftest.c b/kernel/trace/trace_selftest.c
index 288541f977fb..9ae40c823af8 100644
--- a/kernel/trace/trace_selftest.c
+++ b/kernel/trace/trace_selftest.c
@@ -103,35 +103,40 @@ static inline void warn_failed_init_tracer(struct tracer *trace, int init_ret)
 
 static int trace_selftest_test_probe1_cnt;
 static void trace_selftest_test_probe1_func(unsigned long ip,
-					    unsigned long pip)
+					    unsigned long pip,
+					    struct ftrace_ops *op)
 {
 	trace_selftest_test_probe1_cnt++;
 }
 
 static int trace_selftest_test_probe2_cnt;
 static void trace_selftest_test_probe2_func(unsigned long ip,
-					    unsigned long pip)
+					    unsigned long pip,
+					    struct ftrace_ops *op)
 {
 	trace_selftest_test_probe2_cnt++;
 }
 
 static int trace_selftest_test_probe3_cnt;
 static void trace_selftest_test_probe3_func(unsigned long ip,
-					    unsigned long pip)
+					    unsigned long pip,
+					    struct ftrace_ops *op)
 {
 	trace_selftest_test_probe3_cnt++;
 }
 
 static int trace_selftest_test_global_cnt;
 static void trace_selftest_test_global_func(unsigned long ip,
-					    unsigned long pip)
+					    unsigned long pip,
+					    struct ftrace_ops *op)
 {
 	trace_selftest_test_global_cnt++;
 }
 
 static int trace_selftest_test_dyn_cnt;
 static void trace_selftest_test_dyn_func(unsigned long ip,
-					 unsigned long pip)
+					 unsigned long pip,
+					 struct ftrace_ops *op)
 {
 	trace_selftest_test_dyn_cnt++;
 }
diff --git a/kernel/trace/trace_stack.c b/kernel/trace/trace_stack.c
index d4545f49242e..e20006d5fb6a 100644
--- a/kernel/trace/trace_stack.c
+++ b/kernel/trace/trace_stack.c
@@ -111,7 +111,7 @@ static inline void check_stack(void)
 }
 
 static void
-stack_trace_call(unsigned long ip, unsigned long parent_ip)
+stack_trace_call(unsigned long ip, unsigned long parent_ip, struct ftrace_ops *op)
 {
 	int cpu;
 
117 117