Diffstat (limited to 'kernel/trace/ftrace.c')
-rw-r--r--  kernel/trace/ftrace.c  101
1 file changed, 70 insertions(+), 31 deletions(-)
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index b4f20fba09fc..4f2ab9352a68 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -64,12 +64,19 @@
 
 #define FL_GLOBAL_CONTROL_MASK (FTRACE_OPS_FL_GLOBAL | FTRACE_OPS_FL_CONTROL)
 
+static struct ftrace_ops ftrace_list_end __read_mostly = {
+	.func = ftrace_stub,
+};
+
 /* ftrace_enabled is a method to turn ftrace on or off */
 int ftrace_enabled __read_mostly;
 static int last_ftrace_enabled;
 
 /* Quick disabling of function tracer. */
-int function_trace_stop;
+int function_trace_stop __read_mostly;
+
+/* Current function tracing op */
+struct ftrace_ops *function_trace_op __read_mostly = &ftrace_list_end;
 
 /* List for set_ftrace_pid's pids. */
 LIST_HEAD(ftrace_pids);
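Note: the ops list above is terminated by the ftrace_list_end sentinel rather than by NULL, so walkers never need a NULL check even while entries are concurrently added or removed. A condensed sketch of the walk (the loop body mirrors the list funcs later in this patch; it is illustrative, not a new helper):

	for (op = ftrace_ops_list; op != &ftrace_list_end; op = op->next)
		op->func(ip, parent_ip, op);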
@@ -86,10 +93,6 @@ static int ftrace_disabled __read_mostly;
 
 static DEFINE_MUTEX(ftrace_lock);
 
-static struct ftrace_ops ftrace_list_end __read_mostly = {
-	.func = ftrace_stub,
-};
-
 static struct ftrace_ops *ftrace_global_list __read_mostly = &ftrace_list_end;
 static struct ftrace_ops *ftrace_control_list __read_mostly = &ftrace_list_end;
 static struct ftrace_ops *ftrace_ops_list __read_mostly = &ftrace_list_end;
@@ -100,8 +103,14 @@ ftrace_func_t ftrace_pid_function __read_mostly = ftrace_stub;
 static struct ftrace_ops global_ops;
 static struct ftrace_ops control_ops;
 
-static void
-ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip);
+#if ARCH_SUPPORTS_FTRACE_OPS
+static void ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip,
+				 struct ftrace_ops *op);
+#else
+/* See comment below, where ftrace_ops_list_func is defined */
+static void ftrace_ops_no_ops(unsigned long ip, unsigned long parent_ip);
+#define ftrace_ops_list_func ((ftrace_func_t)ftrace_ops_no_ops)
+#endif
 
 /*
  * Traverse the ftrace_global_list, invoking all entries. The reason that we
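Note: ARCH_SUPPORTS_FTRACE_OPS has to be a compile-time constant, since it is used both in the #if above and in plain C inside update_ftrace_function() below. A minimal sketch of the expected arrangement, assuming the generic header supplies a 0 default that an architecture's asm/ftrace.h overrides once its trampoline passes the third argument (the exact header placement is an assumption here):

	/* include/linux/ftrace.h: assumed fallback when the arch says nothing */
	#ifndef ARCH_SUPPORTS_FTRACE_OPS
	#define ARCH_SUPPORTS_FTRACE_OPS 0
	#endif

	/* arch/<arch>/include/asm/ftrace.h, once the trampoline is updated */
	#define ARCH_SUPPORTS_FTRACE_OPS 1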
@@ -112,29 +121,29 @@ ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip);
  *
  * Silly Alpha and silly pointer-speculation compiler optimizations!
  */
-static void ftrace_global_list_func(unsigned long ip,
-				    unsigned long parent_ip)
+static void
+ftrace_global_list_func(unsigned long ip, unsigned long parent_ip,
+			struct ftrace_ops *op)
 {
-	struct ftrace_ops *op;
-
 	if (unlikely(trace_recursion_test(TRACE_GLOBAL_BIT)))
 		return;
 
 	trace_recursion_set(TRACE_GLOBAL_BIT);
 	op = rcu_dereference_raw(ftrace_global_list); /*see above*/
 	while (op != &ftrace_list_end) {
-		op->func(ip, parent_ip);
+		op->func(ip, parent_ip, op);
 		op = rcu_dereference_raw(op->next); /*see above*/
 	};
 	trace_recursion_clear(TRACE_GLOBAL_BIT);
 }
 
-static void ftrace_pid_func(unsigned long ip, unsigned long parent_ip)
+static void ftrace_pid_func(unsigned long ip, unsigned long parent_ip,
+			    struct ftrace_ops *op)
 {
 	if (!test_tsk_trace_trace(current))
 		return;
 
-	ftrace_pid_function(ip, parent_ip);
+	ftrace_pid_function(ip, parent_ip, op);
 }
 
 static void set_ftrace_pid_function(ftrace_func_t func)
@@ -163,12 +172,13 @@ void clear_ftrace_function(void)
  * For those archs that do not test ftrace_trace_stop in their
  * mcount call site, we need to do it from C.
  */
-static void ftrace_test_stop_func(unsigned long ip, unsigned long parent_ip)
+static void ftrace_test_stop_func(unsigned long ip, unsigned long parent_ip,
+				  struct ftrace_ops *op)
 {
 	if (function_trace_stop)
 		return;
 
-	__ftrace_trace_function(ip, parent_ip);
+	__ftrace_trace_function(ip, parent_ip, op);
 }
 #endif
 
@@ -230,15 +240,24 @@ static void update_ftrace_function(void)
 
 	/*
 	 * If we are at the end of the list and this ops is
-	 * not dynamic, then have the mcount trampoline call
-	 * the function directly
+	 * not dynamic and the arch supports passing ops, then have the
+	 * mcount trampoline call the function directly.
 	 */
 	if (ftrace_ops_list == &ftrace_list_end ||
 	    (ftrace_ops_list->next == &ftrace_list_end &&
-	     !(ftrace_ops_list->flags & FTRACE_OPS_FL_DYNAMIC)))
+	     !(ftrace_ops_list->flags & FTRACE_OPS_FL_DYNAMIC) &&
+	     ARCH_SUPPORTS_FTRACE_OPS)) {
+		/* Set the ftrace_ops that the arch callback uses */
+		if (ftrace_ops_list == &global_ops)
+			function_trace_op = ftrace_global_list;
+		else
+			function_trace_op = ftrace_ops_list;
 		func = ftrace_ops_list->func;
-	else
+	} else {
+		/* Just use the default ftrace_ops */
+		function_trace_op = &ftrace_list_end;
 		func = ftrace_ops_list_func;
+	}
 
 #ifdef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
 	ftrace_trace_function = func;
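Note: func and function_trace_op must be kept consistent because, on an arch that supports ops, the trampoline is expected to pass function_trace_op as the third argument to whatever func points at. A hypothetical C rendering of that per-hit call (the real trampoline is arch assembly, and no arch defines ARCH_SUPPORTS_FTRACE_OPS as 1 in this patch alone):

	static void mcount_hook(unsigned long ip, unsigned long parent_ip)
	{
		/* both globals were set together under ftrace_lock above */
		ftrace_trace_function(ip, parent_ip, function_trace_op);
	}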
@@ -773,7 +792,8 @@ ftrace_profile_alloc(struct ftrace_profile_stat *stat, unsigned long ip)
 }
 
 static void
-function_profile_call(unsigned long ip, unsigned long parent_ip)
+function_profile_call(unsigned long ip, unsigned long parent_ip,
+		      struct ftrace_ops *ops)
 {
 	struct ftrace_profile_stat *stat;
 	struct ftrace_profile *rec;
@@ -803,7 +823,7 @@ function_profile_call(unsigned long ip, unsigned long parent_ip)
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
 static int profile_graph_entry(struct ftrace_graph_ent *trace)
 {
-	function_profile_call(trace->func, 0);
+	function_profile_call(trace->func, 0, NULL);
 	return 1;
 }
 
@@ -2790,8 +2810,8 @@ static int __init ftrace_mod_cmd_init(void)
 }
 device_initcall(ftrace_mod_cmd_init);
 
-static void
-function_trace_probe_call(unsigned long ip, unsigned long parent_ip)
+static void function_trace_probe_call(unsigned long ip, unsigned long parent_ip,
+				      struct ftrace_ops *op)
 {
 	struct ftrace_func_probe *entry;
 	struct hlist_head *hhd;
@@ -3942,10 +3962,9 @@ ftrace_ops_test(struct ftrace_ops *ops, unsigned long ip)
 #endif /* CONFIG_DYNAMIC_FTRACE */
 
 static void
-ftrace_ops_control_func(unsigned long ip, unsigned long parent_ip)
+ftrace_ops_control_func(unsigned long ip, unsigned long parent_ip,
+			struct ftrace_ops *op)
 {
-	struct ftrace_ops *op;
-
 	if (unlikely(trace_recursion_test(TRACE_CONTROL_BIT)))
 		return;
 
@@ -3959,7 +3978,7 @@ ftrace_ops_control_func(unsigned long ip, unsigned long parent_ip)
 	while (op != &ftrace_list_end) {
 		if (!ftrace_function_local_disabled(op) &&
 		    ftrace_ops_test(op, ip))
-			op->func(ip, parent_ip);
+			op->func(ip, parent_ip, op);
 
 		op = rcu_dereference_raw(op->next);
 	};
@@ -3971,8 +3990,9 @@ static struct ftrace_ops control_ops = {
 	.func = ftrace_ops_control_func,
 };
 
-static void
-ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip)
+static inline void
+__ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip,
+		       struct ftrace_ops *ignored)
 {
 	struct ftrace_ops *op;
 
@@ -3988,13 +4008,32 @@ ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip)
 	op = rcu_dereference_raw(ftrace_ops_list);
 	while (op != &ftrace_list_end) {
 		if (ftrace_ops_test(op, ip))
-			op->func(ip, parent_ip);
+			op->func(ip, parent_ip, op);
 		op = rcu_dereference_raw(op->next);
 	};
 	preempt_enable_notrace();
 	trace_recursion_clear(TRACE_INTERNAL_BIT);
 }
 
+/*
+ * Some archs only support passing ip and parent_ip. Even though
+ * the list function ignores the op parameter, we do not want any
+ * C side effects, where a function is called without the caller
+ * sending a third parameter.
+ */
+#if ARCH_SUPPORTS_FTRACE_OPS
+static void ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip,
+				 struct ftrace_ops *op)
+{
+	__ftrace_ops_list_func(ip, parent_ip, NULL);
+}
+#else
+static void ftrace_ops_no_ops(unsigned long ip, unsigned long parent_ip)
+{
+	__ftrace_ops_list_func(ip, parent_ip, NULL);
+}
+#endif
+
 static void clear_ftrace_swapper(void)
 {
 	struct task_struct *p;
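Note: after this patch, every callback registered through register_ftrace_function() receives the ftrace_ops it was registered with, which is what the third-parameter plumbing above exists for. A minimal sketch of a client using the new signature (my_trace_func and my_ops are hypothetical names; register_ftrace_function() is the existing registration API):

	static void my_trace_func(unsigned long ip, unsigned long parent_ip,
				  struct ftrace_ops *op)
	{
		/* 'op' identifies this registration; handy for per-ops state */
	}

	static struct ftrace_ops my_ops __read_mostly = {
		.func = my_trace_func,
	};

	/* in init code: register_ftrace_function(&my_ops); */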