path: root/kernel/trace/trace_functions_graph.c
author    Steven Rostedt (VMware) <rostedt@goodmis.org>    2018-11-12 15:21:22 -0500
committer Steven Rostedt (VMware) <rostedt@goodmis.org>    2018-11-29 23:38:34 -0500
commit    d864a3ca883095aa12575b84841ebd52b3d808fa (patch)
tree      99bb266bc13ecabd2697309846aa7a53afdbddd4 /kernel/trace/trace_functions_graph.c
parent    c43ac4a5301986c015137bb89568979f9b3264ca (diff)
fgraph: Create a fgraph.c file to store function graph infrastructure
As the function graph infrastructure can be used by things other than
tracing, moving the code to its own file out of the
trace_functions_graph.c code makes more sense.

The fgraph.c file will only contain the infrastructure required to hook
into functions and their return code.

Reviewed-by: Joel Fernandes (Google) <joel@joelfernandes.org>
Signed-off-by: Steven Rostedt (VMware) <rostedt@goodmis.org>
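The two entry points kept by this split, function_graph_enter() and
ftrace_return_to_handler(), are what each architecture's entry and return
trampolines call into. As orientation, the sketch below is a C-level
approximation of that arch glue, not code from this patch; the names
prepare_ftrace_return() and return_to_handler follow the x86 convention
and are assumptions here.

/*
 * Illustrative sketch only: how an arch entry hook might divert a
 * function's return through the function graph infrastructure.
 */
extern void return_to_handler(void);	/* arch assembly trampoline (assumed name) */

void prepare_ftrace_return(unsigned long self_addr, unsigned long *parent,
			   unsigned long frame_pointer)
{
	unsigned long return_hooker = (unsigned long)&return_to_handler;

	/*
	 * function_graph_enter() records the original return address on
	 * current->ret_stack and returns 0; on -EBUSY the call site is
	 * left untouched and the function returns normally.
	 */
	if (function_graph_enter(*parent, self_addr, frame_pointer, parent) == 0)
		*parent = return_hooker;
}

When the traced function returns, it lands in return_to_handler, which
calls ftrace_return_to_handler() to report the exit and recover the
original return address to jump back to.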
Diffstat (limited to 'kernel/trace/trace_functions_graph.c')
-rw-r--r--    kernel/trace/trace_functions_graph.c    220
1 file changed, 0 insertions, 220 deletions
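The subtlest piece being moved is the set_graph_notrace handling in
ftrace_push_return_trace(), shown in the diff below: instead of popping
the entry, a filtered function biases curr_ret_stack negative by
FTRACE_NOTRACE_DEPTH so every record path skips it, and the bias is
removed again on return. A standalone userspace sketch of just that
arithmetic (the constant mirrors the kernel's value but is restated here
as an assumption):

#include <stdio.h>

#define FTRACE_NOTRACE_DEPTH 65536	/* assumed to match the kernel's constant */

int main(void)
{
	int curr_ret_stack = -1;	/* initialized to -1, as in the kernel */

	/* Push one function: the saved state lives at index 0. */
	int index = ++curr_ret_stack;

	/* The function matches set_graph_notrace: bias the index negative
	 * so record paths see curr_ret_stack < 0 and ignore the record. */
	curr_ret_stack -= FTRACE_NOTRACE_DEPTH;
	printf("while filtered: %d (negative, so ignored)\n", curr_ret_stack);

	/* On return from the filtered function, add the bias back to
	 * recover the real stack index. */
	curr_ret_stack += FTRACE_NOTRACE_DEPTH;
	printf("recovered: %d (matches pushed index %d)\n", curr_ret_stack, index);
	return 0;
}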
diff --git a/kernel/trace/trace_functions_graph.c b/kernel/trace/trace_functions_graph.c
index 0d235e44d08e..b846d82c2f95 100644
--- a/kernel/trace/trace_functions_graph.c
+++ b/kernel/trace/trace_functions_graph.c
@@ -16,33 +16,6 @@
 #include "trace.h"
 #include "trace_output.h"
 
-static bool kill_ftrace_graph;
-
-/**
- * ftrace_graph_is_dead - returns true if ftrace_graph_stop() was called
- *
- * ftrace_graph_stop() is called when a severe error is detected in
- * the function graph tracing. This function is called by the critical
- * paths of function graph to keep those paths from doing any more harm.
- */
-bool ftrace_graph_is_dead(void)
-{
-	return kill_ftrace_graph;
-}
-
-/**
- * ftrace_graph_stop - set to permanently disable function graph tracing
- *
- * In case of an error in function graph tracing, this is called
- * to try to keep function graph tracing from causing any more harm.
- * Usually this is pretty severe and this is called to try to at least
- * get a warning out to the user.
- */
-void ftrace_graph_stop(void)
-{
-	kill_ftrace_graph = true;
-}
-
 /* When set, irq functions will be ignored */
 static int ftrace_graph_skip_irqs;
 
@@ -117,199 +90,6 @@ static void
 print_graph_duration(struct trace_array *tr, unsigned long long duration,
 		     struct trace_seq *s, u32 flags);
 
-/* Add a function return address to the trace stack on thread info. */
-static int
-ftrace_push_return_trace(unsigned long ret, unsigned long func,
-			 unsigned long frame_pointer, unsigned long *retp)
-{
-	unsigned long long calltime;
-	int index;
-
-	if (unlikely(ftrace_graph_is_dead()))
-		return -EBUSY;
-
-	if (!current->ret_stack)
-		return -EBUSY;
-
-	/*
-	 * We must make sure the ret_stack is tested before we read
-	 * anything else.
-	 */
-	smp_rmb();
-
-	/* The return trace stack is full */
-	if (current->curr_ret_stack == FTRACE_RETFUNC_DEPTH - 1) {
-		atomic_inc(&current->trace_overrun);
-		return -EBUSY;
-	}
-
-	/*
-	 * The curr_ret_stack is an index into the ftrace return stack
-	 * of the current task. Its value should be in
-	 * [0, FTRACE_RETFUNC_DEPTH) when the function graph tracer is
-	 * in use. To support filtering out specific functions, it makes
-	 * the index negative by subtracting a huge value
-	 * (FTRACE_NOTRACE_DEPTH), so when ftrace sees a negative index
-	 * it will ignore the record. The index is recovered when
-	 * returning from the filtered function by adding
-	 * FTRACE_NOTRACE_DEPTH back, after which functions are
-	 * recorded normally again.
-	 *
-	 * The curr_ret_stack is initialized to -1 and gets increased
-	 * in this function. So it can be less than -1 only if it was
-	 * filtered out via ftrace_graph_notrace_addr(), which can be
-	 * set from the set_graph_notrace file in tracefs by the user.
-	 */
-	if (current->curr_ret_stack < -1)
-		return -EBUSY;
-
-	calltime = trace_clock_local();
-
-	index = ++current->curr_ret_stack;
-	if (ftrace_graph_notrace_addr(func))
-		current->curr_ret_stack -= FTRACE_NOTRACE_DEPTH;
-	barrier();
-	current->ret_stack[index].ret = ret;
-	current->ret_stack[index].func = func;
-	current->ret_stack[index].calltime = calltime;
-#ifdef HAVE_FUNCTION_GRAPH_FP_TEST
-	current->ret_stack[index].fp = frame_pointer;
-#endif
-#ifdef HAVE_FUNCTION_GRAPH_RET_ADDR_PTR
-	current->ret_stack[index].retp = retp;
-#endif
-	return 0;
-}
-
-int function_graph_enter(unsigned long ret, unsigned long func,
-			 unsigned long frame_pointer, unsigned long *retp)
-{
-	struct ftrace_graph_ent trace;
-
-	trace.func = func;
-	trace.depth = ++current->curr_ret_depth;
-
-	if (ftrace_push_return_trace(ret, func, frame_pointer, retp))
-		goto out;
-
-	/* Only trace if the calling function expects to */
-	if (!ftrace_graph_entry(&trace))
-		goto out_ret;
-
-	return 0;
- out_ret:
-	current->curr_ret_stack--;
- out:
-	current->curr_ret_depth--;
-	return -EBUSY;
-}
-
-/* Retrieve a function return address from the trace stack on thread info. */
-static void
-ftrace_pop_return_trace(struct ftrace_graph_ret *trace, unsigned long *ret,
-			unsigned long frame_pointer)
-{
-	int index;
-
-	index = current->curr_ret_stack;
-
-	/*
-	 * A negative index here means that it's just returned from a
-	 * notrace'd function. Recover the index to get the original
-	 * return address. See ftrace_push_return_trace().
-	 *
-	 * TODO: Need to check whether the stack gets corrupted.
-	 */
-	if (index < 0)
-		index += FTRACE_NOTRACE_DEPTH;
-
-	if (unlikely(index < 0 || index >= FTRACE_RETFUNC_DEPTH)) {
-		ftrace_graph_stop();
-		WARN_ON(1);
-		/* Might as well panic, otherwise we have nowhere to go */
-		*ret = (unsigned long)panic;
-		return;
-	}
-
-#ifdef HAVE_FUNCTION_GRAPH_FP_TEST
-	/*
-	 * The arch may choose to record the frame pointer used
-	 * and check it here to make sure that it is what we expect it
-	 * to be. If gcc does not set the placeholder of the return
-	 * address in the frame pointer, and does a copy instead, then
-	 * the function graph trace will fail. This test detects this
-	 * case.
-	 *
-	 * Currently, x86_32 optimized for size (-Os) makes the latest
-	 * gcc do the above.
-	 *
-	 * Note, -mfentry does not use frame pointers, and this test
-	 * is not needed if CC_USING_FENTRY is set.
-	 */
-	if (unlikely(current->ret_stack[index].fp != frame_pointer)) {
-		ftrace_graph_stop();
-		WARN(1, "Bad frame pointer: expected %lx, received %lx\n"
-		     "  from func %ps return to %lx\n",
-		     current->ret_stack[index].fp,
-		     frame_pointer,
-		     (void *)current->ret_stack[index].func,
-		     current->ret_stack[index].ret);
-		*ret = (unsigned long)panic;
-		return;
-	}
-#endif
-
-	*ret = current->ret_stack[index].ret;
-	trace->func = current->ret_stack[index].func;
-	trace->calltime = current->ret_stack[index].calltime;
-	trace->overrun = atomic_read(&current->trace_overrun);
-	trace->depth = current->curr_ret_depth--;
-	/*
-	 * We still want to trace interrupts coming in if
-	 * max_depth is set to 1. Make sure the decrement is
-	 * seen before ftrace_graph_return() runs.
-	 */
-	barrier();
-}
-
-/*
- * Send the trace to the ring-buffer.
- * @return the original return address.
- */
-unsigned long ftrace_return_to_handler(unsigned long frame_pointer)
-{
-	struct ftrace_graph_ret trace;
-	unsigned long ret;
-
-	ftrace_pop_return_trace(&trace, &ret, frame_pointer);
-	trace.rettime = trace_clock_local();
-	ftrace_graph_return(&trace);
-	/*
-	 * ftrace_graph_return() may still access the current ret_stack
-	 * structure, so we need to make sure the update of
-	 * curr_ret_stack happens after that.
-	 */
-	barrier();
-	current->curr_ret_stack--;
-	/*
-	 * The curr_ret_stack can be less than -1 only if it was
-	 * filtered out and it's about to return from the function.
-	 * Recover the index and continue to trace normal functions.
-	 */
-	if (current->curr_ret_stack < -1) {
-		current->curr_ret_stack += FTRACE_NOTRACE_DEPTH;
-		return ret;
-	}
-
-	if (unlikely(!ret)) {
-		ftrace_graph_stop();
-		WARN_ON(1);
-		/* Might as well panic. What else to do? */
-		ret = (unsigned long)panic;
-	}
-
-	return ret;
-}
-
 /**
  * ftrace_graph_ret_addr - convert a potentially modified stack return address
  * to its original value