about summary refs log tree commit diff stats
path: root/kernel/trace/trace_functions_graph.c
diff options
context:
space:
mode:
authorIngo Molnar <mingo@kernel.org>2018-12-03 04:47:53 -0500
committerIngo Molnar <mingo@kernel.org>2018-12-03 04:47:53 -0500
commitdf60673198ae678f68af54873b8904ba93fe13a0 (patch)
tree6e9a3393d0be7b68a69c2bbc58f4325ceb6fd853 /kernel/trace/trace_functions_graph.c
parent89f579ce99f7e028e81885d3965f973c0f787611 (diff)
parent2595646791c319cadfdbf271563aac97d0843dc7 (diff)
Merge tag 'v4.20-rc5' into x86/cleanups, to sync up the tree
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'kernel/trace/trace_functions_graph.c')
-rw-r--r--kernel/trace/trace_functions_graph.c53
1 file changed, 42 insertions, 11 deletions
diff --git a/kernel/trace/trace_functions_graph.c b/kernel/trace/trace_functions_graph.c
index 169b3c44ee97..086af4f5c3e8 100644
--- a/kernel/trace/trace_functions_graph.c
+++ b/kernel/trace/trace_functions_graph.c
@@ -118,8 +118,8 @@ print_graph_duration(struct trace_array *tr, unsigned long long duration,
118 struct trace_seq *s, u32 flags); 118 struct trace_seq *s, u32 flags);
119 119
120/* Add a function return address to the trace stack on thread info.*/ 120/* Add a function return address to the trace stack on thread info.*/
121int 121static int
122ftrace_push_return_trace(unsigned long ret, unsigned long func, int *depth, 122ftrace_push_return_trace(unsigned long ret, unsigned long func,
123 unsigned long frame_pointer, unsigned long *retp) 123 unsigned long frame_pointer, unsigned long *retp)
124{ 124{
125 unsigned long long calltime; 125 unsigned long long calltime;
@@ -177,9 +177,31 @@ ftrace_push_return_trace(unsigned long ret, unsigned long func, int *depth,
177#ifdef HAVE_FUNCTION_GRAPH_RET_ADDR_PTR 177#ifdef HAVE_FUNCTION_GRAPH_RET_ADDR_PTR
178 current->ret_stack[index].retp = retp; 178 current->ret_stack[index].retp = retp;
179#endif 179#endif
180 *depth = current->curr_ret_stack; 180 return 0;
181}
182
183int function_graph_enter(unsigned long ret, unsigned long func,
184 unsigned long frame_pointer, unsigned long *retp)
185{
186 struct ftrace_graph_ent trace;
187
188 trace.func = func;
189 trace.depth = ++current->curr_ret_depth;
190
191 if (ftrace_push_return_trace(ret, func,
192 frame_pointer, retp))
193 goto out;
194
195 /* Only trace if the calling function expects to */
196 if (!ftrace_graph_entry(&trace))
197 goto out_ret;
181 198
182 return 0; 199 return 0;
200 out_ret:
201 current->curr_ret_stack--;
202 out:
203 current->curr_ret_depth--;
204 return -EBUSY;
183} 205}
184 206
185/* Retrieve a function return address to the trace stack on thread info.*/ 207/* Retrieve a function return address to the trace stack on thread info.*/
@@ -241,7 +263,13 @@ ftrace_pop_return_trace(struct ftrace_graph_ret *trace, unsigned long *ret,
241 trace->func = current->ret_stack[index].func; 263 trace->func = current->ret_stack[index].func;
242 trace->calltime = current->ret_stack[index].calltime; 264 trace->calltime = current->ret_stack[index].calltime;
243 trace->overrun = atomic_read(&current->trace_overrun); 265 trace->overrun = atomic_read(&current->trace_overrun);
244 trace->depth = index; 266 trace->depth = current->curr_ret_depth--;
267 /*
268 * We still want to trace interrupts coming in if
269 * max_depth is set to 1. Make sure the decrement is
270 * seen before ftrace_graph_return.
271 */
272 barrier();
245} 273}
246 274
247/* 275/*
@@ -255,6 +283,12 @@ unsigned long ftrace_return_to_handler(unsigned long frame_pointer)
255 283
256 ftrace_pop_return_trace(&trace, &ret, frame_pointer); 284 ftrace_pop_return_trace(&trace, &ret, frame_pointer);
257 trace.rettime = trace_clock_local(); 285 trace.rettime = trace_clock_local();
286 ftrace_graph_return(&trace);
287 /*
288 * The ftrace_graph_return() may still access the current
289 * ret_stack structure, we need to make sure the update of
290 * curr_ret_stack is after that.
291 */
258 barrier(); 292 barrier();
259 current->curr_ret_stack--; 293 current->curr_ret_stack--;
260 /* 294 /*
@@ -267,13 +301,6 @@ unsigned long ftrace_return_to_handler(unsigned long frame_pointer)
267 return ret; 301 return ret;
268 } 302 }
269 303
270 /*
271 * The trace should run after decrementing the ret counter
272 * in case an interrupt were to come in. We don't want to
273 * lose the interrupt if max_depth is set.
274 */
275 ftrace_graph_return(&trace);
276
277 if (unlikely(!ret)) { 304 if (unlikely(!ret)) {
278 ftrace_graph_stop(); 305 ftrace_graph_stop();
279 WARN_ON(1); 306 WARN_ON(1);
@@ -482,6 +509,8 @@ void trace_graph_return(struct ftrace_graph_ret *trace)
482 int cpu; 509 int cpu;
483 int pc; 510 int pc;
484 511
512 ftrace_graph_addr_finish(trace);
513
485 local_irq_save(flags); 514 local_irq_save(flags);
486 cpu = raw_smp_processor_id(); 515 cpu = raw_smp_processor_id();
487 data = per_cpu_ptr(tr->trace_buffer.data, cpu); 516 data = per_cpu_ptr(tr->trace_buffer.data, cpu);
@@ -505,6 +534,8 @@ void set_graph_array(struct trace_array *tr)
505 534
506static void trace_graph_thresh_return(struct ftrace_graph_ret *trace) 535static void trace_graph_thresh_return(struct ftrace_graph_ret *trace)
507{ 536{
537 ftrace_graph_addr_finish(trace);
538
508 if (tracing_thresh && 539 if (tracing_thresh &&
509 (trace->rettime - trace->calltime < tracing_thresh)) 540 (trace->rettime - trace->calltime < tracing_thresh))
510 return; 541 return;