Diffstat (limited to 'kernel/trace/trace_functions_graph.c')
-rw-r--r--  kernel/trace/trace_functions_graph.c | 58 ++++++++++++++++++++++++++++++++++++++++++++++------------
1 file changed, 46 insertions(+), 12 deletions(-)
diff --git a/kernel/trace/trace_functions_graph.c b/kernel/trace/trace_functions_graph.c
index 23c0b0cb5fb9..086af4f5c3e8 100644
--- a/kernel/trace/trace_functions_graph.c
+++ b/kernel/trace/trace_functions_graph.c
@@ -118,8 +118,8 @@ print_graph_duration(struct trace_array *tr, unsigned long long duration,
                    struct trace_seq *s, u32 flags);
 
 /* Add a function return address to the trace stack on thread info.*/
-int
-ftrace_push_return_trace(unsigned long ret, unsigned long func, int *depth,
+static int
+ftrace_push_return_trace(unsigned long ret, unsigned long func,
                          unsigned long frame_pointer, unsigned long *retp)
 {
         unsigned long long calltime;
@@ -177,9 +177,31 @@ ftrace_push_return_trace(unsigned long ret, unsigned long func, int *depth,
 #ifdef HAVE_FUNCTION_GRAPH_RET_ADDR_PTR
         current->ret_stack[index].retp = retp;
 #endif
-        *depth = current->curr_ret_stack;
+        return 0;
+}
+
+int function_graph_enter(unsigned long ret, unsigned long func,
+                         unsigned long frame_pointer, unsigned long *retp)
+{
+        struct ftrace_graph_ent trace;
+
+        trace.func = func;
+        trace.depth = ++current->curr_ret_depth;
+
+        if (ftrace_push_return_trace(ret, func,
+                                     frame_pointer, retp))
+                goto out;
+
+        /* Only trace if the calling function expects to */
+        if (!ftrace_graph_entry(&trace))
+                goto out_ret;
 
         return 0;
+ out_ret:
+        current->curr_ret_stack--;
+ out:
+        current->curr_ret_depth--;
+        return -EBUSY;
 }
 
 /* Retrieve a function return address to the trace stack on thread info.*/
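The new function_graph_enter() exists so that this depth accounting lives in one place instead of being open-coded in every architecture's mcount hook. As a hedged sketch (modeled on the x86 caller, illustrative rather than part of this diff), an architecture's prepare_ftrace_return() can now reduce to:

    /*
     * Illustrative sketch only -- not from this diff. The guard on
     * tracing_graph_pause mirrors what arch entry hooks typically check.
     */
    void prepare_ftrace_return(unsigned long self_addr, unsigned long *parent,
                               unsigned long frame_pointer)
    {
            unsigned long return_hooker = (unsigned long)&return_to_handler;

            if (unlikely(atomic_read(&current->tracing_graph_pause)))
                    return;

            /* Hook the return address only if the enter path accepted it. */
            if (!function_graph_enter(*parent, self_addr, frame_pointer, parent))
                    *parent = return_hooker;
    }

On failure (-EBUSY) the two labels unwind exactly what was done: out_ret drops the ret_stack entry pushed by ftrace_push_return_trace(), and out undoes the curr_ret_depth increment, so the caller simply leaves the return address alone.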
@@ -241,7 +263,13 @@ ftrace_pop_return_trace(struct ftrace_graph_ret *trace, unsigned long *ret,
         trace->func = current->ret_stack[index].func;
         trace->calltime = current->ret_stack[index].calltime;
         trace->overrun = atomic_read(&current->trace_overrun);
-        trace->depth = index;
+        trace->depth = current->curr_ret_depth--;
+        /*
+         * We still want to trace interrupts coming in if
+         * max_depth is set to 1. Make sure the decrement is
+         * seen before ftrace_graph_return.
+         */
+        barrier();
 }
 
 /*
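Decrementing curr_ret_depth here, before the return callback runs, is what keeps interrupts visible when max_graph_depth is 1: an IRQ that arrives while ftrace_graph_return() executes re-enters function_graph_enter() and must see the parent frame's depth already released. A rough timeline, assuming curr_ret_depth starts at -1 (the same convention curr_ret_stack uses) and max_graph_depth == 1:

    /*
     * Illustrative timeline, not code from this diff:
     *
     *   func() entry:  trace.depth = ++curr_ret_depth;   // -1 -> 0, traced
     *   func() return: trace->depth = curr_ret_depth--;  // reports 0, back to -1
     *   barrier();     // compiler may not sink the decrement below this point
     *   <IRQ fires>    // its entry computes depth 0 < max_graph_depth: traced
     *   ftrace_graph_return(&trace);
     */

Without the barrier() the compiler would be free to delay the decrement past the callback, and an interrupt landing in that window would be filtered out as depth 1.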
@@ -255,6 +283,12 @@ unsigned long ftrace_return_to_handler(unsigned long frame_pointer)
 
         ftrace_pop_return_trace(&trace, &ret, frame_pointer);
         trace.rettime = trace_clock_local();
+        ftrace_graph_return(&trace);
+        /*
+         * The ftrace_graph_return() may still access the current
+         * ret_stack structure, we need to make sure the update of
+         * curr_ret_stack is after that.
+         */
         barrier();
         current->curr_ret_stack--;
         /*
@@ -267,13 +301,6 @@ unsigned long ftrace_return_to_handler(unsigned long frame_pointer)
                 return ret;
         }
 
-        /*
-         * The trace should run after decrementing the ret counter
-         * in case an interrupt were to come in. We don't want to
-         * lose the interrupt if max_depth is set.
-         */
-        ftrace_graph_return(&trace);
-
         if (unlikely(!ret)) {
                 ftrace_graph_stop();
                 WARN_ON(1);
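These two hunks are one logical move: the return callback now runs before the ret_stack index drops, since the callback may still dereference the entry being popped. A condensed, illustrative view of the resulting return path (error handling and the notrace-depth recovery branch from the surrounding context are elided):

    unsigned long ftrace_return_to_handler(unsigned long frame_pointer)
    {
            struct ftrace_graph_ret trace;
            unsigned long ret;

            ftrace_pop_return_trace(&trace, &ret, frame_pointer);
            trace.rettime = trace_clock_local();
            ftrace_graph_return(&trace);    /* may still read ret_stack[] */
            barrier();                      /* keep the callback before the pop */
            current->curr_ret_stack--;      /* slot may now be reused */

            return ret;
    }

The old placement, removed above, ran ftrace_graph_return() after the decrement to keep interrupts traceable under max_depth; that duty has moved into ftrace_pop_return_trace() via curr_ret_depth, so the stale comment goes with it.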
@@ -482,6 +509,8 @@ void trace_graph_return(struct ftrace_graph_ret *trace)
         int cpu;
         int pc;
 
+        ftrace_graph_addr_finish(trace);
+
         local_irq_save(flags);
         cpu = raw_smp_processor_id();
         data = per_cpu_ptr(tr->trace_buffer.data, cpu);
@@ -505,6 +534,8 @@ void set_graph_array(struct trace_array *tr)
 
 static void trace_graph_thresh_return(struct ftrace_graph_ret *trace)
 {
+        ftrace_graph_addr_finish(trace);
+
         if (tracing_thresh &&
             (trace->rettime - trace->calltime < tracing_thresh))
                 return;
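Both return callbacks now begin with ftrace_graph_addr_finish(). It is the bookkeeping counterpart of the entry-side ftrace_graph_addr() test behind set_graph_function: once the function that matched the filter returns, the per-task "inside a filtered graph" state must be cleared, or later unrelated functions and interrupts would keep being traced. The helper lives in kernel/trace/trace.h; the sketch below conveys the idea, but its exact body is an assumption, not quoted from this patch:

    /* Hedged sketch -- assumes the TRACE_GRAPH_BIT recursion-flag scheme. */
    static inline void ftrace_graph_addr_finish(struct ftrace_graph_ret *trace)
    {
            /* Returning from the function that switched graph tracing on? */
            if (trace_recursion_test(TRACE_GRAPH_BIT) &&
                trace->depth == trace_recursion_depth())
                    trace_recursion_clear(TRACE_GRAPH_BIT);
    }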
@@ -831,6 +862,7 @@ print_graph_entry_leaf(struct trace_iterator *iter,
         struct ftrace_graph_ret *graph_ret;
         struct ftrace_graph_ent *call;
         unsigned long long duration;
+        int cpu = iter->cpu;
         int i;
 
         graph_ret = &ret_entry->ret;
@@ -839,7 +871,6 @@ print_graph_entry_leaf(struct trace_iterator *iter,
 
         if (data) {
                 struct fgraph_cpu_data *cpu_data;
-                int cpu = iter->cpu;
 
                 cpu_data = per_cpu_ptr(data->cpu_data, cpu);
 
@@ -869,6 +900,9 @@ print_graph_entry_leaf(struct trace_iterator *iter,
 
         trace_seq_printf(s, "%ps();\n", (void *)call->func);
 
+        print_graph_irq(iter, graph_ret->func, TRACE_GRAPH_RET,
+                        cpu, iter->ent->pid, flags);
+
         return trace_handle_return(s);
 }
 
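Finally, the leaf printer learns to emit interrupt markers. When an IRQ-entry function itself gets folded into a leaf line (its children filtered away, e.g. by set_graph_function or max_graph_depth), the return half of the record is consumed here and never reaches print_graph_return(), so the "<==========" exit annotation was silently dropped. The new print_graph_irq() call restores it, and hoisting int cpu out of the if (data) block (two hunks above) merely makes the variable available to that call. Schematic output with the marker back in place (hand-written illustration, not a captured trace):

    2)   ==========> |
    2)   1.934 us    |  smp_apic_timer_interrupt();
    2)   <========== |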