Diffstat (limited to 'kernel/trace/trace_functions_graph.c')
-rw-r--r--  kernel/trace/trace_functions_graph.c | 67
1 file changed, 56 insertions(+), 11 deletions(-)
diff --git a/kernel/trace/trace_functions_graph.c b/kernel/trace/trace_functions_graph.c
index d28687e7b3a7..d2249abafb53 100644
--- a/kernel/trace/trace_functions_graph.c
+++ b/kernel/trace/trace_functions_graph.c
@@ -57,7 +57,8 @@ static struct tracer_flags tracer_flags = {
 
 /* Add a function return address to the trace stack on thread info.*/
 int
-ftrace_push_return_trace(unsigned long ret, unsigned long func, int *depth)
+ftrace_push_return_trace(unsigned long ret, unsigned long func, int *depth,
+			 unsigned long frame_pointer)
 {
 	unsigned long long calltime;
 	int index;
@@ -65,6 +66,12 @@ ftrace_push_return_trace(unsigned long ret, unsigned long func, int *depth)
 	if (!current->ret_stack)
 		return -EBUSY;
 
+	/*
+	 * We must make sure the ret_stack is tested before we read
+	 * anything else.
+	 */
+	smp_rmb();
+
 	/* The return trace stack is full */
 	if (current->curr_ret_stack == FTRACE_RETFUNC_DEPTH - 1) {
 		atomic_inc(&current->trace_overrun);
@@ -78,14 +85,17 @@ ftrace_push_return_trace(unsigned long ret, unsigned long func, int *depth)
 	current->ret_stack[index].ret = ret;
 	current->ret_stack[index].func = func;
 	current->ret_stack[index].calltime = calltime;
+	current->ret_stack[index].subtime = 0;
+	current->ret_stack[index].fp = frame_pointer;
 	*depth = index;
 
 	return 0;
 }
 
 /* Retrieve a function return address to the trace stack on thread info.*/
-void
-ftrace_pop_return_trace(struct ftrace_graph_ret *trace, unsigned long *ret)
+static void
+ftrace_pop_return_trace(struct ftrace_graph_ret *trace, unsigned long *ret,
+			unsigned long frame_pointer)
 {
 	int index;
 
@@ -99,28 +109,52 @@ ftrace_pop_return_trace(struct ftrace_graph_ret *trace, unsigned long *ret)
 		return;
 	}
 
+#ifdef CONFIG_HAVE_FUNCTION_GRAPH_FP_TEST
+	/*
+	 * The arch may choose to record the frame pointer used
+	 * and check it here to make sure that it is what we expect it
+	 * to be. If gcc does not set the place holder of the return
+	 * address in the frame pointer, and does a copy instead, then
+	 * the function graph trace will fail. This test detects this
+	 * case.
+	 *
+	 * Currently, x86_32 with optimize for size (-Os) makes the latest
+	 * gcc do the above.
+	 */
+	if (unlikely(current->ret_stack[index].fp != frame_pointer)) {
+		ftrace_graph_stop();
+		WARN(1, "Bad frame pointer: expected %lx, received %lx\n"
+		     "  from func %pF return to %lx\n",
+		     current->ret_stack[index].fp,
+		     frame_pointer,
+		     (void *)current->ret_stack[index].func,
+		     current->ret_stack[index].ret);
+		*ret = (unsigned long)panic;
+		return;
+	}
+#endif
+
 	*ret = current->ret_stack[index].ret;
 	trace->func = current->ret_stack[index].func;
 	trace->calltime = current->ret_stack[index].calltime;
 	trace->overrun = atomic_read(&current->trace_overrun);
 	trace->depth = index;
-	barrier();
-	current->curr_ret_stack--;
-
 }
 
 /*
  * Send the trace to the ring-buffer.
  * @return the original return address.
  */
-unsigned long ftrace_return_to_handler(void)
+unsigned long ftrace_return_to_handler(unsigned long frame_pointer)
 {
 	struct ftrace_graph_ret trace;
 	unsigned long ret;
 
-	ftrace_pop_return_trace(&trace, &ret);
+	ftrace_pop_return_trace(&trace, &ret, frame_pointer);
 	trace.rettime = trace_clock_local();
 	ftrace_graph_return(&trace);
+	barrier();
+	current->curr_ret_stack--;
 
 	if (unlikely(!ret)) {
 		ftrace_graph_stop();
@@ -426,8 +460,8 @@ print_graph_irq(struct trace_iterator *iter, unsigned long addr,
 	return TRACE_TYPE_HANDLED;
 }
 
-static enum print_line_t
-print_graph_duration(unsigned long long duration, struct trace_seq *s)
+enum print_line_t
+trace_print_graph_duration(unsigned long long duration, struct trace_seq *s)
 {
 	unsigned long nsecs_rem = do_div(duration, 1000);
 	/* log10(ULONG_MAX) + '\0' */
@@ -464,12 +498,23 @@ print_graph_duration(unsigned long long duration, struct trace_seq *s)
 		if (!ret)
 			return TRACE_TYPE_PARTIAL_LINE;
 	}
+	return TRACE_TYPE_HANDLED;
+}
+
+static enum print_line_t
+print_graph_duration(unsigned long long duration, struct trace_seq *s)
+{
+	int ret;
+
+	ret = trace_print_graph_duration(duration, s);
+	if (ret != TRACE_TYPE_HANDLED)
+		return ret;
 
 	ret = trace_seq_printf(s, "| ");
 	if (!ret)
 		return TRACE_TYPE_PARTIAL_LINE;
-	return TRACE_TYPE_HANDLED;
 
+	return TRACE_TYPE_HANDLED;
 }
 
 /* Case of a leaf function on its call entry */