Diffstat (limited to 'kernel/trace/trace_functions_graph.c')

 -rw-r--r--  kernel/trace/trace_functions_graph.c | 78
 1 file changed, 65 insertions(+), 13 deletions(-)
diff --git a/kernel/trace/trace_functions_graph.c b/kernel/trace/trace_functions_graph.c
index d28687e7b3a7..420ec3487579 100644
--- a/kernel/trace/trace_functions_graph.c
+++ b/kernel/trace/trace_functions_graph.c
@@ -57,7 +57,8 @@ static struct tracer_flags tracer_flags = {
 
 /* Add a function return address to the trace stack on thread info.*/
 int
-ftrace_push_return_trace(unsigned long ret, unsigned long func, int *depth)
+ftrace_push_return_trace(unsigned long ret, unsigned long func, int *depth,
+                         unsigned long frame_pointer)
 {
         unsigned long long calltime;
         int index;
@@ -65,6 +66,12 @@ ftrace_push_return_trace(unsigned long ret, unsigned long func, int *depth)
         if (!current->ret_stack)
                 return -EBUSY;
 
+        /*
+         * We must make sure the ret_stack is tested before we read
+         * anything else.
+         */
+        smp_rmb();
+
         /* The return trace stack is full */
         if (current->curr_ret_stack == FTRACE_RETFUNC_DEPTH - 1) {
                 atomic_inc(&current->trace_overrun);
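
Note: the smp_rmb() added above is the read side of a pointer-publication pairing. The allocation path in kernel/trace/ftrace.c sets curr_ret_stack to -1 and issues smp_wmb() before publishing the ret_stack pointer. A minimal sketch of both sides (the writer side is paraphrased from alloc_retstack_tasklist(), not quoted verbatim):

    /* Writer (allocation): initialize first, then publish the pointer. */
    t->curr_ret_stack = -1;
    smp_wmb();                              /* order init before publish */
    t->ret_stack = ret_stack_list[start++];

    /* Reader (ftrace_push_return_trace): test the pointer, then read. */
    if (!current->ret_stack)
            return -EBUSY;
    smp_rmb();                              /* pairs with the smp_wmb() */
    index = ++current->curr_ret_stack;      /* now safe to use the index */
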
@@ -78,14 +85,17 @@ ftrace_push_return_trace(unsigned long ret, unsigned long func, int *depth)
         current->ret_stack[index].ret = ret;
         current->ret_stack[index].func = func;
         current->ret_stack[index].calltime = calltime;
+        current->ret_stack[index].subtime = 0;
+        current->ret_stack[index].fp = frame_pointer;
         *depth = index;
 
         return 0;
 }
 
 /* Retrieve a function return address to the trace stack on thread info.*/
-void
-ftrace_pop_return_trace(struct ftrace_graph_ret *trace, unsigned long *ret)
+static void
+ftrace_pop_return_trace(struct ftrace_graph_ret *trace, unsigned long *ret,
+                        unsigned long frame_pointer)
 {
         int index;
 
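
Note: the two new stores assume matching fields in struct ftrace_ret_stack. For this series the structure in include/linux/ftrace.h looks roughly like the sketch below (same era; treat exact layout as approximate):

    struct ftrace_ret_stack {
            unsigned long           ret;      /* saved return address          */
            unsigned long           func;     /* traced function               */
            unsigned long long      calltime; /* timestamp at function entry   */
            unsigned long long      subtime;  /* time spent in children,
                                                 accumulated by the profiler   */
            unsigned long           fp;       /* frame pointer at entry, used
                                                 by the FP test on the pop     */
    };
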
@@ -99,28 +109,52 @@ ftrace_pop_return_trace(struct ftrace_graph_ret *trace, unsigned long *ret)
                 return;
         }
 
+#ifdef CONFIG_HAVE_FUNCTION_GRAPH_FP_TEST
+        /*
+         * The arch may choose to record the frame pointer used
+         * and check it here to make sure that it is what we expect it
+         * to be. If gcc does not set the place holder of the return
+         * address in the frame pointer, and does a copy instead, then
+         * the function graph trace will fail. This test detects this
+         * case.
+         *
+         * Currently, x86_32 with optimize for size (-Os) makes the latest
+         * gcc do the above.
+         */
+        if (unlikely(current->ret_stack[index].fp != frame_pointer)) {
+                ftrace_graph_stop();
+                WARN(1, "Bad frame pointer: expected %lx, received %lx\n"
+                     "  from func %pF return to %lx\n",
+                     current->ret_stack[index].fp,
+                     frame_pointer,
+                     (void *)current->ret_stack[index].func,
+                     current->ret_stack[index].ret);
+                *ret = (unsigned long)panic;
+                return;
+        }
+#endif
+
         *ret = current->ret_stack[index].ret;
         trace->func = current->ret_stack[index].func;
         trace->calltime = current->ret_stack[index].calltime;
         trace->overrun = atomic_read(&current->trace_overrun);
         trace->depth = index;
-        barrier();
-        current->curr_ret_stack--;
-
 }
 
 /*
  * Send the trace to the ring-buffer.
  * @return the original return address.
  */
-unsigned long ftrace_return_to_handler(void)
+unsigned long ftrace_return_to_handler(unsigned long frame_pointer)
 {
         struct ftrace_graph_ret trace;
         unsigned long ret;
 
-        ftrace_pop_return_trace(&trace, &ret);
+        ftrace_pop_return_trace(&trace, &ret, frame_pointer);
         trace.rettime = trace_clock_local();
         ftrace_graph_return(&trace);
+        barrier();
+        current->curr_ret_stack--;
 
         if (unlikely(!ret)) {
                 ftrace_graph_stop();
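
Note: frame_pointer originates in the arch entry hook and comes back through the arch return trampoline, which is what lets the pop compare the two. A hedged sketch of the entry side (on x86 the real hook is prepare_ftrace_return() and the exit path is the return_to_handler trampoline in assembly; the body below is simplified, with fault and error handling omitted):

    /* Entry: called from the mcount stub; records fp alongside ret. */
    void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr,
                               unsigned long frame_pointer)
    {
            unsigned long old = *parent;     /* original return address */
            struct ftrace_graph_ent trace;

            trace.func = self_addr;
            if (ftrace_push_return_trace(old, self_addr, &trace.depth,
                                         frame_pointer) == -EBUSY)
                    return;                  /* stack full: leave ret alone */

            /* Divert the return into the trampoline, which later calls
             * ftrace_return_to_handler(frame_pointer) to get 'old' back. */
            *parent = (unsigned long)&return_to_handler;
            ftrace_graph_entry(&trace);
    }

Also note that barrier() and the curr_ret_stack decrement moved out of ftrace_pop_return_trace() and now run after ftrace_graph_return(&trace): the ret_stack slot has to stay reserved while the return callback may still read it (for instance for the new subtime bookkeeping), and barrier() keeps the compiler from moving the decrement before that call.
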
@@ -426,8 +460,8 @@ print_graph_irq(struct trace_iterator *iter, unsigned long addr,
         return TRACE_TYPE_HANDLED;
 }
 
-static enum print_line_t
-print_graph_duration(unsigned long long duration, struct trace_seq *s)
+enum print_line_t
+trace_print_graph_duration(unsigned long long duration, struct trace_seq *s)
 {
         unsigned long nsecs_rem = do_div(duration, 1000);
         /* log10(ULONG_MAX) + '\0' */
@@ -464,12 +498,23 @@ print_graph_duration(unsigned long long duration, struct trace_seq *s)
                 if (!ret)
                         return TRACE_TYPE_PARTIAL_LINE;
         }
+        return TRACE_TYPE_HANDLED;
+}
+
+static enum print_line_t
+print_graph_duration(unsigned long long duration, struct trace_seq *s)
+{
+        int ret;
+
+        ret = trace_print_graph_duration(duration, s);
+        if (ret != TRACE_TYPE_HANDLED)
+                return ret;
 
         ret = trace_seq_printf(s, "| ");
         if (!ret)
                 return TRACE_TYPE_PARTIAL_LINE;
-        return TRACE_TYPE_HANDLED;
 
+        return TRACE_TYPE_HANDLED;
 }
 
 /* Case of a leaf function on its call entry */
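
Note: the split exposes the bare "<number> <unit>" formatting as trace_print_graph_duration(), while the static print_graph_duration() keeps the graph tracer's trailing "| " column separator. That lets other output paths reuse the formatter; a hedged usage sketch follows (print_avg_line() is hypothetical, but the call shape matches the function profiler's stat output in kernel/trace/ftrace.c, which is the consumer this series adds):

    static int print_avg_line(struct trace_seq *s, unsigned long long avg_ns)
    {
            /* Emits e.g. "3.251 us" with no column separator. */
            if (trace_print_graph_duration(avg_ns, s) != TRACE_TYPE_HANDLED)
                    return 0;       /* trace_seq is full: partial line */
            return trace_seq_printf(s, "\n");
    }
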
@@ -798,9 +843,16 @@ print_graph_function(struct trace_iterator *iter)
 
         switch (entry->type) {
         case TRACE_GRAPH_ENT: {
-                struct ftrace_graph_ent_entry *field;
+                /*
+                 * print_graph_entry() may consume the current event,
+                 * thus @field may become invalid, so we need to save it.
+                 * sizeof(struct ftrace_graph_ent_entry) is very small,
+                 * it can be safely saved at the stack.
+                 */
+                struct ftrace_graph_ent_entry *field, saved;
                 trace_assign_type(field, entry);
-                return print_graph_entry(field, s, iter);
+                saved = *field;
+                return print_graph_entry(&saved, s, iter);
         }
         case TRACE_GRAPH_RET: {
                 struct ftrace_graph_ret_entry *field;
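
Note: the hunk above fixes a use-after-consume. @field points into the ring buffer, and print_graph_entry() may advance the iterator while looking ahead for the matching return event (the "may consume the current event" of the in-diff comment), recycling the page @field points into. Reduced to its essentials, the fix is the classic snapshot pattern (illustrative fragment mirroring the hunk):

    trace_assign_type(field, entry);           /* field points into the buffer */
    saved = *field;                            /* tiny struct: copy to stack   */
    return print_graph_entry(&saved, s, iter); /* iter may now consume events  */
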
