aboutsummaryrefslogtreecommitdiffstats
path: root/kernel/trace
diff options
context:
space:
mode:
authorSteven Rostedt (Red Hat) <rostedt@goodmis.org>2015-07-16 13:24:54 -0400
committerSteven Rostedt <rostedt@goodmis.org>2015-07-20 22:30:50 -0400
commit72ac426a5bb0cec572d26b4456f8c1e14601694e (patch)
tree637bfb15ecc273161b4a5d49e6abc7b136fca6ac /kernel/trace
parentd90fd77402d3de56a9ca3df04e5d868d0979dc59 (diff)
tracing: Clean up stack tracing and fix fentry updates
Akashi Takahiro was porting the stack tracer to arm64 and found some issues with it. One was that it repeats the top function, due to the stack frame added by the mcount caller and added by itself. This was added when fentry came in, and before fentry created its own stack frame. But x86's fentry now creates its own stack frame, and there's no need to insert the function again. This also cleans up the code a bit, where it doesn't need to do something special for fentry, and doesn't include insertion of a duplicate entry for the called function being traced. Link: http://lkml.kernel.org/r/55A646EE.6030402@linaro.org Some-suggestions-by: Jungseok Lee <jungseoklee85@gmail.com> Some-suggestions-by: Mark Rutland <mark.rutland@arm.com> Reported-by: AKASHI Takahiro <takahiro.akashi@linaro.org> Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
Diffstat (limited to 'kernel/trace')
-rw-r--r--kernel/trace/trace_stack.c68
1 file changed, 23 insertions, 45 deletions
diff --git a/kernel/trace/trace_stack.c b/kernel/trace/trace_stack.c
index 3f34496244e9..b746399ab59c 100644
--- a/kernel/trace/trace_stack.c
+++ b/kernel/trace/trace_stack.c
@@ -18,12 +18,6 @@
18 18
19#define STACK_TRACE_ENTRIES 500 19#define STACK_TRACE_ENTRIES 500
20 20
21#ifdef CC_USING_FENTRY
22# define fentry 1
23#else
24# define fentry 0
25#endif
26
27static unsigned long stack_dump_trace[STACK_TRACE_ENTRIES+1] = 21static unsigned long stack_dump_trace[STACK_TRACE_ENTRIES+1] =
28 { [0 ... (STACK_TRACE_ENTRIES)] = ULONG_MAX }; 22 { [0 ... (STACK_TRACE_ENTRIES)] = ULONG_MAX };
29static unsigned stack_dump_index[STACK_TRACE_ENTRIES]; 23static unsigned stack_dump_index[STACK_TRACE_ENTRIES];
@@ -35,7 +29,7 @@ static unsigned stack_dump_index[STACK_TRACE_ENTRIES];
35 */ 29 */
36static struct stack_trace max_stack_trace = { 30static struct stack_trace max_stack_trace = {
37 .max_entries = STACK_TRACE_ENTRIES - 1, 31 .max_entries = STACK_TRACE_ENTRIES - 1,
38 .entries = &stack_dump_trace[1], 32 .entries = &stack_dump_trace[0],
39}; 33};
40 34
41static unsigned long max_stack_size; 35static unsigned long max_stack_size;
@@ -55,7 +49,7 @@ static inline void print_max_stack(void)
55 49
56 pr_emerg(" Depth Size Location (%d entries)\n" 50 pr_emerg(" Depth Size Location (%d entries)\n"
57 " ----- ---- --------\n", 51 " ----- ---- --------\n",
58 max_stack_trace.nr_entries - 1); 52 max_stack_trace.nr_entries);
59 53
60 for (i = 0; i < max_stack_trace.nr_entries; i++) { 54 for (i = 0; i < max_stack_trace.nr_entries; i++) {
61 if (stack_dump_trace[i] == ULONG_MAX) 55 if (stack_dump_trace[i] == ULONG_MAX)
@@ -77,7 +71,7 @@ check_stack(unsigned long ip, unsigned long *stack)
77 unsigned long this_size, flags; unsigned long *p, *top, *start; 71 unsigned long this_size, flags; unsigned long *p, *top, *start;
78 static int tracer_frame; 72 static int tracer_frame;
79 int frame_size = ACCESS_ONCE(tracer_frame); 73 int frame_size = ACCESS_ONCE(tracer_frame);
80 int i; 74 int i, x;
81 75
82 this_size = ((unsigned long)stack) & (THREAD_SIZE-1); 76 this_size = ((unsigned long)stack) & (THREAD_SIZE-1);
83 this_size = THREAD_SIZE - this_size; 77 this_size = THREAD_SIZE - this_size;
@@ -105,26 +99,20 @@ check_stack(unsigned long ip, unsigned long *stack)
105 max_stack_size = this_size; 99 max_stack_size = this_size;
106 100
107 max_stack_trace.nr_entries = 0; 101 max_stack_trace.nr_entries = 0;
108 102 max_stack_trace.skip = 3;
109 if (using_ftrace_ops_list_func())
110 max_stack_trace.skip = 4;
111 else
112 max_stack_trace.skip = 3;
113 103
114 save_stack_trace(&max_stack_trace); 104 save_stack_trace(&max_stack_trace);
115 105
116 /* 106 /* Skip over the overhead of the stack tracer itself */
117 * Add the passed in ip from the function tracer. 107 for (i = 0; i < max_stack_trace.nr_entries; i++) {
118 * Searching for this on the stack will skip over 108 if (stack_dump_trace[i] == ip)
119 * most of the overhead from the stack tracer itself. 109 break;
120 */ 110 }
121 stack_dump_trace[0] = ip;
122 max_stack_trace.nr_entries++;
123 111
124 /* 112 /*
125 * Now find where in the stack these are. 113 * Now find where in the stack these are.
126 */ 114 */
127 i = 0; 115 x = 0;
128 start = stack; 116 start = stack;
129 top = (unsigned long *) 117 top = (unsigned long *)
130 (((unsigned long)start & ~(THREAD_SIZE-1)) + THREAD_SIZE); 118 (((unsigned long)start & ~(THREAD_SIZE-1)) + THREAD_SIZE);
@@ -139,12 +127,15 @@ check_stack(unsigned long ip, unsigned long *stack)
139 while (i < max_stack_trace.nr_entries) { 127 while (i < max_stack_trace.nr_entries) {
140 int found = 0; 128 int found = 0;
141 129
142 stack_dump_index[i] = this_size; 130 stack_dump_index[x] = this_size;
143 p = start; 131 p = start;
144 132
145 for (; p < top && i < max_stack_trace.nr_entries; p++) { 133 for (; p < top && i < max_stack_trace.nr_entries; p++) {
134 if (stack_dump_trace[i] == ULONG_MAX)
135 break;
146 if (*p == stack_dump_trace[i]) { 136 if (*p == stack_dump_trace[i]) {
147 this_size = stack_dump_index[i++] = 137 stack_dump_trace[x] = stack_dump_trace[i++];
138 this_size = stack_dump_index[x++] =
148 (top - p) * sizeof(unsigned long); 139 (top - p) * sizeof(unsigned long);
149 found = 1; 140 found = 1;
150 /* Start the search from here */ 141 /* Start the search from here */
@@ -156,7 +147,7 @@ check_stack(unsigned long ip, unsigned long *stack)
156 * out what that is, then figure it out 147 * out what that is, then figure it out
157 * now. 148 * now.
158 */ 149 */
159 if (unlikely(!tracer_frame) && i == 1) { 150 if (unlikely(!tracer_frame)) {
160 tracer_frame = (p - stack) * 151 tracer_frame = (p - stack) *
161 sizeof(unsigned long); 152 sizeof(unsigned long);
162 max_stack_size -= tracer_frame; 153 max_stack_size -= tracer_frame;
@@ -168,6 +159,10 @@ check_stack(unsigned long ip, unsigned long *stack)
168 i++; 159 i++;
169 } 160 }
170 161
162 max_stack_trace.nr_entries = x;
163 for (; x < i; x++)
164 stack_dump_trace[x] = ULONG_MAX;
165
171 if (task_stack_end_corrupted(current)) { 166 if (task_stack_end_corrupted(current)) {
172 print_max_stack(); 167 print_max_stack();
173 BUG(); 168 BUG();
@@ -192,24 +187,7 @@ stack_trace_call(unsigned long ip, unsigned long parent_ip,
192 if (per_cpu(trace_active, cpu)++ != 0) 187 if (per_cpu(trace_active, cpu)++ != 0)
193 goto out; 188 goto out;
194 189
195 /* 190 ip += MCOUNT_INSN_SIZE;
196 * When fentry is used, the traced function does not get
197 * its stack frame set up, and we lose the parent.
198 * The ip is pretty useless because the function tracer
199 * was called before that function set up its stack frame.
200 * In this case, we use the parent ip.
201 *
202 * By adding the return address of either the parent ip
203 * or the current ip we can disregard most of the stack usage
204 * caused by the stack tracer itself.
205 *
206 * The function tracer always reports the address of where the
207 * mcount call was, but the stack will hold the return address.
208 */
209 if (fentry)
210 ip = parent_ip;
211 else
212 ip += MCOUNT_INSN_SIZE;
213 191
214 check_stack(ip, &stack); 192 check_stack(ip, &stack);
215 193
@@ -284,7 +262,7 @@ __next(struct seq_file *m, loff_t *pos)
284{ 262{
285 long n = *pos - 1; 263 long n = *pos - 1;
286 264
287 if (n >= max_stack_trace.nr_entries || stack_dump_trace[n] == ULONG_MAX) 265 if (n > max_stack_trace.nr_entries || stack_dump_trace[n] == ULONG_MAX)
288 return NULL; 266 return NULL;
289 267
290 m->private = (void *)n; 268 m->private = (void *)n;
@@ -354,7 +332,7 @@ static int t_show(struct seq_file *m, void *v)
354 seq_printf(m, " Depth Size Location" 332 seq_printf(m, " Depth Size Location"
355 " (%d entries)\n" 333 " (%d entries)\n"
356 " ----- ---- --------\n", 334 " ----- ---- --------\n",
357 max_stack_trace.nr_entries - 1); 335 max_stack_trace.nr_entries);
358 336
359 if (!stack_tracer_enabled && !max_stack_size) 337 if (!stack_tracer_enabled && !max_stack_size)
360 print_disabled(m); 338 print_disabled(m);