Diffstat (limited to 'kernel/trace/trace_stack.c')
-rw-r--r--	kernel/trace/trace_stack.c	85
1 file changed, 27 insertions(+), 58 deletions(-)
diff --git a/kernel/trace/trace_stack.c b/kernel/trace/trace_stack.c
index eec648a0d673..5d16f73898db 100644
--- a/kernel/trace/trace_stack.c
+++ b/kernel/trace/trace_stack.c
@@ -18,44 +18,32 @@
 
 #include "trace.h"
 
-static unsigned long stack_dump_trace[STACK_TRACE_ENTRIES+1] =
-	 { [0 ... (STACK_TRACE_ENTRIES)]	= ULONG_MAX };
-unsigned stack_trace_index[STACK_TRACE_ENTRIES];
+#define STACK_TRACE_ENTRIES 500
 
-/*
- * Reserve one entry for the passed in ip. This will allow
- * us to remove most or all of the stack size overhead
- * added by the stack tracer itself.
- */
-struct stack_trace stack_trace_max = {
-	.max_entries		= STACK_TRACE_ENTRIES - 1,
-	.entries		= &stack_dump_trace[0],
-};
+static unsigned long stack_dump_trace[STACK_TRACE_ENTRIES];
+static unsigned stack_trace_index[STACK_TRACE_ENTRIES];
 
-unsigned long stack_trace_max_size;
-arch_spinlock_t stack_trace_max_lock =
+static unsigned int stack_trace_nr_entries;
+static unsigned long stack_trace_max_size;
+static arch_spinlock_t stack_trace_max_lock =
 	(arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
 
 DEFINE_PER_CPU(int, disable_stack_tracer);
 static DEFINE_MUTEX(stack_sysctl_mutex);
 
 int stack_tracer_enabled;
-static int last_stack_tracer_enabled;
 
-void stack_trace_print(void)
+static void print_max_stack(void)
 {
 	long i;
 	int size;
 
 	pr_emerg("        Depth    Size   Location    (%d entries)\n"
 		 "        -----    ----   --------\n",
-		 stack_trace_max.nr_entries);
+		 stack_trace_nr_entries);
 
-	for (i = 0; i < stack_trace_max.nr_entries; i++) {
-		if (stack_dump_trace[i] == ULONG_MAX)
-			break;
-		if (i+1 == stack_trace_max.nr_entries ||
-		    stack_dump_trace[i+1] == ULONG_MAX)
+	for (i = 0; i < stack_trace_nr_entries; i++) {
+		if (i + 1 == stack_trace_nr_entries)
 			size = stack_trace_index[i];
 		else
 			size = stack_trace_index[i] - stack_trace_index[i+1];
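With the ULONG_MAX sentinel gone, the count of valid entries lives in
stack_trace_nr_entries, and each frame's size is just the difference
between adjacent depth values; the last frame keeps its full remaining
depth. A worked illustration of that arithmetic, as a standalone sketch
with made-up depth values (not part of the commit):

	#include <stdio.h>

	/* Hypothetical depths: three recorded frames with 400, 320 and
	 * 280 bytes of stack remaining below each return address. */
	static unsigned int idx[] = { 400, 320, 280 };

	int main(void)
	{
		unsigned int nr = 3, i;

		for (i = 0; i < nr; i++) {
			/* Same computation as the loop above: the last
			 * frame keeps its depth, the others subtract the
			 * next frame's depth. */
			unsigned int size = (i + 1 == nr) ? idx[i]
							  : idx[i] - idx[i + 1];
			printf("frame %u: depth %3u  size %3u\n",
			       i, idx[i], size);
		}
		return 0;	/* prints sizes 80, 40 and 280 */
	}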
@@ -65,16 +53,7 @@ void stack_trace_print(void)
 	}
 }
 
-/*
- * When arch-specific code overrides this function, the following
- * data should be filled up, assuming stack_trace_max_lock is held to
- * prevent concurrent updates.
- *     stack_trace_index[]
- *     stack_trace_max
- *     stack_trace_max_size
- */
-void __weak
-check_stack(unsigned long ip, unsigned long *stack)
+static void check_stack(unsigned long ip, unsigned long *stack)
 {
 	unsigned long this_size, flags; unsigned long *p, *top, *start;
 	static int tracer_frame;
@@ -110,13 +89,12 @@ check_stack(unsigned long ip, unsigned long *stack)
 
 	stack_trace_max_size = this_size;
 
-	stack_trace_max.nr_entries = 0;
-	stack_trace_max.skip = 0;
-
-	save_stack_trace(&stack_trace_max);
+	stack_trace_nr_entries = stack_trace_save(stack_dump_trace,
+						  ARRAY_SIZE(stack_dump_trace) - 1,
+						  0);
 
 	/* Skip over the overhead of the stack tracer itself */
-	for (i = 0; i < stack_trace_max.nr_entries; i++) {
+	for (i = 0; i < stack_trace_nr_entries; i++) {
 		if (stack_dump_trace[i] == ip)
 			break;
 	}
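stack_trace_save() is the generic replacement for the struct
stack_trace/save_stack_trace() pair: it fills the supplied buffer and
returns how many entries it stored, so the separate nr_entries/skip
bookkeeping goes away. A minimal sketch of the call pattern, using the
generic prototypes from <linux/stacktrace.h> (the dump_current_stack()
wrapper itself is hypothetical):

	#include <linux/stacktrace.h>
	#include <linux/kernel.h>

	static unsigned long entries[16];

	static void dump_current_stack(void)
	{
		unsigned int nr;

		/* Save up to 16 return addresses of the current task,
		 * skipping no leading entries. */
		nr = stack_trace_save(entries, ARRAY_SIZE(entries), 0);

		/* Generic printer; the file-local stack_trace_print()
		 * above was renamed print_max_stack(), which keeps it
		 * clear of this generic name. */
		stack_trace_print(entries, nr, 0);
	}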
@@ -125,7 +103,7 @@ check_stack(unsigned long ip, unsigned long *stack)
 	 * Some archs may not have the passed in ip in the dump.
 	 * If that happens, we need to show everything.
 	 */
-	if (i == stack_trace_max.nr_entries)
+	if (i == stack_trace_nr_entries)
 		i = 0;
 
 	/*
@@ -143,15 +121,13 @@ check_stack(unsigned long ip, unsigned long *stack)
 	 * loop will only happen once. This code only takes place
 	 * on a new max, so it is far from a fast path.
 	 */
-	while (i < stack_trace_max.nr_entries) {
+	while (i < stack_trace_nr_entries) {
 		int found = 0;
 
 		stack_trace_index[x] = this_size;
 		p = start;
 
-		for (; p < top && i < stack_trace_max.nr_entries; p++) {
-			if (stack_dump_trace[i] == ULONG_MAX)
-				break;
+		for (; p < top && i < stack_trace_nr_entries; p++) {
 			/*
 			 * The READ_ONCE_NOCHECK is used to let KASAN know that
 			 * this is not a stack-out-of-bounds error.
@@ -182,12 +158,10 @@ check_stack(unsigned long ip, unsigned long *stack)
 		i++;
 	}
 
-	stack_trace_max.nr_entries = x;
-	for (; x < i; x++)
-		stack_dump_trace[x] = ULONG_MAX;
+	stack_trace_nr_entries = x;
 
 	if (task_stack_end_corrupted(current)) {
-		stack_trace_print();
+		print_max_stack();
 		BUG();
 	}
 
@@ -286,7 +260,7 @@ __next(struct seq_file *m, loff_t *pos)
 {
 	long n = *pos - 1;
 
-	if (n >= stack_trace_max.nr_entries || stack_dump_trace[n] == ULONG_MAX)
+	if (n >= stack_trace_nr_entries)
 		return NULL;
 
 	m->private = (void *)n;
@@ -350,7 +324,7 @@ static int t_show(struct seq_file *m, void *v)
 		seq_printf(m, "        Depth    Size   Location"
 			   "    (%d entries)\n"
 			   "        -----    ----   --------\n",
-			   stack_trace_max.nr_entries);
+			   stack_trace_nr_entries);
 
 		if (!stack_tracer_enabled && !stack_trace_max_size)
 			print_disabled(m);
@@ -360,12 +334,10 @@ static int t_show(struct seq_file *m, void *v)
 
 	i = *(long *)v;
 
-	if (i >= stack_trace_max.nr_entries ||
-	    stack_dump_trace[i] == ULONG_MAX)
+	if (i >= stack_trace_nr_entries)
 		return 0;
 
-	if (i+1 == stack_trace_max.nr_entries ||
-	    stack_dump_trace[i+1] == ULONG_MAX)
+	if (i + 1 == stack_trace_nr_entries)
 		size = stack_trace_index[i];
 	else
 		size = stack_trace_index[i] - stack_trace_index[i+1];
@@ -422,23 +394,21 @@ stack_trace_sysctl(struct ctl_table *table, int write,
 		   void __user *buffer, size_t *lenp,
 		   loff_t *ppos)
 {
+	int was_enabled;
 	int ret;
 
 	mutex_lock(&stack_sysctl_mutex);
+	was_enabled = !!stack_tracer_enabled;
 
 	ret = proc_dointvec(table, write, buffer, lenp, ppos);
 
-	if (ret || !write ||
-	    (last_stack_tracer_enabled == !!stack_tracer_enabled))
+	if (ret || !write || (was_enabled == !!stack_tracer_enabled))
 		goto out;
 
-	last_stack_tracer_enabled = !!stack_tracer_enabled;
-
 	if (stack_tracer_enabled)
 		register_ftrace_function(&trace_ops);
 	else
 		unregister_ftrace_function(&trace_ops);
-
  out:
 	mutex_unlock(&stack_sysctl_mutex);
 	return ret;
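The static last_stack_tracer_enabled existed only to detect 0<->1
transitions across sysctl writes. Since the whole handler runs under
stack_sysctl_mutex, a local snapshot taken before proc_dointvec() and
compared afterwards detects the same transition without any persistent
state, which is also why enable_stacktrace() below no longer needs to
seed it at boot.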
@@ -454,7 +424,6 @@ static __init int enable_stacktrace(char *str)
 		strncpy(stack_trace_filter_buf, str + len, COMMAND_LINE_SIZE);
 
 	stack_tracer_enabled = 1;
-	last_stack_tracer_enabled = 1;
 	return 1;
 }
 __setup("stacktrace", enable_stacktrace);