diff options
Diffstat (limited to 'kernel/trace/trace_stack.c')
| -rw-r--r-- | kernel/trace/trace_stack.c | 45 |
1 file changed, 25 insertions(+), 20 deletions(-)
diff --git a/kernel/trace/trace_stack.c b/kernel/trace/trace_stack.c index be682b62fe58..0b863f2cbc8e 100644 --- a/kernel/trace/trace_stack.c +++ b/kernel/trace/trace_stack.c | |||
| @@ -48,7 +48,7 @@ static inline void check_stack(void) | |||
| 48 | if (!object_is_on_stack(&this_size)) | 48 | if (!object_is_on_stack(&this_size)) |
| 49 | return; | 49 | return; |
| 50 | 50 | ||
| 51 | raw_local_irq_save(flags); | 51 | local_irq_save(flags); |
| 52 | __raw_spin_lock(&max_stack_lock); | 52 | __raw_spin_lock(&max_stack_lock); |
| 53 | 53 | ||
| 54 | /* a race could have already updated it */ | 54 | /* a race could have already updated it */ |
| @@ -78,6 +78,7 @@ static inline void check_stack(void) | |||
| 78 | * on a new max, so it is far from a fast path. | 78 | * on a new max, so it is far from a fast path. |
| 79 | */ | 79 | */ |
| 80 | while (i < max_stack_trace.nr_entries) { | 80 | while (i < max_stack_trace.nr_entries) { |
| 81 | int found = 0; | ||
| 81 | 82 | ||
| 82 | stack_dump_index[i] = this_size; | 83 | stack_dump_index[i] = this_size; |
| 83 | p = start; | 84 | p = start; |
| @@ -86,17 +87,19 @@ static inline void check_stack(void) | |||
| 86 | if (*p == stack_dump_trace[i]) { | 87 | if (*p == stack_dump_trace[i]) { |
| 87 | this_size = stack_dump_index[i++] = | 88 | this_size = stack_dump_index[i++] = |
| 88 | (top - p) * sizeof(unsigned long); | 89 | (top - p) * sizeof(unsigned long); |
| 90 | found = 1; | ||
| 89 | /* Start the search from here */ | 91 | /* Start the search from here */ |
| 90 | start = p + 1; | 92 | start = p + 1; |
| 91 | } | 93 | } |
| 92 | } | 94 | } |
| 93 | 95 | ||
| 94 | i++; | 96 | if (!found) |
| 97 | i++; | ||
| 95 | } | 98 | } |
| 96 | 99 | ||
| 97 | out: | 100 | out: |
| 98 | __raw_spin_unlock(&max_stack_lock); | 101 | __raw_spin_unlock(&max_stack_lock); |
| 99 | raw_local_irq_restore(flags); | 102 | local_irq_restore(flags); |
| 100 | } | 103 | } |
| 101 | 104 | ||
| 102 | static void | 105 | static void |
| @@ -107,8 +110,7 @@ stack_trace_call(unsigned long ip, unsigned long parent_ip) | |||
| 107 | if (unlikely(!ftrace_enabled || stack_trace_disabled)) | 110 | if (unlikely(!ftrace_enabled || stack_trace_disabled)) |
| 108 | return; | 111 | return; |
| 109 | 112 | ||
| 110 | resched = need_resched(); | 113 | resched = ftrace_preempt_disable(); |
| 111 | preempt_disable_notrace(); | ||
| 112 | 114 | ||
| 113 | cpu = raw_smp_processor_id(); | 115 | cpu = raw_smp_processor_id(); |
| 114 | /* no atomic needed, we only modify this variable by this cpu */ | 116 | /* no atomic needed, we only modify this variable by this cpu */ |
| @@ -120,10 +122,7 @@ stack_trace_call(unsigned long ip, unsigned long parent_ip) | |||
| 120 | out: | 122 | out: |
| 121 | per_cpu(trace_active, cpu)--; | 123 | per_cpu(trace_active, cpu)--; |
| 122 | /* prevent recursion in schedule */ | 124 | /* prevent recursion in schedule */ |
| 123 | if (resched) | 125 | ftrace_preempt_enable(resched); |
| 124 | preempt_enable_no_resched_notrace(); | ||
| 125 | else | ||
| 126 | preempt_enable_notrace(); | ||
| 127 | } | 126 | } |
| 128 | 127 | ||
| 129 | static struct ftrace_ops trace_ops __read_mostly = | 128 | static struct ftrace_ops trace_ops __read_mostly = |
| @@ -166,11 +165,11 @@ stack_max_size_write(struct file *filp, const char __user *ubuf, | |||
| 166 | if (ret < 0) | 165 | if (ret < 0) |
| 167 | return ret; | 166 | return ret; |
| 168 | 167 | ||
| 169 | raw_local_irq_save(flags); | 168 | local_irq_save(flags); |
| 170 | __raw_spin_lock(&max_stack_lock); | 169 | __raw_spin_lock(&max_stack_lock); |
| 171 | *ptr = val; | 170 | *ptr = val; |
| 172 | __raw_spin_unlock(&max_stack_lock); | 171 | __raw_spin_unlock(&max_stack_lock); |
| 173 | raw_local_irq_restore(flags); | 172 | local_irq_restore(flags); |
| 174 | 173 | ||
| 175 | return count; | 174 | return count; |
| 176 | } | 175 | } |
| @@ -184,11 +183,16 @@ static struct file_operations stack_max_size_fops = { | |||
| 184 | static void * | 183 | static void * |
| 185 | t_next(struct seq_file *m, void *v, loff_t *pos) | 184 | t_next(struct seq_file *m, void *v, loff_t *pos) |
| 186 | { | 185 | { |
| 187 | long i = (long)m->private; | 186 | long i; |
| 188 | 187 | ||
| 189 | (*pos)++; | 188 | (*pos)++; |
| 190 | 189 | ||
| 191 | i++; | 190 | if (v == SEQ_START_TOKEN) |
| 191 | i = 0; | ||
| 192 | else { | ||
| 193 | i = *(long *)v; | ||
| 194 | i++; | ||
| 195 | } | ||
| 192 | 196 | ||
| 193 | if (i >= max_stack_trace.nr_entries || | 197 | if (i >= max_stack_trace.nr_entries || |
| 194 | stack_dump_trace[i] == ULONG_MAX) | 198 | stack_dump_trace[i] == ULONG_MAX) |
| @@ -201,12 +205,15 @@ t_next(struct seq_file *m, void *v, loff_t *pos) | |||
| 201 | 205 | ||
| 202 | static void *t_start(struct seq_file *m, loff_t *pos) | 206 | static void *t_start(struct seq_file *m, loff_t *pos) |
| 203 | { | 207 | { |
| 204 | void *t = &m->private; | 208 | void *t = SEQ_START_TOKEN; |
| 205 | loff_t l = 0; | 209 | loff_t l = 0; |
| 206 | 210 | ||
| 207 | local_irq_disable(); | 211 | local_irq_disable(); |
| 208 | __raw_spin_lock(&max_stack_lock); | 212 | __raw_spin_lock(&max_stack_lock); |
| 209 | 213 | ||
| 214 | if (*pos == 0) | ||
| 215 | return SEQ_START_TOKEN; | ||
| 216 | |||
| 210 | for (; t && l < *pos; t = t_next(m, t, &l)) | 217 | for (; t && l < *pos; t = t_next(m, t, &l)) |
| 211 | ; | 218 | ; |
| 212 | 219 | ||
| @@ -235,10 +242,10 @@ static int trace_lookup_stack(struct seq_file *m, long i) | |||
| 235 | 242 | ||
| 236 | static int t_show(struct seq_file *m, void *v) | 243 | static int t_show(struct seq_file *m, void *v) |
| 237 | { | 244 | { |
| 238 | long i = *(long *)v; | 245 | long i; |
| 239 | int size; | 246 | int size; |
| 240 | 247 | ||
| 241 | if (i < 0) { | 248 | if (v == SEQ_START_TOKEN) { |
| 242 | seq_printf(m, " Depth Size Location" | 249 | seq_printf(m, " Depth Size Location" |
| 243 | " (%d entries)\n" | 250 | " (%d entries)\n" |
| 244 | " ----- ---- --------\n", | 251 | " ----- ---- --------\n", |
| @@ -246,6 +253,8 @@ static int t_show(struct seq_file *m, void *v) | |||
| 246 | return 0; | 253 | return 0; |
| 247 | } | 254 | } |
| 248 | 255 | ||
| 256 | i = *(long *)v; | ||
| 257 | |||
| 249 | if (i >= max_stack_trace.nr_entries || | 258 | if (i >= max_stack_trace.nr_entries || |
| 250 | stack_dump_trace[i] == ULONG_MAX) | 259 | stack_dump_trace[i] == ULONG_MAX) |
| 251 | return 0; | 260 | return 0; |
| @@ -275,10 +284,6 @@ static int stack_trace_open(struct inode *inode, struct file *file) | |||
| 275 | int ret; | 284 | int ret; |
| 276 | 285 | ||
| 277 | ret = seq_open(file, &stack_trace_seq_ops); | 286 | ret = seq_open(file, &stack_trace_seq_ops); |
| 278 | if (!ret) { | ||
| 279 | struct seq_file *m = file->private_data; | ||
| 280 | m->private = (void *)-1; | ||
| 281 | } | ||
| 282 | 287 | ||
| 283 | return ret; | 288 | return ret; |
| 284 | } | 289 | } |
