diff options
Diffstat (limited to 'kernel/trace/trace_stack.c')
 kernel/trace/trace_stack.c | 33 +++++++++++++++++++++++++++++----
 1 file changed, 29 insertions(+), 4 deletions(-)
diff --git a/kernel/trace/trace_stack.c b/kernel/trace/trace_stack.c
index 678a5120ee30..4c5dead0c239 100644
--- a/kernel/trace/trace_stack.c
+++ b/kernel/trace/trace_stack.c
@@ -110,12 +110,12 @@ static inline void check_stack(void)
 static void
 stack_trace_call(unsigned long ip, unsigned long parent_ip)
 {
-	int cpu, resched;
+	int cpu;
 
 	if (unlikely(!ftrace_enabled || stack_trace_disabled))
 		return;
 
-	resched = ftrace_preempt_disable();
+	preempt_disable_notrace();
 
 	cpu = raw_smp_processor_id();
 	/* no atomic needed, we only modify this variable by this cpu */
@@ -127,7 +127,7 @@ stack_trace_call(unsigned long ip, unsigned long parent_ip)
  out:
 	per_cpu(trace_active, cpu)--;
 	/* prevent recursion in schedule */
-	ftrace_preempt_enable(resched);
+	preempt_enable_notrace();
 }
 
 static struct ftrace_ops trace_ops __read_mostly =
@@ -157,6 +157,7 @@ stack_max_size_write(struct file *filp, const char __user *ubuf,
 	unsigned long val, flags;
 	char buf[64];
 	int ret;
+	int cpu;
 
 	if (count >= sizeof(buf))
 		return -EINVAL;
@@ -171,9 +172,20 @@ stack_max_size_write(struct file *filp, const char __user *ubuf,
 		return ret;
 
 	local_irq_save(flags);
+
+	/*
+	 * In case we trace inside arch_spin_lock() or after (NMI),
+	 * we will cause circular lock, so we also need to increase
+	 * the percpu trace_active here.
+	 */
+	cpu = smp_processor_id();
+	per_cpu(trace_active, cpu)++;
+
 	arch_spin_lock(&max_stack_lock);
 	*ptr = val;
 	arch_spin_unlock(&max_stack_lock);
+
+	per_cpu(trace_active, cpu)--;
 	local_irq_restore(flags);
 
 	return count;
@@ -183,6 +195,7 @@ static const struct file_operations stack_max_size_fops = {
 	.open = tracing_open_generic,
 	.read = stack_max_size_read,
 	.write = stack_max_size_write,
+	.llseek = default_llseek,
 };
 
 static void *
@@ -206,7 +219,13 @@ t_next(struct seq_file *m, void *v, loff_t *pos)
 
 static void *t_start(struct seq_file *m, loff_t *pos)
 {
+	int cpu;
+
 	local_irq_disable();
+
+	cpu = smp_processor_id();
+	per_cpu(trace_active, cpu)++;
+
 	arch_spin_lock(&max_stack_lock);
 
 	if (*pos == 0)
@@ -217,7 +236,13 @@ static void *t_start(struct seq_file *m, loff_t *pos)
 
 static void t_stop(struct seq_file *m, void *p)
 {
+	int cpu;
+
 	arch_spin_unlock(&max_stack_lock);
+
+	cpu = smp_processor_id();
+	per_cpu(trace_active, cpu)--;
+
 	local_irq_enable();
 }
 
@@ -225,7 +250,7 @@ static int trace_lookup_stack(struct seq_file *m, long i)
 {
 	unsigned long addr = stack_dump_trace[i];
 
-	return seq_printf(m, "%pF\n", (void *)addr);
+	return seq_printf(m, "%pS\n", (void *)addr);
 }
 
 static void print_disabled(struct seq_file *m)