path: root/kernel/trace/trace_stack.c
Diffstat (limited to 'kernel/trace/trace_stack.c')
 kernel/trace/trace_stack.c | 35 +++++++++++++++++------------------
 1 file changed, 17 insertions(+), 18 deletions(-)
diff --git a/kernel/trace/trace_stack.c b/kernel/trace/trace_stack.c
index 5fb1f2c87e6b..76aa04d4c925 100644
--- a/kernel/trace/trace_stack.c
+++ b/kernel/trace/trace_stack.c
@@ -35,7 +35,7 @@ unsigned long stack_trace_max_size;
 arch_spinlock_t stack_trace_max_lock =
 	(arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
 
-static DEFINE_PER_CPU(int, trace_active);
+DEFINE_PER_CPU(int, disable_stack_tracer);
 static DEFINE_MUTEX(stack_sysctl_mutex);
 
 int stack_tracer_enabled;
@@ -96,6 +96,14 @@ check_stack(unsigned long ip, unsigned long *stack)
 	if (in_nmi())
 		return;
 
+	/*
+	 * There's a slight chance that we are tracing inside the
+	 * RCU infrastructure, and rcu_irq_enter() will not work
+	 * as expected.
+	 */
+	if (unlikely(rcu_irq_enter_disabled()))
+		return;
+
 	local_irq_save(flags);
 	arch_spin_lock(&stack_trace_max_lock);
 
@@ -207,13 +215,12 @@ stack_trace_call(unsigned long ip, unsigned long parent_ip,
 		 struct ftrace_ops *op, struct pt_regs *pt_regs)
 {
 	unsigned long stack;
-	int cpu;
 
 	preempt_disable_notrace();
 
-	cpu = raw_smp_processor_id();
 	/* no atomic needed, we only modify this variable by this cpu */
-	if (per_cpu(trace_active, cpu)++ != 0)
+	__this_cpu_inc(disable_stack_tracer);
+	if (__this_cpu_read(disable_stack_tracer) != 1)
 		goto out;
 
 	ip += MCOUNT_INSN_SIZE;
@@ -221,7 +228,7 @@ stack_trace_call(unsigned long ip, unsigned long parent_ip,
 	check_stack(ip, &stack);
 
  out:
-	per_cpu(trace_active, cpu)--;
+	__this_cpu_dec(disable_stack_tracer);
 	/* prevent recursion in schedule */
 	preempt_enable_notrace();
 }
@@ -253,7 +260,6 @@ stack_max_size_write(struct file *filp, const char __user *ubuf,
 	long *ptr = filp->private_data;
 	unsigned long val, flags;
 	int ret;
-	int cpu;
 
 	ret = kstrtoul_from_user(ubuf, count, 10, &val);
 	if (ret)
@@ -264,16 +270,15 @@ stack_max_size_write(struct file *filp, const char __user *ubuf,
 	/*
 	 * In case we trace inside arch_spin_lock() or after (NMI),
 	 * we will cause circular lock, so we also need to increase
-	 * the percpu trace_active here.
+	 * the percpu disable_stack_tracer here.
 	 */
-	cpu = smp_processor_id();
-	per_cpu(trace_active, cpu)++;
+	__this_cpu_inc(disable_stack_tracer);
 
 	arch_spin_lock(&stack_trace_max_lock);
 	*ptr = val;
 	arch_spin_unlock(&stack_trace_max_lock);
 
-	per_cpu(trace_active, cpu)--;
+	__this_cpu_dec(disable_stack_tracer);
 	local_irq_restore(flags);
 
 	return count;
@@ -307,12 +312,9 @@ t_next(struct seq_file *m, void *v, loff_t *pos)
 
 static void *t_start(struct seq_file *m, loff_t *pos)
 {
-	int cpu;
-
 	local_irq_disable();
 
-	cpu = smp_processor_id();
-	per_cpu(trace_active, cpu)++;
+	__this_cpu_inc(disable_stack_tracer);
 
 	arch_spin_lock(&stack_trace_max_lock);
 
@@ -324,12 +326,9 @@ static void *t_start(struct seq_file *m, loff_t *pos)
 
 static void t_stop(struct seq_file *m, void *p)
 {
-	int cpu;
-
 	arch_spin_unlock(&stack_trace_max_lock);
 
-	cpu = smp_processor_id();
-	per_cpu(trace_active, cpu)--;
+	__this_cpu_dec(disable_stack_tracer);
 
 	local_irq_enable();
 }
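
For reference, the pattern this patch converts to is a per-CPU recursion guard: the counter is bumped unconditionally on entry, the body only runs when the counter reads 1 (i.e. this is the outermost call on that CPU), and the counter is dropped on exit; preemption or interrupts stay disabled around the accesses so the task cannot migrate between the increment and the decrement. The sketch below is a minimal user-space analogue of that guard only: it uses a thread-local int in place of the kernel's __this_cpu_* per-CPU accessors, and the function names (trace_call, do_check) are made up for illustration and do not appear in the patch.

#include <stdio.h>

/* Thread-local stand-in for the per-CPU disable_stack_tracer counter. */
static __thread int trace_disable_count;

static void do_check(void)
{
	/* Work that must not recurse into itself. */
	printf("outermost call: doing the stack check\n");
}

/* Analogue of stack_trace_call(): only the outermost entry does the work. */
static void trace_call(void)
{
	trace_disable_count++;			/* __this_cpu_inc(disable_stack_tracer) */
	if (trace_disable_count != 1)		/* __this_cpu_read(...) != 1 */
		goto out;			/* nested entry: bail out */

	do_check();
out:
	trace_disable_count--;			/* __this_cpu_dec(disable_stack_tracer) */
}

int main(void)
{
	trace_call();		/* outermost: runs do_check() */
	trace_call();		/* counter returned to 0 in between, so it runs again */
	return 0;
}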