author     Steven Rostedt (VMware) <rostedt@goodmis.org>  2017-04-06 15:47:32 -0400
committer  Steven Rostedt (VMware) <rostedt@goodmis.org>  2017-04-10 15:21:47 -0400
commit     8aaf1ee70e19ac74cbbb81098edfa328d1ab4bd7 (patch)
tree       5b0e3a0691a6a655b9d25b1812535344ccd6b08d /kernel
parent     5367278cb7ba74537bcad1470d75f30d95b09c14 (diff)
tracing: Rename trace_active to disable_stack_tracer and inline its modification
In order to eliminate a function call, make "trace_active" into
"disable_stack_tracer" and convert stack_tracer_disable() and friends
into static inline functions.

Acked-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Signed-off-by: Steven Rostedt (VMware) <rostedt@goodmis.org>
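The inline replacements land in include/linux/ftrace.h, which this diffstat (limited to 'kernel') does not show. A minimal sketch of the moved helpers, assuming the bodies carry over unchanged from the functions removed below, with only the per-cpu variable renamed:

/* sketch of include/linux/ftrace.h after this commit -- not a hunk of this diff */
DECLARE_PER_CPU(int, disable_stack_tracer);

static inline void stack_tracer_disable(void)
{
	/* preemption or interrupts must be disabled by the caller */
	if (IS_ENABLED(CONFIG_PREEMPT_DEBUG))
		WARN_ON_ONCE(!preempt_count() || !irqs_disabled());
	this_cpu_inc(disable_stack_tracer);
}

static inline void stack_tracer_enable(void)
{
	if (IS_ENABLED(CONFIG_PREEMPT_DEBUG))
		WARN_ON_ONCE(!preempt_count() || !irqs_disabled());
	this_cpu_dec(disable_stack_tracer);
}

Since this_cpu_inc() typically expands to a single per-cpu instruction, callers no longer pay a call/return just to bump the counter.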
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/trace/trace_stack.c  50
1 file changed, 9 insertions(+), 41 deletions(-)
diff --git a/kernel/trace/trace_stack.c b/kernel/trace/trace_stack.c
index 21e536cf66e4..f2f02ff350d4 100644
--- a/kernel/trace/trace_stack.c
+++ b/kernel/trace/trace_stack.c
@@ -35,44 +35,12 @@ unsigned long stack_trace_max_size;
 arch_spinlock_t stack_trace_max_lock =
 	(arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
 
-static DEFINE_PER_CPU(int, trace_active);
+DEFINE_PER_CPU(int, disable_stack_tracer);
 static DEFINE_MUTEX(stack_sysctl_mutex);
 
 int stack_tracer_enabled;
 static int last_stack_tracer_enabled;
 
-/**
- * stack_tracer_disable - temporarily disable the stack tracer
- *
- * There's a few locations (namely in RCU) where stack tracing
- * cannot be executed. This function is used to disable stack
- * tracing during those critical sections.
- *
- * This function must be called with preemption or interrupts
- * disabled and stack_tracer_enable() must be called shortly after
- * while preemption or interrupts are still disabled.
- */
-void stack_tracer_disable(void)
-{
-	/* Preemption or interupts must be disabled */
-	if (IS_ENABLED(CONFIG_PREEMPT_DEBUG))
-		WARN_ON_ONCE(!preempt_count() || !irqs_disabled());
-	this_cpu_inc(trace_active);
-}
-
-/**
- * stack_tracer_enable - re-enable the stack tracer
- *
- * After stack_tracer_disable() is called, stack_tracer_enable()
- * must be called shortly afterward.
- */
-void stack_tracer_enable(void)
-{
-	if (IS_ENABLED(CONFIG_PREEMPT_DEBUG))
-		WARN_ON_ONCE(!preempt_count() || !irqs_disabled());
-	this_cpu_dec(trace_active);
-}
-
 void stack_trace_print(void)
 {
 	long i;
@@ -243,8 +211,8 @@ stack_trace_call(unsigned long ip, unsigned long parent_ip,
 	preempt_disable_notrace();
 
 	/* no atomic needed, we only modify this variable by this cpu */
-	__this_cpu_inc(trace_active);
-	if (__this_cpu_read(trace_active) != 1)
+	__this_cpu_inc(disable_stack_tracer);
+	if (__this_cpu_read(disable_stack_tracer) != 1)
 		goto out;
 
 	ip += MCOUNT_INSN_SIZE;
@@ -252,7 +220,7 @@ stack_trace_call(unsigned long ip, unsigned long parent_ip,
 	check_stack(ip, &stack);
 
  out:
-	__this_cpu_dec(trace_active);
+	__this_cpu_dec(disable_stack_tracer);
 	/* prevent recursion in schedule */
 	preempt_enable_notrace();
 }
@@ -294,15 +262,15 @@ stack_max_size_write(struct file *filp, const char __user *ubuf,
 	/*
 	 * In case we trace inside arch_spin_lock() or after (NMI),
 	 * we will cause circular lock, so we also need to increase
-	 * the percpu trace_active here.
+	 * the percpu disable_stack_tracer here.
 	 */
-	__this_cpu_inc(trace_active);
+	__this_cpu_inc(disable_stack_tracer);
 
 	arch_spin_lock(&stack_trace_max_lock);
 	*ptr = val;
 	arch_spin_unlock(&stack_trace_max_lock);
 
-	__this_cpu_dec(trace_active);
+	__this_cpu_dec(disable_stack_tracer);
 	local_irq_restore(flags);
 
 	return count;
@@ -338,7 +306,7 @@ static void *t_start(struct seq_file *m, loff_t *pos)
 {
 	local_irq_disable();
 
-	__this_cpu_inc(trace_active);
+	__this_cpu_inc(disable_stack_tracer);
 
 	arch_spin_lock(&stack_trace_max_lock);
 
@@ -352,7 +320,7 @@ static void t_stop(struct seq_file *m, void *p)
 {
 	arch_spin_unlock(&stack_trace_max_lock);
 
-	__this_cpu_dec(trace_active);
+	__this_cpu_dec(disable_stack_tracer);
 
 	local_irq_enable();
 }
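The kernel-doc removed above still defines the calling convention: preemption or interrupts must already be disabled, and stack_tracer_enable() must follow shortly after while they still are. A hypothetical caller, for illustration only (the real users at this point live in RCU and are not part of this diff):

	/* illustration only -- not a hunk from this commit */
	preempt_disable_notrace();
	stack_tracer_disable();		/* just a this_cpu_inc() now */

	/* ... section that must not recurse into the stack tracer ... */

	stack_tracer_enable();		/* matching this_cpu_dec() */
	preempt_enable_notrace();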