author      Steven Rostedt (Red Hat) <rostedt@goodmis.org>    2015-11-03 14:50:15 -0500
committer   Steven Rostedt <rostedt@goodmis.org>              2015-11-03 14:50:15 -0500
commit      d332736df0c277905de06311ae084e2c76580a3f
tree        9cfa8234b9ce54d306a3c4c4c474f31c839ffc82
parent      bb99d8ccec7f83a2730a29d1ae7eee5ffa446a9e
tracing: Rename max_stack_lock to stack_trace_max_lock
Now that max_stack_lock is a global variable, it needs a name that is
unlikely to collide with other globals. Rename it to follow the naming
convention already used by the other stack_trace variables.
Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
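
After the rename, every piece of shared stack-tracer state in
<linux/ftrace.h> carries the same stack_trace_ prefix. A condensed view
of the declarations as they stand after this patch (taken directly from
the diff below):

    extern unsigned           stack_trace_index[];
    extern struct stack_trace stack_trace_max;
    extern unsigned long      stack_trace_max_size;
    extern arch_spinlock_t    stack_trace_max_lock;   /* was: max_stack_lock */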
-rw-r--r--  include/linux/ftrace.h     |  2 +-
-rw-r--r--  kernel/trace/trace_stack.c | 16 ++++++++--------
2 files changed, 9 insertions(+), 9 deletions(-)
diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h
index b4c92ab9e08b..eae6548efbf0 100644
--- a/include/linux/ftrace.h
+++ b/include/linux/ftrace.h
@@ -271,7 +271,7 @@ struct stack_trace;
 extern unsigned stack_trace_index[];
 extern struct stack_trace stack_trace_max;
 extern unsigned long stack_trace_max_size;
-extern arch_spinlock_t max_stack_lock;
+extern arch_spinlock_t stack_trace_max_lock;
 
 extern int stack_tracer_enabled;
 void stack_trace_print(void);
diff --git a/kernel/trace/trace_stack.c b/kernel/trace/trace_stack.c
index 50945a7939f4..0bd212af406c 100644
--- a/kernel/trace/trace_stack.c
+++ b/kernel/trace/trace_stack.c
@@ -31,7 +31,7 @@ struct stack_trace stack_trace_max = {
 };
 
 unsigned long stack_trace_max_size;
-arch_spinlock_t max_stack_lock =
+arch_spinlock_t stack_trace_max_lock =
 	(arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
 
 static DEFINE_PER_CPU(int, trace_active);
@@ -65,7 +65,7 @@ void stack_trace_print(void)
 
 /*
  * When arch-specific code overides this function, the following
- * data should be filled up, assuming max_stack_lock is held to
+ * data should be filled up, assuming stack_trace_max_lock is held to
  * prevent concurrent updates.
  *     stack_trace_index[]
  *     stack_trace_max
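
The comment in this hunk spells out a contract: an architecture that
overrides the stack-depth check must publish stack_trace_index[],
stack_trace_max and stack_trace_max_size only while holding
stack_trace_max_lock. A minimal sketch of an override honoring that
contract (the depth calculation and the body are illustrative
placeholders, not taken from any real architecture; only the signature
and the locking discipline mirror this file):

    /* Hypothetical arch-specific override of check_stack(). */
    static void check_stack(unsigned long ip, unsigned long *stack)
    {
    	unsigned long this_size, flags;
    
    	/* Placeholder: distance from the stack pointer to the stack's end. */
    	this_size = THREAD_SIZE - ((unsigned long)stack & (THREAD_SIZE - 1));
    
    	if (this_size <= stack_trace_max_size)
    		return;		/* not a new maximum, nothing to publish */
    
    	local_irq_save(flags);
    	arch_spin_lock(&stack_trace_max_lock);
    
    	/* Re-check under the lock, then update the shared variables. */
    	if (this_size > stack_trace_max_size) {
    		stack_trace_max_size = this_size;
    		save_stack_trace(&stack_trace_max);
    		/* stack_trace_index[] would be derived from the trace here. */
    	}
    
    	arch_spin_unlock(&stack_trace_max_lock);
    	local_irq_restore(flags);
    }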
@@ -92,7 +92,7 @@ check_stack(unsigned long ip, unsigned long *stack)
 		return;
 
 	local_irq_save(flags);
-	arch_spin_lock(&max_stack_lock);
+	arch_spin_lock(&stack_trace_max_lock);
 
 	/* In case another CPU set the tracer_frame on us */
 	if (unlikely(!frame_size))
@@ -175,7 +175,7 @@ check_stack(unsigned long ip, unsigned long *stack)
 	}
 
 out:
-	arch_spin_unlock(&max_stack_lock);
+	arch_spin_unlock(&stack_trace_max_lock);
 	local_irq_restore(flags);
 }
 
@@ -246,9 +246,9 @@ stack_max_size_write(struct file *filp, const char __user *ubuf,
 	cpu = smp_processor_id();
 	per_cpu(trace_active, cpu)++;
 
-	arch_spin_lock(&max_stack_lock);
+	arch_spin_lock(&stack_trace_max_lock);
 	*ptr = val;
-	arch_spin_unlock(&max_stack_lock);
+	arch_spin_unlock(&stack_trace_max_lock);
 
 	per_cpu(trace_active, cpu)--;
 	local_irq_restore(flags);
@@ -291,7 +291,7 @@ static void *t_start(struct seq_file *m, loff_t *pos)
 	cpu = smp_processor_id();
 	per_cpu(trace_active, cpu)++;
 
-	arch_spin_lock(&max_stack_lock);
+	arch_spin_lock(&stack_trace_max_lock);
 
 	if (*pos == 0)
 		return SEQ_START_TOKEN;
@@ -303,7 +303,7 @@ static void t_stop(struct seq_file *m, void *p)
 {
 	int cpu;
 
-	arch_spin_unlock(&max_stack_lock);
+	arch_spin_unlock(&stack_trace_max_lock);
 
 	cpu = smp_processor_id();
 	per_cpu(trace_active, cpu)--;
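
The last two hunks pair up: t_start() bumps the per-CPU trace_active
count and takes stack_trace_max_lock, and t_stop() drops both, so the
max-stack snapshot stays consistent across the whole seq_file walk. A
sketch of how such hooks are typically wired together (t_next and
t_show are not part of this diff and are assumed here for
completeness):

    static const struct seq_operations stack_trace_seq_ops = {
    	.start	= t_start,	/* blocks recursion, takes stack_trace_max_lock */
    	.next	= t_next,	/* steps through the recorded entries */
    	.stop	= t_stop,	/* drops stack_trace_max_lock */
    	.show	= t_show,	/* prints one entry of the snapshot */
    };

Holding the lock from .start to .stop keeps any CPU from recording a
new maximum while the snapshot is being read out.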