Diffstat (limited to 'kernel/trace/trace_stack.c')
-rw-r--r--   kernel/trace/trace_stack.c | 40 ++++++++++++++++++++++++++++--------
1 file changed, 32 insertions(+), 8 deletions(-)
diff --git a/kernel/trace/trace_stack.c b/kernel/trace/trace_stack.c
index 8504ac71e4e8..f4bc9b27de5f 100644
--- a/kernel/trace/trace_stack.c
+++ b/kernel/trace/trace_stack.c
@@ -27,8 +27,8 @@ static struct stack_trace max_stack_trace = {
 };
 
 static unsigned long max_stack_size;
-static raw_spinlock_t max_stack_lock =
-	(raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
+static arch_spinlock_t max_stack_lock =
+	(arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
 
 static int stack_trace_disabled __read_mostly;
 static DEFINE_PER_CPU(int, trace_active);
@@ -54,7 +54,7 @@ static inline void check_stack(void)
 		return;
 
 	local_irq_save(flags);
-	__raw_spin_lock(&max_stack_lock);
+	arch_spin_lock(&max_stack_lock);
 
 	/* a race could have already updated it */
 	if (this_size <= max_stack_size)
@@ -103,7 +103,7 @@ static inline void check_stack(void)
 	}
 
  out:
-	__raw_spin_unlock(&max_stack_lock);
+	arch_spin_unlock(&max_stack_lock);
 	local_irq_restore(flags);
 }
 
@@ -157,6 +157,7 @@ stack_max_size_write(struct file *filp, const char __user *ubuf,
 	unsigned long val, flags;
 	char buf[64];
 	int ret;
+	int cpu;
 
 	if (count >= sizeof(buf))
 		return -EINVAL;
@@ -171,9 +172,20 @@ stack_max_size_write(struct file *filp, const char __user *ubuf,
 		return ret;
 
 	local_irq_save(flags);
-	__raw_spin_lock(&max_stack_lock);
+
+	/*
+	 * In case we trace inside arch_spin_lock() or after (NMI),
+	 * we will cause circular lock, so we also need to increase
+	 * the percpu trace_active here.
+	 */
+	cpu = smp_processor_id();
+	per_cpu(trace_active, cpu)++;
+
+	arch_spin_lock(&max_stack_lock);
 	*ptr = val;
-	__raw_spin_unlock(&max_stack_lock);
+	arch_spin_unlock(&max_stack_lock);
+
+	per_cpu(trace_active, cpu)--;
 	local_irq_restore(flags);
 
 	return count;
@@ -206,8 +218,14 @@ t_next(struct seq_file *m, void *v, loff_t *pos)
 
 static void *t_start(struct seq_file *m, loff_t *pos)
 {
+	int cpu;
+
 	local_irq_disable();
-	__raw_spin_lock(&max_stack_lock);
+
+	cpu = smp_processor_id();
+	per_cpu(trace_active, cpu)++;
+
+	arch_spin_lock(&max_stack_lock);
 
 	if (*pos == 0)
 		return SEQ_START_TOKEN;
@@ -217,7 +235,13 @@ static void *t_start(struct seq_file *m, loff_t *pos)
 
 static void t_stop(struct seq_file *m, void *p)
 {
-	__raw_spin_unlock(&max_stack_lock);
+	int cpu;
+
+	arch_spin_unlock(&max_stack_lock);
+
+	cpu = smp_processor_id();
+	per_cpu(trace_active, cpu)--;
+
 	local_irq_enable();
 }
 
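
Note (not part of the commit): the comment added in stack_max_size_write() states why trace_active is raised before taking max_stack_lock: the stack tracer can fire inside arch_spin_lock() or from an NMI, and it would then spin on a lock the same CPU already holds. Below is a minimal userspace sketch of that recursion-guard pattern, offered only as an illustration. Every identifier in it (trace_hook, update_max, in_tracer, sample_lock) is invented for this sketch; a pthread mutex and a __thread counter stand in for arch_spinlock_t and per_cpu(trace_active, cpu).

/*
 * Userspace sketch (not kernel code) of the guard used by the patch:
 * raise a per-thread counter before taking a lock that the tracing
 * hook itself might take, so that a nested hook invocation bails out
 * instead of deadlocking on the same lock.
 */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t sample_lock = PTHREAD_MUTEX_INITIALIZER;
static __thread int in_tracer;          /* analogue of per_cpu(trace_active, cpu);
                                           __thread is a GCC/Clang extension */

/* Pretend this hook can fire from inside any instrumented function. */
static void trace_hook(const char *site)
{
	if (in_tracer)                  /* analogue of the trace_active check */
		return;                 /* already inside the tracer: skip */

	in_tracer++;
	pthread_mutex_lock(&sample_lock);
	printf("traced: %s\n", site);
	pthread_mutex_unlock(&sample_lock);
	in_tracer--;
}

/* An update path that must hold the same lock the hook takes. */
static void update_max(long val, long *ptr)
{
	in_tracer++;                    /* keep nested hooks out while the lock is held */
	pthread_mutex_lock(&sample_lock);
	trace_hook("update_max");       /* bails out thanks to the guard */
	*ptr = val;
	pthread_mutex_unlock(&sample_lock);
	in_tracer--;
}

int main(void)
{
	long max = 0;

	trace_hook("main");             /* guard is down here, so this one prints */
	update_max(42, &max);
	printf("max = %ld\n", max);
	return 0;
}

Built with a C compiler and -pthread, the sketch prints the trace line from main() but skips the one nested inside update_max(). Without the in_tracer check, the nested call would try to take sample_lock a second time on the same thread, which is the userspace analogue of the circular lock the new kernel comment describes.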