| field | value | date |
|---|---|---|
| author | Lai Jiangshan <laijs@cn.fujitsu.com> | 2010-02-02 02:32:09 -0500 |
| committer | Steven Rostedt <rostedt@goodmis.org> | 2010-02-02 10:20:18 -0500 |
| commit | 4f48f8b7fd18c44f8478174f9925cc3c059c6ce4 (patch) | |
| tree | 94eb6ad3dcf1d381d7b2bc81b3883d6cde98f3ba | |
| parent | ab658321f32770b903a4426e2a6fae0392757755 (diff) | |
tracing: Fix circular dead lock in stack trace
When we cat <debugfs>/tracing/stack_trace, we may cause a circular lock:

    sys_read()
      t_start()
        arch_spin_lock(&max_stack_lock);
      t_show()
        seq_printf(), vsnprintf() ...
        /* They are all traceable; when they are traced,
           max_stack_lock may be required again. */
The following script can trigger this circular deadlock very easily:
    #!/bin/bash

    echo 1 > /proc/sys/kernel/stack_tracer_enabled
    mount -t debugfs xxx /mnt > /dev/null 2>&1

    (
    # make check_stack() zealous to require max_stack_lock
    for ((; ;))
    {
        echo 1 > /mnt/tracing/stack_max_size
    }
    ) &

    for ((; ;))
    {
        cat /mnt/tracing/stack_trace > /dev/null
    }
To fix this bug, we increase the percpu trace_active before acquiring the lock, so the stack tracer skips tracing while max_stack_lock is held.
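The idea can be seen in a minimal userspace sketch of the same guard pattern (illustrative only; the pthread mutex, the thread-local counter, and the function names below are stand-ins for this example, not the kernel's API): the tracing hook checks a per-context counter and skips its work when it fires inside a region that already holds the lock.

```c
/* Minimal userspace sketch of the recursion guard -- not kernel code. */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t max_stack_lock = PTHREAD_MUTEX_INITIALIZER;
static __thread int trace_active;    /* stands in for the per-cpu counter */

/* A tracing hook that may fire from functions called under the lock. */
static void tracer_hook(void)
{
    if (trace_active)                /* lock already held in this context: bail out */
        return;
    trace_active++;
    pthread_mutex_lock(&max_stack_lock);
    /* ... record a stack trace ... */
    pthread_mutex_unlock(&max_stack_lock);
    trace_active--;
}

/* Analogue of t_start()/t_show(): takes the lock, then calls traced code. */
static void read_stack_trace(void)
{
    trace_active++;                  /* the increment this patch adds */
    pthread_mutex_lock(&max_stack_lock);
    tracer_hook();                   /* a traced function running under the lock */
    pthread_mutex_unlock(&max_stack_lock);
    trace_active--;
}

int main(void)
{
    read_stack_trace();              /* without the increment, this self-deadlocks */
    puts("no deadlock");
    return 0;
}
```

Without the increment in read_stack_trace(), tracer_hook() would try to take the non-recursive mutex a second time in the same context, which is exactly the hang described above.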
Reported-by: Li Zefan <lizf@cn.fujitsu.com>
Signed-off-by: Lai Jiangshan <laijs@cn.fujitsu.com>
LKML-Reference: <4B67D4F9.9080905@cn.fujitsu.com>
Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
Diffstat:

    -rw-r--r--  kernel/trace/trace_stack.c | 24
    1 file changed, 24 insertions, 0 deletions
diff --git a/kernel/trace/trace_stack.c b/kernel/trace/trace_stack.c
index 678a5120ee30..f4bc9b27de5f 100644
--- a/kernel/trace/trace_stack.c
+++ b/kernel/trace/trace_stack.c
@@ -157,6 +157,7 @@ stack_max_size_write(struct file *filp, const char __user *ubuf,
         unsigned long val, flags;
         char buf[64];
         int ret;
+        int cpu;
 
         if (count >= sizeof(buf))
                 return -EINVAL;
@@ -171,9 +172,20 @@ stack_max_size_write(struct file *filp, const char __user *ubuf,
                 return ret;
 
         local_irq_save(flags);
+
+        /*
+         * In case we trace inside arch_spin_lock() or after (NMI),
+         * we will cause circular lock, so we also need to increase
+         * the percpu trace_active here.
+         */
+        cpu = smp_processor_id();
+        per_cpu(trace_active, cpu)++;
+
         arch_spin_lock(&max_stack_lock);
         *ptr = val;
         arch_spin_unlock(&max_stack_lock);
+
+        per_cpu(trace_active, cpu)--;
         local_irq_restore(flags);
 
         return count;
@@ -206,7 +218,13 @@ t_next(struct seq_file *m, void *v, loff_t *pos)
 
 static void *t_start(struct seq_file *m, loff_t *pos)
 {
+        int cpu;
+
         local_irq_disable();
+
+        cpu = smp_processor_id();
+        per_cpu(trace_active, cpu)++;
+
         arch_spin_lock(&max_stack_lock);
 
         if (*pos == 0)
@@ -217,7 +235,13 @@ static void *t_start(struct seq_file *m, loff_t *pos)
 
 static void t_stop(struct seq_file *m, void *p)
 {
+        int cpu;
+
         arch_spin_unlock(&max_stack_lock);
+
+        cpu = smp_processor_id();
+        per_cpu(trace_active, cpu)--;
+
         local_irq_enable();
 }
 
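For context, the increments are enough because the stack tracer's ftrace callback in this same file already refuses to do anything when trace_active is non-zero on the current CPU, so any tracing that fires while max_stack_lock is held now returns early instead of trying to take the lock again. Below is a condensed sketch of that callback, simplified from kernel/trace/trace_stack.c of this era; the exact checks and flags handling are elided and may differ from the real source.

```c
/* Simplified sketch of the callback side; details may differ from the source. */
static void stack_trace_call(unsigned long ip, unsigned long parent_ip)
{
        unsigned long flags;
        int cpu;

        local_irq_save(flags);

        cpu = raw_smp_processor_id();
        /* Non-zero means this CPU is already inside a max_stack_lock region. */
        if (per_cpu(trace_active, cpu)++ != 0)
                goto out;

        check_stack();          /* may take max_stack_lock itself */

 out:
        per_cpu(trace_active, cpu)--;
        local_irq_restore(flags);
}
```

With stack_max_size_write(), t_start(), and t_stop() also bumping trace_active, this early-out covers every path that holds max_stack_lock, which is what breaks the deadlock.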
