author     Steven Rostedt <srostedt@redhat.com>    2009-01-22 19:01:40 -0500
committer  Ingo Molnar <mingo@elte.hu>             2009-01-23 05:10:57 -0500
commit     7e49fcce1bdadd723ae6a0b3b324c4daced61563
tree       a2bf1a143ed33ca01612dfab1fb7c993c467cdb0
parent     b06a830183b610c0a88c29a92feb7991a867ab46
trace, lockdep: manual preempt count adding for local_bh_disable
Impact: fix the preempt tracer triggering a lockdep check_flags() failure

In local_bh_disable(), the use of add_preempt_count() causes the
preempt tracer to start recording the time that preemption is off.
But add_preempt_count() has already modified preempt_count to show
softirqs disabled, and has not yet called into lockdep to record
this, so the tracer runs in a state that lockdep cannot handle.

The preempt tracer resets the ring buffer at the start of a trace,
and the ring buffer reset code takes a spin_lock_irqsave(). This
calls into lockdep, and lockdep fails when it detects the invalid
state: softirqs are disabled in preempt_count, but the internal
current->softirqs_enabled flag is still set.

The fix is to add SOFTIRQ_OFFSET to the preempt count manually and
to call the preempt tracer code outside the lockdep critical section.

Thanks to Peter Zijlstra for suggesting this solution.
Signed-off-by: Steven Rostedt <srostedt@redhat.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
 include/linux/sched.h |  2 ++
 kernel/sched.c        |  8 ++++----
 kernel/softirq.c      | 13 ++++++++++++-
 3 files changed, 18 insertions(+), 5 deletions(-)
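
To make the broken ordering concrete, here is a minimal userspace model of it
(an illustrative sketch, not kernel code: the globals, the check_flags()
stand-in, and the two local_bh_disable() variants are all assumptions of this
note, named after the kernel functions they mimic):

/* Userspace model of the ordering bug; compile with: cc -Wall model.c */
#include <assert.h>
#include <stdbool.h>
#include <stdio.h>

#define SOFTIRQ_OFFSET (1 << 8)         /* classic preempt_count layout */
#define SOFTIRQ_MASK   (0xff << 8)

static int preempt_count;               /* models the real preempt_count */
static bool softirqs_enabled = true;    /* models current->softirqs_enabled */

static int softirq_count(void) { return preempt_count & SOFTIRQ_MASK; }

/* Stands in for lockdep's check_flags(): both views must agree. */
static void lockdep_check_flags(void)
{
	assert(!!softirq_count() == !softirqs_enabled);
}

/* The preempt tracer resets its ring buffer, which re-enters lockdep. */
static void trace_preempt_off(void)
{
	lockdep_check_flags();
}

static void local_bh_disable_broken(void)
{
	preempt_count += SOFTIRQ_OFFSET;
	trace_preempt_off();            /* fires from add_preempt_count()... */
	softirqs_enabled = false;       /* ...before lockdep is told: BOOM */
}

static void local_bh_disable_fixed(void)
{
	preempt_count += SOFTIRQ_OFFSET;  /* manual add: no tracer hook here */
	softirqs_enabled = false;         /* trace_softirqs_off() bookkeeping */
	trace_preempt_off();              /* tracer now sees consistent state */
}

int main(void)
{
	local_bh_disable_fixed();         /* passes the lockdep check */
	printf("softirq_count() = %#x\n", softirq_count());
	(void)local_bh_disable_broken;    /* calling this trips the assert */
	return 0;
}

Running the fixed path prints softirq_count() = 0x100; swapping in the broken
path trips the assertion at the same point where lockdep's check_flags() fired
in the real kernel.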
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 4cae9b81a1f8..33085b88f87b 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -137,6 +137,8 @@ extern unsigned long nr_uninterruptible(void);
 extern unsigned long nr_active(void);
 extern unsigned long nr_iowait(void);
 
+extern unsigned long get_parent_ip(unsigned long addr);
+
 struct seq_file;
 struct cfs_rq;
 struct task_group;
diff --git a/kernel/sched.c b/kernel/sched.c
index 52bbf1c842a8..c154825ae753 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -4399,10 +4399,7 @@ void scheduler_tick(void)
 #endif
 }
 
-#if defined(CONFIG_PREEMPT) && (defined(CONFIG_DEBUG_PREEMPT) || \
-				defined(CONFIG_PREEMPT_TRACER))
-
-static inline unsigned long get_parent_ip(unsigned long addr)
+unsigned long get_parent_ip(unsigned long addr)
 {
 	if (in_lock_functions(addr)) {
 		addr = CALLER_ADDR2;
@@ -4412,6 +4409,9 @@ static inline unsigned long get_parent_ip(unsigned long addr)
 	return addr;
 }
 
+#if defined(CONFIG_PREEMPT) && (defined(CONFIG_DEBUG_PREEMPT) || \
+				defined(CONFIG_PREEMPT_TRACER))
+
 void __kprobes add_preempt_count(int val)
 {
 #ifdef CONFIG_DEBUG_PREEMPT
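
Un-inlining get_parent_ip() here (and declaring it in sched.h above) is what
lets softirq.c call it below. For context, the CALLER_ADDRn helpers it relies
on are modeled on the frame-pointer definitions in <linux/ftrace.h>, which
wrap GCC's __builtin_return_address(); a minimal sketch of the idea, with
in_lock_functions() stubbed out as an assumption of this note:

/* Sketch of the CALLER_ADDRn / get_parent_ip() idea; userspace C. */
#include <stdio.h>

#define CALLER_ADDR0 ((unsigned long)__builtin_return_address(0))

/* Stub: the real helper tests whether addr falls in the lock .text. */
static int in_lock_functions(unsigned long addr) { (void)addr; return 0; }

static unsigned long get_parent_ip(unsigned long addr)
{
	/*
	 * If addr landed inside spin_lock() and friends, the interesting
	 * frame is one further out: the code that took the lock. The
	 * kernel version steps to CALLER_ADDR2 for this (and once more
	 * if that frame is itself a lock function).
	 */
	if (in_lock_functions(addr))
		addr = (unsigned long)__builtin_return_address(2);
	return addr;
}

int main(void)
{
	/* CALLER_ADDR0 here is main()'s return address into libc. */
	printf("parent ip: %#lx\n", get_parent_ip(CALLER_ADDR0));
	return 0;
}

This is why the softirq.c hunk below passes get_parent_ip(CALLER_ADDR1) to
trace_preempt_off(): the tracer should record the caller of
local_bh_disable(), not the disable path itself.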
diff --git a/kernel/softirq.c b/kernel/softirq.c
index bdbe9de9cd8d..6edfc2c11d99 100644
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
@@ -21,6 +21,7 @@
 #include <linux/freezer.h>
 #include <linux/kthread.h>
 #include <linux/rcupdate.h>
+#include <linux/ftrace.h>
 #include <linux/smp.h>
 #include <linux/tick.h>
 
@@ -79,13 +80,23 @@ static void __local_bh_disable(unsigned long ip)
 	WARN_ON_ONCE(in_irq());
 
 	raw_local_irq_save(flags);
-	add_preempt_count(SOFTIRQ_OFFSET);
+	/*
+	 * The preempt tracer hooks into add_preempt_count and will break
+	 * lockdep because it calls back into lockdep after SOFTIRQ_OFFSET
+	 * is set and before current->softirq_enabled is cleared.
+	 * We must manually increment preempt_count here and manually
+	 * call the trace_preempt_off later.
+	 */
+	preempt_count() += SOFTIRQ_OFFSET;
 	/*
 	 * Were softirqs turned off above:
 	 */
 	if (softirq_count() == SOFTIRQ_OFFSET)
 		trace_softirqs_off(ip);
 	raw_local_irq_restore(flags);
+
+	if (preempt_count() == SOFTIRQ_OFFSET)
+		trace_preempt_off(CALLER_ADDR0, get_parent_ip(CALLER_ADDR1));
 }
 #else /* !CONFIG_TRACE_IRQFLAGS */
 static inline void __local_bh_disable(unsigned long ip)
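
One detail worth spelling out: preempt_count() is a single word holding nested
counters, so both equality tests above detect a zero-to-nonzero edge. A small
self-contained check of that logic (the bit layout matches the classic
2.6-era values; treat the exact masks as an assumption of this note):

/* Why "== SOFTIRQ_OFFSET" fires on the first-level disable only. */
#include <assert.h>

#define SOFTIRQ_OFFSET (1 << 8)   /* softirq nesting lives in bits 8-15 */

int main(void)
{
	int preempt_count = 0;            /* fully preemptible */

	preempt_count += SOFTIRQ_OFFSET;  /* first local_bh_disable() */
	/*
	 * Equality means the whole count was zero before the add: no
	 * preempt_disable(), hardirq, or softirq nesting. Preemption
	 * just became impossible, so this is the one place to start
	 * the preempt-off timing.
	 */
	assert(preempt_count == SOFTIRQ_OFFSET);

	preempt_count += SOFTIRQ_OFFSET;  /* nested local_bh_disable() */
	assert(preempt_count != SOFTIRQ_OFFSET);  /* no second trigger */
	return 0;
}

The same reasoning applies to the existing softirq_count() == SOFTIRQ_OFFSET
test: trace_softirqs_off() runs only when the softirq nesting depth goes from
zero to one.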