diff options
author | Frederic Weisbecker <fweisbec@gmail.com> | 2010-02-02 17:34:40 -0500 |
---|---|---|
committer | Frederic Weisbecker <fweisbec@gmail.com> | 2010-03-10 08:26:07 -0500 |
commit | db2c4c7791cd04512093d05afc693c3511a65fd7 (patch) | |
tree | 11ad51f5ec2010e32edde2dea5696f90822435f4 /kernel | |
parent | 65f2ed2b2fa6034ef9890b60c8fd39fbe76b9d37 (diff) |
lockdep: Move lock events under lockdep recursion protection
There are rcu locked read side areas in the path where we submit
a trace event. And these rcu_read_(un)lock() trigger lock events,
which create recursive events.
One pair in do_perf_sw_event:
__lock_acquire
|
|--96.11%-- lock_acquire
| |
| |--27.21%-- do_perf_sw_event
| | perf_tp_event
| | |
| | |--49.62%-- ftrace_profile_lock_release
| | | lock_release
| | | |
| | | |--33.85%-- _raw_spin_unlock
Another pair in perf_output_begin/end:
__lock_acquire
|--23.40%-- perf_output_begin
| | __perf_event_overflow
| | perf_swevent_overflow
| | perf_swevent_add
| | perf_swevent_ctx_event
| | do_perf_sw_event
| | perf_tp_event
| | |
| | |--55.37%-- ftrace_profile_lock_acquire
| | | lock_acquire
| | | |
| | | |--37.31%-- _raw_spin_lock
The problem is not so much the trace recursion itself, as we have a
recursion protection already (though it's always wasteful to recurse).
But the trace events are outside the lockdep recursion protection, then
each lockdep event triggers a lock trace, which will trigger two
other lockdep events. Here the recursive lock trace event won't
be taken because of the trace recursion, so the recursion stops there
but lockdep will still analyse these new events:
To sum up, for each lockdep event we have:
lock_*()
|
trace lock_acquire
|
----- rcu_read_lock()
| |
| lock_acquire()
| |
| trace_lock_acquire() (stopped)
| |
| lockdep analyze
|
----- rcu_read_unlock()
|
lock_release
|
trace_lock_release() (stopped)
|
lockdep analyze
And you can repeat the above two times as we have two rcu read side
sections when we submit an event.
This is fixed in this patch by moving the lock trace event under
the lockdep recursion protection.
Signed-off-by: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Arnaldo Carvalho de Melo <acme@redhat.com>
Cc: Steven Rostedt <rostedt@goodmis.org>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Hitoshi Mitake <mitake@dcl.info.waseda.ac.jp>
Cc: Li Zefan <lizf@cn.fujitsu.com>
Cc: Lai Jiangshan <laijs@cn.fujitsu.com>
Cc: Masami Hiramatsu <mhiramat@redhat.com>
Cc: Jens Axboe <jens.axboe@oracle.com>
Diffstat (limited to 'kernel')
-rw-r--r-- | kernel/lockdep.c | 9 |
1 file changed, 3 insertions(+), 6 deletions(-)
diff --git a/kernel/lockdep.c b/kernel/lockdep.c index 0c30d0455de1..65b5f5b7c298 100644 --- a/kernel/lockdep.c +++ b/kernel/lockdep.c | |||
@@ -3211,8 +3211,6 @@ void lock_acquire(struct lockdep_map *lock, unsigned int subclass, | |||
3211 | { | 3211 | { |
3212 | unsigned long flags; | 3212 | unsigned long flags; |
3213 | 3213 | ||
3214 | trace_lock_acquire(lock, subclass, trylock, read, check, nest_lock, ip); | ||
3215 | |||
3216 | if (unlikely(current->lockdep_recursion)) | 3214 | if (unlikely(current->lockdep_recursion)) |
3217 | return; | 3215 | return; |
3218 | 3216 | ||
@@ -3220,6 +3218,7 @@ void lock_acquire(struct lockdep_map *lock, unsigned int subclass, | |||
3220 | check_flags(flags); | 3218 | check_flags(flags); |
3221 | 3219 | ||
3222 | current->lockdep_recursion = 1; | 3220 | current->lockdep_recursion = 1; |
3221 | trace_lock_acquire(lock, subclass, trylock, read, check, nest_lock, ip); | ||
3223 | __lock_acquire(lock, subclass, trylock, read, check, | 3222 | __lock_acquire(lock, subclass, trylock, read, check, |
3224 | irqs_disabled_flags(flags), nest_lock, ip, 0); | 3223 | irqs_disabled_flags(flags), nest_lock, ip, 0); |
3225 | current->lockdep_recursion = 0; | 3224 | current->lockdep_recursion = 0; |
@@ -3232,14 +3231,13 @@ void lock_release(struct lockdep_map *lock, int nested, | |||
3232 | { | 3231 | { |
3233 | unsigned long flags; | 3232 | unsigned long flags; |
3234 | 3233 | ||
3235 | trace_lock_release(lock, nested, ip); | ||
3236 | |||
3237 | if (unlikely(current->lockdep_recursion)) | 3234 | if (unlikely(current->lockdep_recursion)) |
3238 | return; | 3235 | return; |
3239 | 3236 | ||
3240 | raw_local_irq_save(flags); | 3237 | raw_local_irq_save(flags); |
3241 | check_flags(flags); | 3238 | check_flags(flags); |
3242 | current->lockdep_recursion = 1; | 3239 | current->lockdep_recursion = 1; |
3240 | trace_lock_release(lock, nested, ip); | ||
3243 | __lock_release(lock, nested, ip); | 3241 | __lock_release(lock, nested, ip); |
3244 | current->lockdep_recursion = 0; | 3242 | current->lockdep_recursion = 0; |
3245 | raw_local_irq_restore(flags); | 3243 | raw_local_irq_restore(flags); |
@@ -3413,8 +3411,6 @@ void lock_contended(struct lockdep_map *lock, unsigned long ip) | |||
3413 | { | 3411 | { |
3414 | unsigned long flags; | 3412 | unsigned long flags; |
3415 | 3413 | ||
3416 | trace_lock_contended(lock, ip); | ||
3417 | |||
3418 | if (unlikely(!lock_stat)) | 3414 | if (unlikely(!lock_stat)) |
3419 | return; | 3415 | return; |
3420 | 3416 | ||
@@ -3424,6 +3420,7 @@ void lock_contended(struct lockdep_map *lock, unsigned long ip) | |||
3424 | raw_local_irq_save(flags); | 3420 | raw_local_irq_save(flags); |
3425 | check_flags(flags); | 3421 | check_flags(flags); |
3426 | current->lockdep_recursion = 1; | 3422 | current->lockdep_recursion = 1; |
3423 | trace_lock_contended(lock, ip); | ||
3427 | __lock_contended(lock, ip); | 3424 | __lock_contended(lock, ip); |
3428 | current->lockdep_recursion = 0; | 3425 | current->lockdep_recursion = 0; |
3429 | raw_local_irq_restore(flags); | 3426 | raw_local_irq_restore(flags); |