author		Peter Zijlstra <peterz@infradead.org>	2009-06-08 12:18:39 -0400
committer	Steven Rostedt <rostedt@goodmis.org>	2009-06-08 18:50:20 -0400
commit		1f8a6a10fb9437eac3f516ea4324a19087872f30
tree		944f73519ef205c3baccd130d93ed70f7e7bc790 /kernel/trace
parent		918143e8b7d6153d7a83a3f854323407939f4a7e
ring-buffer: pass in lockdep class key for reader_lock
On Sun, 7 Jun 2009, Ingo Molnar wrote:
> Testing tracer sched_switch: <6>Starting ring buffer hammer
> PASSED
> Testing tracer sysprof: PASSED
> Testing tracer function: PASSED
> Testing tracer irqsoff:
> =============================================
> PASSED
> Testing tracer preemptoff: PASSED
> Testing tracer preemptirqsoff: [ INFO: possible recursive locking detected ]
> PASSED
> Testing tracer branch: 2.6.30-rc8-tip-01972-ge5b9078-dirty #5760
> ---------------------------------------------
> rb_consumer/431 is trying to acquire lock:
> (&cpu_buffer->reader_lock){......}, at: [<c109eef7>] ring_buffer_reset_cpu+0x37/0x70
>
> but task is already holding lock:
> (&cpu_buffer->reader_lock){......}, at: [<c10a019e>] ring_buffer_consume+0x7e/0xc0
>
> other info that might help us debug this:
> 1 lock held by rb_consumer/431:
> #0: (&cpu_buffer->reader_lock){......}, at: [<c10a019e>] ring_buffer_consume+0x7e/0xc0
The ring buffer is a generic structure and can be used outside of
ftrace. If ftrace traces within the use of the ring buffer, it can produce
false positives with lockdep, because every per-cpu reader_lock is
initialized from the same spin_lock_init() callsite and therefore shares a
single lock class.
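A minimal sketch of that lockdep behaviour (illustrative only, with made-up
names; not code from this patch): two locks initialized by the same
spin_lock_init() callsite belong to one class, so taking one while holding
the other is reported as possible recursive locking even though the lock
instances are distinct.

#include <linux/spinlock.h>

struct demo_buf {
	spinlock_t lock;
};

static void demo_buf_init(struct demo_buf *b)
{
	/* All locks initialized here share one static lock class key. */
	spin_lock_init(&b->lock);
}

static void demo_nested(struct demo_buf *outer, struct demo_buf *inner)
{
	spin_lock(&outer->lock);
	/* Different lock instance, same class: lockdep flags this as
	 * "possible recursive locking detected". */
	spin_lock(&inner->lock);
	spin_unlock(&inner->lock);
	spin_unlock(&outer->lock);
}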
This patch passes a static lock class key into the allocation of the ring
buffer, so that different ring buffers get their own lock class.
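The per-callsite key comes from the companion change to
include/linux/ring_buffer.h, which this diffstat (limited to kernel/trace)
does not show. Roughly, and hedging on the exact wording, ring_buffer_alloc()
becomes a macro that expands a static struct lock_class_key for each callsite
and hands it to __ring_buffer_alloc():

/* Sketch of the header side of the change; the exact wrapper may differ. */
struct ring_buffer *
__ring_buffer_alloc(unsigned long size, unsigned flags,
		    struct lock_class_key *key);

#define ring_buffer_alloc(size, flags)			\
({							\
	static struct lock_class_key __key;		\
	__ring_buffer_alloc((size), (flags), &__key);	\
})

Each expansion of the macro defines its own static key, so ftrace's buffers
and any other user's buffers land in separate lockdep classes.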
Reported-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
LKML-Reference: <1244477919.13761.9042.camel@twins>
[ store key in ring buffer descriptor ]
Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
Diffstat (limited to 'kernel/trace')
-rw-r--r--	kernel/trace/ring_buffer.c	9
1 file changed, 7 insertions(+), 2 deletions(-)
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index 7102d7a2fadb..22878b0d370c 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -426,6 +426,8 @@ struct ring_buffer {
 	atomic_t record_disabled;
 	cpumask_var_t cpumask;
 
+	struct lock_class_key *reader_lock_key;
+
 	struct mutex mutex;
 
 	struct ring_buffer_per_cpu **buffers;
@@ -565,6 +567,7 @@ rb_allocate_cpu_buffer(struct ring_buffer *buffer, int cpu)
 	cpu_buffer->cpu = cpu;
 	cpu_buffer->buffer = buffer;
 	spin_lock_init(&cpu_buffer->reader_lock);
+	lockdep_set_class(&cpu_buffer->reader_lock, buffer->reader_lock_key);
 	cpu_buffer->lock = (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
 	INIT_LIST_HEAD(&cpu_buffer->pages);
 
@@ -635,7 +638,8 @@ static int rb_cpu_notify(struct notifier_block *self,
  * when the buffer wraps. If this flag is not set, the buffer will
  * drop data when the tail hits the head.
  */
-struct ring_buffer *ring_buffer_alloc(unsigned long size, unsigned flags)
+struct ring_buffer *__ring_buffer_alloc(unsigned long size, unsigned flags,
+					struct lock_class_key *key)
 {
 	struct ring_buffer *buffer;
 	int bsize;
@@ -658,6 +662,7 @@ struct ring_buffer *ring_buffer_alloc(unsigned long size, unsigned flags)
 	buffer->pages = DIV_ROUND_UP(size, BUF_PAGE_SIZE);
 	buffer->flags = flags;
 	buffer->clock = trace_clock_local;
+	buffer->reader_lock_key = key;
 
 	/* need at least two pages */
 	if (buffer->pages == 1)
@@ -715,7 +720,7 @@ struct ring_buffer *ring_buffer_alloc(unsigned long size, unsigned flags)
 	kfree(buffer);
 	return NULL;
 }
-EXPORT_SYMBOL_GPL(ring_buffer_alloc);
+EXPORT_SYMBOL_GPL(__ring_buffer_alloc);
 
 /**
  * ring_buffer_free - free a ring buffer.
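For a hypothetical caller outside ftrace (names below are invented for
illustration), allocation still goes through the ring_buffer_alloc() macro;
the static key expanded at that callsite puts the buffer's reader_locks in a
class of their own, so they are no longer confused with ftrace's buffers by
lockdep:

#include <linux/ring_buffer.h>

static struct ring_buffer *my_rb;

static int __init my_rb_init(void)
{
	/* This callsite expands its own static lock_class_key. */
	my_rb = ring_buffer_alloc(1 << 20, RB_FL_OVERWRITE);
	return my_rb ? 0 : -ENOMEM;
}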