path: root/kernel/trace/ring_buffer.c
author     Steven Rostedt <srostedt@redhat.com>  2008-11-06 00:09:43 -0500
committer  Ingo Molnar <mingo@elte.hu>           2008-11-06 01:51:09 -0500
commit     3e03fb7f1da2e691644526c0d6df42d778716349 (patch)
tree       fd4071e75ef62d2fc146fdd7e85671b4f5182877 /kernel/trace/ring_buffer.c
parent     9036990d462e09366f7297a2d1da6582c3e6b1d3 (diff)
ring-buffer: convert to raw spinlocks
Impact: no lockdep debugging of ring buffer

The problem with running lockdep on the ring buffer is that the ring
buffer is the core infrastructure of ftrace. What happens is that the
tracer will start tracing the lockdep code while lockdep is testing the
ring buffer's locks. This can cause lockdep to fail due to testing cases
that have not fully finished their locking transition.

This patch converts the spin locks used by the ring buffer back into
raw spin locks, which lockdep does not check.

Signed-off-by: Steven Rostedt <srostedt@redhat.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
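The conversion follows one pattern throughout the file: each spin_lock_irqsave()/spin_unlock_irqrestore() pair on the per-cpu buffer lock becomes an explicit local_irq_save()/local_irq_restore() pair wrapped around __raw_spin_lock()/__raw_spin_unlock(), since the raw primitives neither manage interrupt state themselves nor are instrumented by lockdep. Below is a minimal sketch of the before-and-after shape, not the actual ring-buffer code (the function name is hypothetical and the struct is abbreviated; the lock field and the calls mirror the hunks that follow):

	/* Abbreviated stand-in for the real struct ring_buffer_per_cpu. */
	struct ring_buffer_per_cpu {
		raw_spinlock_t lock;	/* was: spinlock_t lock; */
		/* ... remaining fields elided ... */
	};

	static void example_locked_section(struct ring_buffer_per_cpu *cpu_buffer)
	{
		unsigned long flags;

		/* Before: spin_lock_irqsave(&cpu_buffer->lock, flags); */
		local_irq_save(flags);			/* disable local interrupts by hand */
		__raw_spin_lock(&cpu_buffer->lock);	/* raw lock: invisible to lockdep */

		/* ... manipulate the per-cpu buffer pages ... */

		/* Before: spin_unlock_irqrestore(&cpu_buffer->lock, flags); */
		__raw_spin_unlock(&cpu_buffer->lock);
		local_irq_restore(flags);
	}

Initialization changes in the same spirit: instead of spin_lock_init(), the lock is assigned __RAW_SPIN_LOCK_UNLOCKED directly (second hunk below).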
Diffstat (limited to 'kernel/trace/ring_buffer.c')
-rw-r--r--  kernel/trace/ring_buffer.c  |  31
1 file changed, 20 insertions(+), 11 deletions(-)
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index 151f6a748676..a2dea5008826 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -154,7 +154,7 @@ static inline int test_time_stamp(u64 delta)
 struct ring_buffer_per_cpu {
 	int cpu;
 	struct ring_buffer *buffer;
-	spinlock_t lock;
+	raw_spinlock_t lock;
 	struct lock_class_key lock_key;
 	struct list_head pages;
 	struct buffer_page *head_page;	/* read from head */
@@ -291,7 +291,7 @@ rb_allocate_cpu_buffer(struct ring_buffer *buffer, int cpu)
 
 	cpu_buffer->cpu = cpu;
 	cpu_buffer->buffer = buffer;
-	spin_lock_init(&cpu_buffer->lock);
+	cpu_buffer->lock = (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
 	INIT_LIST_HEAD(&cpu_buffer->pages);
 
 	page = kzalloc_node(ALIGN(sizeof(*page), cache_line_size()),
@@ -854,7 +854,8 @@ __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
 	if (write > BUF_PAGE_SIZE) {
 		struct buffer_page *next_page = tail_page;
 
-		spin_lock_irqsave(&cpu_buffer->lock, flags);
+		local_irq_save(flags);
+		__raw_spin_lock(&cpu_buffer->lock);
 
 		rb_inc_page(cpu_buffer, &next_page);
 
@@ -930,7 +931,8 @@ __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
 			rb_set_commit_to_write(cpu_buffer);
 		}
 
-		spin_unlock_irqrestore(&cpu_buffer->lock, flags);
+		__raw_spin_unlock(&cpu_buffer->lock);
+		local_irq_restore(flags);
 
 		/* fail and let the caller try again */
 		return ERR_PTR(-EAGAIN);
@@ -953,7 +955,8 @@ __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
 	return event;
 
 out_unlock:
-	spin_unlock_irqrestore(&cpu_buffer->lock, flags);
+	__raw_spin_unlock(&cpu_buffer->lock);
+	local_irq_restore(flags);
 	return NULL;
 }
 
@@ -1524,7 +1527,8 @@ rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
 	struct buffer_page *reader = NULL;
 	unsigned long flags;
 
-	spin_lock_irqsave(&cpu_buffer->lock, flags);
+	local_irq_save(flags);
+	__raw_spin_lock(&cpu_buffer->lock);
 
 again:
 	reader = cpu_buffer->reader_page;
@@ -1574,7 +1578,8 @@ rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
 		goto again;
 
 out:
-	spin_unlock_irqrestore(&cpu_buffer->lock, flags);
+	__raw_spin_unlock(&cpu_buffer->lock);
+	local_irq_restore(flags);
 
 	return reader;
 }
@@ -1815,9 +1820,11 @@ ring_buffer_read_start(struct ring_buffer *buffer, int cpu)
 	atomic_inc(&cpu_buffer->record_disabled);
 	synchronize_sched();
 
-	spin_lock_irqsave(&cpu_buffer->lock, flags);
+	local_irq_save(flags);
+	__raw_spin_lock(&cpu_buffer->lock);
 	ring_buffer_iter_reset(iter);
-	spin_unlock_irqrestore(&cpu_buffer->lock, flags);
+	__raw_spin_unlock(&cpu_buffer->lock);
+	local_irq_restore(flags);
 
 	return iter;
 }
@@ -1903,11 +1910,13 @@ void ring_buffer_reset_cpu(struct ring_buffer *buffer, int cpu)
 	if (!cpu_isset(cpu, buffer->cpumask))
 		return;
 
-	spin_lock_irqsave(&cpu_buffer->lock, flags);
+	local_irq_save(flags);
+	__raw_spin_lock(&cpu_buffer->lock);
 
 	rb_reset_cpu(cpu_buffer);
 
-	spin_unlock_irqrestore(&cpu_buffer->lock, flags);
+	__raw_spin_unlock(&cpu_buffer->lock);
+	local_irq_restore(flags);
 }
 
 /**