author    Linus Torvalds <torvalds@linux-foundation.org>  2011-10-26 10:17:32 -0400
committer Linus Torvalds <torvalds@linux-foundation.org>  2011-10-26 10:17:32 -0400
commit    3cfef9524677a4ecb392d6fbffe6ebce6302f1d4 (patch)
tree      88647d9dc50d634dee9cfeb7f354d620977a2f33 /kernel/trace/ring_buffer.c
parent    982653009b883ef1529089e3e6f1ae2fee41cbe2 (diff)
parent    68cc3990a545dc0da221b4844dd8b9c06623a6c5 (diff)
Merge branch 'core-locking-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
* 'core-locking-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (27 commits)
  rtmutex: Add missing rcu_read_unlock() in debug_rt_mutex_print_deadlock()
  lockdep: Comment all warnings
  lib: atomic64: Change the type of local lock to raw_spinlock_t
  locking, lib/atomic64: Annotate atomic64_lock::lock as raw
  locking, x86, iommu: Annotate qi->q_lock as raw
  locking, x86, iommu: Annotate irq_2_ir_lock as raw
  locking, x86, iommu: Annotate iommu->register_lock as raw
  locking, dma, ipu: Annotate bank_lock as raw
  locking, ARM: Annotate low level hw locks as raw
  locking, drivers/dca: Annotate dca_lock as raw
  locking, powerpc: Annotate uic->lock as raw
  locking, x86: mce: Annotate cmci_discover_lock as raw
  locking, ACPI: Annotate c3_lock as raw
  locking, oprofile: Annotate oprofilefs lock as raw
  locking, video: Annotate vga console lock as raw
  locking, latencytop: Annotate latency_lock as raw
  locking, timer_stats: Annotate table_lock as raw
  locking, rwsem: Annotate inner lock as raw
  locking, semaphores: Annotate inner lock as raw
  locking, sched: Annotate thread_group_cputimer as raw
  ...

Fix up conflicts in kernel/posix-cpu-timers.c manually: making
cputimer->cputime a raw lock conflicted with the ABBA fix in commit
bcd5cff7216f ("cputimer: Cure lock inversion").
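The change to kernel/trace/ring_buffer.c below is mechanical: the per-CPU reader_lock keeps its meaning, but its type and accessors move from the spinlock_t API to the raw_spinlock_t API, which stays a true busy-waiting lock even on preempt-rt kernels, where ordinary spinlocks become sleeping locks. A minimal sketch of the conversion pattern, assuming a hypothetical demo_buffer structure rather than the real ring_buffer_per_cpu:

#include <linux/spinlock.h>

/* Hypothetical structure for illustration; the real code uses ring_buffer_per_cpu. */
struct demo_buffer {
	raw_spinlock_t reader_lock;	/* was: spinlock_t reader_lock; */
};

static void demo_buffer_init(struct demo_buffer *b)
{
	/* was: spin_lock_init(&b->reader_lock); */
	raw_spin_lock_init(&b->reader_lock);
}

static void demo_buffer_touch(struct demo_buffer *b)
{
	unsigned long flags;

	/* was: spin_lock_irqsave(&b->reader_lock, flags); */
	raw_spin_lock_irqsave(&b->reader_lock, flags);
	/* ... critical section that must not sleep ... */
	raw_spin_unlock_irqrestore(&b->reader_lock, flags);
}

The hunks that follow apply exactly this substitution at every reader_lock site in the file.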
Diffstat (limited to 'kernel/trace/ring_buffer.c')
-rw-r--r--  kernel/trace/ring_buffer.c | 52
1 file changed, 26 insertions, 26 deletions
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index 731201bf4acc..f2f821acc597 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -478,7 +478,7 @@ struct ring_buffer_per_cpu {
 	int cpu;
 	atomic_t record_disabled;
 	struct ring_buffer *buffer;
-	spinlock_t reader_lock;	/* serialize readers */
+	raw_spinlock_t reader_lock;	/* serialize readers */
 	arch_spinlock_t lock;
 	struct lock_class_key lock_key;
 	struct list_head *pages;
@@ -1062,7 +1062,7 @@ rb_allocate_cpu_buffer(struct ring_buffer *buffer, int cpu)

 	cpu_buffer->cpu = cpu;
 	cpu_buffer->buffer = buffer;
-	spin_lock_init(&cpu_buffer->reader_lock);
+	raw_spin_lock_init(&cpu_buffer->reader_lock);
 	lockdep_set_class(&cpu_buffer->reader_lock, buffer->reader_lock_key);
 	cpu_buffer->lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
@@ -1259,7 +1259,7 @@ rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned nr_pages)
 	struct list_head *p;
 	unsigned i;

-	spin_lock_irq(&cpu_buffer->reader_lock);
+	raw_spin_lock_irq(&cpu_buffer->reader_lock);
 	rb_head_page_deactivate(cpu_buffer);

 	for (i = 0; i < nr_pages; i++) {
@@ -1277,7 +1277,7 @@ rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned nr_pages)
 	rb_check_pages(cpu_buffer);

 out:
-	spin_unlock_irq(&cpu_buffer->reader_lock);
+	raw_spin_unlock_irq(&cpu_buffer->reader_lock);
 }

 static void
@@ -1288,7 +1288,7 @@ rb_insert_pages(struct ring_buffer_per_cpu *cpu_buffer,
 	struct list_head *p;
 	unsigned i;

-	spin_lock_irq(&cpu_buffer->reader_lock);
+	raw_spin_lock_irq(&cpu_buffer->reader_lock);
 	rb_head_page_deactivate(cpu_buffer);

 	for (i = 0; i < nr_pages; i++) {
@@ -1303,7 +1303,7 @@ rb_insert_pages(struct ring_buffer_per_cpu *cpu_buffer,
 	rb_check_pages(cpu_buffer);

 out:
-	spin_unlock_irq(&cpu_buffer->reader_lock);
+	raw_spin_unlock_irq(&cpu_buffer->reader_lock);
 }

 /**
@@ -2804,9 +2804,9 @@ void ring_buffer_iter_reset(struct ring_buffer_iter *iter)

 	cpu_buffer = iter->cpu_buffer;

-	spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
+	raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
 	rb_iter_reset(iter);
-	spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
+	raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
 }
 EXPORT_SYMBOL_GPL(ring_buffer_iter_reset);
@@ -3265,12 +3265,12 @@ ring_buffer_peek(struct ring_buffer *buffer, int cpu, u64 *ts,
  again:
 	local_irq_save(flags);
 	if (dolock)
-		spin_lock(&cpu_buffer->reader_lock);
+		raw_spin_lock(&cpu_buffer->reader_lock);
 	event = rb_buffer_peek(cpu_buffer, ts, lost_events);
 	if (event && event->type_len == RINGBUF_TYPE_PADDING)
 		rb_advance_reader(cpu_buffer);
 	if (dolock)
-		spin_unlock(&cpu_buffer->reader_lock);
+		raw_spin_unlock(&cpu_buffer->reader_lock);
 	local_irq_restore(flags);

 	if (event && event->type_len == RINGBUF_TYPE_PADDING)
@@ -3295,9 +3295,9 @@ ring_buffer_iter_peek(struct ring_buffer_iter *iter, u64 *ts)
 	unsigned long flags;

  again:
-	spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
+	raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
 	event = rb_iter_peek(iter, ts);
-	spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
+	raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);

 	if (event && event->type_len == RINGBUF_TYPE_PADDING)
 		goto again;
@@ -3337,7 +3337,7 @@ ring_buffer_consume(struct ring_buffer *buffer, int cpu, u64 *ts,
 	cpu_buffer = buffer->buffers[cpu];
 	local_irq_save(flags);
 	if (dolock)
-		spin_lock(&cpu_buffer->reader_lock);
+		raw_spin_lock(&cpu_buffer->reader_lock);

 	event = rb_buffer_peek(cpu_buffer, ts, lost_events);
 	if (event) {
@@ -3346,7 +3346,7 @@ ring_buffer_consume(struct ring_buffer *buffer, int cpu, u64 *ts,
 	}

 	if (dolock)
-		spin_unlock(&cpu_buffer->reader_lock);
+		raw_spin_unlock(&cpu_buffer->reader_lock);
 	local_irq_restore(flags);

  out:
@@ -3438,11 +3438,11 @@ ring_buffer_read_start(struct ring_buffer_iter *iter)

 	cpu_buffer = iter->cpu_buffer;

-	spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
+	raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
 	arch_spin_lock(&cpu_buffer->lock);
 	rb_iter_reset(iter);
 	arch_spin_unlock(&cpu_buffer->lock);
-	spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
+	raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
 }
 EXPORT_SYMBOL_GPL(ring_buffer_read_start);
@@ -3477,7 +3477,7 @@ ring_buffer_read(struct ring_buffer_iter *iter, u64 *ts)
 	struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
 	unsigned long flags;

-	spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
+	raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
  again:
 	event = rb_iter_peek(iter, ts);
 	if (!event)
@@ -3488,7 +3488,7 @@ ring_buffer_read(struct ring_buffer_iter *iter, u64 *ts)

 	rb_advance_iter(iter);
  out:
-	spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
+	raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);

 	return event;
 }
@@ -3557,7 +3557,7 @@ void ring_buffer_reset_cpu(struct ring_buffer *buffer, int cpu)

 	atomic_inc(&cpu_buffer->record_disabled);

-	spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
+	raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);

 	if (RB_WARN_ON(cpu_buffer, local_read(&cpu_buffer->committing)))
 		goto out;
@@ -3569,7 +3569,7 @@ void ring_buffer_reset_cpu(struct ring_buffer *buffer, int cpu)
 	arch_spin_unlock(&cpu_buffer->lock);

  out:
-	spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
+	raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);

 	atomic_dec(&cpu_buffer->record_disabled);
 }
@@ -3607,10 +3607,10 @@ int ring_buffer_empty(struct ring_buffer *buffer)
 	cpu_buffer = buffer->buffers[cpu];
 	local_irq_save(flags);
 	if (dolock)
-		spin_lock(&cpu_buffer->reader_lock);
+		raw_spin_lock(&cpu_buffer->reader_lock);
 	ret = rb_per_cpu_empty(cpu_buffer);
 	if (dolock)
-		spin_unlock(&cpu_buffer->reader_lock);
+		raw_spin_unlock(&cpu_buffer->reader_lock);
 	local_irq_restore(flags);

 	if (!ret)
@@ -3641,10 +3641,10 @@ int ring_buffer_empty_cpu(struct ring_buffer *buffer, int cpu)
 	cpu_buffer = buffer->buffers[cpu];
 	local_irq_save(flags);
 	if (dolock)
-		spin_lock(&cpu_buffer->reader_lock);
+		raw_spin_lock(&cpu_buffer->reader_lock);
 	ret = rb_per_cpu_empty(cpu_buffer);
 	if (dolock)
-		spin_unlock(&cpu_buffer->reader_lock);
+		raw_spin_unlock(&cpu_buffer->reader_lock);
 	local_irq_restore(flags);

 	return ret;
@@ -3841,7 +3841,7 @@ int ring_buffer_read_page(struct ring_buffer *buffer,
 	if (!bpage)
 		goto out;

-	spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
+	raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);

 	reader = rb_get_reader_page(cpu_buffer);
 	if (!reader)
@@ -3964,7 +3964,7 @@ int ring_buffer_read_page(struct ring_buffer *buffer,
 		memset(&bpage->data[commit], 0, BUF_PAGE_SIZE - commit);

  out_unlock:
-	spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
+	raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);

  out:
 	return ret;
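Several of the hunks above (ring_buffer_peek, ring_buffer_consume, ring_buffer_empty, ring_buffer_empty_cpu) share the same conditional-locking shape: interrupts are masked unconditionally and reader_lock is taken only when the dolock flag is set. A sketch of that shape under the raw API, using the hypothetical names demo_cpu_buffer and demo_check_empty, since the surrounding functions are not part of this diff:

#include <linux/irqflags.h>
#include <linux/spinlock.h>

/* Hypothetical stand-in for the per-CPU buffer, for illustration only. */
struct demo_cpu_buffer {
	raw_spinlock_t reader_lock;
};

static int demo_check_empty(struct demo_cpu_buffer *cpu_buffer, int dolock)
{
	unsigned long flags;
	int ret;

	local_irq_save(flags);		/* always disable interrupts */
	if (dolock)			/* take the reader lock only when asked to */
		raw_spin_lock(&cpu_buffer->reader_lock);
	ret = 0;			/* stands in for rb_per_cpu_empty() */
	if (dolock)
		raw_spin_unlock(&cpu_buffer->reader_lock);
	local_irq_restore(flags);

	return ret;
}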