author    Thomas Gleixner <tglx@linutronix.de>  2009-07-25 11:13:33 -0400
committer Ingo Molnar <mingo@elte.hu>           2011-09-13 05:11:52 -0400
commit    5389f6fad27019f2ba78f1b332f719ec05f12a42 (patch)
tree      01b9511a75c147808f48a7f4408bf2e35b12623d
parent    740969f91e950b64a18fdd0a25164cdee042abf0 (diff)
locking, tracing: Annotate tracing locks as raw
The tracing locks can be taken in atomic context and therefore
cannot be preempted on -rt - annotate them.

In mainline this change documents the low-level nature of
the locks - otherwise there's no functional difference. Lockdep
and Sparse checking will work as usual.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
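Background on the annotation: on PREEMPT_RT a plain spinlock_t is substituted
by a sleeping, rtmutex-based lock, so it must not be taken from atomic
context; a raw_spinlock_t always remains a true busy-waiting spinlock. A
minimal sketch of the conversion pattern this patch applies, with
illustrative names (demo_lock, demo_atomic_path) that are not part of the
patch itself:

#include <linux/spinlock.h>

/* Declare the lock raw so it stays a spinning lock on -rt. */
static DEFINE_RAW_SPINLOCK(demo_lock);

static void demo_atomic_path(void)
{
	unsigned long flags;

	/*
	 * raw_spin_lock_irqsave() disables local interrupts and
	 * busy-waits; it never sleeps, so it is legal from atomic
	 * context. Lockdep and Sparse treat it like any spinlock.
	 */
	raw_spin_lock_irqsave(&demo_lock, flags);
	/* ... short, non-preemptible critical section ... */
	raw_spin_unlock_irqrestore(&demo_lock, flags);
}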
 kernel/trace/ring_buffer.c   | 52 ++++++++++++++++++++++--------------------
 kernel/trace/trace.c         | 10 +++++-----
 kernel/trace/trace_irqsoff.c |  6 +++---
 3 files changed, 34 insertions(+), 34 deletions(-)
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index 731201bf4ac..f2f821acc59 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -478,7 +478,7 @@ struct ring_buffer_per_cpu {
 	int				cpu;
 	atomic_t			record_disabled;
 	struct ring_buffer		*buffer;
-	spinlock_t			reader_lock;	/* serialize readers */
+	raw_spinlock_t			reader_lock;	/* serialize readers */
 	arch_spinlock_t			lock;
 	struct lock_class_key		lock_key;
 	struct list_head		*pages;
@@ -1062,7 +1062,7 @@ rb_allocate_cpu_buffer(struct ring_buffer *buffer, int cpu)
 
 	cpu_buffer->cpu = cpu;
 	cpu_buffer->buffer = buffer;
-	spin_lock_init(&cpu_buffer->reader_lock);
+	raw_spin_lock_init(&cpu_buffer->reader_lock);
 	lockdep_set_class(&cpu_buffer->reader_lock, buffer->reader_lock_key);
 	cpu_buffer->lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
 
@@ -1259,7 +1259,7 @@ rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned nr_pages)
 	struct list_head *p;
 	unsigned i;
 
-	spin_lock_irq(&cpu_buffer->reader_lock);
+	raw_spin_lock_irq(&cpu_buffer->reader_lock);
 	rb_head_page_deactivate(cpu_buffer);
 
 	for (i = 0; i < nr_pages; i++) {
@@ -1277,7 +1277,7 @@ rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned nr_pages)
 	rb_check_pages(cpu_buffer);
 
 out:
-	spin_unlock_irq(&cpu_buffer->reader_lock);
+	raw_spin_unlock_irq(&cpu_buffer->reader_lock);
 }
 
 static void
@@ -1288,7 +1288,7 @@ rb_insert_pages(struct ring_buffer_per_cpu *cpu_buffer,
 	struct list_head *p;
 	unsigned i;
 
-	spin_lock_irq(&cpu_buffer->reader_lock);
+	raw_spin_lock_irq(&cpu_buffer->reader_lock);
 	rb_head_page_deactivate(cpu_buffer);
 
 	for (i = 0; i < nr_pages; i++) {
@@ -1303,7 +1303,7 @@ rb_insert_pages(struct ring_buffer_per_cpu *cpu_buffer,
 	rb_check_pages(cpu_buffer);
 
 out:
-	spin_unlock_irq(&cpu_buffer->reader_lock);
+	raw_spin_unlock_irq(&cpu_buffer->reader_lock);
 }
 
 /**
@@ -2804,9 +2804,9 @@ void ring_buffer_iter_reset(struct ring_buffer_iter *iter)
 
 	cpu_buffer = iter->cpu_buffer;
 
-	spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
+	raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
 	rb_iter_reset(iter);
-	spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
+	raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
 }
 EXPORT_SYMBOL_GPL(ring_buffer_iter_reset);
 
@@ -3265,12 +3265,12 @@ ring_buffer_peek(struct ring_buffer *buffer, int cpu, u64 *ts,
  again:
 	local_irq_save(flags);
 	if (dolock)
-		spin_lock(&cpu_buffer->reader_lock);
+		raw_spin_lock(&cpu_buffer->reader_lock);
 	event = rb_buffer_peek(cpu_buffer, ts, lost_events);
 	if (event && event->type_len == RINGBUF_TYPE_PADDING)
 		rb_advance_reader(cpu_buffer);
 	if (dolock)
-		spin_unlock(&cpu_buffer->reader_lock);
+		raw_spin_unlock(&cpu_buffer->reader_lock);
 	local_irq_restore(flags);
 
 	if (event && event->type_len == RINGBUF_TYPE_PADDING)
@@ -3295,9 +3295,9 @@ ring_buffer_iter_peek(struct ring_buffer_iter *iter, u64 *ts)
 	unsigned long flags;
 
  again:
-	spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
+	raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
 	event = rb_iter_peek(iter, ts);
-	spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
+	raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
 
 	if (event && event->type_len == RINGBUF_TYPE_PADDING)
 		goto again;
@@ -3337,7 +3337,7 @@ ring_buffer_consume(struct ring_buffer *buffer, int cpu, u64 *ts,
 	cpu_buffer = buffer->buffers[cpu];
 	local_irq_save(flags);
 	if (dolock)
-		spin_lock(&cpu_buffer->reader_lock);
+		raw_spin_lock(&cpu_buffer->reader_lock);
 
 	event = rb_buffer_peek(cpu_buffer, ts, lost_events);
 	if (event) {
@@ -3346,7 +3346,7 @@ ring_buffer_consume(struct ring_buffer *buffer, int cpu, u64 *ts,
 	}
 
 	if (dolock)
-		spin_unlock(&cpu_buffer->reader_lock);
+		raw_spin_unlock(&cpu_buffer->reader_lock);
 	local_irq_restore(flags);
 
  out:
@@ -3438,11 +3438,11 @@ ring_buffer_read_start(struct ring_buffer_iter *iter)
 
 	cpu_buffer = iter->cpu_buffer;
 
-	spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
+	raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
 	arch_spin_lock(&cpu_buffer->lock);
 	rb_iter_reset(iter);
 	arch_spin_unlock(&cpu_buffer->lock);
-	spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
+	raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
 }
 EXPORT_SYMBOL_GPL(ring_buffer_read_start);
 
3448 3448
@@ -3477,7 +3477,7 @@ ring_buffer_read(struct ring_buffer_iter *iter, u64 *ts)
 	struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
 	unsigned long flags;
 
-	spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
+	raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
  again:
 	event = rb_iter_peek(iter, ts);
 	if (!event)
@@ -3488,7 +3488,7 @@ ring_buffer_read(struct ring_buffer_iter *iter, u64 *ts)
 
 	rb_advance_iter(iter);
  out:
-	spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
+	raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
 
 	return event;
 }
@@ -3557,7 +3557,7 @@ void ring_buffer_reset_cpu(struct ring_buffer *buffer, int cpu)
 
 	atomic_inc(&cpu_buffer->record_disabled);
 
-	spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
+	raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
 
 	if (RB_WARN_ON(cpu_buffer, local_read(&cpu_buffer->committing)))
 		goto out;
@@ -3569,7 +3569,7 @@ void ring_buffer_reset_cpu(struct ring_buffer *buffer, int cpu)
 	arch_spin_unlock(&cpu_buffer->lock);
 
  out:
-	spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
+	raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
 
 	atomic_dec(&cpu_buffer->record_disabled);
 }
@@ -3607,10 +3607,10 @@ int ring_buffer_empty(struct ring_buffer *buffer)
 	cpu_buffer = buffer->buffers[cpu];
 	local_irq_save(flags);
 	if (dolock)
-		spin_lock(&cpu_buffer->reader_lock);
+		raw_spin_lock(&cpu_buffer->reader_lock);
 	ret = rb_per_cpu_empty(cpu_buffer);
 	if (dolock)
-		spin_unlock(&cpu_buffer->reader_lock);
+		raw_spin_unlock(&cpu_buffer->reader_lock);
 	local_irq_restore(flags);
 
 	if (!ret)
@@ -3641,10 +3641,10 @@ int ring_buffer_empty_cpu(struct ring_buffer *buffer, int cpu)
 	cpu_buffer = buffer->buffers[cpu];
 	local_irq_save(flags);
 	if (dolock)
-		spin_lock(&cpu_buffer->reader_lock);
+		raw_spin_lock(&cpu_buffer->reader_lock);
 	ret = rb_per_cpu_empty(cpu_buffer);
 	if (dolock)
-		spin_unlock(&cpu_buffer->reader_lock);
+		raw_spin_unlock(&cpu_buffer->reader_lock);
 	local_irq_restore(flags);
 
 	return ret;
@@ -3841,7 +3841,7 @@ int ring_buffer_read_page(struct ring_buffer *buffer,
 	if (!bpage)
 		goto out;
 
-	spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
+	raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
 
 	reader = rb_get_reader_page(cpu_buffer);
 	if (!reader)
@@ -3964,7 +3964,7 @@ int ring_buffer_read_page(struct ring_buffer *buffer,
 		memset(&bpage->data[commit], 0, BUF_PAGE_SIZE - commit);
 
  out_unlock:
-	spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
+	raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
 
  out:
 	return ret;
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index e5df02c69b1..0c8bdeeb358 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -341,7 +341,7 @@ unsigned long trace_flags = TRACE_ITER_PRINT_PARENT | TRACE_ITER_PRINTK |
 	TRACE_ITER_GRAPH_TIME | TRACE_ITER_RECORD_CMD | TRACE_ITER_OVERWRITE;
 
 static int trace_stop_count;
-static DEFINE_SPINLOCK(tracing_start_lock);
+static DEFINE_RAW_SPINLOCK(tracing_start_lock);
 
 static void wakeup_work_handler(struct work_struct *work)
 {
@@ -960,7 +960,7 @@ void tracing_start(void)
 	if (tracing_disabled)
 		return;
 
-	spin_lock_irqsave(&tracing_start_lock, flags);
+	raw_spin_lock_irqsave(&tracing_start_lock, flags);
 	if (--trace_stop_count) {
 		if (trace_stop_count < 0) {
 			/* Someone screwed up their debugging */
@@ -985,7 +985,7 @@ void tracing_start(void)
 
 	ftrace_start();
  out:
-	spin_unlock_irqrestore(&tracing_start_lock, flags);
+	raw_spin_unlock_irqrestore(&tracing_start_lock, flags);
 }
 
 /**
@@ -1000,7 +1000,7 @@ void tracing_stop(void)
 	unsigned long flags;
 
 	ftrace_stop();
-	spin_lock_irqsave(&tracing_start_lock, flags);
+	raw_spin_lock_irqsave(&tracing_start_lock, flags);
 	if (trace_stop_count++)
 		goto out;
 
@@ -1018,7 +1018,7 @@ void tracing_stop(void)
 	arch_spin_unlock(&ftrace_max_lock);
 
  out:
-	spin_unlock_irqrestore(&tracing_start_lock, flags);
+	raw_spin_unlock_irqrestore(&tracing_start_lock, flags);
 }
 
 void trace_stop_cmdline_recording(void);
diff --git a/kernel/trace/trace_irqsoff.c b/kernel/trace/trace_irqsoff.c
index 667aa8cc0cf..11186212068 100644
--- a/kernel/trace/trace_irqsoff.c
+++ b/kernel/trace/trace_irqsoff.c
@@ -23,7 +23,7 @@ static int tracer_enabled __read_mostly;
 
 static DEFINE_PER_CPU(int, tracing_cpu);
 
-static DEFINE_SPINLOCK(max_trace_lock);
+static DEFINE_RAW_SPINLOCK(max_trace_lock);
 
 enum {
 	TRACER_IRQS_OFF		= (1 << 1),
@@ -321,7 +321,7 @@ check_critical_timing(struct trace_array *tr,
 	if (!report_latency(delta))
 		goto out;
 
-	spin_lock_irqsave(&max_trace_lock, flags);
+	raw_spin_lock_irqsave(&max_trace_lock, flags);
 
 	/* check if we are still the max latency */
 	if (!report_latency(delta))
@@ -344,7 +344,7 @@ check_critical_timing(struct trace_array *tr,
 	max_sequence++;
 
 out_unlock:
-	spin_unlock_irqrestore(&max_trace_lock, flags);
+	raw_spin_unlock_irqrestore(&max_trace_lock, flags);
 
 out:
 	data->critical_sequence = max_sequence;