Diffstat (limited to 'kernel/trace/ring_buffer.c')
-rw-r--r--	kernel/trace/ring_buffer.c	| 65
1 file changed, 36 insertions(+), 29 deletions(-)
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index 54191d6ed195..850918a4a8ee 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -422,7 +422,7 @@ int ring_buffer_print_page_header(struct trace_seq *s)
 struct ring_buffer_per_cpu {
 	int cpu;
 	struct ring_buffer *buffer;
-	spinlock_t reader_lock; /* serialize readers */
+	raw_spinlock_t reader_lock; /* serialize readers */
 	arch_spinlock_t lock;
 	struct lock_class_key lock_key;
 	struct list_head *pages;
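The hunk above changes the type of reader_lock, and every init/lock/unlock call site below has to move to the matching raw_spin_* API in lockstep. For readers unfamiliar with the distinction: on PREEMPT_RT a spinlock_t becomes a sleeping rtmutex, while a raw_spinlock_t keeps true busy-wait semantics, so a lock taken from contexts that must not sleep has to be raw. A minimal, self-contained sketch of the raw API pairing (the demo_* names are illustrative, not part of this patch):

#include <linux/spinlock.h>

struct demo_counter {
	raw_spinlock_t lock;	/* pairs only with the raw_spin_* helpers */
	unsigned long count;
};

static void demo_counter_init(struct demo_counter *c)
{
	raw_spin_lock_init(&c->lock);
	c->count = 0;
}

static void demo_counter_inc(struct demo_counter *c)
{
	unsigned long flags;

	/* disables IRQs and truly spins, even on PREEMPT_RT */
	raw_spin_lock_irqsave(&c->lock, flags);
	c->count++;
	raw_spin_unlock_irqrestore(&c->lock, flags);
}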
@@ -998,7 +998,7 @@ rb_allocate_cpu_buffer(struct ring_buffer *buffer, int cpu)
 
 	cpu_buffer->cpu = cpu;
 	cpu_buffer->buffer = buffer;
-	spin_lock_init(&cpu_buffer->reader_lock);
+	raw_spin_lock_init(&cpu_buffer->reader_lock);
 	lockdep_set_class(&cpu_buffer->reader_lock, buffer->reader_lock_key);
 	cpu_buffer->lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
 
@@ -1191,11 +1191,12 @@ static void rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer);
 static void
 rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned nr_pages)
 {
-	struct buffer_page *bpage;
+	struct buffer_page *bpage, *tmp;
 	struct list_head *p;
+	LIST_HEAD(tofree);
 	unsigned i;
 
-	spin_lock_irq(&cpu_buffer->reader_lock);
+	raw_spin_lock_irq(&cpu_buffer->reader_lock);
 	rb_head_page_deactivate(cpu_buffer);
 
 	for (i = 0; i < nr_pages; i++) {
@@ -1203,8 +1204,8 @@ rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned nr_pages)
 			return;
 		p = cpu_buffer->pages->next;
 		bpage = list_entry(p, struct buffer_page, list);
-		list_del_init(&bpage->list);
-		free_buffer_page(bpage);
+		list_del(&bpage->list);
+		list_add(&bpage->list, &tofree);
 	}
 	if (RB_WARN_ON(cpu_buffer, list_empty(cpu_buffer->pages)))
 		return;
@@ -1212,7 +1213,13 @@ rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned nr_pages)
 	rb_reset_cpu(cpu_buffer);
 	rb_check_pages(cpu_buffer);
 
-	spin_unlock_irq(&cpu_buffer->reader_lock);
+	raw_spin_unlock_irq(&cpu_buffer->reader_lock);
+
+	list_for_each_entry_safe(bpage, tmp, &tofree, list) {
+		list_del_init(&bpage->list);
+		free_buffer_page(bpage);
+	}
+
 }
 
 static void
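With these three hunks, rb_remove_pages() now only unlinks pages onto the local tofree list while holding the raw reader_lock, and defers the actual free_buffer_page() calls until after the unlock. That matters because freeing memory can take sleeping locks on PREEMPT_RT, which is not allowed under a raw spinlock. A minimal sketch of the same deferred-free pattern, with hypothetical demo_* names (the real code above unlinks a bounded nr_pages rather than the whole list):

#include <linux/list.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

struct demo_page {
	struct list_head list;
	/* payload elided */
};

static void demo_remove_all(raw_spinlock_t *lock, struct list_head *pages)
{
	struct demo_page *page, *tmp;
	LIST_HEAD(tofree);			/* local list, needs no locking */

	raw_spin_lock_irq(lock);
	list_splice_init(pages, &tofree);	/* unlink everything under the lock */
	raw_spin_unlock_irq(lock);

	/* free outside the raw lock, where sleeping is allowed again */
	list_for_each_entry_safe(page, tmp, &tofree, list) {
		list_del(&page->list);
		kfree(page);
	}
}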
@@ -1223,7 +1230,7 @@ rb_insert_pages(struct ring_buffer_per_cpu *cpu_buffer,
 	struct list_head *p;
 	unsigned i;
 
-	spin_lock_irq(&cpu_buffer->reader_lock);
+	raw_spin_lock_irq(&cpu_buffer->reader_lock);
 	rb_head_page_deactivate(cpu_buffer);
 
 	for (i = 0; i < nr_pages; i++) {
@@ -1237,7 +1244,7 @@ rb_insert_pages(struct ring_buffer_per_cpu *cpu_buffer,
 	rb_reset_cpu(cpu_buffer);
 	rb_check_pages(cpu_buffer);
 
-	spin_unlock_irq(&cpu_buffer->reader_lock);
+	raw_spin_unlock_irq(&cpu_buffer->reader_lock);
 }
 
 /**
@@ -2739,9 +2746,9 @@ void ring_buffer_iter_reset(struct ring_buffer_iter *iter)
 
 	cpu_buffer = iter->cpu_buffer;
 
-	spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
+	raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
 	rb_iter_reset(iter);
-	spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
+	raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
 }
 EXPORT_SYMBOL_GPL(ring_buffer_iter_reset);
 
@@ -3175,12 +3182,12 @@ ring_buffer_peek(struct ring_buffer *buffer, int cpu, u64 *ts)
 again:
 	local_irq_save(flags);
 	if (dolock)
-		spin_lock(&cpu_buffer->reader_lock);
+		raw_spin_lock(&cpu_buffer->reader_lock);
 	event = rb_buffer_peek(cpu_buffer, ts);
 	if (event && event->type_len == RINGBUF_TYPE_PADDING)
 		rb_advance_reader(cpu_buffer);
 	if (dolock)
-		spin_unlock(&cpu_buffer->reader_lock);
+		raw_spin_unlock(&cpu_buffer->reader_lock);
 	local_irq_restore(flags);
 
 	if (event && event->type_len == RINGBUF_TYPE_PADDING)
@@ -3205,9 +3212,9 @@ ring_buffer_iter_peek(struct ring_buffer_iter *iter, u64 *ts)
 	unsigned long flags;
 
 again:
-	spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
+	raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
 	event = rb_iter_peek(iter, ts);
-	spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
+	raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
 
 	if (event && event->type_len == RINGBUF_TYPE_PADDING)
 		goto again;
@@ -3243,14 +3250,14 @@ ring_buffer_consume(struct ring_buffer *buffer, int cpu, u64 *ts)
 	cpu_buffer = buffer->buffers[cpu];
 	local_irq_save(flags);
 	if (dolock)
-		spin_lock(&cpu_buffer->reader_lock);
+		raw_spin_lock(&cpu_buffer->reader_lock);
 
 	event = rb_buffer_peek(cpu_buffer, ts);
 	if (event)
 		rb_advance_reader(cpu_buffer);
 
 	if (dolock)
-		spin_unlock(&cpu_buffer->reader_lock);
+		raw_spin_unlock(&cpu_buffer->reader_lock);
 	local_irq_restore(flags);
 
 out:
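ring_buffer_peek() and ring_buffer_consume() share a pattern worth noting: interrupts are always disabled with local_irq_save(), but the reader_lock itself is taken only when dolock says it is safe; in this era of the code that guards against reading from NMI context, where the interrupted CPU might already hold the lock. A stripped-down sketch of that shape, with hypothetical names:

#include <linux/irqflags.h>
#include <linux/spinlock.h>
#include <linux/types.h>

static int demo_peek(raw_spinlock_t *lock, bool dolock, int (*peek)(void))
{
	unsigned long flags;
	int event;

	local_irq_save(flags);		/* always keep IRQs off while peeking */
	if (dolock)
		raw_spin_lock(lock);	/* skipped when taking it could deadlock */
	event = peek();
	if (dolock)
		raw_spin_unlock(lock);
	local_irq_restore(flags);

	return event;
}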
@@ -3296,11 +3303,11 @@ ring_buffer_read_start(struct ring_buffer *buffer, int cpu)
 	atomic_inc(&cpu_buffer->record_disabled);
 	synchronize_sched();
 
-	spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
+	raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
 	arch_spin_lock(&cpu_buffer->lock);
 	rb_iter_reset(iter);
 	arch_spin_unlock(&cpu_buffer->lock);
-	spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
+	raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
 
 	return iter;
 }
@@ -3337,7 +3344,7 @@ ring_buffer_read(struct ring_buffer_iter *iter, u64 *ts)
 	struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
 	unsigned long flags;
 
-	spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
+	raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
 again:
 	event = rb_iter_peek(iter, ts);
 	if (!event)
@@ -3348,7 +3355,7 @@ ring_buffer_read(struct ring_buffer_iter *iter, u64 *ts)
 
 	rb_advance_iter(iter);
 out:
-	spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
+	raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
 
 	return event;
 }
@@ -3414,7 +3421,7 @@ void ring_buffer_reset_cpu(struct ring_buffer *buffer, int cpu)
 
 	atomic_inc(&cpu_buffer->record_disabled);
 
-	spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
+	raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
 
 	if (RB_WARN_ON(cpu_buffer, local_read(&cpu_buffer->committing)))
 		goto out;
@@ -3426,7 +3433,7 @@ void ring_buffer_reset_cpu(struct ring_buffer *buffer, int cpu)
 	arch_spin_unlock(&cpu_buffer->lock);
 
 out:
-	spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
+	raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
 
 	atomic_dec(&cpu_buffer->record_disabled);
 }
@@ -3464,10 +3471,10 @@ int ring_buffer_empty(struct ring_buffer *buffer)
 		cpu_buffer = buffer->buffers[cpu];
 		local_irq_save(flags);
 		if (dolock)
-			spin_lock(&cpu_buffer->reader_lock);
+			raw_spin_lock(&cpu_buffer->reader_lock);
 		ret = rb_per_cpu_empty(cpu_buffer);
 		if (dolock)
-			spin_unlock(&cpu_buffer->reader_lock);
+			raw_spin_unlock(&cpu_buffer->reader_lock);
 		local_irq_restore(flags);
 
 		if (!ret)
@@ -3498,10 +3505,10 @@ int ring_buffer_empty_cpu(struct ring_buffer *buffer, int cpu)
 	cpu_buffer = buffer->buffers[cpu];
 	local_irq_save(flags);
 	if (dolock)
-		spin_lock(&cpu_buffer->reader_lock);
+		raw_spin_lock(&cpu_buffer->reader_lock);
 	ret = rb_per_cpu_empty(cpu_buffer);
 	if (dolock)
-		spin_unlock(&cpu_buffer->reader_lock);
+		raw_spin_unlock(&cpu_buffer->reader_lock);
 	local_irq_restore(flags);
 
 	return ret;
@@ -3696,7 +3703,7 @@ int ring_buffer_read_page(struct ring_buffer *buffer,
 	if (!bpage)
 		goto out;
 
-	spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
+	raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
 
 	reader = rb_get_reader_page(cpu_buffer);
 	if (!reader)
@@ -3771,7 +3778,7 @@ int ring_buffer_read_page(struct ring_buffer *buffer,
 	ret = read;
 
 out_unlock:
-	spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
+	raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
 
 out:
 	return ret;