Diffstat (limited to 'kernel/trace/ring_buffer.c')
 kernel/trace/ring_buffer.c | 122
 1 file changed, 95 insertions(+), 27 deletions(-)
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index 731201bf4acc..f5b7b5c1195b 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -478,7 +478,7 @@ struct ring_buffer_per_cpu {
 	int				cpu;
 	atomic_t			record_disabled;
 	struct ring_buffer		*buffer;
-	spinlock_t			reader_lock;	/* serialize readers */
+	raw_spinlock_t			reader_lock;	/* serialize readers */
 	arch_spinlock_t			lock;
 	struct lock_class_key		lock_key;
 	struct list_head		*pages;
@@ -488,12 +488,14 @@ struct ring_buffer_per_cpu {
 	struct buffer_page		*reader_page;
 	unsigned long			lost_events;
 	unsigned long			last_overrun;
+	local_t				entries_bytes;
 	local_t				commit_overrun;
 	local_t				overrun;
 	local_t				entries;
 	local_t				committing;
 	local_t				commits;
 	unsigned long			read;
+	unsigned long			read_bytes;
 	u64				write_stamp;
 	u64				read_stamp;
 };
@@ -1062,7 +1064,7 @@ rb_allocate_cpu_buffer(struct ring_buffer *buffer, int cpu)
 
 	cpu_buffer->cpu = cpu;
 	cpu_buffer->buffer = buffer;
-	spin_lock_init(&cpu_buffer->reader_lock);
+	raw_spin_lock_init(&cpu_buffer->reader_lock);
 	lockdep_set_class(&cpu_buffer->reader_lock, buffer->reader_lock_key);
 	cpu_buffer->lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
 
@@ -1259,7 +1261,7 @@ rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned nr_pages)
 	struct list_head *p;
 	unsigned i;
 
-	spin_lock_irq(&cpu_buffer->reader_lock);
+	raw_spin_lock_irq(&cpu_buffer->reader_lock);
 	rb_head_page_deactivate(cpu_buffer);
 
 	for (i = 0; i < nr_pages; i++) {
@@ -1277,7 +1279,7 @@ rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned nr_pages)
 	rb_check_pages(cpu_buffer);
 
 out:
-	spin_unlock_irq(&cpu_buffer->reader_lock);
+	raw_spin_unlock_irq(&cpu_buffer->reader_lock);
 }
 
 static void
@@ -1288,7 +1290,7 @@ rb_insert_pages(struct ring_buffer_per_cpu *cpu_buffer,
 	struct list_head *p;
 	unsigned i;
 
-	spin_lock_irq(&cpu_buffer->reader_lock);
+	raw_spin_lock_irq(&cpu_buffer->reader_lock);
 	rb_head_page_deactivate(cpu_buffer);
 
 	for (i = 0; i < nr_pages; i++) {
@@ -1303,7 +1305,7 @@ rb_insert_pages(struct ring_buffer_per_cpu *cpu_buffer,
 	rb_check_pages(cpu_buffer);
 
 out:
-	spin_unlock_irq(&cpu_buffer->reader_lock);
+	raw_spin_unlock_irq(&cpu_buffer->reader_lock);
 }
 
 /**
@@ -1708,6 +1710,7 @@ rb_handle_head_page(struct ring_buffer_per_cpu *cpu_buffer,
 	 * the counters.
 	 */
 	local_add(entries, &cpu_buffer->overrun);
+	local_sub(BUF_PAGE_SIZE, &cpu_buffer->entries_bytes);
 
 	/*
 	 * The entries will be zeroed out when we move the
@@ -1863,6 +1866,9 @@ rb_reset_tail(struct ring_buffer_per_cpu *cpu_buffer,
 	event = __rb_page_index(tail_page, tail);
 	kmemcheck_annotate_bitfield(event, bitfield);
 
+	/* account for padding bytes */
+	local_add(BUF_PAGE_SIZE - tail, &cpu_buffer->entries_bytes);
+
 	/*
 	 * Save the original length to the meta data.
 	 * This will be used by the reader to add lost event
@@ -2054,6 +2060,9 @@ __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
 	if (!tail)
 		tail_page->page->time_stamp = ts;
 
+	/* account for these added bytes */
+	local_add(length, &cpu_buffer->entries_bytes);
+
 	return event;
 }
 
@@ -2076,6 +2085,7 @@ rb_try_to_discard(struct ring_buffer_per_cpu *cpu_buffer,
 	if (bpage->page == (void *)addr && rb_page_write(bpage) == old_index) {
 		unsigned long write_mask =
 			local_read(&bpage->write) & ~RB_WRITE_MASK;
+		unsigned long event_length = rb_event_length(event);
 		/*
 		 * This is on the tail page. It is possible that
 		 * a write could come in and move the tail page
@@ -2085,8 +2095,11 @@ rb_try_to_discard(struct ring_buffer_per_cpu *cpu_buffer,
 		old_index += write_mask;
 		new_index += write_mask;
 		index = local_cmpxchg(&bpage->write, old_index, new_index);
-		if (index == old_index)
+		if (index == old_index) {
+			/* update counters */
+			local_sub(event_length, &cpu_buffer->entries_bytes);
 			return 1;
+		}
 	}
 
 	/* could not discard */
@@ -2661,6 +2674,58 @@ rb_num_of_entries(struct ring_buffer_per_cpu *cpu_buffer)
 }
 
 /**
+ * ring_buffer_oldest_event_ts - get the oldest event timestamp from the buffer
+ * @buffer: The ring buffer
+ * @cpu: The per CPU buffer to read from.
+ */
+unsigned long ring_buffer_oldest_event_ts(struct ring_buffer *buffer, int cpu)
+{
+	unsigned long flags;
+	struct ring_buffer_per_cpu *cpu_buffer;
+	struct buffer_page *bpage;
+	unsigned long ret;
+
+	if (!cpumask_test_cpu(cpu, buffer->cpumask))
+		return 0;
+
+	cpu_buffer = buffer->buffers[cpu];
+	raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
+	/*
+	 * if the tail is on reader_page, oldest time stamp is on the reader
+	 * page
+	 */
+	if (cpu_buffer->tail_page == cpu_buffer->reader_page)
+		bpage = cpu_buffer->reader_page;
+	else
+		bpage = rb_set_head_page(cpu_buffer);
+	ret = bpage->page->time_stamp;
+	raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(ring_buffer_oldest_event_ts);
+
+/**
+ * ring_buffer_bytes_cpu - get the number of bytes consumed in a cpu buffer
+ * @buffer: The ring buffer
+ * @cpu: The per CPU buffer to read from.
+ */
+unsigned long ring_buffer_bytes_cpu(struct ring_buffer *buffer, int cpu)
+{
+	struct ring_buffer_per_cpu *cpu_buffer;
+	unsigned long ret;
+
+	if (!cpumask_test_cpu(cpu, buffer->cpumask))
+		return 0;
+
+	cpu_buffer = buffer->buffers[cpu];
+	ret = local_read(&cpu_buffer->entries_bytes) - cpu_buffer->read_bytes;
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(ring_buffer_bytes_cpu);
+
+/**
  * ring_buffer_entries_cpu - get the number of entries in a cpu buffer
  * @buffer: The ring buffer
  * @cpu: The per CPU buffer to get the entries from.
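
The two functions added in the hunk above are new exported API. As a quick illustration (not part of the patch), a minimal sketch of a consumer: dump_rb_stats is a hypothetical helper, and only the two new exports plus standard kernel primitives are used.

	#include <linux/ring_buffer.h>
	#include <linux/cpumask.h>
	#include <linux/printk.h>

	/* Hypothetical: print per-cpu byte counts and oldest-event
	 * timestamps for a previously allocated ring buffer. */
	static void dump_rb_stats(struct ring_buffer *buffer)
	{
		int cpu;

		for_each_online_cpu(cpu) {
			unsigned long bytes = ring_buffer_bytes_cpu(buffer, cpu);
			unsigned long ts = ring_buffer_oldest_event_ts(buffer, cpu);

			pr_info("cpu%d: %lu bytes held, oldest ts %lu\n",
				cpu, bytes, ts);
		}
	}
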
@@ -2804,9 +2869,9 @@ void ring_buffer_iter_reset(struct ring_buffer_iter *iter)
 
 	cpu_buffer = iter->cpu_buffer;
 
-	spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
+	raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
 	rb_iter_reset(iter);
-	spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
+	raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
 }
 EXPORT_SYMBOL_GPL(ring_buffer_iter_reset);
 
@@ -3265,12 +3330,12 @@ ring_buffer_peek(struct ring_buffer *buffer, int cpu, u64 *ts,
  again:
 	local_irq_save(flags);
 	if (dolock)
-		spin_lock(&cpu_buffer->reader_lock);
+		raw_spin_lock(&cpu_buffer->reader_lock);
 	event = rb_buffer_peek(cpu_buffer, ts, lost_events);
 	if (event && event->type_len == RINGBUF_TYPE_PADDING)
 		rb_advance_reader(cpu_buffer);
 	if (dolock)
-		spin_unlock(&cpu_buffer->reader_lock);
+		raw_spin_unlock(&cpu_buffer->reader_lock);
 	local_irq_restore(flags);
 
 	if (event && event->type_len == RINGBUF_TYPE_PADDING)
@@ -3295,9 +3360,9 @@ ring_buffer_iter_peek(struct ring_buffer_iter *iter, u64 *ts)
 	unsigned long flags;
 
  again:
-	spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
+	raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
 	event = rb_iter_peek(iter, ts);
-	spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
+	raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
 
 	if (event && event->type_len == RINGBUF_TYPE_PADDING)
 		goto again;
@@ -3337,7 +3402,7 @@ ring_buffer_consume(struct ring_buffer *buffer, int cpu, u64 *ts,
 	cpu_buffer = buffer->buffers[cpu];
 	local_irq_save(flags);
 	if (dolock)
-		spin_lock(&cpu_buffer->reader_lock);
+		raw_spin_lock(&cpu_buffer->reader_lock);
 
 	event = rb_buffer_peek(cpu_buffer, ts, lost_events);
 	if (event) {
@@ -3346,7 +3411,7 @@ ring_buffer_consume(struct ring_buffer *buffer, int cpu, u64 *ts,
 	}
 
 	if (dolock)
-		spin_unlock(&cpu_buffer->reader_lock);
+		raw_spin_unlock(&cpu_buffer->reader_lock);
 	local_irq_restore(flags);
 
  out:
@@ -3438,11 +3503,11 @@ ring_buffer_read_start(struct ring_buffer_iter *iter)
 
 	cpu_buffer = iter->cpu_buffer;
 
-	spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
+	raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
 	arch_spin_lock(&cpu_buffer->lock);
 	rb_iter_reset(iter);
 	arch_spin_unlock(&cpu_buffer->lock);
-	spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
+	raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
 }
 EXPORT_SYMBOL_GPL(ring_buffer_read_start);
 
@@ -3477,7 +3542,7 @@ ring_buffer_read(struct ring_buffer_iter *iter, u64 *ts)
 	struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
 	unsigned long flags;
 
-	spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
+	raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
  again:
 	event = rb_iter_peek(iter, ts);
 	if (!event)
@@ -3488,7 +3553,7 @@ ring_buffer_read(struct ring_buffer_iter *iter, u64 *ts)
 
 	rb_advance_iter(iter);
  out:
-	spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
+	raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
 
 	return event;
 }
@@ -3527,11 +3592,13 @@ rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer)
 	cpu_buffer->reader_page->read = 0;
 
 	local_set(&cpu_buffer->commit_overrun, 0);
+	local_set(&cpu_buffer->entries_bytes, 0);
 	local_set(&cpu_buffer->overrun, 0);
 	local_set(&cpu_buffer->entries, 0);
 	local_set(&cpu_buffer->committing, 0);
 	local_set(&cpu_buffer->commits, 0);
 	cpu_buffer->read = 0;
+	cpu_buffer->read_bytes = 0;
 
 	cpu_buffer->write_stamp = 0;
 	cpu_buffer->read_stamp = 0;
@@ -3557,7 +3624,7 @@ void ring_buffer_reset_cpu(struct ring_buffer *buffer, int cpu)
 
 	atomic_inc(&cpu_buffer->record_disabled);
 
-	spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
+	raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
 
 	if (RB_WARN_ON(cpu_buffer, local_read(&cpu_buffer->committing)))
 		goto out;
@@ -3569,7 +3636,7 @@ void ring_buffer_reset_cpu(struct ring_buffer *buffer, int cpu)
 	arch_spin_unlock(&cpu_buffer->lock);
 
  out:
-	spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
+	raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
 
 	atomic_dec(&cpu_buffer->record_disabled);
 }
@@ -3607,10 +3674,10 @@ int ring_buffer_empty(struct ring_buffer *buffer)
 	cpu_buffer = buffer->buffers[cpu];
 	local_irq_save(flags);
 	if (dolock)
-		spin_lock(&cpu_buffer->reader_lock);
+		raw_spin_lock(&cpu_buffer->reader_lock);
 	ret = rb_per_cpu_empty(cpu_buffer);
 	if (dolock)
-		spin_unlock(&cpu_buffer->reader_lock);
+		raw_spin_unlock(&cpu_buffer->reader_lock);
 	local_irq_restore(flags);
 
 	if (!ret)
@@ -3641,10 +3708,10 @@ int ring_buffer_empty_cpu(struct ring_buffer *buffer, int cpu)
 	cpu_buffer = buffer->buffers[cpu];
 	local_irq_save(flags);
 	if (dolock)
-		spin_lock(&cpu_buffer->reader_lock);
+		raw_spin_lock(&cpu_buffer->reader_lock);
 	ret = rb_per_cpu_empty(cpu_buffer);
 	if (dolock)
-		spin_unlock(&cpu_buffer->reader_lock);
+		raw_spin_unlock(&cpu_buffer->reader_lock);
 	local_irq_restore(flags);
 
 	return ret;
@@ -3841,7 +3908,7 @@ int ring_buffer_read_page(struct ring_buffer *buffer,
 	if (!bpage)
 		goto out;
 
-	spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
+	raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
 
 	reader = rb_get_reader_page(cpu_buffer);
 	if (!reader)
@@ -3918,6 +3985,7 @@ int ring_buffer_read_page(struct ring_buffer *buffer,
 	} else {
 		/* update the entry counter */
 		cpu_buffer->read += rb_page_entries(reader);
+		cpu_buffer->read_bytes += BUF_PAGE_SIZE;
 
 		/* swap the pages */
 		rb_init_page(bpage);
@@ -3964,7 +4032,7 @@ int ring_buffer_read_page(struct ring_buffer *buffer,
 		memset(&bpage->data[commit], 0, BUF_PAGE_SIZE - commit);
 
  out_unlock:
-	spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
+	raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
 
  out:
 	return ret;
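
Taken together: writers add to entries_bytes in __rb_reserve_next() and for tail padding in rb_reset_tail(); overwrites (rb_handle_head_page) and discards (rb_try_to_discard) subtract; and read_bytes advances by BUF_PAGE_SIZE when a full page is swapped out in ring_buffer_read_page() above, so ring_buffer_bytes_cpu() reports the bytes still held. A hedged sketch of a page consumer exercising this follows; consume_one_page is hypothetical, and the read-page helper signatures match the v3.2-era API as best recalled, so treat them as assumptions.

	#include <linux/ring_buffer.h>
	#include <linux/errno.h>
	#include <linux/printk.h>

	/* Hypothetical: consume one full page from @cpu and report how
	 * the per-cpu byte count moves across the page swap. */
	static int consume_one_page(struct ring_buffer *buffer, int cpu)
	{
		unsigned long before, after;
		void *page;
		int ret;

		page = ring_buffer_alloc_read_page(buffer, cpu);
		if (!page)
			return -ENOMEM;

		before = ring_buffer_bytes_cpu(buffer, cpu);
		/* full == 1: only take a page the writer has finished with */
		ret = ring_buffer_read_page(buffer, &page, PAGE_SIZE, cpu, 1);
		after = ring_buffer_bytes_cpu(buffer, cpu);

		if (ret >= 0)
			/* read_bytes grew by BUF_PAGE_SIZE, so "after" shrinks */
			pr_info("cpu%d: %lu -> %lu bytes held\n",
				cpu, before, after);

		ring_buffer_free_read_page(buffer, page);
		return ret;
	}
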