aboutsummaryrefslogtreecommitdiffstats
path: root/kernel/trace
diff options
context:
space:
mode:
authorSteven Rostedt <srostedt@redhat.com>2010-03-31 22:11:42 -0400
committerSteven Rostedt <rostedt@goodmis.org>2010-03-31 22:57:08 -0400
commitff0ff84a0767df48d728c36510365344a7e7d582 (patch)
tree9f32396f4b5a53a3b55429a257c0630dffc8176e /kernel/trace
parentbc21b478425ac73f66a5ec0b375a5e0d12d609ce (diff)
ring-buffer: Add lost event count to end of sub buffer
Currently, binary readers of the ring buffer only know where events were lost, but not how many events were lost at that location. This information is available, but it would require adding another field to the sub buffer header to include it. But when an event cannot fit at the end of a sub buffer, it is written to the next sub buffer. This means there is a good chance that the buffer may have room to hold this counter. If it does, write the counter at the end of the sub buffer and set another flag in the data size field that states that this information exists. Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
Diffstat (limited to 'kernel/trace')
-rw-r--r--kernel/trace/ring_buffer.c37
1 files changed, 33 insertions, 4 deletions
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index 8295650444c5..dc6d563a6d22 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -320,6 +320,8 @@ EXPORT_SYMBOL_GPL(ring_buffer_event_data);
320 320
321/* Flag when events were overwritten */ 321/* Flag when events were overwritten */
322#define RB_MISSED_EVENTS (1 << 31) 322#define RB_MISSED_EVENTS (1 << 31)
323/* Missed count stored at end */
324#define RB_MISSED_STORED (1 << 30)
323 325
324struct buffer_data_page { 326struct buffer_data_page {
325 u64 time_stamp; /* page time stamp */ 327 u64 time_stamp; /* page time stamp */
@@ -340,6 +342,7 @@ struct buffer_page {
340 local_t write; /* index for next write */ 342 local_t write; /* index for next write */
341 unsigned read; /* index for next read */ 343 unsigned read; /* index for next read */
342 local_t entries; /* entries on this page */ 344 local_t entries; /* entries on this page */
345 unsigned long real_end; /* real end of data */
343 struct buffer_data_page *page; /* Actual data page */ 346 struct buffer_data_page *page; /* Actual data page */
344}; 347};
345 348
@@ -1770,6 +1773,13 @@ rb_reset_tail(struct ring_buffer_per_cpu *cpu_buffer,
1770 kmemcheck_annotate_bitfield(event, bitfield); 1773 kmemcheck_annotate_bitfield(event, bitfield);
1771 1774
1772 /* 1775 /*
1776 * Save the original length to the meta data.
1777 * This will be used by the reader to add lost event
1778 * counter.
1779 */
1780 tail_page->real_end = tail;
1781
1782 /*
1773 * If this event is bigger than the minimum size, then 1783 * If this event is bigger than the minimum size, then
1774 * we need to be careful that we don't subtract the 1784 * we need to be careful that we don't subtract the
1775 * write counter enough to allow another writer to slip 1785 * write counter enough to allow another writer to slip
@@ -2888,6 +2898,7 @@ rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
2888 local_set(&cpu_buffer->reader_page->write, 0); 2898 local_set(&cpu_buffer->reader_page->write, 0);
2889 local_set(&cpu_buffer->reader_page->entries, 0); 2899 local_set(&cpu_buffer->reader_page->entries, 0);
2890 local_set(&cpu_buffer->reader_page->page->commit, 0); 2900 local_set(&cpu_buffer->reader_page->page->commit, 0);
2901 cpu_buffer->reader_page->real_end = 0;
2891 2902
2892 spin: 2903 spin:
2893 /* 2904 /*
@@ -3728,11 +3739,11 @@ int ring_buffer_read_page(struct ring_buffer *buffer,
3728 struct ring_buffer_event *event; 3739 struct ring_buffer_event *event;
3729 struct buffer_data_page *bpage; 3740 struct buffer_data_page *bpage;
3730 struct buffer_page *reader; 3741 struct buffer_page *reader;
3742 unsigned long missed_events;
3731 unsigned long flags; 3743 unsigned long flags;
3732 unsigned int commit; 3744 unsigned int commit;
3733 unsigned int read; 3745 unsigned int read;
3734 u64 save_timestamp; 3746 u64 save_timestamp;
3735 int missed_events = 0;
3736 int ret = -1; 3747 int ret = -1;
3737 3748
3738 if (!cpumask_test_cpu(cpu, buffer->cpumask)) 3749 if (!cpumask_test_cpu(cpu, buffer->cpumask))
@@ -3766,8 +3777,7 @@ int ring_buffer_read_page(struct ring_buffer *buffer,
3766 commit = rb_page_commit(reader); 3777 commit = rb_page_commit(reader);
3767 3778
3768 /* Check if any events were dropped */ 3779 /* Check if any events were dropped */
3769 if (cpu_buffer->lost_events) 3780 missed_events = cpu_buffer->lost_events;
3770 missed_events = 1;
3771 3781
3772 /* 3782 /*
3773 * If this page has been partially read or 3783 * If this page has been partially read or
@@ -3829,6 +3839,14 @@ int ring_buffer_read_page(struct ring_buffer *buffer,
3829 local_set(&reader->entries, 0); 3839 local_set(&reader->entries, 0);
3830 reader->read = 0; 3840 reader->read = 0;
3831 *data_page = bpage; 3841 *data_page = bpage;
3842
3843 /*
3844 * Use the real_end for the data size,
3845 * This gives us a chance to store the lost events
3846 * on the page.
3847 */
3848 if (reader->real_end)
3849 local_set(&bpage->commit, reader->real_end);
3832 } 3850 }
3833 ret = read; 3851 ret = read;
3834 3852
@@ -3836,8 +3854,19 @@ int ring_buffer_read_page(struct ring_buffer *buffer,
3836 /* 3854 /*
3837 * Set a flag in the commit field if we lost events 3855 * Set a flag in the commit field if we lost events
3838 */ 3856 */
3839 if (missed_events) 3857 if (missed_events) {
3858 commit = local_read(&bpage->commit);
3859
3860 /* If there is room at the end of the page to save the
3861 * missed events, then record it there.
3862 */
3863 if (BUF_PAGE_SIZE - commit >= sizeof(missed_events)) {
3864 memcpy(&bpage->data[commit], &missed_events,
3865 sizeof(missed_events));
3866 local_add(RB_MISSED_STORED, &bpage->commit);
3867 }
3840 local_add(RB_MISSED_EVENTS, &bpage->commit); 3868 local_add(RB_MISSED_EVENTS, &bpage->commit);
3869 }
3841 3870
3842 out_unlock: 3871 out_unlock:
3843 spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); 3872 spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);