author		Steven Rostedt <srostedt@redhat.com>	2009-05-01 18:44:45 -0400
committer	Steven Rostedt <rostedt@goodmis.org>	2009-05-05 14:28:08 -0400
commit		778c55d44eb4f5f658915ed631d68ed9d1ac3ad1 (patch)
tree		fb410e06820276a499a1ff116e02f0ab8a1f20ad
parent		e4906eff9e6fbd2d311abcbcc53d5a531773c982 (diff)
ring-buffer: record page entries in buffer page descriptor
Currently, when the ring buffer writer overflows the buffer and must
write over non-consumed data, we increment the overrun counter by
reading the entries on the page we are about to overwrite. This reads
the entries one by one.

This is not very efficient. This patch adds another entry counter
to each buffer page descriptor that keeps track of the number of
entries on the page. Now on overwrite, the overrun counter simply
needs to add the number of entries on the page it is about to
overwrite.
[ Impact: speed up of ring buffer in overwrite mode ]
Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
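For context, the change replaces a per-event walk over the page being overwritten with a single addition of a per-page counter that the writer keeps up to date. The sketch below is a standalone, simplified model of that bookkeeping and not the kernel code: plain C counters stand in for the kernel's local_t/local_inc/local_read primitives, and struct page_model and its fields are hypothetical names used only for illustration.

/*
 * Simplified model of the bookkeeping change (not the kernel code):
 * plain counters replace local_t atomics, struct page_model is made up.
 */
#include <stdio.h>

#define MAX_EVENTS 16

struct page_model {
	unsigned long write;			/* next write offset in bytes */
	unsigned long entries;			/* events on this page (the new counter) */
	unsigned long lengths[MAX_EVENTS];	/* per-event lengths, used by the old walk */
};

/* Old scheme: walk the page being overwritten and count its events one by one. */
static unsigned long count_by_walking(const struct page_model *page)
{
	unsigned long head, i = 0, count = 0;

	for (head = 0; head < page->write; head += page->lengths[i++])
		count++;
	return count;
}

/* New scheme: the writer keeps the per-page count current as it writes ... */
static void write_event(struct page_model *page, unsigned long length)
{
	page->lengths[page->entries] = length;
	page->write += length;
	page->entries++;		/* analogous to local_inc(&tail_page->entries) */
}

/* ... so accounting for an overwritten page is one addition, not a walk. */
static void overwrite_page(struct page_model *page, unsigned long *overrun)
{
	*overrun += page->entries;	/* analogous to local_read(&head_page->entries) */
	page->entries = 0;
	page->write = 0;
}

int main(void)
{
	struct page_model page = { 0 };
	unsigned long overrun = 0;

	write_event(&page, 8);
	write_event(&page, 16);
	write_event(&page, 24);

	printf("old scheme, walking the page: %lu entries\n", count_by_walking(&page));
	overwrite_page(&page, &overrun);
	printf("new scheme, one addition:     overrun = %lu\n", overrun);
	return 0;
}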
-rw-r--r--	kernel/trace/ring_buffer.c | 39
1 file changed, 13 insertions(+), 26 deletions(-)
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index c792ea893b01..342eacc4baa8 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -321,9 +321,10 @@ struct buffer_data_page {
 };
 
 struct buffer_page {
+	struct list_head list;		/* list of buffer pages */
 	local_t		 write;		/* index for next write */
 	unsigned	 read;		/* index for next read */
-	struct list_head list;		/* list of free pages */
+	local_t		 entries;	/* entries on this page */
 	struct buffer_data_page *page;	/* Actual data page */
 };
 
@@ -977,30 +978,6 @@ static inline unsigned rb_head_size(struct ring_buffer_per_cpu *cpu_buffer)
 	return rb_page_commit(cpu_buffer->head_page);
 }
 
-/*
- * When the tail hits the head and the buffer is in overwrite mode,
- * the head jumps to the next page and all content on the previous
- * page is discarded. But before doing so, we update the overrun
- * variable of the buffer.
- */
-static void rb_update_overflow(struct ring_buffer_per_cpu *cpu_buffer)
-{
-	struct ring_buffer_event *event;
-	unsigned long head;
-
-	for (head = 0; head < rb_head_size(cpu_buffer);
-	     head += rb_event_length(event)) {
-
-		event = __rb_page_index(cpu_buffer->head_page, head);
-		if (RB_WARN_ON(cpu_buffer, rb_null_event(event)))
-			return;
-		/* Only count data entries */
-		if (event->type_len > RINGBUF_TYPE_DATA_TYPE_LEN_MAX)
-			continue;
-		cpu_buffer->overrun++;
-	}
-}
-
 static inline void rb_inc_page(struct ring_buffer_per_cpu *cpu_buffer,
 			       struct buffer_page **bpage)
 {
@@ -1253,7 +1230,8 @@ __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
 		/* tail_page has not moved yet? */
 		if (tail_page == cpu_buffer->tail_page) {
 			/* count overflows */
-			rb_update_overflow(cpu_buffer);
+			cpu_buffer->overrun +=
+				local_read(&head_page->entries);
 
 			rb_inc_page(cpu_buffer, &head_page);
 			cpu_buffer->head_page = head_page;
@@ -1268,6 +1246,7 @@ __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
 		 */
 		if (tail_page == cpu_buffer->tail_page) {
 			local_set(&next_page->write, 0);
+			local_set(&next_page->entries, 0);
 			local_set(&next_page->page->commit, 0);
 			cpu_buffer->tail_page = next_page;
 
@@ -1313,6 +1292,10 @@ __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
 	event = __rb_page_index(tail_page, tail);
 	rb_update_event(event, type, length);
 
+	/* The passed in type is zero for DATA */
+	if (likely(!type))
+		local_inc(&tail_page->entries);
+
 	/*
 	 * If this is a commit and the tail is zero, then update
 	 * this page's time stamp.
@@ -2183,6 +2166,7 @@ rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
 	cpu_buffer->reader_page->list.prev = reader->list.prev;
 
 	local_set(&cpu_buffer->reader_page->write, 0);
+	local_set(&cpu_buffer->reader_page->entries, 0);
 	local_set(&cpu_buffer->reader_page->page->commit, 0);
 
 	/* Make the reader page now replace the head */
@@ -2629,6 +2613,7 @@ rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer)
 	cpu_buffer->head_page
 		= list_entry(cpu_buffer->pages.next, struct buffer_page, list);
 	local_set(&cpu_buffer->head_page->write, 0);
+	local_set(&cpu_buffer->head_page->entries, 0);
 	local_set(&cpu_buffer->head_page->page->commit, 0);
 
 	cpu_buffer->head_page->read = 0;
@@ -2638,6 +2623,7 @@ rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer)
 
 	INIT_LIST_HEAD(&cpu_buffer->reader_page->list);
 	local_set(&cpu_buffer->reader_page->write, 0);
+	local_set(&cpu_buffer->reader_page->entries, 0);
 	local_set(&cpu_buffer->reader_page->page->commit, 0);
 	cpu_buffer->reader_page->read = 0;
 
@@ -2996,6 +2982,7 @@ int ring_buffer_read_page(struct ring_buffer *buffer,
 	bpage = reader->page;
 	reader->page = *data_page;
 	local_set(&reader->write, 0);
+	local_set(&reader->entries, 0);
 	reader->read = 0;
 	*data_page = bpage;
 