author     Steven Rostedt <rostedt@goodmis.org>      2008-10-04 02:00:58 -0400
committer  Ingo Molnar <mingo@elte.hu>               2008-10-14 04:39:18 -0400
commit     6f807acd27734197b11d42829d3cbb9c0937b572 (patch)
tree       3587f14a894f05a607cc71fd1ee4e63a3eb3b679 /kernel
parent     097d036a2f25eecc42435c57e010aaf4a2eed2d9 (diff)
ring-buffer: move page indexes into page headers
Remove the global head and tail indexes and move them into the page
header. Each page will now keep track of where the last write and read
were made. We also rename head and tail to read and write for clarity.

This patch is needed for future enhancements to move the ring buffer
to a lockless solution.

Signed-off-by: Steven Rostedt <srostedt@redhat.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
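For orientation, here is a minimal sketch of the layout change, with the
field names taken from the diff below. This is not the complete kernel
definition: unrelated members are elided, the _old/_new suffixes are
illustrative only, and u64 is given a userspace stand-in so the sketch
compiles on its own.

    #include <stdint.h>

    typedef uint64_t u64;            /* stand-in for the kernel typedef */
    struct buffer_page_old;          /* contents unchanged by this patch */

    /* Before this patch: global cursors lived in the per-CPU structure. */
    struct ring_buffer_per_cpu_old {
            unsigned long head;              /* read from head */
            unsigned long tail;              /* write to tail */
            unsigned long reader;
            struct buffer_page_old *head_page;
            struct buffer_page_old *tail_page;
    };

    /* After this patch: each page carries its own read/write cursors. */
    struct buffer_page_new {
            u64 time_stamp;                  /* page time stamp */
            unsigned size;                   /* size of page data */
            unsigned write;                  /* index for next write */
            unsigned read;                   /* index for next read */
    };

With the cursors stored per page, a writer and a reader working on
different pages update disjoint fields rather than shared per-CPU
counters, which is presumably the property the planned lockless work
builds on.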
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/trace/ring_buffer.c | 75
1 file changed, 41 insertions(+), 34 deletions(-)
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index 6b8dac02364f..09d4f0d879a7 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -117,6 +117,8 @@ void *ring_buffer_event_data(struct ring_buffer_event *event)
 struct buffer_page {
         u64 time_stamp;                 /* page time stamp */
         unsigned size;                  /* size of page data */
+        unsigned write;                 /* index for next write */
+        unsigned read;                  /* index for next read */
         struct list_head list;          /* list of free pages */
         void *page;                     /* Actual data page */
 };
@@ -153,11 +155,8 @@ struct ring_buffer_per_cpu {
         spinlock_t lock;
         struct lock_class_key lock_key;
         struct list_head pages;
-        unsigned long head;             /* read from head */
-        unsigned long tail;             /* write to tail */
-        unsigned long reader;
-        struct buffer_page *head_page;
-        struct buffer_page *tail_page;
+        struct buffer_page *head_page;  /* read from head */
+        struct buffer_page *tail_page;  /* write to tail */
         struct buffer_page *reader_page;
         unsigned long overrun;
         unsigned long entries;
@@ -566,10 +565,11 @@ int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size)
 
 static inline int rb_per_cpu_empty(struct ring_buffer_per_cpu *cpu_buffer)
 {
-        return (cpu_buffer->reader == cpu_buffer->reader_page->size &&
+        return cpu_buffer->reader_page->read == cpu_buffer->reader_page->size &&
                 (cpu_buffer->tail_page == cpu_buffer->reader_page ||
                  (cpu_buffer->tail_page == cpu_buffer->head_page &&
-                  cpu_buffer->head == cpu_buffer->tail)));
+                  cpu_buffer->head_page->read ==
+                  cpu_buffer->tail_page->write));
 }
 
 static inline int rb_null_event(struct ring_buffer_event *event)
@@ -577,7 +577,7 @@ static inline int rb_null_event(struct ring_buffer_event *event)
         return event->type == RINGBUF_TYPE_PADDING;
 }
 
-static inline void *rb_page_index(struct buffer_page *page, unsigned index)
+static inline void *__rb_page_index(struct buffer_page *page, unsigned index)
 {
         return page->page + index;
 }
@@ -585,15 +585,21 @@ static inline void *rb_page_index(struct buffer_page *page, unsigned index)
 static inline struct ring_buffer_event *
 rb_reader_event(struct ring_buffer_per_cpu *cpu_buffer)
 {
-        return rb_page_index(cpu_buffer->reader_page,
-                             cpu_buffer->reader);
+        return __rb_page_index(cpu_buffer->reader_page,
+                               cpu_buffer->reader_page->read);
+}
+
+static inline struct ring_buffer_event *
+rb_head_event(struct ring_buffer_per_cpu *cpu_buffer)
+{
+        return __rb_page_index(cpu_buffer->head_page,
+                               cpu_buffer->head_page->read);
 }
 
 static inline struct ring_buffer_event *
 rb_iter_head_event(struct ring_buffer_iter *iter)
 {
-        return rb_page_index(iter->head_page,
-                             iter->head);
+        return __rb_page_index(iter->head_page, iter->head);
 }
 
 /*
@@ -610,7 +616,7 @@ static void rb_update_overflow(struct ring_buffer_per_cpu *cpu_buffer)
         for (head = 0; head < rb_head_size(cpu_buffer);
              head += rb_event_length(event)) {
 
-                event = rb_page_index(cpu_buffer->head_page, head);
+                event = __rb_page_index(cpu_buffer->head_page, head);
                 BUG_ON(rb_null_event(event));
                 /* Only count data entries */
                 if (event->type != RINGBUF_TYPE_DATA)
@@ -640,13 +646,13 @@ rb_add_stamp(struct ring_buffer_per_cpu *cpu_buffer, u64 *ts)
 
 static void rb_reset_head_page(struct ring_buffer_per_cpu *cpu_buffer)
 {
-        cpu_buffer->head = 0;
+        cpu_buffer->head_page->read = 0;
 }
 
 static void rb_reset_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
 {
         cpu_buffer->read_stamp = cpu_buffer->reader_page->time_stamp;
-        cpu_buffer->reader = 0;
+        cpu_buffer->reader_page->read = 0;
 }
 
 static inline void rb_inc_iter(struct ring_buffer_iter *iter)
@@ -743,9 +749,8 @@ __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
         struct ring_buffer *buffer = cpu_buffer->buffer;
         struct ring_buffer_event *event;
 
-        /* No locking needed for tail page */
         tail_page = cpu_buffer->tail_page;
-        tail = cpu_buffer->tail;
+        tail = cpu_buffer->tail_page->write;
 
         if (tail + length > BUF_PAGE_SIZE) {
                 struct buffer_page *next_page = tail_page;
@@ -774,7 +779,7 @@ __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
         }
 
         if (tail != BUF_PAGE_SIZE) {
-                event = rb_page_index(tail_page, tail);
+                event = __rb_page_index(tail_page, tail);
                 /* page padding */
                 event->type = RINGBUF_TYPE_PADDING;
         }
@@ -784,14 +789,14 @@ __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
                 tail_page->size = 0;
                 tail = 0;
                 cpu_buffer->tail_page = tail_page;
-                cpu_buffer->tail = tail;
+                cpu_buffer->tail_page->write = tail;
                 rb_add_stamp(cpu_buffer, ts);
                 spin_unlock(&cpu_buffer->lock);
         }
 
         BUG_ON(tail + length > BUF_PAGE_SIZE);
 
-        event = rb_page_index(tail_page, tail);
+        event = __rb_page_index(tail_page, tail);
         rb_update_event(event, type, length);
 
         return event;
@@ -823,12 +828,12 @@ rb_add_time_stamp(struct ring_buffer_per_cpu *cpu_buffer,
                 return -1;
 
         /* check to see if we went to the next page */
-        if (cpu_buffer->tail) {
+        if (cpu_buffer->tail_page->write) {
                 /* Still on same page, update timestamp */
                 event->time_delta = *delta & TS_MASK;
                 event->array[0] = *delta >> TS_SHIFT;
                 /* commit the time event */
-                cpu_buffer->tail +=
+                cpu_buffer->tail_page->write +=
                         rb_event_length(event);
                 cpu_buffer->write_stamp = *ts;
                 *delta = 0;
@@ -846,7 +851,7 @@ rb_reserve_next_event(struct ring_buffer_per_cpu *cpu_buffer,
 
         ts = ring_buffer_time_stamp(cpu_buffer->cpu);
 
-        if (cpu_buffer->tail) {
+        if (cpu_buffer->tail_page->write) {
                 delta = ts - cpu_buffer->write_stamp;
 
                 if (test_time_stamp(delta)) {
@@ -868,7 +873,7 @@ rb_reserve_next_event(struct ring_buffer_per_cpu *cpu_buffer,
                 return NULL;
 
         /* If the reserve went to the next page, our delta is zero */
-        if (!cpu_buffer->tail)
+        if (!cpu_buffer->tail_page->write)
                 delta = 0;
 
         event->time_delta = delta;
@@ -933,8 +938,8 @@ ring_buffer_lock_reserve(struct ring_buffer *buffer,
 static void rb_commit(struct ring_buffer_per_cpu *cpu_buffer,
                       struct ring_buffer_event *event)
 {
-        cpu_buffer->tail += rb_event_length(event);
-        cpu_buffer->tail_page->size = cpu_buffer->tail;
+        cpu_buffer->tail_page->write += rb_event_length(event);
+        cpu_buffer->tail_page->size = cpu_buffer->tail_page->write;
         cpu_buffer->write_stamp += event->time_delta;
         cpu_buffer->entries++;
 }
@@ -1178,10 +1183,10 @@ void ring_buffer_iter_reset(struct ring_buffer_iter *iter)
         /* Iterator usage is expected to have record disabled */
         if (list_empty(&cpu_buffer->reader_page->list)) {
                 iter->head_page = cpu_buffer->head_page;
-                iter->head = cpu_buffer->head;
+                iter->head = cpu_buffer->head_page->read;
         } else {
                 iter->head_page = cpu_buffer->reader_page;
-                iter->head = cpu_buffer->reader;
+                iter->head = cpu_buffer->reader_page->read;
         }
         if (iter->head)
                 iter->read_stamp = cpu_buffer->read_stamp;
@@ -1200,7 +1205,7 @@ int ring_buffer_iter_empty(struct ring_buffer_iter *iter)
         cpu_buffer = iter->cpu_buffer;
 
         return iter->head_page == cpu_buffer->tail_page &&
-                iter->head == cpu_buffer->tail;
+                iter->head == cpu_buffer->tail_page->write;
 }
 
 static void
@@ -1277,11 +1282,11 @@ rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
         reader = cpu_buffer->reader_page;
 
         /* If there's more to read, return this page */
-        if (cpu_buffer->reader < reader->size)
+        if (cpu_buffer->reader_page->read < reader->size)
                 goto out;
 
         /* Never should we have an index greater than the size */
-        WARN_ON(cpu_buffer->reader > reader->size);
+        WARN_ON(cpu_buffer->reader_page->read > reader->size);
 
         /* check if we caught up to the tail */
         reader = NULL;
@@ -1342,7 +1347,7 @@ static void rb_advance_reader(struct ring_buffer_per_cpu *cpu_buffer)
         rb_update_read_stamp(cpu_buffer, event);
 
         length = rb_event_length(event);
-        cpu_buffer->reader += length;
+        cpu_buffer->reader_page->read += length;
 }
 
 static void rb_advance_iter(struct ring_buffer_iter *iter)
@@ -1373,7 +1378,7 @@ static void rb_advance_iter(struct ring_buffer_iter *iter)
          * at the tail of the buffer.
          */
         BUG_ON((iter->head_page == cpu_buffer->tail_page) &&
-               (iter->head + length > cpu_buffer->tail));
+               (iter->head + length > cpu_buffer->tail_page->write));
 
         rb_update_iter_read_stamp(iter, event);
 
@@ -1623,7 +1628,9 @@ rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer)
         INIT_LIST_HEAD(&cpu_buffer->reader_page->list);
         cpu_buffer->reader_page->size = 0;
 
-        cpu_buffer->head = cpu_buffer->tail = cpu_buffer->reader = 0;
+        cpu_buffer->head_page->read = 0;
+        cpu_buffer->tail_page->write = 0;
+        cpu_buffer->reader_page->read = 0;
 
         cpu_buffer->overrun = 0;
         cpu_buffer->entries = 0;
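
To see how the relocated indexes compose, here is a small self-contained
C model of the empty check from rb_per_cpu_empty() above. This is a
sketch only: per_cpu_empty and page_idx are hypothetical names, and the
page-identity comparisons are passed in as booleans rather than made by
comparing real page pointers as the kernel code does.

    #include <stdbool.h>
    #include <stdio.h>

    /* Per-page cursors, mirroring the new buffer_page fields. */
    struct page_idx {
            unsigned size;   /* bytes of valid data on the page */
            unsigned write;  /* index for next write */
            unsigned read;   /* index for next read */
    };

    /*
     * Empty means: the reader page is fully consumed, AND the writer
     * either still sits on the reader page, or sits on the head page
     * with no unread data on it.
     */
    static bool per_cpu_empty(const struct page_idx *reader,
                              const struct page_idx *head,
                              const struct page_idx *tail,
                              bool tail_is_reader, bool tail_is_head)
    {
            return reader->read == reader->size &&
                   (tail_is_reader ||
                    (tail_is_head && head->read == tail->write));
    }

    int main(void)
    {
            /* Reader consumed 64 bytes; writer never left the page. */
            struct page_idx page = { .size = 64, .write = 64, .read = 64 };

            printf("%d\n", per_cpu_empty(&page, &page, &page, true, true));
            return 0;
    }

Note that the check reads several per-page fields without a lock, which
is consistent with the commit message: the point of moving the cursors
into the page headers is to make such page-local accesses the unit of
coordination for a later lockless design.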