author     Steven Rostedt <srostedt@redhat.com>    2008-12-02 15:34:06 -0500
committer  Ingo Molnar <mingo@elte.hu>             2008-12-03 02:56:20 -0500
commit     abc9b56d66fbd4d93302ef4bf6fa726e1b8255f9
tree       a93b0dca471b616bc2987530d536a1829ef537a8  /kernel/trace
parent     a5e25883a445dce94a087ca479b21a5959cd5c18
ring-buffer: move some metadata into buffer page
Impact: get ready for splice changes
This patch moves the commit index and timestamp into the beginning of each
data page of the buffer. This change allows the page to be moved to another
location (disk, network, etc.) while still carrying enough information in
the page itself to be read.
Signed-off-by: Steven Rostedt <srostedt@redhat.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
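For orientation, here is a minimal sketch of the two structures as they look
after this patch, condensed from the diff below (kernel types such as u64,
local_t and struct list_head come from the usual kernel headers):

struct buffer_data_page {                      /* lives at the start of the data page */
        u64              time_stamp;           /* page time stamp */
        local_t          commit;               /* committed write index */
        unsigned char    data[];               /* event data starts here */
};

struct buffer_page {                           /* bookkeeping kept off the data page */
        local_t          write;                /* index for next write */
        unsigned         read;                 /* index for next read */
        struct list_head list;                 /* list of free pages */
        struct buffer_data_page *page;         /* the actual data page */
};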
Diffstat (limited to 'kernel/trace')
 kernel/trace/ring_buffer.c (-rw-r--r--) | 63
 1 file changed, 36 insertions(+), 27 deletions(-)
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index e206951603c1..8619c5345889 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -195,20 +195,24 @@ void *ring_buffer_event_data(struct ring_buffer_event *event)
 #define TS_MASK         ((1ULL << TS_SHIFT) - 1)
 #define TS_DELTA_TEST   (~TS_MASK)
 
-/*
- * This hack stolen from mm/slob.c.
- * We can store per page timing information in the page frame of the page.
- * Thanks to Peter Zijlstra for suggesting this idea.
- */
-struct buffer_page {
+struct buffer_data_page {
 	u64		 time_stamp;	/* page time stamp */
-	local_t		 write;		/* index for next write */
 	local_t		 commit;	/* write commited index */
+	unsigned char	 data[];	/* data of buffer page */
+};
+
+struct buffer_page {
+	local_t		 write;		/* index for next write */
 	unsigned	 read;		/* index for next read */
 	struct list_head list;		/* list of free pages */
-	void		*page;		/* Actual data page */
+	struct buffer_data_page *page;	/* Actual data page */
 };
 
+static void rb_init_page(struct buffer_data_page *page)
+{
+	local_set(&page->commit, 0);
+}
+
 /*
  * Also stolen from mm/slob.c. Thanks to Mathieu Desnoyers for pointing
  * this issue out.
@@ -230,7 +234,7 @@ static inline int test_time_stamp(u64 delta)
 	return 0;
 }
 
-#define BUF_PAGE_SIZE PAGE_SIZE
+#define BUF_PAGE_SIZE (PAGE_SIZE - sizeof(struct buffer_data_page))
 
 /*
  * head_page == tail_page && head == tail then buffer is empty.
@@ -333,6 +337,7 @@ static int rb_allocate_pages(struct ring_buffer_per_cpu *cpu_buffer,
 		if (!addr)
 			goto free_pages;
 		page->page = (void *)addr;
+		rb_init_page(page->page);
 	}
 
 	list_splice(&pages, head);
@@ -378,6 +383,7 @@ rb_allocate_cpu_buffer(struct ring_buffer *buffer, int cpu)
 	if (!addr)
 		goto fail_free_reader;
 	page->page = (void *)addr;
+	rb_init_page(page->page);
 
 	INIT_LIST_HEAD(&cpu_buffer->reader_page->list);
 
@@ -647,6 +653,7 @@ int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size)
 			if (!addr)
 				goto free_pages;
 			page->page = (void *)addr;
+			rb_init_page(page->page);
 		}
 	}
 
@@ -682,7 +689,7 @@ static inline int rb_null_event(struct ring_buffer_event *event)
 
 static inline void *__rb_page_index(struct buffer_page *page, unsigned index)
 {
-	return page->page + index;
+	return page->page->data + index;
 }
 
 static inline struct ring_buffer_event *
@@ -712,7 +719,7 @@ static inline unsigned rb_page_write(struct buffer_page *bpage)
 
 static inline unsigned rb_page_commit(struct buffer_page *bpage)
 {
-	return local_read(&bpage->commit);
+	return local_read(&bpage->page->commit);
 }
 
 /* Size is determined by what has been commited */
@@ -804,14 +811,15 @@ rb_set_commit_event(struct ring_buffer_per_cpu *cpu_buffer,
 		if (RB_WARN_ON(cpu_buffer,
 			       cpu_buffer->commit_page == cpu_buffer->tail_page))
 			return;
-		cpu_buffer->commit_page->commit =
+		cpu_buffer->commit_page->page->commit =
 			cpu_buffer->commit_page->write;
 		rb_inc_page(cpu_buffer, &cpu_buffer->commit_page);
-		cpu_buffer->write_stamp = cpu_buffer->commit_page->time_stamp;
+		cpu_buffer->write_stamp =
+			cpu_buffer->commit_page->page->time_stamp;
 	}
 
 	/* Now set the commit to the event's index */
-	local_set(&cpu_buffer->commit_page->commit, index);
+	local_set(&cpu_buffer->commit_page->page->commit, index);
 }
 
 static inline void
@@ -826,16 +834,17 @@ rb_set_commit_to_write(struct ring_buffer_per_cpu *cpu_buffer)
 	 * assign the commit to the tail.
 	 */
 	while (cpu_buffer->commit_page != cpu_buffer->tail_page) {
-		cpu_buffer->commit_page->commit =
+		cpu_buffer->commit_page->page->commit =
 			cpu_buffer->commit_page->write;
 		rb_inc_page(cpu_buffer, &cpu_buffer->commit_page);
-		cpu_buffer->write_stamp = cpu_buffer->commit_page->time_stamp;
+		cpu_buffer->write_stamp =
+			cpu_buffer->commit_page->page->time_stamp;
 		/* add barrier to keep gcc from optimizing too much */
 		barrier();
 	}
 	while (rb_commit_index(cpu_buffer) !=
 	       rb_page_write(cpu_buffer->commit_page)) {
-		cpu_buffer->commit_page->commit =
+		cpu_buffer->commit_page->page->commit =
 			cpu_buffer->commit_page->write;
 		barrier();
 	}
@@ -843,7 +852,7 @@ rb_set_commit_to_write(struct ring_buffer_per_cpu *cpu_buffer)
 
 static void rb_reset_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
 {
-	cpu_buffer->read_stamp = cpu_buffer->reader_page->time_stamp;
+	cpu_buffer->read_stamp = cpu_buffer->reader_page->page->time_stamp;
 	cpu_buffer->reader_page->read = 0;
 }
 
@@ -862,7 +871,7 @@ static inline void rb_inc_iter(struct ring_buffer_iter *iter)
 	else
 		rb_inc_page(cpu_buffer, &iter->head_page);
 
-	iter->read_stamp = iter->head_page->time_stamp;
+	iter->read_stamp = iter->head_page->page->time_stamp;
 	iter->head = 0;
 }
 
@@ -998,12 +1007,12 @@ __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
 		 */
 		if (tail_page == cpu_buffer->tail_page) {
 			local_set(&next_page->write, 0);
-			local_set(&next_page->commit, 0);
+			local_set(&next_page->page->commit, 0);
 			cpu_buffer->tail_page = next_page;
 
 			/* reread the time stamp */
 			*ts = ring_buffer_time_stamp(cpu_buffer->cpu);
-			cpu_buffer->tail_page->time_stamp = *ts;
+			cpu_buffer->tail_page->page->time_stamp = *ts;
 		}
 
 		/*
@@ -1048,7 +1057,7 @@ __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
 	 * this page's time stamp.
 	 */
 	if (!tail && rb_is_commit(cpu_buffer, event))
-		cpu_buffer->commit_page->time_stamp = *ts;
+		cpu_buffer->commit_page->page->time_stamp = *ts;
 
 	return event;
 
@@ -1099,7 +1108,7 @@ rb_add_time_stamp(struct ring_buffer_per_cpu *cpu_buffer,
 		event->time_delta = *delta & TS_MASK;
 		event->array[0] = *delta >> TS_SHIFT;
 	} else {
-		cpu_buffer->commit_page->time_stamp = *ts;
+		cpu_buffer->commit_page->page->time_stamp = *ts;
 		event->time_delta = 0;
 		event->array[0] = 0;
 	}
@@ -1552,7 +1561,7 @@ static void rb_iter_reset(struct ring_buffer_iter *iter)
 	if (iter->head)
 		iter->read_stamp = cpu_buffer->read_stamp;
 	else
-		iter->read_stamp = iter->head_page->time_stamp;
+		iter->read_stamp = iter->head_page->page->time_stamp;
 }
 
 /**
@@ -1696,7 +1705,7 @@ rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
 	cpu_buffer->reader_page->list.prev = reader->list.prev;
 
 	local_set(&cpu_buffer->reader_page->write, 0);
-	local_set(&cpu_buffer->reader_page->commit, 0);
+	local_set(&cpu_buffer->reader_page->page->commit, 0);
 
 	/* Make the reader page now replace the head */
 	reader->list.prev->next = &cpu_buffer->reader_page->list;
@@ -2088,7 +2097,7 @@ rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer)
 	cpu_buffer->head_page
 		= list_entry(cpu_buffer->pages.next, struct buffer_page, list);
 	local_set(&cpu_buffer->head_page->write, 0);
-	local_set(&cpu_buffer->head_page->commit, 0);
+	local_set(&cpu_buffer->head_page->page->commit, 0);
 
 	cpu_buffer->head_page->read = 0;
 
@@ -2097,7 +2106,7 @@ rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer)
 
 	INIT_LIST_HEAD(&cpu_buffer->reader_page->list);
 	local_set(&cpu_buffer->reader_page->write, 0);
-	local_set(&cpu_buffer->reader_page->commit, 0);
+	local_set(&cpu_buffer->reader_page->page->commit, 0);
 	cpu_buffer->reader_page->read = 0;
 
 	cpu_buffer->overrun = 0;
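Because the metadata now occupies the front of each data page, the usable
payload per page shrinks accordingly (BUF_PAGE_SIZE becomes
PAGE_SIZE - sizeof(struct buffer_data_page)), but in exchange an exported
page is self-describing. Here is a sketch of a hypothetical consumer of such
a page, purely to illustrate the point; exported_page_len is not part of
this patch:

/* A reader that receives a data page via splice/disk/network can tell how
 * many bytes of dpage->data[] hold committed events, and can reconstruct
 * absolute times from dpage->time_stamp plus each event's time_delta. */
static unsigned long exported_page_len(struct buffer_data_page *dpage)
{
        return local_read(&dpage->commit);
}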