author		Steven Rostedt <rostedt@goodmis.org>	2008-10-01 11:14:54 -0400
committer	Ingo Molnar <mingo@elte.hu>	2008-10-14 04:39:08 -0400
commit		e4c2ce82ca2710e17cb4df8eb2b249fa2eb5af30 (patch)
tree		186a96aa4cace7fe51ede64c8b4f426a5007f007 /kernel
parent		7104f300c5a69b46dda00d898034dd05c9f21739 (diff)
ring_buffer: allocate buffer page pointer
The current method of overlaying the page frame as the buffer page pointer
can be very dangerous and limits our ability to do other things with a page
from the buffer, like send it off to disk.

This patch allocates the buffer_page instead of overlaying the page's page
frame. The use of the buffer_page has hardly changed due to this.

Signed-off-by: Steven Rostedt <srostedt@redhat.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
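In short, the buffer_page descriptor becomes a small kzalloc'ed object that
carries a plain pointer to a separately allocated data page, rather than being
overlaid on the page frame itself. A condensed sketch of the allocation and
teardown pattern, pieced together from the hunks below (error paths and list
bookkeeping omitted, so this is illustrative rather than a literal excerpt):

	struct buffer_page *page;
	unsigned long addr;
	int cpu;		/* CPU owning this per-cpu buffer */

	/* descriptor: small, cache-line-aligned, node-local allocation */
	page = kzalloc_node(ALIGN(sizeof(*page), cache_line_size()),
			    GFP_KERNEL, cpu_to_node(cpu));

	/* data page: a full page allocated separately and referenced by pointer */
	addr = __get_free_page(GFP_KERNEL);
	page->page = (void *)addr;

	/* teardown mirrors the split: free the data page, then the descriptor */
	if (page->page)
		__free_page(page->page);
	kfree(page);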
Diffstat (limited to 'kernel')
-rw-r--r--	kernel/trace/ring_buffer.c	54
1 file changed, 32 insertions(+), 22 deletions(-)
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index 9631abf2ae29..98145718988d 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -115,16 +115,10 @@ void *ring_buffer_event_data(struct ring_buffer_event *event)
  * Thanks to Peter Zijlstra for suggesting this idea.
  */
 struct buffer_page {
-	union {
-		struct {
-			unsigned long	flags;		/* mandatory */
-			atomic_t	_count;		/* mandatory */
-			u64		time_stamp;	/* page time stamp */
-			unsigned	size;		/* size of page data */
-			struct list_head list;		/* list of free pages */
-		};
-		struct page page;
-	};
+	u64		time_stamp;	/* page time stamp */
+	unsigned	size;		/* size of page data */
+	struct list_head list;		/* list of free pages */
+	void		*page;		/* Actual data page */
 };
 
 /*
@@ -133,9 +127,9 @@ struct buffer_page {
  */
 static inline void free_buffer_page(struct buffer_page *bpage)
 {
-	reset_page_mapcount(&bpage->page);
-	bpage->page.mapping = NULL;
-	__free_page(&bpage->page);
+	if (bpage->page)
+		__free_page(bpage->page);
+	kfree(bpage);
 }
 
 /*
@@ -237,11 +231,16 @@ static int rb_allocate_pages(struct ring_buffer_per_cpu *cpu_buffer,
 	unsigned i;
 
 	for (i = 0; i < nr_pages; i++) {
+		page = kzalloc_node(ALIGN(sizeof(*page), cache_line_size()),
+				    GFP_KERNEL, cpu_to_node(cpu));
+		if (!page)
+			goto free_pages;
+		list_add(&page->list, &pages);
+
 		addr = __get_free_page(GFP_KERNEL);
 		if (!addr)
 			goto free_pages;
-		page = (struct buffer_page *)virt_to_page(addr);
-		list_add(&page->list, &pages);
+		page->page = (void *)addr;
 	}
 
 	list_splice(&pages, head);
@@ -262,6 +261,7 @@ static struct ring_buffer_per_cpu *
 rb_allocate_cpu_buffer(struct ring_buffer *buffer, int cpu)
 {
 	struct ring_buffer_per_cpu *cpu_buffer;
+	struct buffer_page *page;
 	unsigned long addr;
 	int ret;
 
@@ -275,10 +275,17 @@ rb_allocate_cpu_buffer(struct ring_buffer *buffer, int cpu)
 	spin_lock_init(&cpu_buffer->lock);
 	INIT_LIST_HEAD(&cpu_buffer->pages);
 
+	page = kzalloc_node(ALIGN(sizeof(*page), cache_line_size()),
+			    GFP_KERNEL, cpu_to_node(cpu));
+	if (!page)
+		goto fail_free_buffer;
+
+	cpu_buffer->reader_page = page;
 	addr = __get_free_page(GFP_KERNEL);
 	if (!addr)
-		goto fail_free_buffer;
-	cpu_buffer->reader_page = (struct buffer_page *)virt_to_page(addr);
+		goto fail_free_reader;
+	page->page = (void *)addr;
+
 	INIT_LIST_HEAD(&cpu_buffer->reader_page->list);
 	cpu_buffer->reader_page->size = 0;
 
@@ -523,11 +530,16 @@ int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size)
 
 	for_each_buffer_cpu(buffer, cpu) {
 		for (i = 0; i < new_pages; i++) {
+			page = kzalloc_node(ALIGN(sizeof(*page),
+						  cache_line_size()),
+					    GFP_KERNEL, cpu_to_node(cpu));
+			if (!page)
+				goto free_pages;
+			list_add(&page->list, &pages);
 			addr = __get_free_page(GFP_KERNEL);
 			if (!addr)
 				goto free_pages;
-			page = (struct buffer_page *)virt_to_page(addr);
-			list_add(&page->list, &pages);
+			page->page = (void *)addr;
 		}
 	}
 
@@ -567,9 +579,7 @@ static inline int rb_null_event(struct ring_buffer_event *event)
 
 static inline void *rb_page_index(struct buffer_page *page, unsigned index)
 {
-	void *addr = page_address(&page->page);
-
-	return addr + index;
+	return page->page + index;
 }
 
 static inline struct ring_buffer_event *