path: root/kernel/trace/ring_buffer.c
author    Steven Rostedt <srostedt@redhat.com>    2009-03-30 15:32:01 -0400
committer Steven Rostedt <rostedt@goodmis.org>    2009-07-07 18:36:10 -0400
commit    3adc54fa82a68be1cd1ac82ad786ee362796e50a (patch)
tree      0538fac360f776354ec6b171e6adb0049cfa9f3b /kernel/trace/ring_buffer.c
parent    ddc1637af217dbd8bc51f30e6d24e84476a869a6 (diff)
ring-buffer: make the buffer a true circular link list
This patch changes the ring buffer data pages from using a linked list
head pointer to making each buffer page point to another buffer page and
never back to a "head". This makes the handling of the ring buffer less
complex, since traversing the ring buffer pages no longer needs to
account for the head pointer. This change is also needed to make the
ring buffer lockless.

[ Changes in version 2:

  - Added change that Lai Jiangshan mentioned.

  From: Lai Jiangshan <laijs@cn.fujitsu.com>
  Date: Thu, 11 Jun 2009 11:25:48 +0800
  LKML-Reference: <4A30793C.6090208@cn.fujitsu.com>

  I'm not sure whether these 4 lines:

	bpage = list_entry(pages.next, struct buffer_page, list);
	list_del_init(&bpage->list);
	cpu_buffer->pages = &bpage->list;
	list_splice(&pages, cpu_buffer->pages);

  are equivalent to these 2 lines:

	cpu_buffer->pages = pages.next;
	list_del(&pages);

  If they are equivalent, I think the second form is simpler, even if it
  is not a strictly necessary cleanup. What I asked is: if they are
  equivalent, could you use the two-line form?
]

[ Impact: simplify the ring buffer to help make it lockless ]

Signed-off-by: Steven Rostedt <srostedt@redhat.com>
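To make the new layout concrete, here is a minimal userspace sketch (illustration only, not kernel code; the names page_node, pages, first and last are made up stand-ins for buffer_page and cpu_buffer->pages) of what a "true circular linked list" of pages looks like: every node in the ring is a real page, the per-cpu pages pointer is simply "some page" in the ring, and walking the ring never has to test for a separate list head.

/*
 * Illustration only -- not kernel code. Hypothetical names stand in
 * for buffer_page and cpu_buffer->pages.
 */
#include <stdio.h>
#include <stdlib.h>

struct page_node {
	struct page_node *next;		/* each page points to another page ... */
	struct page_node *prev;		/* ... never to a separate list head    */
	int id;
};

int main(void)
{
	struct page_node *pages = NULL;	/* plays the role of cpu_buffer->pages */
	struct page_node *node, *first = NULL, *last = NULL;
	int i;

	/* Build a circle of 4 pages with no head element at all. */
	for (i = 0; i < 4; i++) {
		node = malloc(sizeof(*node));
		if (!node)
			return 1;
		node->id = i;
		if (!first) {
			first = last = node;
			node->next = node->prev = node;
		} else {
			node->prev = last;
			node->next = first;
			last->next = node;
			first->prev = node;
			last = node;
		}
	}
	pages = first;

	/* Traversal wraps from the last page straight back to the first;
	 * there is no head pointer to detect and skip. */
	node = pages;
	do {
		printf("page %d\n", node->id);
		node = node->next;
	} while (node != pages);

	return 0;
}

The walk prints each of the four pages exactly once and stops when it arrives back at the starting page, which is all the simplified rb_inc_page() below has to rely on.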
Diffstat (limited to 'kernel/trace/ring_buffer.c')
-rw-r--r--	kernel/trace/ring_buffer.c	49
1 file changed, 32 insertions(+), 17 deletions(-)
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index bf27bb7a63e2..7c0168ad6d51 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -406,7 +406,7 @@ struct ring_buffer_per_cpu {
 	spinlock_t reader_lock;	/* serialize readers */
 	raw_spinlock_t lock;
 	struct lock_class_key lock_key;
-	struct list_head pages;
+	struct list_head *pages;
 	struct buffer_page *head_page;	/* read from head */
 	struct buffer_page *tail_page;	/* write to tail */
 	struct buffer_page *commit_page;	/* committed pages */
@@ -498,7 +498,7 @@ EXPORT_SYMBOL_GPL(ring_buffer_normalize_time_stamp);
  */
 static int rb_check_pages(struct ring_buffer_per_cpu *cpu_buffer)
 {
-	struct list_head *head = &cpu_buffer->pages;
+	struct list_head *head = cpu_buffer->pages;
 	struct buffer_page *bpage, *tmp;
 
 	if (RB_WARN_ON(cpu_buffer, head->next->prev != head))
@@ -521,12 +521,13 @@ static int rb_check_pages(struct ring_buffer_per_cpu *cpu_buffer)
 static int rb_allocate_pages(struct ring_buffer_per_cpu *cpu_buffer,
			     unsigned nr_pages)
 {
-	struct list_head *head = &cpu_buffer->pages;
 	struct buffer_page *bpage, *tmp;
 	unsigned long addr;
 	LIST_HEAD(pages);
 	unsigned i;
 
+	WARN_ON(!nr_pages);
+
 	for (i = 0; i < nr_pages; i++) {
 		bpage = kzalloc_node(ALIGN(sizeof(*bpage), cache_line_size()),
				    GFP_KERNEL, cpu_to_node(cpu_buffer->cpu));
@@ -541,7 +542,13 @@ static int rb_allocate_pages(struct ring_buffer_per_cpu *cpu_buffer,
 		rb_init_page(bpage->page);
 	}
 
-	list_splice(&pages, head);
+	/*
+	 * The ring buffer page list is a circular list that does not
+	 * start and end with a list head. All page list items point to
+	 * other pages.
+	 */
+	cpu_buffer->pages = pages.next;
+	list_del(&pages);
 
 	rb_check_pages(cpu_buffer);
 
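As a side note on the two added lines (the form Lai Jiangshan asked about in the changelog), a small self-contained sketch, using simplified stand-ins for the kernel's list_head and list_del() rather than the real implementations, shows why saving pages.next and then unlinking the on-stack head leaves the buffer pages in a closed circle of their own:

#include <assert.h>

/* Simplified stand-ins for the kernel's list primitives, illustration only. */
struct list_head {
	struct list_head *next, *prev;
};

static void list_del(struct list_head *entry)
{
	entry->next->prev = entry->prev;
	entry->prev->next = entry->next;
}

int main(void)
{
	/*
	 * The allocation loop leaves a temporary on-stack head with the
	 * buffer pages linked behind it:  pages -> p0 -> p1 -> p2 -> pages
	 */
	struct list_head pages, p0, p1, p2;
	struct list_head *cpu_buffer_pages;

	pages.next = &p0; p0.prev = &pages;
	p0.next = &p1;    p1.prev = &p0;
	p1.next = &p2;    p2.prev = &p1;
	p2.next = &pages; pages.prev = &p2;

	/* The two lines from the patch: remember the first real page,
	 * then unlink the temporary head from the ring. */
	cpu_buffer_pages = pages.next;
	list_del(&pages);

	/* Only pages remain, closed into a circle: p0 -> p1 -> p2 -> p0 */
	assert(cpu_buffer_pages == &p0);
	assert(p2.next == &p0 && p0.prev == &p2);
	return 0;
}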
@@ -573,7 +580,6 @@ rb_allocate_cpu_buffer(struct ring_buffer *buffer, int cpu)
 	spin_lock_init(&cpu_buffer->reader_lock);
 	lockdep_set_class(&cpu_buffer->reader_lock, buffer->reader_lock_key);
 	cpu_buffer->lock = (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
-	INIT_LIST_HEAD(&cpu_buffer->pages);
 
 	bpage = kzalloc_node(ALIGN(sizeof(*bpage), cache_line_size()),
			    GFP_KERNEL, cpu_to_node(cpu));
@@ -594,7 +600,7 @@ rb_allocate_cpu_buffer(struct ring_buffer *buffer, int cpu)
 		goto fail_free_reader;
 
 	cpu_buffer->head_page
-		= list_entry(cpu_buffer->pages.next, struct buffer_page, list);
+		= list_entry(cpu_buffer->pages, struct buffer_page, list);
 	cpu_buffer->tail_page = cpu_buffer->commit_page = cpu_buffer->head_page;
 
 	return cpu_buffer;
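The head_page assignment above drops the ".next" hop because cpu_buffer->pages now points directly at a page's embedded list node rather than at a separate head. A minimal sketch (simplified types; list_entry is written out here as the usual container_of pointer arithmetic) of how a bare list pointer is mapped back to its containing buffer page:

/* Illustration only: simplified stand-ins for list_head, buffer_page
 * and the kernel's list_entry()/container_of(). */
#include <assert.h>
#include <stddef.h>

struct list_head { struct list_head *next, *prev; };

struct buffer_page {
	struct list_head list;	/* node embedded in the page ring */
	int id;
};

#define list_entry(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

int main(void)
{
	struct buffer_page page = { .id = 42 };
	struct list_head *pages = &page.list;	/* like cpu_buffer->pages */

	/* Recover the containing buffer_page from the bare list pointer. */
	struct buffer_page *head_page =
		list_entry(pages, struct buffer_page, list);

	assert(head_page == &page && head_page->id == 42);
	return 0;
}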
@@ -609,15 +615,20 @@ rb_allocate_cpu_buffer(struct ring_buffer *buffer, int cpu)
 
 static void rb_free_cpu_buffer(struct ring_buffer_per_cpu *cpu_buffer)
 {
-	struct list_head *head = &cpu_buffer->pages;
+	struct list_head *head = cpu_buffer->pages;
 	struct buffer_page *bpage, *tmp;
 
 	free_buffer_page(cpu_buffer->reader_page);
 
-	list_for_each_entry_safe(bpage, tmp, head, list) {
-		list_del_init(&bpage->list);
+	if (head) {
+		list_for_each_entry_safe(bpage, tmp, head, list) {
+			list_del_init(&bpage->list);
+			free_buffer_page(bpage);
+		}
+		bpage = list_entry(head, struct buffer_page, list);
 		free_buffer_page(bpage);
 	}
+
 	kfree(cpu_buffer);
 }
 
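The reshaped free path follows from how list_for_each_entry_safe() iterates: it starts at head->next and stops when it comes back around to head, so the buffer page whose list node "head" points at is never visited by the loop and has to be freed separately afterwards; and since cpu_buffer->pages can be NULL if allocation failed partway, the whole block is guarded by if (head). A compact sketch of that freeing pattern, with an open-coded loop standing in for the kernel macro (illustration only, simplified types):

#include <stdlib.h>

struct node {
	struct node *next, *prev;
};

/* Free every node of a headless circular list; "head" is simply one of
 * the nodes (or NULL if the ring was never set up). */
void free_ring(struct node *head)
{
	struct node *pos, *tmp;

	if (!head)
		return;

	/* Mirrors list_for_each_entry_safe(): walk from head->next until
	 * we come back around to head, freeing as we go. */
	pos = head->next;
	while (pos != head) {
		tmp = pos->next;	/* remember next before freeing pos */
		free(pos);
		pos = tmp;
	}

	/* The node the loop never reaches: head itself. */
	free(head);
}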
@@ -760,14 +771,14 @@ rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned nr_pages)
 	synchronize_sched();
 
 	for (i = 0; i < nr_pages; i++) {
-		if (RB_WARN_ON(cpu_buffer, list_empty(&cpu_buffer->pages)))
+		if (RB_WARN_ON(cpu_buffer, list_empty(cpu_buffer->pages)))
 			return;
-		p = cpu_buffer->pages.next;
+		p = cpu_buffer->pages->next;
 		bpage = list_entry(p, struct buffer_page, list);
 		list_del_init(&bpage->list);
 		free_buffer_page(bpage);
 	}
-	if (RB_WARN_ON(cpu_buffer, list_empty(&cpu_buffer->pages)))
+	if (RB_WARN_ON(cpu_buffer, list_empty(cpu_buffer->pages)))
 		return;
 
 	rb_reset_cpu(cpu_buffer);
@@ -795,7 +806,7 @@ rb_insert_pages(struct ring_buffer_per_cpu *cpu_buffer,
 		p = pages->next;
 		bpage = list_entry(p, struct buffer_page, list);
 		list_del_init(&bpage->list);
-		list_add_tail(&bpage->list, &cpu_buffer->pages);
+		list_add_tail(&bpage->list, cpu_buffer->pages);
 	}
 	rb_reset_cpu(cpu_buffer);
 
@@ -992,9 +1003,6 @@ static inline void rb_inc_page(struct ring_buffer_per_cpu *cpu_buffer,
 {
 	struct list_head *p = (*bpage)->list.next;
 
-	if (p == &cpu_buffer->pages)
-		p = p->next;
-
 	*bpage = list_entry(p, struct buffer_page, list);
 }
 
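This hunk is the payoff of the headless ring on the hot path: advancing to the next page needs no special case. A rough before/after contrast, with simplified types and hypothetical function names used purely for illustration:

struct list_head { struct list_head *next, *prev; };

/* Old scheme: the ring contained one node, the embedded list head,
 * that was not a real buffer page, so stepping had to skip it. */
struct list_head *advance_old(struct list_head *p, struct list_head *head)
{
	p = p->next;
	if (p == head)		/* not a page -- hop over it */
		p = p->next;
	return p;
}

/* New scheme: every node in the ring is a buffer page. */
struct list_head *advance_new(struct list_head *p)
{
	return p->next;
}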
@@ -2247,6 +2255,13 @@ rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
 	cpu_buffer->reader_page->list.next = reader->list.next;
 	cpu_buffer->reader_page->list.prev = reader->list.prev;
 
+	/*
+	 * cpu_buffer->pages just needs to point to the buffer, it
+	 * has no specific buffer page to point to. Lets move it out
+	 * of our way so we don't accidently swap it.
+	 */
+	cpu_buffer->pages = reader->list.prev;
+
 	local_set(&cpu_buffer->reader_page->write, 0);
 	local_set(&cpu_buffer->reader_page->entries, 0);
 	local_set(&cpu_buffer->reader_page->page->commit, 0);
@@ -2719,7 +2734,7 @@ static void
 rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer)
 {
 	cpu_buffer->head_page
-		= list_entry(cpu_buffer->pages.next, struct buffer_page, list);
+		= list_entry(cpu_buffer->pages, struct buffer_page, list);
 	local_set(&cpu_buffer->head_page->write, 0);
 	local_set(&cpu_buffer->head_page->entries, 0);
 	local_set(&cpu_buffer->head_page->page->commit, 0);