Diffstat (limited to 'kernel/trace/ring_buffer.c')
 kernel/trace/ring_buffer.c | 14 ++++++++------
 1 file changed, 8 insertions(+), 6 deletions(-)
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index e43c928356ee..db223fe8887f 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -486,7 +486,7 @@ struct ring_buffer_iter {
 /* Up this if you want to test the TIME_EXTENTS and normalization */
 #define DEBUG_SHIFT 0
 
-static inline u64 rb_time_stamp(struct ring_buffer *buffer, int cpu)
+static inline u64 rb_time_stamp(struct ring_buffer *buffer)
 {
 	/* shift to debug/test normalization and TIME_EXTENTS */
 	return buffer->clock() << DEBUG_SHIFT;
@@ -497,7 +497,7 @@ u64 ring_buffer_time_stamp(struct ring_buffer *buffer, int cpu)
 	u64 time;
 
 	preempt_disable_notrace();
-	time = rb_time_stamp(buffer, cpu);
+	time = rb_time_stamp(buffer);
 	preempt_enable_no_resched_notrace();
 
 	return time;
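
The cpu argument dropped here was never used: rb_time_stamp() only invokes the buffer's clock callback, which takes no arguments, so the parameter was dead weight in every caller. A minimal, self-contained sketch of the resulting shape, using a hypothetical stripped-down struct ring_buffer (the real one carries far more state) and omitting the preemption guards shown above:

#include <stdint.h>

typedef uint64_t u64;

/* Up this if you want to test the TIME_EXTENTS and normalization */
#define DEBUG_SHIFT 0

/* Hypothetical, stripped-down buffer: only the clock callback matters here. */
struct ring_buffer {
	u64 (*clock)(void);
};

/* After the change: no cpu argument; the clock alone provides the stamp. */
static inline u64 rb_time_stamp(struct ring_buffer *buffer)
{
	/* shift to debug/test normalization and TIME_EXTENTS */
	return buffer->clock() << DEBUG_SHIFT;
}

/* Callers keep their own cpu parameter but no longer forward it. */
u64 ring_buffer_time_stamp(struct ring_buffer *buffer, int cpu)
{
	(void)cpu;			/* still part of the exported API, unused here */
	return rb_time_stamp(buffer);	/* preempt guards from the real code omitted */
}
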
@@ -602,7 +602,7 @@ static struct list_head *rb_list_head(struct list_head *list)
 }
 
 /*
- * rb_is_head_page - test if the give page is the head page
+ * rb_is_head_page - test if the given page is the head page
  *
  * Because the reader may move the head_page pointer, we can
  * not trust what the head page is (it may be pointing to
@@ -1196,6 +1196,7 @@ rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned nr_pages)
 	atomic_inc(&cpu_buffer->record_disabled);
 	synchronize_sched();
 
+	spin_lock_irq(&cpu_buffer->reader_lock);
 	rb_head_page_deactivate(cpu_buffer);
 
 	for (i = 0; i < nr_pages; i++) {
@@ -1210,6 +1211,7 @@ rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned nr_pages)
 		return;
 
 	rb_reset_cpu(cpu_buffer);
+	spin_unlock_irq(&cpu_buffer->reader_lock);
 
 	rb_check_pages(cpu_buffer);
 
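
These two hunks close a race in rb_remove_pages(): the per-cpu reader_lock is now held, with interrupts disabled, while buffer pages are unlinked during a resize, so a concurrent reader can never walk a half-relinked page list. A schematic, kernel-style sketch of the ordering the added lock/unlock establish (not standalone-buildable; the page-freeing loop is elided, and the trailing atomic_dec() is assumed as the counterpart of the atomic_inc() above, it is not part of these hunks):

/* Sketch only: mirrors rb_remove_pages() but is not the real function body. */
static void sketch_remove_pages(struct ring_buffer_per_cpu *cpu_buffer,
				unsigned nr_pages)
{
	/* 1) Keep new writes out and wait for in-flight writers to drain. */
	atomic_inc(&cpu_buffer->record_disabled);
	synchronize_sched();

	/*
	 * 2) Hold the reader lock, IRQs off, while the page list is torn
	 *    apart, so a concurrent reader never sees a half-relinked list.
	 */
	spin_lock_irq(&cpu_buffer->reader_lock);
	rb_head_page_deactivate(cpu_buffer);
	/* ... unlink and free nr_pages pages ... */
	rb_reset_cpu(cpu_buffer);
	spin_unlock_irq(&cpu_buffer->reader_lock);

	/* 3) Sanity-check the list, then let writers back in (assumed). */
	rb_check_pages(cpu_buffer);
	atomic_dec(&cpu_buffer->record_disabled);
}
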
@@ -1871,7 +1873,7 @@ rb_move_tail(struct ring_buffer_per_cpu *cpu_buffer,
 		 * Nested commits always have zero deltas, so
 		 * just reread the time stamp
 		 */
-		*ts = rb_time_stamp(buffer, cpu_buffer->cpu);
+		*ts = rb_time_stamp(buffer);
 		next_page->page->time_stamp = *ts;
 	}
 
@@ -2114,7 +2116,7 @@ rb_reserve_next_event(struct ring_buffer *buffer,
 	if (RB_WARN_ON(cpu_buffer, ++nr_loops > 1000))
 		goto out_fail;
 
-	ts = rb_time_stamp(cpu_buffer->buffer, cpu_buffer->cpu);
+	ts = rb_time_stamp(cpu_buffer->buffer);
 
 	/*
 	 * Only the first commit can update the timestamp.
@@ -2684,7 +2686,7 @@ unsigned long ring_buffer_entries(struct ring_buffer *buffer)
 EXPORT_SYMBOL_GPL(ring_buffer_entries);
 
 /**
- * ring_buffer_overrun_cpu - get the number of overruns in buffer
+ * ring_buffer_overruns - get the number of overruns in buffer
  * @buffer: The ring buffer
  *
  * Returns the total number of overruns in the ring buffer