diff options
| author | Ingo Molnar <mingo@kernel.org> | 2012-12-08 09:42:17 -0500 |
| --- | --- | --- |
| committer | Ingo Molnar <mingo@kernel.org> | 2012-12-08 09:42:47 -0500 |
| commit | 08cd2a6960ae2e1aa7f44b44ebafa84f503a2dd1 (patch) | |
| tree | b8a4f41686c3478f3b938155c866730164b8906c | |
| parent | fd6da696f38b00ffeae1185d6f0ec5d4ab3b472d (diff) | |
| parent | 9366c1ba13fbc41bdb57702e75ca4382f209c82f (diff) | |
Merge branch 'tip/perf/urgent-2' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-trace into perf/urgent
Pull ftrace fixes from Steve Rostedt.
Signed-off-by: Ingo Molnar <mingo@kernel.org>
| -rw-r--r-- | kernel/trace/ftrace.c | 2 |
| -rw-r--r-- | kernel/trace/ring_buffer.c | 16 |
2 files changed, 14 insertions, 4 deletions
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c index 9dcf15d38380..51b71594f321 100644 --- a/kernel/trace/ftrace.c +++ b/kernel/trace/ftrace.c | |||
@@ -2437,7 +2437,7 @@ static void reset_iter_read(struct ftrace_iterator *iter) | |||
2437 | { | 2437 | { |
2438 | iter->pos = 0; | 2438 | iter->pos = 0; |
2439 | iter->func_pos = 0; | 2439 | iter->func_pos = 0; |
2440 | iter->flags &= ~(FTRACE_ITER_PRINTALL & FTRACE_ITER_HASH); | 2440 | iter->flags &= ~(FTRACE_ITER_PRINTALL | FTRACE_ITER_HASH); |
2441 | } | 2441 | } |
2442 | 2442 | ||
2443 | static void *t_start(struct seq_file *m, loff_t *pos) | 2443 | static void *t_start(struct seq_file *m, loff_t *pos) |
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c index b979426d16c6..4cb5e5147165 100644 --- a/kernel/trace/ring_buffer.c +++ b/kernel/trace/ring_buffer.c | |||
@@ -1396,6 +1396,8 @@ rb_insert_pages(struct ring_buffer_per_cpu *cpu_buffer) | |||
1396 | struct list_head *head_page_with_bit; | 1396 | struct list_head *head_page_with_bit; |
1397 | 1397 | ||
1398 | head_page = &rb_set_head_page(cpu_buffer)->list; | 1398 | head_page = &rb_set_head_page(cpu_buffer)->list; |
1399 | if (!head_page) | ||
1400 | break; | ||
1399 | prev_page = head_page->prev; | 1401 | prev_page = head_page->prev; |
1400 | 1402 | ||
1401 | first_page = pages->next; | 1403 | first_page = pages->next; |
@@ -2934,7 +2936,7 @@ unsigned long ring_buffer_oldest_event_ts(struct ring_buffer *buffer, int cpu) | |||
2934 | unsigned long flags; | 2936 | unsigned long flags; |
2935 | struct ring_buffer_per_cpu *cpu_buffer; | 2937 | struct ring_buffer_per_cpu *cpu_buffer; |
2936 | struct buffer_page *bpage; | 2938 | struct buffer_page *bpage; |
2937 | unsigned long ret; | 2939 | unsigned long ret = 0; |
2938 | 2940 | ||
2939 | if (!cpumask_test_cpu(cpu, buffer->cpumask)) | 2941 | if (!cpumask_test_cpu(cpu, buffer->cpumask)) |
2940 | return 0; | 2942 | return 0; |
@@ -2949,7 +2951,8 @@ unsigned long ring_buffer_oldest_event_ts(struct ring_buffer *buffer, int cpu) | |||
2949 | bpage = cpu_buffer->reader_page; | 2951 | bpage = cpu_buffer->reader_page; |
2950 | else | 2952 | else |
2951 | bpage = rb_set_head_page(cpu_buffer); | 2953 | bpage = rb_set_head_page(cpu_buffer); |
2952 | ret = bpage->page->time_stamp; | 2954 | if (bpage) |
2955 | ret = bpage->page->time_stamp; | ||
2953 | raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); | 2956 | raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); |
2954 | 2957 | ||
2955 | return ret; | 2958 | return ret; |
@@ -3260,6 +3263,8 @@ rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer) | |||
3260 | * Splice the empty reader page into the list around the head. | 3263 | * Splice the empty reader page into the list around the head. |
3261 | */ | 3264 | */ |
3262 | reader = rb_set_head_page(cpu_buffer); | 3265 | reader = rb_set_head_page(cpu_buffer); |
3266 | if (!reader) | ||
3267 | goto out; | ||
3263 | cpu_buffer->reader_page->list.next = rb_list_head(reader->list.next); | 3268 | cpu_buffer->reader_page->list.next = rb_list_head(reader->list.next); |
3264 | cpu_buffer->reader_page->list.prev = reader->list.prev; | 3269 | cpu_buffer->reader_page->list.prev = reader->list.prev; |
3265 | 3270 | ||
@@ -3778,12 +3783,17 @@ void | |||
3778 | ring_buffer_read_finish(struct ring_buffer_iter *iter) | 3783 | ring_buffer_read_finish(struct ring_buffer_iter *iter) |
3779 | { | 3784 | { |
3780 | struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer; | 3785 | struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer; |
3786 | unsigned long flags; | ||
3781 | 3787 | ||
3782 | /* | 3788 | /* |
3783 | * Ring buffer is disabled from recording, here's a good place | 3789 | * Ring buffer is disabled from recording, here's a good place |
3784 | * to check the integrity of the ring buffer. | 3790 | * to check the integrity of the ring buffer. |
3791 | * Must prevent readers from trying to read, as the check | ||
3792 | * clears the HEAD page and readers require it. | ||
3785 | */ | 3793 | */ |
3794 | raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags); | ||
3786 | rb_check_pages(cpu_buffer); | 3795 | rb_check_pages(cpu_buffer); |
3796 | raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); | ||
3787 | 3797 | ||
3788 | atomic_dec(&cpu_buffer->record_disabled); | 3798 | atomic_dec(&cpu_buffer->record_disabled); |
3789 | atomic_dec(&cpu_buffer->buffer->resize_disabled); | 3799 | atomic_dec(&cpu_buffer->buffer->resize_disabled); |