author	Steven Rostedt <srostedt@redhat.com>	2012-11-29 22:27:22 -0500
committer	Steven Rostedt <rostedt@goodmis.org>	2012-11-30 11:09:28 -0500
commit	54f7be5b831254199522523ccab4c3d954bbf576 (patch)
tree	7a65782f3bb7f67438db53444b59e8206916d478 /kernel/trace
parent	70f77b3f7ec010ff9624c1f2e39a81babc9e2429 (diff)
ring-buffer: Fix NULL pointer if rb_set_head_page() fails
The function rb_set_head_page() searches the list of ring buffer pages
for the page that has the HEAD page flag set. If it does not find it,
it will do a WARN_ON(), disable the ring buffer and return NULL, as this
should never happen. But if this bug happens to happen, not all callers
of this function can handle a NULL pointer being returned from it. That
needs to be fixed.

Cc: stable@vger.kernel.org # 3.0+
Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
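For illustration only (not part of the commit): a minimal, self-contained C sketch of the failure mode the message describes, where a lookup helper that "should never fail" returns NULL on an internal inconsistency and the caller must check the result before dereferencing it, defaulting its return value to 0 as the patch does for `ret`. The names (`find_head`, `struct page`, `corrupted`) are hypothetical stand-ins, not the kernel structures.

```c
#include <stdio.h>
#include <stddef.h>

/* Hypothetical stand-in for a ring-buffer page. */
struct page {
	unsigned long time_stamp;
};

/*
 * Stand-in for rb_set_head_page(): normally returns the head page,
 * but returns NULL when the expected HEAD flag is never found.
 */
static struct page *find_head(struct page *pages, size_t n, int corrupted)
{
	(void)n;
	return corrupted ? NULL : &pages[0];
}

int main(void)
{
	struct page pages[2] = { { 100 }, { 200 } };
	unsigned long ts = 0;	/* default, mirroring 'ret = 0' in the patch */

	struct page *head = find_head(pages, 2, /* corrupted = */ 1);

	/* Guarded caller: the pattern the patch adds around each use. */
	if (head)
		ts = head->time_stamp;

	printf("oldest timestamp: %lu\n", ts);	/* prints 0 on failure */
	return 0;
}
```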
Diffstat (limited to 'kernel/trace')
-rw-r--r--	kernel/trace/ring_buffer.c	9
1 file changed, 7 insertions(+), 2 deletions(-)
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index b979426d16c..ec01803e0a5 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -1396,6 +1396,8 @@ rb_insert_pages(struct ring_buffer_per_cpu *cpu_buffer)
 		struct list_head *head_page_with_bit;
 
 		head_page = &rb_set_head_page(cpu_buffer)->list;
+		if (!head_page)
+			break;
 		prev_page = head_page->prev;
 
 		first_page = pages->next;
@@ -2934,7 +2936,7 @@ unsigned long ring_buffer_oldest_event_ts(struct ring_buffer *buffer, int cpu)
 	unsigned long flags;
 	struct ring_buffer_per_cpu *cpu_buffer;
 	struct buffer_page *bpage;
-	unsigned long ret;
+	unsigned long ret = 0;
 
 	if (!cpumask_test_cpu(cpu, buffer->cpumask))
 		return 0;
@@ -2949,7 +2951,8 @@ unsigned long ring_buffer_oldest_event_ts(struct ring_buffer *buffer, int cpu)
 		bpage = cpu_buffer->reader_page;
 	else
 		bpage = rb_set_head_page(cpu_buffer);
-	ret = bpage->page->time_stamp;
+	if (bpage)
+		ret = bpage->page->time_stamp;
 	raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
 
 	return ret;
@@ -3260,6 +3263,8 @@ rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
 	 * Splice the empty reader page into the list around the head.
 	 */
 	reader = rb_set_head_page(cpu_buffer);
+	if (!reader)
+		goto out;
 	cpu_buffer->reader_page->list.next = rb_list_head(reader->list.next);
 	cpu_buffer->reader_page->list.prev = reader->list.prev;
 