path: root/kernel/trace/ring_buffer.c
author    Andrea Bastoni <bastoni@cs.unc.edu>    2010-05-30 19:16:45 -0400
committer Andrea Bastoni <bastoni@cs.unc.edu>    2010-05-30 19:16:45 -0400
commit    ada47b5fe13d89735805b566185f4885f5a3f750 (patch)
tree      644b88f8a71896307d71438e9b3af49126ffb22b    /kernel/trace/ring_buffer.c
parent    43e98717ad40a4ae64545b5ba047c7b86aa44f4f (diff)
parent    3280f21d43ee541f97f8cda5792150d2dbec20d5 (diff)
Merge branch 'wip-2.6.34' into old-private-master (archived-private-master)
Diffstat (limited to 'kernel/trace/ring_buffer.c')
-rw-r--r--    kernel/trace/ring_buffer.c    137
1 file changed, 80 insertions, 57 deletions
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index 5dd017fea6f5..41ca394feb22 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -14,12 +14,14 @@
 #include <linux/module.h>
 #include <linux/percpu.h>
 #include <linux/mutex.h>
+#include <linux/slab.h>
 #include <linux/init.h>
 #include <linux/hash.h>
 #include <linux/list.h>
 #include <linux/cpu.h>
 #include <linux/fs.h>
 
+#include <asm/local.h>
 #include "trace.h"
 
 /*
@@ -206,6 +208,14 @@ EXPORT_SYMBOL_GPL(tracing_is_on);
 #define RB_MAX_SMALL_DATA	(RB_ALIGNMENT * RINGBUF_TYPE_DATA_TYPE_LEN_MAX)
 #define RB_EVNT_MIN_SIZE	8U	/* two 32bit words */
 
+#if !defined(CONFIG_64BIT) || defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
+# define RB_FORCE_8BYTE_ALIGNMENT	0
+# define RB_ARCH_ALIGNMENT		RB_ALIGNMENT
+#else
+# define RB_FORCE_8BYTE_ALIGNMENT	1
+# define RB_ARCH_ALIGNMENT		8U
+#endif
+
 /* define RINGBUF_TYPE_DATA for 'case RINGBUF_TYPE_DATA:' */
 #define RINGBUF_TYPE_DATA 0 ... RINGBUF_TYPE_DATA_TYPE_LEN_MAX
 
@@ -397,18 +407,21 @@ int ring_buffer_print_page_header(struct trace_seq *s)
 	int ret;
 
 	ret = trace_seq_printf(s, "\tfield: u64 timestamp;\t"
-			       "offset:0;\tsize:%u;\n",
-			       (unsigned int)sizeof(field.time_stamp));
+			       "offset:0;\tsize:%u;\tsigned:%u;\n",
+			       (unsigned int)sizeof(field.time_stamp),
+			       (unsigned int)is_signed_type(u64));
 
 	ret = trace_seq_printf(s, "\tfield: local_t commit;\t"
-			       "offset:%u;\tsize:%u;\n",
+			       "offset:%u;\tsize:%u;\tsigned:%u;\n",
 			       (unsigned int)offsetof(typeof(field), commit),
-			       (unsigned int)sizeof(field.commit));
+			       (unsigned int)sizeof(field.commit),
+			       (unsigned int)is_signed_type(long));
 
 	ret = trace_seq_printf(s, "\tfield: char data;\t"
-			       "offset:%u;\tsize:%u;\n",
+			       "offset:%u;\tsize:%u;\tsigned:%u;\n",
 			       (unsigned int)offsetof(typeof(field), data),
-			       (unsigned int)BUF_PAGE_SIZE);
+			       (unsigned int)BUF_PAGE_SIZE,
+			       (unsigned int)is_signed_type(char));
 
 	return ret;
 }
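The hunk above adds a "signed:%u" attribute to each field description using is_signed_type(). As a stand-alone illustration, the sketch below shows one common way such a signedness check can be written; the IS_SIGNED_TYPE name and its exact definition are stand-ins for illustration, not taken from this file.

#include <stdio.h>
#include <stdint.h>

/* Illustrative stand-in for a signedness-detection macro: casting -1 to an
 * unsigned type yields its maximum value, so the comparison below is true
 * only for signed types. */
#define IS_SIGNED_TYPE(type) (((type)(-1)) < (type)1)

int main(void)
{
	printf("u64  signed: %u\n", (unsigned int)IS_SIGNED_TYPE(uint64_t)); /* 0 */
	printf("long signed: %u\n", (unsigned int)IS_SIGNED_TYPE(long));     /* 1 */
	printf("char signed: %u\n", (unsigned int)IS_SIGNED_TYPE(char));     /* ABI-dependent */
	return 0;
}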
@@ -420,7 +433,7 @@ struct ring_buffer_per_cpu {
 	int				cpu;
 	struct ring_buffer		*buffer;
 	spinlock_t			reader_lock;	/* serialize readers */
-	raw_spinlock_t			lock;
+	arch_spinlock_t			lock;
 	struct lock_class_key		lock_key;
 	struct list_head		*pages;
 	struct buffer_page		*head_page;	/* read from head */
@@ -461,6 +474,8 @@ struct ring_buffer_iter {
 	struct ring_buffer_per_cpu	*cpu_buffer;
 	unsigned long			head;
 	struct buffer_page		*head_page;
+	struct buffer_page		*cache_reader_page;
+	unsigned long			cache_read;
 	u64				read_stamp;
 };
 
@@ -995,7 +1010,7 @@ rb_allocate_cpu_buffer(struct ring_buffer *buffer, int cpu)
 	cpu_buffer->buffer = buffer;
 	spin_lock_init(&cpu_buffer->reader_lock);
 	lockdep_set_class(&cpu_buffer->reader_lock, buffer->reader_lock_key);
-	cpu_buffer->lock = (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
+	cpu_buffer->lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
 
 	bpage = kzalloc_node(ALIGN(sizeof(*bpage), cache_line_size()),
 			    GFP_KERNEL, cpu_to_node(cpu));
@@ -1190,30 +1205,25 @@ rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned nr_pages)
 	struct list_head *p;
 	unsigned i;
 
-	atomic_inc(&cpu_buffer->record_disabled);
-	synchronize_sched();
-
 	spin_lock_irq(&cpu_buffer->reader_lock);
 	rb_head_page_deactivate(cpu_buffer);
 
 	for (i = 0; i < nr_pages; i++) {
 		if (RB_WARN_ON(cpu_buffer, list_empty(cpu_buffer->pages)))
-			return;
+			goto out;
 		p = cpu_buffer->pages->next;
 		bpage = list_entry(p, struct buffer_page, list);
 		list_del_init(&bpage->list);
 		free_buffer_page(bpage);
 	}
 	if (RB_WARN_ON(cpu_buffer, list_empty(cpu_buffer->pages)))
-		return;
+		goto out;
 
 	rb_reset_cpu(cpu_buffer);
-	spin_unlock_irq(&cpu_buffer->reader_lock);
-
 	rb_check_pages(cpu_buffer);
 
-	atomic_dec(&cpu_buffer->record_disabled);
-
+out:
+	spin_unlock_irq(&cpu_buffer->reader_lock);
 }
 
 static void
@@ -1224,26 +1234,22 @@ rb_insert_pages(struct ring_buffer_per_cpu *cpu_buffer,
 	struct list_head *p;
 	unsigned i;
 
-	atomic_inc(&cpu_buffer->record_disabled);
-	synchronize_sched();
-
 	spin_lock_irq(&cpu_buffer->reader_lock);
 	rb_head_page_deactivate(cpu_buffer);
 
 	for (i = 0; i < nr_pages; i++) {
 		if (RB_WARN_ON(cpu_buffer, list_empty(pages)))
-			return;
+			goto out;
 		p = pages->next;
 		bpage = list_entry(p, struct buffer_page, list);
 		list_del_init(&bpage->list);
 		list_add_tail(&bpage->list, cpu_buffer->pages);
 	}
 	rb_reset_cpu(cpu_buffer);
-	spin_unlock_irq(&cpu_buffer->reader_lock);
-
 	rb_check_pages(cpu_buffer);
 
-	atomic_dec(&cpu_buffer->record_disabled);
+out:
+	spin_unlock_irq(&cpu_buffer->reader_lock);
 }
 
 /**
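Both page helpers above now funnel every early exit through a single out: label so the reader_lock is always released, while the record-disable and synchronize work moves to the caller (see the ring_buffer_resize hunks below). A minimal userspace sketch of the same goto-out pattern, with a pthread mutex standing in for the reader lock and purely hypothetical names:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned int nr_items = 4;

/* Every failure path jumps to the single unlock site, so the function can
 * never return with the lock still held. */
static int remove_items(unsigned int nr)
{
	int ret = 0;
	unsigned int i;

	pthread_mutex_lock(&lock);
	for (i = 0; i < nr; i++) {
		if (nr_items == 0) {	/* analogous to RB_WARN_ON(list_empty(...)) */
			ret = -1;
			goto out;
		}
		nr_items--;		/* analogous to list_del_init() + free_buffer_page() */
	}
out:
	pthread_mutex_unlock(&lock);
	return ret;
}

int main(void)
{
	printf("remove 2 -> %d, %u left\n", remove_items(2), nr_items);
	printf("remove 9 -> %d, %u left\n", remove_items(9), nr_items);
	return 0;
}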
@@ -1251,11 +1257,6 @@ rb_insert_pages(struct ring_buffer_per_cpu *cpu_buffer,
  * @buffer: the buffer to resize.
  * @size: the new size.
  *
- * The tracer is responsible for making sure that the buffer is
- * not being used while changing the size.
- * Note: We may be able to change the above requirement by using
- * RCU synchronizations.
- *
  * Minimum size is 2 * BUF_PAGE_SIZE.
  *
  * Returns -1 on failure.
@@ -1287,6 +1288,11 @@ int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size)
 	if (size == buffer_size)
 		return size;
 
+	atomic_inc(&buffer->record_disabled);
+
+	/* Make sure all writers are done with this buffer. */
+	synchronize_sched();
+
 	mutex_lock(&buffer->mutex);
 	get_online_cpus();
 
@@ -1349,6 +1355,8 @@ int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size)
 	put_online_cpus();
 	mutex_unlock(&buffer->mutex);
 
+	atomic_dec(&buffer->record_disabled);
+
 	return size;
 
  free_pages:
@@ -1358,6 +1366,7 @@ int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size)
 	}
 	put_online_cpus();
 	mutex_unlock(&buffer->mutex);
+	atomic_dec(&buffer->record_disabled);
 	return -ENOMEM;
 
 	/*
@@ -1367,6 +1376,7 @@ int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size)
  out_fail:
 	put_online_cpus();
 	mutex_unlock(&buffer->mutex);
+	atomic_dec(&buffer->record_disabled);
 	return -1;
 }
 EXPORT_SYMBOL_GPL(ring_buffer_resize);
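With the disable/synchronize work moved out of the page helpers, ring_buffer_resize() now quiesces writers exactly once and re-enables recording on every exit path (success, -ENOMEM, and out_fail). A rough userspace sketch of that pairing, using a C11 atomic counter in place of record_disabled and a comment in place of synchronize_sched(); all names here are stand-ins, not the ring buffer's API:

#include <stdatomic.h>
#include <stdio.h>

static atomic_int record_disabled;

static int resize(unsigned long size)
{
	int ret;

	atomic_fetch_add(&record_disabled, 1);	/* writers see this and bail out */
	/* synchronize_sched() would go here: wait for in-flight writers */

	if (size == 0) {			/* stand-in for an allocation failure path */
		ret = -1;
		goto out;
	}
	ret = 0;				/* stand-in for the actual page add/remove work */
out:
	atomic_fetch_sub(&record_disabled, 1);	/* matched on every exit path */
	return ret;
}

int main(void)
{
	printf("resize(4096) = %d, disabled = %d\n", resize(4096), atomic_load(&record_disabled));
	printf("resize(0)    = %d, disabled = %d\n", resize(0), atomic_load(&record_disabled));
	return 0;
}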
@@ -1548,7 +1558,7 @@ rb_update_event(struct ring_buffer_event *event,
 
 	case 0:
 		length -= RB_EVNT_HDR_SIZE;
-		if (length > RB_MAX_SMALL_DATA)
+		if (length > RB_MAX_SMALL_DATA || RB_FORCE_8BYTE_ALIGNMENT)
 			event->array[0] = length;
 		else
 			event->type_len = DIV_ROUND_UP(length, RB_ALIGNMENT);
@@ -1723,11 +1733,11 @@ static unsigned rb_calculate_event_length(unsigned length)
 	if (!length)
 		length = 1;
 
-	if (length > RB_MAX_SMALL_DATA)
+	if (length > RB_MAX_SMALL_DATA || RB_FORCE_8BYTE_ALIGNMENT)
 		length += sizeof(event.array[0]);
 
 	length += RB_EVNT_HDR_SIZE;
-	length = ALIGN(length, RB_ALIGNMENT);
+	length = ALIGN(length, RB_ARCH_ALIGNMENT);
 
 	return length;
 }
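On configurations where RB_FORCE_8BYTE_ALIGNMENT is set, an event's length is always stored in array[0] (never packed into type_len) and the total size is rounded up to RB_ARCH_ALIGNMENT (8) rather than RB_ALIGNMENT (4). The sketch below redoes the arithmetic outside the kernel purely for illustration; the header size and small-data limit are assumed illustrative values, not taken from this file.

#include <stdio.h>

#define RB_ALIGNMENT		4U
#define RB_EVNT_HDR_SIZE	4U			/* illustrative value */
#define RB_MAX_SMALL_DATA	(RB_ALIGNMENT * 28U)	/* illustrative value */
#define ALIGN_UP(x, a)		(((x) + (a) - 1) & ~((a) - 1))

/* Hypothetical re-implementation of the length calculation: when force_8byte
 * is set, room for array[0] is always reserved and the result is rounded up
 * to 8 bytes instead of 4. */
static unsigned int calc_event_length(unsigned int length, int force_8byte)
{
	unsigned int arch_align = force_8byte ? 8U : RB_ALIGNMENT;

	if (!length)
		length = 1;
	if (length > RB_MAX_SMALL_DATA || force_8byte)
		length += sizeof(unsigned int);		/* room for array[0] */
	length += RB_EVNT_HDR_SIZE;
	return ALIGN_UP(length, arch_align);
}

int main(void)
{
	printf("10-byte payload, packed      : %u\n", calc_event_length(10, 0)); /* 16 */
	printf("10-byte payload, 8-byte align: %u\n", calc_event_length(10, 1)); /* 24 */
	return 0;
}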
@@ -1787,9 +1797,9 @@ rb_reset_tail(struct ring_buffer_per_cpu *cpu_buffer,
 static struct ring_buffer_event *
 rb_move_tail(struct ring_buffer_per_cpu *cpu_buffer,
 	     unsigned long length, unsigned long tail,
-	     struct buffer_page *commit_page,
 	     struct buffer_page *tail_page, u64 *ts)
 {
+	struct buffer_page *commit_page = cpu_buffer->commit_page;
 	struct ring_buffer *buffer = cpu_buffer->buffer;
 	struct buffer_page *next_page;
 	int ret;
@@ -1892,13 +1902,10 @@ static struct ring_buffer_event *
 __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
 		  unsigned type, unsigned long length, u64 *ts)
 {
-	struct buffer_page *tail_page, *commit_page;
+	struct buffer_page *tail_page;
 	struct ring_buffer_event *event;
 	unsigned long tail, write;
 
-	commit_page = cpu_buffer->commit_page;
-	/* we just need to protect against interrupts */
-	barrier();
 	tail_page = cpu_buffer->tail_page;
 	write = local_add_return(length, &tail_page->write);
 
@@ -1909,7 +1916,7 @@ __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
 	/* See if we shot pass the end of this buffer page */
 	if (write > BUF_PAGE_SIZE)
 		return rb_move_tail(cpu_buffer, length, tail,
-				    commit_page, tail_page, ts);
+				    tail_page, ts);
 
 	/* We reserved something on the buffer */
 
@@ -2237,12 +2244,12 @@ ring_buffer_lock_reserve(struct ring_buffer *buffer, unsigned long length)
 	if (ring_buffer_flags != RB_BUFFERS_ON)
 		return NULL;
 
-	if (atomic_read(&buffer->record_disabled))
-		return NULL;
-
 	/* If we are tracing schedule, we don't want to recurse */
 	resched = ftrace_preempt_disable();
 
+	if (atomic_read(&buffer->record_disabled))
+		goto out_nocheck;
+
 	if (trace_recursive_lock())
 		goto out_nocheck;
 
@@ -2474,11 +2481,11 @@ int ring_buffer_write(struct ring_buffer *buffer,
 	if (ring_buffer_flags != RB_BUFFERS_ON)
 		return -EBUSY;
 
-	if (atomic_read(&buffer->record_disabled))
-		return -EBUSY;
-
 	resched = ftrace_preempt_disable();
 
+	if (atomic_read(&buffer->record_disabled))
+		goto out;
+
 	cpu = raw_smp_processor_id();
 
 	if (!cpumask_test_cpu(cpu, buffer->cpumask))
@@ -2546,7 +2553,7 @@ EXPORT_SYMBOL_GPL(ring_buffer_record_disable);
  * @buffer: The ring buffer to enable writes
  *
  * Note, multiple disables will need the same number of enables
- * to truely enable the writing (much like preempt_disable).
+ * to truly enable the writing (much like preempt_disable).
  */
 void ring_buffer_record_enable(struct ring_buffer *buffer)
 {
@@ -2582,7 +2589,7 @@ EXPORT_SYMBOL_GPL(ring_buffer_record_disable_cpu);
  * @cpu: The CPU to enable.
  *
  * Note, multiple disables will need the same number of enables
- * to truely enable the writing (much like preempt_disable).
+ * to truly enable the writing (much like preempt_disable).
  */
 void ring_buffer_record_enable_cpu(struct ring_buffer *buffer, int cpu)
 {
@@ -2723,6 +2730,8 @@ static void rb_iter_reset(struct ring_buffer_iter *iter)
 		iter->read_stamp = cpu_buffer->read_stamp;
 	else
 		iter->read_stamp = iter->head_page->page->time_stamp;
+	iter->cache_reader_page = cpu_buffer->reader_page;
+	iter->cache_read = cpu_buffer->read;
 }
 
 /**
@@ -2834,7 +2843,7 @@ rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
 	int ret;
 
 	local_irq_save(flags);
-	__raw_spin_lock(&cpu_buffer->lock);
+	arch_spin_lock(&cpu_buffer->lock);
 
  again:
 	/*
@@ -2876,7 +2885,7 @@ rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
 	 * Splice the empty reader page into the list around the head.
 	 */
 	reader = rb_set_head_page(cpu_buffer);
-	cpu_buffer->reader_page->list.next = reader->list.next;
+	cpu_buffer->reader_page->list.next = rb_list_head(reader->list.next);
 	cpu_buffer->reader_page->list.prev = reader->list.prev;
 
 	/*
@@ -2913,7 +2922,7 @@ rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
 	 *
 	 * Now make the new head point back to the reader page.
 	 */
-	reader->list.next->prev = &cpu_buffer->reader_page->list;
+	rb_list_head(reader->list.next)->prev = &cpu_buffer->reader_page->list;
 	rb_inc_page(cpu_buffer, &cpu_buffer->head_page);
 
 	/* Finally update the reader page to the new head */
@@ -2923,7 +2932,7 @@ rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
 		goto again;
 
  out:
-	__raw_spin_unlock(&cpu_buffer->lock);
+	arch_spin_unlock(&cpu_buffer->lock);
 	local_irq_restore(flags);
 
 	return reader;
@@ -3067,13 +3076,22 @@ rb_iter_peek(struct ring_buffer_iter *iter, u64 *ts)
 	struct ring_buffer_event *event;
 	int nr_loops = 0;
 
-	if (ring_buffer_iter_empty(iter))
-		return NULL;
-
 	cpu_buffer = iter->cpu_buffer;
 	buffer = cpu_buffer->buffer;
 
+	/*
+	 * Check if someone performed a consuming read to
+	 * the buffer. A consuming read invalidates the iterator
+	 * and we need to reset the iterator in this case.
+	 */
+	if (unlikely(iter->cache_read != cpu_buffer->read ||
+		     iter->cache_reader_page != cpu_buffer->reader_page))
+		rb_iter_reset(iter);
+
  again:
+	if (ring_buffer_iter_empty(iter))
+		return NULL;
+
 	/*
 	 * We repeat when a timestamp is encountered.
 	 * We can get multiple timestamps by nested interrupts or also
@@ -3088,6 +3106,11 @@ rb_iter_peek(struct ring_buffer_iter *iter, u64 *ts)
 	if (rb_per_cpu_empty(cpu_buffer))
 		return NULL;
 
+	if (iter->head >= local_read(&iter->head_page->page->commit)) {
+		rb_inc_iter(iter);
+		goto again;
+	}
+
 	event = rb_iter_head_event(iter);
 
 	switch (event->type_len) {
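The new cache_read and cache_reader_page fields let rb_iter_peek() notice that a consuming read swapped the reader page out from under the iterator, and reset itself instead of walking stale pages. A small stand-alone model of that invalidation check follows; the types and names are hypothetical stand-ins, not the ring buffer's own:

#include <stdio.h>

struct buffer_state {
	unsigned long read;		/* bumped by every consuming read */
	void *reader_page;		/* swapped by the consuming-read path */
};

struct iter_state {
	unsigned long cache_read;
	void *cache_reader_page;
};

static void iter_reset(struct iter_state *it, const struct buffer_state *buf)
{
	it->cache_read = buf->read;
	it->cache_reader_page = buf->reader_page;
}

/* Returns 1 if the snapshot no longer matches the buffer, i.e. a consuming
 * read happened and the iterator had to be reset before reading on. */
static int iter_peek(struct iter_state *it, const struct buffer_state *buf)
{
	if (it->cache_read != buf->read ||
	    it->cache_reader_page != buf->reader_page) {
		iter_reset(it, buf);
		return 1;
	}
	return 0;
}

int main(void)
{
	char page_a, page_b;
	struct buffer_state buf = { .read = 0, .reader_page = &page_a };
	struct iter_state it;

	iter_reset(&it, &buf);
	printf("no consuming read: reset needed = %d\n", iter_peek(&it, &buf));

	buf.read++;			/* a consuming read happened */
	buf.reader_page = &page_b;
	printf("after consuming read: reset needed = %d\n", iter_peek(&it, &buf));
	return 0;
}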
@@ -3286,9 +3309,9 @@ ring_buffer_read_start(struct ring_buffer *buffer, int cpu)
 	synchronize_sched();
 
 	spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
-	__raw_spin_lock(&cpu_buffer->lock);
+	arch_spin_lock(&cpu_buffer->lock);
 	rb_iter_reset(iter);
-	__raw_spin_unlock(&cpu_buffer->lock);
+	arch_spin_unlock(&cpu_buffer->lock);
 	spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
 
 	return iter;
@@ -3408,11 +3431,11 @@ void ring_buffer_reset_cpu(struct ring_buffer *buffer, int cpu)
 	if (RB_WARN_ON(cpu_buffer, local_read(&cpu_buffer->committing)))
 		goto out;
 
-	__raw_spin_lock(&cpu_buffer->lock);
+	arch_spin_lock(&cpu_buffer->lock);
 
 	rb_reset_cpu(cpu_buffer);
 
-	__raw_spin_unlock(&cpu_buffer->lock);
+	arch_spin_unlock(&cpu_buffer->lock);
 
  out:
 	spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);