author    Thomas Gleixner <tglx@linutronix.de>  2010-02-21 14:17:22 -0500
committer Thomas Gleixner <tglx@linutronix.de>  2010-02-21 14:17:22 -0500
commit    5f854cfc024622e4aae14d7cf422f6ff86278688
tree      426e77c6f6e4939c80440bf1fabcb020e3ee145b /kernel/trace/ring_buffer.c
parent    cc24da0742870f152ddf1002aa39dfcd83f7cf9c
parent    4ec62b2b2e6bd7ddef7b6cea6e5db7b5578a6532

Forward to 2.6.33-rc8

Merge branch 'linus' into rt/head with a pile of conflicts.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Diffstat (limited to 'kernel/trace/ring_buffer.c')
-rw-r--r--  kernel/trace/ring_buffer.c | 168
1 file changed, 93 insertions(+), 75 deletions(-)
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index f780675b54cb..9e095ef33be6 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -201,8 +201,6 @@ int tracing_is_on(void)
 }
 EXPORT_SYMBOL_GPL(tracing_is_on);
 
-#include "trace.h"
-
 #define RB_EVNT_HDR_SIZE (offsetof(struct ring_buffer_event, array))
 #define RB_ALIGNMENT		4U
 #define RB_MAX_SMALL_DATA	(RB_ALIGNMENT * RINGBUF_TYPE_DATA_TYPE_LEN_MAX)
@@ -399,18 +397,21 @@ int ring_buffer_print_page_header(struct trace_seq *s)
 	int ret;
 
 	ret = trace_seq_printf(s, "\tfield: u64 timestamp;\t"
-			       "offset:0;\tsize:%u;\n",
-			       (unsigned int)sizeof(field.time_stamp));
+			       "offset:0;\tsize:%u;\tsigned:%u;\n",
+			       (unsigned int)sizeof(field.time_stamp),
+			       (unsigned int)is_signed_type(u64));
 
 	ret = trace_seq_printf(s, "\tfield: local_t commit;\t"
-			       "offset:%u;\tsize:%u;\n",
+			       "offset:%u;\tsize:%u;\tsigned:%u;\n",
 			       (unsigned int)offsetof(typeof(field), commit),
-			       (unsigned int)sizeof(field.commit));
+			       (unsigned int)sizeof(field.commit),
+			       (unsigned int)is_signed_type(long));
 
 	ret = trace_seq_printf(s, "\tfield: char data;\t"
-			       "offset:%u;\tsize:%u;\n",
+			       "offset:%u;\tsize:%u;\tsigned:%u;\n",
 			       (unsigned int)offsetof(typeof(field), data),
-			       (unsigned int)BUF_PAGE_SIZE);
+			       (unsigned int)BUF_PAGE_SIZE,
+			       (unsigned int)is_signed_type(char));
 
 	return ret;
 }
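
The new signed:%u fields come from the is_signed_type() macro. Its exact definition in this tree is not shown here, but it is assumed to be equivalent to the classic cast trick below, which works because (type)(-1) is negative for signed types and the maximum value for unsigned ones. A minimal userspace sketch:

#include <stdio.h>

/* Assumed-equivalent definition of the kernel macro used above. */
#define is_signed_type(type)	(((type)(-1)) < (type)1)

int main(void)
{
	printf("u64:  %u\n", (unsigned int)is_signed_type(unsigned long long));
	printf("long: %u\n", (unsigned int)is_signed_type(long));
	/* plain char signedness is implementation-defined, which is
	 * exactly why the format files need to report it */
	printf("char: %u\n", (unsigned int)is_signed_type(char));
	return 0;
}
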
@@ -421,8 +422,8 @@ int ring_buffer_print_page_header(struct trace_seq *s)
 struct ring_buffer_per_cpu {
 	int				cpu;
 	struct ring_buffer		*buffer;
-	atomic_spinlock_t		reader_lock;	/* serialize readers */
-	raw_spinlock_t			lock;
+	raw_spinlock_t			reader_lock;	/* serialize readers */
+	arch_spinlock_t			lock;
 	struct lock_class_key		lock_key;
 	struct list_head		*pages;
 	struct buffer_page		*head_page;	/* read from head */
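
These two changes track the 2.6.33 core rename (the old low-level raw_spinlock_t became arch_spinlock_t) together with this -rt tree's convention that its atomic_spinlock_t, a lock that never sleeps, maps back onto mainline's new raw_spinlock_t. A rough sketch of how the renamed pair is used together in this file; the struct and function here are illustrative, not the real ring_buffer_per_cpu:

#include <linux/spinlock.h>

struct two_locks {
	raw_spinlock_t	reader_lock;	/* lockdep-tracked, never sleeps */
	arch_spinlock_t	lock;		/* bare arch lock, no debug state */
};

static void two_locks_demo(struct two_locks *l)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&l->reader_lock, flags);
	arch_spin_lock(&l->lock);
	/* ... inner critical section, e.g. swapping the reader page ... */
	arch_spin_unlock(&l->lock);
	raw_spin_unlock_irqrestore(&l->reader_lock, flags);
}
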
@@ -463,6 +464,8 @@ struct ring_buffer_iter {
 	struct ring_buffer_per_cpu	*cpu_buffer;
 	unsigned long			head;
 	struct buffer_page		*head_page;
+	struct buffer_page		*cache_reader_page;
+	unsigned long			cache_read;
 	u64				read_stamp;
 };
 
@@ -485,7 +488,7 @@ struct ring_buffer_iter {
 /* Up this if you want to test the TIME_EXTENTS and normalization */
 #define DEBUG_SHIFT 0
 
-static inline u64 rb_time_stamp(struct ring_buffer *buffer, int cpu)
+static inline u64 rb_time_stamp(struct ring_buffer *buffer)
 {
 	/* shift to debug/test normalization and TIME_EXTENTS */
 	return buffer->clock() << DEBUG_SHIFT;
@@ -496,7 +499,7 @@ u64 ring_buffer_time_stamp(struct ring_buffer *buffer, int cpu)
 	u64 time;
 
 	preempt_disable_notrace();
-	time = rb_time_stamp(buffer, cpu);
+	time = rb_time_stamp(buffer);
 	preempt_enable_no_resched_notrace();
 
 	return time;
@@ -601,7 +604,7 @@ static struct list_head *rb_list_head(struct list_head *list)
 }
 
 /*
- * rb_is_head_page - test if the give page is the head page
+ * rb_is_head_page - test if the given page is the head page
  *
  * Because the reader may move the head_page pointer, we can
  * not trust what the head page is (it may be pointing to
@@ -995,9 +998,9 @@ rb_allocate_cpu_buffer(struct ring_buffer *buffer, int cpu)
 
 	cpu_buffer->cpu = cpu;
 	cpu_buffer->buffer = buffer;
-	atomic_spin_lock_init(&cpu_buffer->reader_lock);
+	raw_spin_lock_init(&cpu_buffer->reader_lock);
 	lockdep_set_class(&cpu_buffer->reader_lock, buffer->reader_lock_key);
-	cpu_buffer->lock = (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
+	cpu_buffer->lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
 
 	bpage = kzalloc_node(ALIGN(sizeof(*bpage), cache_line_size()),
 			     GFP_KERNEL, cpu_to_node(cpu));
@@ -1188,13 +1191,12 @@ static void rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer);
 static void
 rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned nr_pages)
 {
-	struct buffer_page *bpage;
+	struct buffer_page *bpage, *tmp;
 	struct list_head *p;
+	LIST_HEAD(tofree);
 	unsigned i;
 
-	atomic_inc(&cpu_buffer->record_disabled);
-	synchronize_sched();
-
+	raw_spin_lock_irq(&cpu_buffer->reader_lock);
 	rb_head_page_deactivate(cpu_buffer);
 
 	for (i = 0; i < nr_pages; i++) {
@@ -1202,17 +1204,21 @@ rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned nr_pages)
 			return;
 		p = cpu_buffer->pages->next;
 		bpage = list_entry(p, struct buffer_page, list);
-		list_del_init(&bpage->list);
-		free_buffer_page(bpage);
+		list_del(&bpage->list);
+		list_add(&bpage->list, &tofree);
 	}
 	if (RB_WARN_ON(cpu_buffer, list_empty(cpu_buffer->pages)))
 		return;
 
 	rb_reset_cpu(cpu_buffer);
-
 	rb_check_pages(cpu_buffer);
 
-	atomic_dec(&cpu_buffer->record_disabled);
+	raw_spin_unlock_irq(&cpu_buffer->reader_lock);
+
+	list_for_each_entry_safe(bpage, tmp, &tofree, list) {
+		list_del_init(&bpage->list);
+		free_buffer_page(bpage);
+	}
 
 }
 
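
The rewritten rb_remove_pages() illustrates a common pattern: pages are only unlinked onto a local tofree list while the (now raw, non-sleeping) reader_lock is held, and the actual freeing happens after the lock is dropped, since freeing memory may take sleeping locks on -rt. A generic sketch of the pattern, with illustrative names:

#include <linux/list.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

struct item {
	struct list_head list;
};

static void remove_items(raw_spinlock_t *lock, struct list_head *pool, int nr)
{
	struct item *it, *tmp;
	LIST_HEAD(tofree);

	raw_spin_lock_irq(lock);
	while (nr-- && !list_empty(pool)) {
		it = list_first_entry(pool, struct item, list);
		list_del(&it->list);		/* unlink only */
		list_add(&it->list, &tofree);	/* park on local list */
	}
	raw_spin_unlock_irq(lock);

	/* free with the raw lock dropped */
	list_for_each_entry_safe(it, tmp, &tofree, list) {
		list_del_init(&it->list);
		kfree(it);
	}
}
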
@@ -1224,10 +1230,7 @@ rb_insert_pages(struct ring_buffer_per_cpu *cpu_buffer,
 	struct list_head *p;
 	unsigned i;
 
-	atomic_inc(&cpu_buffer->record_disabled);
-	synchronize_sched();
-
-	atomic_spin_lock_irq(&cpu_buffer->reader_lock);
+	raw_spin_lock_irq(&cpu_buffer->reader_lock);
 	rb_head_page_deactivate(cpu_buffer);
 
 	for (i = 0; i < nr_pages; i++) {
@@ -1239,11 +1242,9 @@ rb_insert_pages(struct ring_buffer_per_cpu *cpu_buffer,
 		list_add_tail(&bpage->list, cpu_buffer->pages);
 	}
 	rb_reset_cpu(cpu_buffer);
-	atomic_spin_unlock_irq(&cpu_buffer->reader_lock);
-
 	rb_check_pages(cpu_buffer);
 
-	atomic_dec(&cpu_buffer->record_disabled);
+	raw_spin_unlock_irq(&cpu_buffer->reader_lock);
 }
 
 /**
@@ -1251,11 +1252,6 @@ rb_insert_pages(struct ring_buffer_per_cpu *cpu_buffer,
  * @buffer: the buffer to resize.
  * @size: the new size.
  *
- * The tracer is responsible for making sure that the buffer is
- * not being used while changing the size.
- * Note: We may be able to change the above requirement by using
- * RCU synchronizations.
- *
  * Minimum size is 2 * BUF_PAGE_SIZE.
  *
  * Returns -1 on failure.
@@ -1287,6 +1283,11 @@ int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size)
 	if (size == buffer_size)
 		return size;
 
+	atomic_inc(&buffer->record_disabled);
+
+	/* Make sure all writers are done with this buffer. */
+	synchronize_sched();
+
 	mutex_lock(&buffer->mutex);
 	get_online_cpus();
 
@@ -1349,6 +1350,8 @@ int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size)
 	put_online_cpus();
 	mutex_unlock(&buffer->mutex);
 
+	atomic_dec(&buffer->record_disabled);
+
 	return size;
 
  free_pages:
@@ -1358,6 +1361,7 @@ int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size)
 	}
 	put_online_cpus();
 	mutex_unlock(&buffer->mutex);
+	atomic_dec(&buffer->record_disabled);
 	return -ENOMEM;
 
 	/*
@@ -1367,6 +1371,7 @@ int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size)
  out_fail:
 	put_online_cpus();
 	mutex_unlock(&buffer->mutex);
+	atomic_dec(&buffer->record_disabled);
 	return -1;
 }
 EXPORT_SYMBOL_GPL(ring_buffer_resize);
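
Taken together, these resize hunks move writer quiescence up into ring_buffer_resize() itself: recording is disabled once and synchronize_sched() waits out any writer already inside a trace before the page lists are touched, and every exit path (success, -ENOMEM, out_fail) must re-enable recording. A simplified sketch of the shape, with error handling elided:

/* Sketch only; the real function also validates sizes and walks
 * each per-CPU buffer under get_online_cpus(). */
int resize_sketch(struct ring_buffer *buffer, unsigned long size)
{
	atomic_inc(&buffer->record_disabled);	/* stop new writers */
	synchronize_sched();			/* wait out current ones */

	mutex_lock(&buffer->mutex);
	/* ... add or remove pages on each per-CPU buffer ... */
	mutex_unlock(&buffer->mutex);

	atomic_dec(&buffer->record_disabled);	/* every path re-enables */
	return size;
}
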
@@ -1787,9 +1792,9 @@ rb_reset_tail(struct ring_buffer_per_cpu *cpu_buffer,
 static struct ring_buffer_event *
 rb_move_tail(struct ring_buffer_per_cpu *cpu_buffer,
 	     unsigned long length, unsigned long tail,
-	     struct buffer_page *commit_page,
 	     struct buffer_page *tail_page, u64 *ts)
 {
+	struct buffer_page *commit_page = cpu_buffer->commit_page;
 	struct ring_buffer *buffer = cpu_buffer->buffer;
 	struct buffer_page *next_page;
 	int ret;
@@ -1870,7 +1875,7 @@ rb_move_tail(struct ring_buffer_per_cpu *cpu_buffer,
 		 * Nested commits always have zero deltas, so
 		 * just reread the time stamp
 		 */
-		*ts = rb_time_stamp(buffer, cpu_buffer->cpu);
+		*ts = rb_time_stamp(buffer);
 		next_page->page->time_stamp = *ts;
 	}
 
@@ -1892,13 +1897,10 @@ static struct ring_buffer_event *
 __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
 		  unsigned type, unsigned long length, u64 *ts)
 {
-	struct buffer_page *tail_page, *commit_page;
+	struct buffer_page *tail_page;
 	struct ring_buffer_event *event;
 	unsigned long tail, write;
 
-	commit_page = cpu_buffer->commit_page;
-	/* we just need to protect against interrupts */
-	barrier();
 	tail_page = cpu_buffer->tail_page;
 	write = local_add_return(length, &tail_page->write);
 
@@ -1909,7 +1911,7 @@ __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
 	/* See if we shot pass the end of this buffer page */
 	if (write > BUF_PAGE_SIZE)
 		return rb_move_tail(cpu_buffer, length, tail,
-				    commit_page, tail_page, ts);
+				    tail_page, ts);
 
 	/* We reserved something on the buffer */
 
@@ -2113,7 +2115,7 @@ rb_reserve_next_event(struct ring_buffer *buffer,
 	if (RB_WARN_ON(cpu_buffer, ++nr_loops > 1000))
 		goto out_fail;
 
-	ts = rb_time_stamp(cpu_buffer->buffer, cpu_buffer->cpu);
+	ts = rb_time_stamp(cpu_buffer->buffer);
 
 	/*
 	 * Only the first commit can update the timestamp.
@@ -2683,7 +2685,7 @@ unsigned long ring_buffer_entries(struct ring_buffer *buffer)
 EXPORT_SYMBOL_GPL(ring_buffer_entries);
 
 /**
- * ring_buffer_overrun_cpu - get the number of overruns in buffer
+ * ring_buffer_overruns - get the number of overruns in buffer
  * @buffer: The ring buffer
  *
  * Returns the total number of overruns in the ring buffer
@@ -2723,6 +2725,8 @@ static void rb_iter_reset(struct ring_buffer_iter *iter)
 		iter->read_stamp = cpu_buffer->read_stamp;
 	else
 		iter->read_stamp = iter->head_page->page->time_stamp;
+	iter->cache_reader_page = cpu_buffer->reader_page;
+	iter->cache_read = cpu_buffer->read;
 }
 
 /**
@@ -2742,9 +2746,9 @@ void ring_buffer_iter_reset(struct ring_buffer_iter *iter)
 
 	cpu_buffer = iter->cpu_buffer;
 
-	atomic_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
+	raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
 	rb_iter_reset(iter);
-	atomic_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
+	raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
 }
 EXPORT_SYMBOL_GPL(ring_buffer_iter_reset);
 
@@ -2834,7 +2838,7 @@ rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
 	int ret;
 
 	local_irq_save(flags);
-	__raw_spin_lock(&cpu_buffer->lock);
+	arch_spin_lock(&cpu_buffer->lock);
 
  again:
 	/*
@@ -2876,7 +2880,7 @@ rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
 	 * Splice the empty reader page into the list around the head.
 	 */
 	reader = rb_set_head_page(cpu_buffer);
-	cpu_buffer->reader_page->list.next = reader->list.next;
+	cpu_buffer->reader_page->list.next = rb_list_head(reader->list.next);
 	cpu_buffer->reader_page->list.prev = reader->list.prev;
 
 	/*
@@ -2913,7 +2917,7 @@ rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
 	 *
 	 * Now make the new head point back to the reader page.
 	 */
-	reader->list.next->prev = &cpu_buffer->reader_page->list;
+	rb_list_head(reader->list.next)->prev = &cpu_buffer->reader_page->list;
 	rb_inc_page(cpu_buffer, &cpu_buffer->head_page);
 
 	/* Finally update the reader page to the new head */
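
The two rb_list_head() fixes matter because the lockless head-page algorithm stores HEAD/UPDATE flags in the low bits of ->next pointers; splicing the reader page without masking them would follow (or publish) a tagged pointer. The helper is defined earlier in this file; it is assumed to look essentially like this:

#define RB_PAGE_NORMAL	0UL
#define RB_PAGE_HEAD	1UL
#define RB_PAGE_UPDATE	2UL
#define RB_FLAG_MASK	3UL

static struct list_head *rb_list_head(struct list_head *list)
{
	unsigned long val = (unsigned long)list;

	/* list_head pointers are at least 4-byte aligned, so the two
	 * low bits are free to carry the HEAD/UPDATE flags */
	return (struct list_head *)(val & ~RB_FLAG_MASK);
}
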
@@ -2923,7 +2927,7 @@ rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
 		goto again;
 
  out:
-	__raw_spin_unlock(&cpu_buffer->lock);
+	arch_spin_unlock(&cpu_buffer->lock);
 	local_irq_restore(flags);
 
 	return reader;
@@ -3067,13 +3071,22 @@ rb_iter_peek(struct ring_buffer_iter *iter, u64 *ts)
 	struct ring_buffer_event *event;
 	int nr_loops = 0;
 
-	if (ring_buffer_iter_empty(iter))
-		return NULL;
-
 	cpu_buffer = iter->cpu_buffer;
 	buffer = cpu_buffer->buffer;
 
+	/*
+	 * Check if someone performed a consuming read to
+	 * the buffer. A consuming read invalidates the iterator
+	 * and we need to reset the iterator in this case.
+	 */
+	if (unlikely(iter->cache_read != cpu_buffer->read ||
+		     iter->cache_reader_page != cpu_buffer->reader_page))
+		rb_iter_reset(iter);
+
  again:
+	if (ring_buffer_iter_empty(iter))
+		return NULL;
+
 	/*
 	 * We repeat when a timestamp is encountered.
 	 * We can get multiple timestamps by nested interrupts or also
@@ -3088,6 +3101,11 @@ rb_iter_peek(struct ring_buffer_iter *iter, u64 *ts)
 	if (rb_per_cpu_empty(cpu_buffer))
 		return NULL;
 
+	if (iter->head >= local_read(&iter->head_page->page->commit)) {
+		rb_inc_iter(iter);
+		goto again;
+	}
+
 	event = rb_iter_head_event(iter);
 
 	switch (event->type_len) {
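
The new cache_read / cache_reader_page fields implement a snapshot-and-validate scheme: rb_iter_reset() records where the consumer stood, and rb_iter_peek() compares the snapshot against the live state, resetting the iterator instead of walking pages that a consuming read may have recycled. A stripped-down sketch of the idea, with illustrative names:

struct consumer_state {
	unsigned long	read;		/* bumped by every consuming read */
	void		*reader_page;	/* swapped when the reader page moves */
};

struct iter_state {
	unsigned long	cache_read;
	void		*cache_reader_page;
};

static int iter_still_valid(const struct iter_state *it,
			    const struct consumer_state *c)
{
	/* any consuming read changes one of these, invalidating the
	 * iterator's notion of where the head page is */
	return it->cache_read == c->read &&
	       it->cache_reader_page == c->reader_page;
}
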
@@ -3164,12 +3182,12 @@ ring_buffer_peek(struct ring_buffer *buffer, int cpu, u64 *ts)
  again:
 	local_irq_save(flags);
 	if (dolock)
-		atomic_spin_lock(&cpu_buffer->reader_lock);
+		raw_spin_lock(&cpu_buffer->reader_lock);
 	event = rb_buffer_peek(cpu_buffer, ts);
 	if (event && event->type_len == RINGBUF_TYPE_PADDING)
 		rb_advance_reader(cpu_buffer);
 	if (dolock)
-		atomic_spin_unlock(&cpu_buffer->reader_lock);
+		raw_spin_unlock(&cpu_buffer->reader_lock);
 	local_irq_restore(flags);
 
 	if (event && event->type_len == RINGBUF_TYPE_PADDING)
@@ -3194,9 +3212,9 @@ ring_buffer_iter_peek(struct ring_buffer_iter *iter, u64 *ts)
 	unsigned long flags;
 
  again:
-	atomic_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
+	raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
 	event = rb_iter_peek(iter, ts);
-	atomic_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
+	raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
 
 	if (event && event->type_len == RINGBUF_TYPE_PADDING)
 		goto again;
@@ -3232,14 +3250,14 @@ ring_buffer_consume(struct ring_buffer *buffer, int cpu, u64 *ts)
 	cpu_buffer = buffer->buffers[cpu];
 	local_irq_save(flags);
 	if (dolock)
-		atomic_spin_lock(&cpu_buffer->reader_lock);
+		raw_spin_lock(&cpu_buffer->reader_lock);
 
 	event = rb_buffer_peek(cpu_buffer, ts);
 	if (event)
 		rb_advance_reader(cpu_buffer);
 
 	if (dolock)
-		atomic_spin_unlock(&cpu_buffer->reader_lock);
+		raw_spin_unlock(&cpu_buffer->reader_lock);
 	local_irq_restore(flags);
 
  out:
@@ -3285,11 +3303,11 @@ ring_buffer_read_start(struct ring_buffer *buffer, int cpu)
 	atomic_inc(&cpu_buffer->record_disabled);
 	synchronize_sched();
 
-	atomic_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
-	__raw_spin_lock(&cpu_buffer->lock);
+	raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
+	arch_spin_lock(&cpu_buffer->lock);
 	rb_iter_reset(iter);
-	__raw_spin_unlock(&cpu_buffer->lock);
-	atomic_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
+	arch_spin_unlock(&cpu_buffer->lock);
+	raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
 
 	return iter;
 }
@@ -3326,7 +3344,7 @@ ring_buffer_read(struct ring_buffer_iter *iter, u64 *ts)
 	struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
 	unsigned long flags;
 
-	atomic_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
+	raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
  again:
 	event = rb_iter_peek(iter, ts);
 	if (!event)
@@ -3337,7 +3355,7 @@ ring_buffer_read(struct ring_buffer_iter *iter, u64 *ts)
 
 	rb_advance_iter(iter);
  out:
-	atomic_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
+	raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
 
 	return event;
 }
@@ -3403,19 +3421,19 @@ void ring_buffer_reset_cpu(struct ring_buffer *buffer, int cpu)
 
 	atomic_inc(&cpu_buffer->record_disabled);
 
-	atomic_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
+	raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
 
 	if (RB_WARN_ON(cpu_buffer, local_read(&cpu_buffer->committing)))
 		goto out;
 
-	__raw_spin_lock(&cpu_buffer->lock);
+	arch_spin_lock(&cpu_buffer->lock);
 
 	rb_reset_cpu(cpu_buffer);
 
-	__raw_spin_unlock(&cpu_buffer->lock);
+	arch_spin_unlock(&cpu_buffer->lock);
 
  out:
-	atomic_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
+	raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
 
 	atomic_dec(&cpu_buffer->record_disabled);
 }
@@ -3453,10 +3471,10 @@ int ring_buffer_empty(struct ring_buffer *buffer)
 	cpu_buffer = buffer->buffers[cpu];
 	local_irq_save(flags);
 	if (dolock)
-		atomic_spin_lock(&cpu_buffer->reader_lock);
+		raw_spin_lock(&cpu_buffer->reader_lock);
 	ret = rb_per_cpu_empty(cpu_buffer);
 	if (dolock)
-		atomic_spin_unlock(&cpu_buffer->reader_lock);
+		raw_spin_unlock(&cpu_buffer->reader_lock);
 	local_irq_restore(flags);
 
 	if (!ret)
@@ -3487,10 +3505,10 @@ int ring_buffer_empty_cpu(struct ring_buffer *buffer, int cpu)
 	cpu_buffer = buffer->buffers[cpu];
 	local_irq_save(flags);
 	if (dolock)
-		atomic_spin_lock(&cpu_buffer->reader_lock);
+		raw_spin_lock(&cpu_buffer->reader_lock);
 	ret = rb_per_cpu_empty(cpu_buffer);
 	if (dolock)
-		atomic_spin_unlock(&cpu_buffer->reader_lock);
+		raw_spin_unlock(&cpu_buffer->reader_lock);
 	local_irq_restore(flags);
 
 	return ret;
@@ -3685,7 +3703,7 @@ int ring_buffer_read_page(struct ring_buffer *buffer,
 	if (!bpage)
 		goto out;
 
-	atomic_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
+	raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
 
 	reader = rb_get_reader_page(cpu_buffer);
 	if (!reader)
@@ -3760,7 +3778,7 @@ int ring_buffer_read_page(struct ring_buffer *buffer,
 	ret = read;
 
  out_unlock:
-	atomic_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
+	raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
 
  out:
 	return ret;