path: root/kernel/trace/ring_buffer.c
Diffstat (limited to 'kernel/trace/ring_buffer.c')
-rw-r--r--  kernel/trace/ring_buffer.c | 98
1 file changed, 55 insertions(+), 43 deletions(-)
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index 5dd017fea6f5..0287f9f52f5a 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -20,6 +20,7 @@
 #include <linux/cpu.h>
 #include <linux/fs.h>
 
+#include <asm/local.h>
 #include "trace.h"
 
 /*
@@ -397,18 +398,21 @@ int ring_buffer_print_page_header(struct trace_seq *s)
 	int ret;
 
 	ret = trace_seq_printf(s, "\tfield: u64 timestamp;\t"
-			       "offset:0;\tsize:%u;\n",
-			       (unsigned int)sizeof(field.time_stamp));
+			       "offset:0;\tsize:%u;\tsigned:%u;\n",
+			       (unsigned int)sizeof(field.time_stamp),
+			       (unsigned int)is_signed_type(u64));
 
 	ret = trace_seq_printf(s, "\tfield: local_t commit;\t"
-			       "offset:%u;\tsize:%u;\n",
+			       "offset:%u;\tsize:%u;\tsigned:%u;\n",
 			       (unsigned int)offsetof(typeof(field), commit),
-			       (unsigned int)sizeof(field.commit));
+			       (unsigned int)sizeof(field.commit),
+			       (unsigned int)is_signed_type(long));
 
 	ret = trace_seq_printf(s, "\tfield: char data;\t"
-			       "offset:%u;\tsize:%u;\n",
+			       "offset:%u;\tsize:%u;\tsigned:%u;\n",
 			       (unsigned int)offsetof(typeof(field), data),
-			       (unsigned int)BUF_PAGE_SIZE);
+			       (unsigned int)BUF_PAGE_SIZE,
+			       (unsigned int)is_signed_type(char));
 
 	return ret;
 }
@@ -420,7 +424,7 @@ struct ring_buffer_per_cpu {
 	int cpu;
 	struct ring_buffer *buffer;
 	spinlock_t reader_lock;	/* serialize readers */
-	raw_spinlock_t lock;
+	arch_spinlock_t lock;
 	struct lock_class_key lock_key;
 	struct list_head *pages;
 	struct buffer_page *head_page;	/* read from head */
@@ -461,6 +465,8 @@ struct ring_buffer_iter {
 	struct ring_buffer_per_cpu *cpu_buffer;
 	unsigned long head;
 	struct buffer_page *head_page;
+	struct buffer_page *cache_reader_page;
+	unsigned long cache_read;
 	u64 read_stamp;
 };
 
@@ -995,7 +1001,7 @@ rb_allocate_cpu_buffer(struct ring_buffer *buffer, int cpu)
 	cpu_buffer->buffer = buffer;
 	spin_lock_init(&cpu_buffer->reader_lock);
 	lockdep_set_class(&cpu_buffer->reader_lock, buffer->reader_lock_key);
-	cpu_buffer->lock = (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
+	cpu_buffer->lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
 
 	bpage = kzalloc_node(ALIGN(sizeof(*bpage), cache_line_size()),
 			    GFP_KERNEL, cpu_to_node(cpu));
@@ -1190,9 +1196,6 @@ rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned nr_pages)
 	struct list_head *p;
 	unsigned i;
 
-	atomic_inc(&cpu_buffer->record_disabled);
-	synchronize_sched();
-
 	spin_lock_irq(&cpu_buffer->reader_lock);
 	rb_head_page_deactivate(cpu_buffer);
 
@@ -1208,12 +1211,9 @@ rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned nr_pages)
 		return;
 
 	rb_reset_cpu(cpu_buffer);
-	spin_unlock_irq(&cpu_buffer->reader_lock);
-
 	rb_check_pages(cpu_buffer);
 
-	atomic_dec(&cpu_buffer->record_disabled);
-
+	spin_unlock_irq(&cpu_buffer->reader_lock);
 }
 
 static void
@@ -1224,9 +1224,6 @@ rb_insert_pages(struct ring_buffer_per_cpu *cpu_buffer,
 	struct list_head *p;
 	unsigned i;
 
-	atomic_inc(&cpu_buffer->record_disabled);
-	synchronize_sched();
-
 	spin_lock_irq(&cpu_buffer->reader_lock);
 	rb_head_page_deactivate(cpu_buffer);
 
@@ -1239,11 +1236,9 @@ rb_insert_pages(struct ring_buffer_per_cpu *cpu_buffer,
 		list_add_tail(&bpage->list, cpu_buffer->pages);
 	}
 	rb_reset_cpu(cpu_buffer);
-	spin_unlock_irq(&cpu_buffer->reader_lock);
-
 	rb_check_pages(cpu_buffer);
 
-	atomic_dec(&cpu_buffer->record_disabled);
+	spin_unlock_irq(&cpu_buffer->reader_lock);
 }
 
 /**
@@ -1251,11 +1246,6 @@ rb_insert_pages(struct ring_buffer_per_cpu *cpu_buffer,
  * @buffer: the buffer to resize.
  * @size: the new size.
  *
- * The tracer is responsible for making sure that the buffer is
- * not being used while changing the size.
- * Note: We may be able to change the above requirement by using
- * RCU synchronizations.
- *
  * Minimum size is 2 * BUF_PAGE_SIZE.
  *
  * Returns -1 on failure.
@@ -1287,6 +1277,11 @@ int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size)
 	if (size == buffer_size)
 		return size;
 
+	atomic_inc(&buffer->record_disabled);
+
+	/* Make sure all writers are done with this buffer. */
+	synchronize_sched();
+
 	mutex_lock(&buffer->mutex);
 	get_online_cpus();
 
@@ -1349,6 +1344,8 @@ int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size)
 	put_online_cpus();
 	mutex_unlock(&buffer->mutex);
 
+	atomic_dec(&buffer->record_disabled);
+
 	return size;
 
  free_pages:
@@ -1358,6 +1355,7 @@ int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size)
 	}
 	put_online_cpus();
 	mutex_unlock(&buffer->mutex);
+	atomic_dec(&buffer->record_disabled);
 	return -ENOMEM;
 
 	/*
@@ -1367,6 +1365,7 @@ int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size)
  out_fail:
 	put_online_cpus();
 	mutex_unlock(&buffer->mutex);
+	atomic_dec(&buffer->record_disabled);
 	return -1;
 }
 EXPORT_SYMBOL_GPL(ring_buffer_resize);
@@ -1787,9 +1786,9 @@ rb_reset_tail(struct ring_buffer_per_cpu *cpu_buffer,
 static struct ring_buffer_event *
 rb_move_tail(struct ring_buffer_per_cpu *cpu_buffer,
 	     unsigned long length, unsigned long tail,
-	     struct buffer_page *commit_page,
 	     struct buffer_page *tail_page, u64 *ts)
 {
+	struct buffer_page *commit_page = cpu_buffer->commit_page;
 	struct ring_buffer *buffer = cpu_buffer->buffer;
 	struct buffer_page *next_page;
 	int ret;
@@ -1892,13 +1891,10 @@ static struct ring_buffer_event *
 __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
 		  unsigned type, unsigned long length, u64 *ts)
 {
-	struct buffer_page *tail_page, *commit_page;
+	struct buffer_page *tail_page;
 	struct ring_buffer_event *event;
 	unsigned long tail, write;
 
-	commit_page = cpu_buffer->commit_page;
-	/* we just need to protect against interrupts */
-	barrier();
 	tail_page = cpu_buffer->tail_page;
 	write = local_add_return(length, &tail_page->write);
 
@@ -1909,7 +1905,7 @@ __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
 	/* See if we shot pass the end of this buffer page */
 	if (write > BUF_PAGE_SIZE)
 		return rb_move_tail(cpu_buffer, length, tail,
-				    commit_page, tail_page, ts);
+				    tail_page, ts);
 
 	/* We reserved something on the buffer */
 
@@ -2723,6 +2719,8 @@ static void rb_iter_reset(struct ring_buffer_iter *iter)
 		iter->read_stamp = cpu_buffer->read_stamp;
 	else
 		iter->read_stamp = iter->head_page->page->time_stamp;
+	iter->cache_reader_page = cpu_buffer->reader_page;
+	iter->cache_read = cpu_buffer->read;
 }
 
 /**
@@ -2834,7 +2832,7 @@ rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
 	int ret;
 
 	local_irq_save(flags);
-	__raw_spin_lock(&cpu_buffer->lock);
+	arch_spin_lock(&cpu_buffer->lock);
 
  again:
 	/*
@@ -2876,7 +2874,7 @@ rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
 	 * Splice the empty reader page into the list around the head.
 	 */
 	reader = rb_set_head_page(cpu_buffer);
-	cpu_buffer->reader_page->list.next = reader->list.next;
+	cpu_buffer->reader_page->list.next = rb_list_head(reader->list.next);
 	cpu_buffer->reader_page->list.prev = reader->list.prev;
 
 	/*
@@ -2913,7 +2911,7 @@ rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
 	 *
 	 * Now make the new head point back to the reader page.
 	 */
-	reader->list.next->prev = &cpu_buffer->reader_page->list;
+	rb_list_head(reader->list.next)->prev = &cpu_buffer->reader_page->list;
 	rb_inc_page(cpu_buffer, &cpu_buffer->head_page);
 
 	/* Finally update the reader page to the new head */
@@ -2923,7 +2921,7 @@ rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
 		goto again;
 
  out:
-	__raw_spin_unlock(&cpu_buffer->lock);
+	arch_spin_unlock(&cpu_buffer->lock);
 	local_irq_restore(flags);
 
 	return reader;
@@ -3067,13 +3065,22 @@ rb_iter_peek(struct ring_buffer_iter *iter, u64 *ts)
 	struct ring_buffer_event *event;
 	int nr_loops = 0;
 
-	if (ring_buffer_iter_empty(iter))
-		return NULL;
-
 	cpu_buffer = iter->cpu_buffer;
 	buffer = cpu_buffer->buffer;
 
+	/*
+	 * Check if someone performed a consuming read to
+	 * the buffer. A consuming read invalidates the iterator
+	 * and we need to reset the iterator in this case.
+	 */
+	if (unlikely(iter->cache_read != cpu_buffer->read ||
+		     iter->cache_reader_page != cpu_buffer->reader_page))
+		rb_iter_reset(iter);
+
  again:
+	if (ring_buffer_iter_empty(iter))
+		return NULL;
+
 	/*
 	 * We repeat when a timestamp is encountered.
 	 * We can get multiple timestamps by nested interrupts or also
@@ -3088,6 +3095,11 @@ rb_iter_peek(struct ring_buffer_iter *iter, u64 *ts)
 	if (rb_per_cpu_empty(cpu_buffer))
 		return NULL;
 
+	if (iter->head >= local_read(&iter->head_page->page->commit)) {
+		rb_inc_iter(iter);
+		goto again;
+	}
+
 	event = rb_iter_head_event(iter);
 
 	switch (event->type_len) {
@@ -3286,9 +3298,9 @@ ring_buffer_read_start(struct ring_buffer *buffer, int cpu)
 	synchronize_sched();
 
 	spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
-	__raw_spin_lock(&cpu_buffer->lock);
+	arch_spin_lock(&cpu_buffer->lock);
 	rb_iter_reset(iter);
-	__raw_spin_unlock(&cpu_buffer->lock);
+	arch_spin_unlock(&cpu_buffer->lock);
 	spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
 
 	return iter;
@@ -3408,11 +3420,11 @@ void ring_buffer_reset_cpu(struct ring_buffer *buffer, int cpu)
 	if (RB_WARN_ON(cpu_buffer, local_read(&cpu_buffer->committing)))
 		goto out;
 
-	__raw_spin_lock(&cpu_buffer->lock);
+	arch_spin_lock(&cpu_buffer->lock);
 
 	rb_reset_cpu(cpu_buffer);
 
-	__raw_spin_unlock(&cpu_buffer->lock);
+	arch_spin_unlock(&cpu_buffer->lock);
 
  out:
 	spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
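
Note on the signed:%u fields added in the first hunk: is_signed_type() casts -1 to the given type and tests whether the result compares below 1, yielding 1 for signed types and 0 for unsigned ones at compile time. A minimal stand-alone sketch of that idea follows (illustrative only; the macro body below is an assumption for demonstration, not quoted from a kernel header):

	#include <stdio.h>

	/* 1 if `type` is signed, 0 if it is unsigned */
	#define is_signed_type(type) (((type)(-1)) < (type)1)

	int main(void)
	{
		printf("u64:  %u\n", (unsigned int)is_signed_type(unsigned long long));
		printf("long: %u\n", (unsigned int)is_signed_type(long));
		/* char signedness is implementation-defined, so this prints 0 or 1 */
		printf("char: %u\n", (unsigned int)is_signed_type(char));
		return 0;
	}

Compiled with any C compiler, this prints 0 for the u64 stand-in, 1 for long, and a platform-dependent value for char, matching the values the patched ring_buffer_print_page_header() reports for the page header fields.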