Diffstat (limited to 'kernel/trace/ring_buffer.c')
-rw-r--r--	kernel/trace/ring_buffer.c	81
1 file changed, 38 insertions(+), 43 deletions(-)
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index d4ff01970547..2326b04c95c4 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -397,18 +397,21 @@ int ring_buffer_print_page_header(struct trace_seq *s)
 	int ret;

 	ret = trace_seq_printf(s, "\tfield: u64 timestamp;\t"
-			       "offset:0;\tsize:%u;\n",
-			       (unsigned int)sizeof(field.time_stamp));
+			       "offset:0;\tsize:%u;\tsigned:%u;\n",
+			       (unsigned int)sizeof(field.time_stamp),
+			       (unsigned int)is_signed_type(u64));

 	ret = trace_seq_printf(s, "\tfield: local_t commit;\t"
-			       "offset:%u;\tsize:%u;\n",
+			       "offset:%u;\tsize:%u;\tsigned:%u;\n",
 			       (unsigned int)offsetof(typeof(field), commit),
-			       (unsigned int)sizeof(field.commit));
+			       (unsigned int)sizeof(field.commit),
+			       (unsigned int)is_signed_type(long));

 	ret = trace_seq_printf(s, "\tfield: char data;\t"
-			       "offset:%u;\tsize:%u;\n",
+			       "offset:%u;\tsize:%u;\tsigned:%u;\n",
 			       (unsigned int)offsetof(typeof(field), data),
-			       (unsigned int)BUF_PAGE_SIZE);
+			       (unsigned int)BUF_PAGE_SIZE,
+			       (unsigned int)is_signed_type(char));

 	return ret;
 }
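The page-header format reported to user space now carries a signed:%u attribute for each field, matching the signed information emitted for event fields. is_signed_type() is not defined in this file; a minimal sketch of the usual kernel idiom (an assumption here, not part of this diff) is:

	/* 1 if 'type' is a signed type, 0 if unsigned (sketch of the common idiom) */
	#define is_signed_type(type)	(((type)(-1)) < (type)1)

With that definition is_signed_type(u64) evaluates to 0, is_signed_type(long) to 1, and is_signed_type(char) to whatever the architecture's plain char happens to be.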
@@ -420,7 +423,7 @@ struct ring_buffer_per_cpu {
 	int cpu;
 	struct ring_buffer *buffer;
 	spinlock_t reader_lock;	/* serialize readers */
-	raw_spinlock_t lock;
+	arch_spinlock_t lock;
 	struct lock_class_key lock_key;
 	struct list_head *pages;
 	struct buffer_page *head_page;	/* read from head */
@@ -483,7 +486,7 @@ struct ring_buffer_iter {
 /* Up this if you want to test the TIME_EXTENTS and normalization */
 #define DEBUG_SHIFT 0

-static inline u64 rb_time_stamp(struct ring_buffer *buffer, int cpu)
+static inline u64 rb_time_stamp(struct ring_buffer *buffer)
 {
 	/* shift to debug/test normalization and TIME_EXTENTS */
 	return buffer->clock() << DEBUG_SHIFT;
@@ -494,7 +497,7 @@ u64 ring_buffer_time_stamp(struct ring_buffer *buffer, int cpu)
 	u64 time;

 	preempt_disable_notrace();
-	time = rb_time_stamp(buffer, cpu);
+	time = rb_time_stamp(buffer);
 	preempt_enable_no_resched_notrace();

 	return time;
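The cpu argument to rb_time_stamp() was never used, so it is dropped and every caller (here and in the later hunks) passes only the buffer, e.g.:

	time = rb_time_stamp(buffer);	/* per-cpu argument no longer needed */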
@@ -599,7 +602,7 @@ static struct list_head *rb_list_head(struct list_head *list)
 }

 /*
- * rb_is_head_page - test if the give page is the head page
+ * rb_is_head_page - test if the given page is the head page
  *
  * Because the reader may move the head_page pointer, we can
  * not trust what the head page is (it may be pointing to
@@ -995,7 +998,7 @@ rb_allocate_cpu_buffer(struct ring_buffer *buffer, int cpu)
 	cpu_buffer->buffer = buffer;
 	spin_lock_init(&cpu_buffer->reader_lock);
 	lockdep_set_class(&cpu_buffer->reader_lock, buffer->reader_lock_key);
-	cpu_buffer->lock = (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
+	cpu_buffer->lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;

 	bpage = kzalloc_node(ALIGN(sizeof(*bpage), cache_line_size()),
 			    GFP_KERNEL, cpu_to_node(cpu));
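This follows the tree-wide rename of the low-level lock type: raw_spinlock_t becomes arch_spinlock_t and __RAW_SPIN_LOCK_UNLOCKED becomes __ARCH_SPIN_LOCK_UNLOCKED. The lock is still taken with interrupts disabled by hand, as in the reader-page hunks further down; the pattern is roughly:

	unsigned long flags;

	local_irq_save(flags);
	arch_spin_lock(&cpu_buffer->lock);
	/* ... manipulate reader/head pages ... */
	arch_spin_unlock(&cpu_buffer->lock);
	local_irq_restore(flags);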
@@ -1190,9 +1193,7 @@ rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned nr_pages)
 	struct list_head *p;
 	unsigned i;

-	atomic_inc(&cpu_buffer->record_disabled);
-	synchronize_sched();
-
+	spin_lock_irq(&cpu_buffer->reader_lock);
 	rb_head_page_deactivate(cpu_buffer);

 	for (i = 0; i < nr_pages; i++) {
@@ -1207,11 +1208,9 @@ rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned nr_pages)
 		return;

 	rb_reset_cpu(cpu_buffer);
-
 	rb_check_pages(cpu_buffer);

-	atomic_dec(&cpu_buffer->record_disabled);
-
+	spin_unlock_irq(&cpu_buffer->reader_lock);
 }

 static void
@@ -1222,9 +1221,6 @@ rb_insert_pages(struct ring_buffer_per_cpu *cpu_buffer,
 	struct list_head *p;
 	unsigned i;

-	atomic_inc(&cpu_buffer->record_disabled);
-	synchronize_sched();
-
 	spin_lock_irq(&cpu_buffer->reader_lock);
 	rb_head_page_deactivate(cpu_buffer);

@@ -1237,11 +1233,9 @@ rb_insert_pages(struct ring_buffer_per_cpu *cpu_buffer,
 		list_add_tail(&bpage->list, cpu_buffer->pages);
 	}
 	rb_reset_cpu(cpu_buffer);
-	spin_unlock_irq(&cpu_buffer->reader_lock);
-
 	rb_check_pages(cpu_buffer);

-	atomic_dec(&cpu_buffer->record_disabled);
+	spin_unlock_irq(&cpu_buffer->reader_lock);
 }

 /**
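rb_remove_pages() and rb_insert_pages() no longer disable recording and wait with synchronize_sched() themselves; quiescing writers becomes the caller's job (see the ring_buffer_resize() hunks below). Each helper instead holds reader_lock with interrupts off for the whole page-list update, so its skeleton is roughly:

	spin_lock_irq(&cpu_buffer->reader_lock);
	rb_head_page_deactivate(cpu_buffer);
	/* ... unlink or splice in nr_pages buffer pages ... */
	rb_reset_cpu(cpu_buffer);
	rb_check_pages(cpu_buffer);
	spin_unlock_irq(&cpu_buffer->reader_lock);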
@@ -1249,11 +1243,6 @@ rb_insert_pages(struct ring_buffer_per_cpu *cpu_buffer,
  * @buffer: the buffer to resize.
  * @size: the new size.
  *
- * The tracer is responsible for making sure that the buffer is
- * not being used while changing the size.
- * Note: We may be able to change the above requirement by using
- *  RCU synchronizations.
- *
  * Minimum size is 2 * BUF_PAGE_SIZE.
  *
  * Returns -1 on failure.
@@ -1285,6 +1274,11 @@ int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size)
 	if (size == buffer_size)
 		return size;

+	atomic_inc(&buffer->record_disabled);
+
+	/* Make sure all writers are done with this buffer. */
+	synchronize_sched();
+
 	mutex_lock(&buffer->mutex);
 	get_online_cpus();

@@ -1347,6 +1341,8 @@ int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size)
 	put_online_cpus();
 	mutex_unlock(&buffer->mutex);

+	atomic_dec(&buffer->record_disabled);
+
 	return size;

  free_pages:
@@ -1356,6 +1352,7 @@ int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size)
 	}
 	put_online_cpus();
 	mutex_unlock(&buffer->mutex);
+	atomic_dec(&buffer->record_disabled);
 	return -ENOMEM;

 	/*
@@ -1365,6 +1362,7 @@ int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size)
  out_fail:
 	put_online_cpus();
 	mutex_unlock(&buffer->mutex);
+	atomic_dec(&buffer->record_disabled);
 	return -1;
 }
 EXPORT_SYMBOL_GPL(ring_buffer_resize);
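ring_buffer_resize() now quiesces writers itself: recording is disabled and synchronize_sched() waits for writers already inside the buffer before any pages move, and atomic_dec() re-enables recording on every exit path (success, -ENOMEM and out_fail). Condensed, the new flow is roughly:

	atomic_inc(&buffer->record_disabled);
	synchronize_sched();	/* make sure all writers are done */

	mutex_lock(&buffer->mutex);
	get_online_cpus();
	/* ... grow or shrink the per-cpu page lists ... */
	put_online_cpus();
	mutex_unlock(&buffer->mutex);

	atomic_dec(&buffer->record_disabled);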
@@ -1785,9 +1783,9 @@ rb_reset_tail(struct ring_buffer_per_cpu *cpu_buffer,
 static struct ring_buffer_event *
 rb_move_tail(struct ring_buffer_per_cpu *cpu_buffer,
 	     unsigned long length, unsigned long tail,
-	     struct buffer_page *commit_page,
 	     struct buffer_page *tail_page, u64 *ts)
 {
+	struct buffer_page *commit_page = cpu_buffer->commit_page;
 	struct ring_buffer *buffer = cpu_buffer->buffer;
 	struct buffer_page *next_page;
 	int ret;
@@ -1868,7 +1866,7 @@ rb_move_tail(struct ring_buffer_per_cpu *cpu_buffer,
 		 * Nested commits always have zero deltas, so
 		 * just reread the time stamp
 		 */
-		*ts = rb_time_stamp(buffer, cpu_buffer->cpu);
+		*ts = rb_time_stamp(buffer);
 		next_page->page->time_stamp = *ts;
 	}

@@ -1890,13 +1888,10 @@ static struct ring_buffer_event *
 __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
 		  unsigned type, unsigned long length, u64 *ts)
 {
-	struct buffer_page *tail_page, *commit_page;
+	struct buffer_page *tail_page;
 	struct ring_buffer_event *event;
 	unsigned long tail, write;

-	commit_page = cpu_buffer->commit_page;
-	/* we just need to protect against interrupts */
-	barrier();
 	tail_page = cpu_buffer->tail_page;
 	write = local_add_return(length, &tail_page->write);

@@ -1907,7 +1902,7 @@ __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
 	/* See if we shot pass the end of this buffer page */
 	if (write > BUF_PAGE_SIZE)
 		return rb_move_tail(cpu_buffer, length, tail,
-				    commit_page, tail_page, ts);
+				    tail_page, ts);

 	/* We reserved something on the buffer */

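rb_move_tail() now reads cpu_buffer->commit_page itself instead of receiving a snapshot taken behind a barrier() in __rb_reserve_next(), so the call site shrinks to:

	/* See if we shot past the end of this buffer page */
	if (write > BUF_PAGE_SIZE)
		return rb_move_tail(cpu_buffer, length, tail, tail_page, ts);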
@@ -2111,7 +2106,7 @@ rb_reserve_next_event(struct ring_buffer *buffer,
 	if (RB_WARN_ON(cpu_buffer, ++nr_loops > 1000))
 		goto out_fail;

-	ts = rb_time_stamp(cpu_buffer->buffer, cpu_buffer->cpu);
+	ts = rb_time_stamp(cpu_buffer->buffer);

 	/*
 	 * Only the first commit can update the timestamp.
@@ -2681,7 +2676,7 @@ unsigned long ring_buffer_entries(struct ring_buffer *buffer)
 EXPORT_SYMBOL_GPL(ring_buffer_entries);

 /**
- * ring_buffer_overrun_cpu - get the number of overruns in buffer
+ * ring_buffer_overruns - get the number of overruns in buffer
  * @buffer: The ring buffer
  *
  * Returns the total number of overruns in the ring buffer
@@ -2832,7 +2827,7 @@ rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
 	int ret;

 	local_irq_save(flags);
-	__raw_spin_lock(&cpu_buffer->lock);
+	arch_spin_lock(&cpu_buffer->lock);

  again:
 	/*
@@ -2921,7 +2916,7 @@ rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
 		goto again;

  out:
-	__raw_spin_unlock(&cpu_buffer->lock);
+	arch_spin_unlock(&cpu_buffer->lock);
 	local_irq_restore(flags);

 	return reader;
@@ -3284,9 +3279,9 @@ ring_buffer_read_start(struct ring_buffer *buffer, int cpu)
 	synchronize_sched();

 	spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
-	__raw_spin_lock(&cpu_buffer->lock);
+	arch_spin_lock(&cpu_buffer->lock);
 	rb_iter_reset(iter);
-	__raw_spin_unlock(&cpu_buffer->lock);
+	arch_spin_unlock(&cpu_buffer->lock);
 	spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);

 	return iter;
@@ -3406,11 +3401,11 @@ void ring_buffer_reset_cpu(struct ring_buffer *buffer, int cpu)
 	if (RB_WARN_ON(cpu_buffer, local_read(&cpu_buffer->committing)))
 		goto out;

-	__raw_spin_lock(&cpu_buffer->lock);
+	arch_spin_lock(&cpu_buffer->lock);

 	rb_reset_cpu(cpu_buffer);

-	__raw_spin_unlock(&cpu_buffer->lock);
+	arch_spin_unlock(&cpu_buffer->lock);

 out:
 	spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);