Diffstat (limited to 'kernel/trace/ring_buffer.c')
-rw-r--r--  kernel/trace/ring_buffer.c | 42 ++++++++++++++++++++++++--------------------
1 file changed, 22 insertions(+), 20 deletions(-)
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index 3ffa502fb243..f58c9ad15830 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -397,18 +397,21 @@ int ring_buffer_print_page_header(struct trace_seq *s)
 	int ret;
 
 	ret = trace_seq_printf(s, "\tfield: u64 timestamp;\t"
-			       "offset:0;\tsize:%u;\n",
-			       (unsigned int)sizeof(field.time_stamp));
+			       "offset:0;\tsize:%u;\tsigned:%u;\n",
+			       (unsigned int)sizeof(field.time_stamp),
+			       (unsigned int)is_signed_type(u64));
 
 	ret = trace_seq_printf(s, "\tfield: local_t commit;\t"
-			       "offset:%u;\tsize:%u;\n",
+			       "offset:%u;\tsize:%u;\tsigned:%u;\n",
 			       (unsigned int)offsetof(typeof(field), commit),
-			       (unsigned int)sizeof(field.commit));
+			       (unsigned int)sizeof(field.commit),
+			       (unsigned int)is_signed_type(long));
 
 	ret = trace_seq_printf(s, "\tfield: char data;\t"
-			       "offset:%u;\tsize:%u;\n",
+			       "offset:%u;\tsize:%u;\tsigned:%u;\n",
 			       (unsigned int)offsetof(typeof(field), data),
-			       (unsigned int)BUF_PAGE_SIZE);
+			       (unsigned int)BUF_PAGE_SIZE,
+			       (unsigned int)is_signed_type(char));
 
 	return ret;
 }
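
The hunk above appends a signed:%u attribute to each field line of the page header the ring buffer exports, so userspace parsers can tell signed fields from unsigned ones. is_signed_type() resolves entirely at compile time; a minimal standalone sketch of the idea (the macro body below mirrors the kernel's definition, shown here purely as an illustration):

    #include <stdio.h>

    /* (-1) stays negative when cast to a signed type, but wraps to a
     * huge positive value when cast to an unsigned one. */
    #define is_signed_type(type)	(((type)(-1)) < (type)1)

    int main(void)
    {
        printf("long signed:%u\n", (unsigned int)is_signed_type(long));               /* 1 */
        printf("u64  signed:%u\n", (unsigned int)is_signed_type(unsigned long long)); /* 0 */
        printf("char signed:%u\n", (unsigned int)is_signed_type(char));               /* ABI-dependent */
        return 0;
    }
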
@@ -420,7 +423,7 @@ struct ring_buffer_per_cpu {
 	int				cpu;
 	struct ring_buffer		*buffer;
 	spinlock_t			reader_lock;	/* serialize readers */
-	raw_spinlock_t			lock;
+	arch_spinlock_t			lock;
 	struct lock_class_key		lock_key;
 	struct list_head		*pages;
 	struct buffer_page		*head_page;	/* read from head */
@@ -995,7 +998,7 @@ rb_allocate_cpu_buffer(struct ring_buffer *buffer, int cpu)
 	cpu_buffer->buffer = buffer;
 	spin_lock_init(&cpu_buffer->reader_lock);
 	lockdep_set_class(&cpu_buffer->reader_lock, buffer->reader_lock_key);
-	cpu_buffer->lock = (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
+	cpu_buffer->lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
 
 	bpage = kzalloc_node(ALIGN(sizeof(*bpage), cache_line_size()),
 			    GFP_KERNEL, cpu_to_node(cpu));
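
These two hunks track a tree-wide rename: the arch-level lock type raw_spinlock_t became arch_spinlock_t, and its initializer __RAW_SPIN_LOCK_UNLOCKED became __ARCH_SPIN_LOCK_UNLOCKED, freeing the raw_* names for later use. A kernel-style sketch of declaring and initializing such a lock, using the field names from the diff (abridged, not the full struct):

    struct ring_buffer_per_cpu {
        spinlock_t	reader_lock;	/* normal, lockdep-aware lock */
        arch_spinlock_t	lock;		/* raw arch lock, no debug bookkeeping */
        /* ... */
    };

    static void init_locks(struct ring_buffer_per_cpu *cpu_buffer)
    {
        spin_lock_init(&cpu_buffer->reader_lock);
        /* arch locks have no init helper; assign the unlocked value */
        cpu_buffer->lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
    }
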
@@ -1193,6 +1196,7 @@ rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned nr_pages)
 	atomic_inc(&cpu_buffer->record_disabled);
 	synchronize_sched();
 
+	spin_lock_irq(&cpu_buffer->reader_lock);
 	rb_head_page_deactivate(cpu_buffer);
 
 	for (i = 0; i < nr_pages; i++) {
@@ -1207,6 +1211,7 @@ rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned nr_pages)
 		return;
 
 	rb_reset_cpu(cpu_buffer);
+	spin_unlock_irq(&cpu_buffer->reader_lock);
 
 	rb_check_pages(cpu_buffer);
 
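
The two added lines in these hunks synchronize resizing with readers: writers are already held off by record_disabled plus synchronize_sched(), and taking reader_lock with interrupts disabled now keeps readers out while pages are unlinked. An abridged kernel-style sketch of the resulting shape (helper internals and error paths elided):

    static void
    rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned nr_pages)
    {
        atomic_inc(&cpu_buffer->record_disabled);	/* stop writers */
        synchronize_sched();				/* wait out in-flight ones */

        spin_lock_irq(&cpu_buffer->reader_lock);	/* now stop readers too */
        rb_head_page_deactivate(cpu_buffer);
        /* ... unlink nr_pages pages from cpu_buffer->pages ... */
        rb_reset_cpu(cpu_buffer);
        spin_unlock_irq(&cpu_buffer->reader_lock);

        rb_check_pages(cpu_buffer);
    }
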
@@ -1785,9 +1790,9 @@ rb_reset_tail(struct ring_buffer_per_cpu *cpu_buffer,
 static struct ring_buffer_event *
 rb_move_tail(struct ring_buffer_per_cpu *cpu_buffer,
 	     unsigned long length, unsigned long tail,
-	     struct buffer_page *commit_page,
 	     struct buffer_page *tail_page, u64 *ts)
 {
+	struct buffer_page *commit_page = cpu_buffer->commit_page;
 	struct ring_buffer *buffer = cpu_buffer->buffer;
 	struct buffer_page *next_page;
 	int ret;
@@ -1890,13 +1895,10 @@ static struct ring_buffer_event *
 __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
 		  unsigned type, unsigned long length, u64 *ts)
 {
-	struct buffer_page *tail_page, *commit_page;
+	struct buffer_page *tail_page;
 	struct ring_buffer_event *event;
 	unsigned long tail, write;
 
-	commit_page = cpu_buffer->commit_page;
-	/* we just need to protect against interrupts */
-	barrier();
 	tail_page = cpu_buffer->tail_page;
 	write = local_add_return(length, &tail_page->write);
 
@@ -1907,7 +1909,7 @@ __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
 	/* See if we shot pass the end of this buffer page */
 	if (write > BUF_PAGE_SIZE)
 		return rb_move_tail(cpu_buffer, length, tail,
-				    commit_page, tail_page, ts);
+				    tail_page, ts);
 
 	/* We reserved something on the buffer */
 
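
These three hunks reorder when the commit page is sampled. Previously __rb_reserve_next() read commit_page before reserving tail space (with only a compiler barrier() in between) and passed it down; now rb_move_tail() reads cpu_buffer->commit_page itself, after the tail reservation has already happened, which trims the fast path and avoids pairing a commit page sampled earlier with a newer tail page. A userspace C11 sketch of the pattern (all names here are hypothetical stand-ins, not the kernel's code):

    #include <stdatomic.h>

    #define PAGE_SZ 4096UL

    struct bpage { _Atomic unsigned long write; };

    static struct bpage *tail_page;
    static struct bpage *_Atomic commit_page;

    static unsigned long move_tail(unsigned long tail, unsigned long len)
    {
        /* Slow path: only *now* look at the commit page, as the patched
         * rb_move_tail() does; any reservation that beat us to the page
         * has already had its chance to advance it. */
        struct bpage *commit = atomic_load(&commit_page);
        (void)commit; (void)tail; (void)len;
        return 0;	/* wrap to the next page, etc. */
    }

    static unsigned long reserve(unsigned long len)
    {
        unsigned long tail = atomic_fetch_add(&tail_page->write, len);

        if (tail + len > PAGE_SZ)
            return move_tail(tail, len);	/* fell off the page */
        return tail;	/* fast path: we own [tail, tail + len) */
    }

    int main(void)
    {
        static struct bpage page;
        tail_page = &page;
        atomic_store(&commit_page, &page);
        return (int)reserve(8);	/* reserves [0, 8) on the page */
    }
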
@@ -2832,7 +2834,7 @@ rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
 	int ret;
 
 	local_irq_save(flags);
-	__raw_spin_lock(&cpu_buffer->lock);
+	arch_spin_lock(&cpu_buffer->lock);
 
  again:
 	/*
@@ -2921,7 +2923,7 @@ rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
 		goto again;
 
  out:
-	__raw_spin_unlock(&cpu_buffer->lock);
+	arch_spin_unlock(&cpu_buffer->lock);
 	local_irq_restore(flags);
 
 	return reader;
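
With the rename, __raw_spin_lock()/__raw_spin_unlock() become arch_spin_lock()/arch_spin_unlock(). Because the arch-level lock does none of the usual bookkeeping (no lockdep, no irq or preemption handling), the caller disables interrupts by hand, as the hunks above show. The pattern, as a kernel-style fragment:

    unsigned long flags;

    local_irq_save(flags);			/* arch locks don't touch irq state */
    arch_spin_lock(&cpu_buffer->lock);

    /* ... critical section: swap the reader page in ... */

    arch_spin_unlock(&cpu_buffer->lock);
    local_irq_restore(flags);
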
@@ -3284,9 +3286,9 @@ ring_buffer_read_start(struct ring_buffer *buffer, int cpu)
 	synchronize_sched();
 
 	spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
-	__raw_spin_lock(&cpu_buffer->lock);
+	arch_spin_lock(&cpu_buffer->lock);
 	rb_iter_reset(iter);
-	__raw_spin_unlock(&cpu_buffer->lock);
+	arch_spin_unlock(&cpu_buffer->lock);
 	spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
 
 	return iter;
@@ -3406,11 +3408,11 @@ void ring_buffer_reset_cpu(struct ring_buffer *buffer, int cpu)
 	if (RB_WARN_ON(cpu_buffer, local_read(&cpu_buffer->committing)))
 		goto out;
 
-	__raw_spin_lock(&cpu_buffer->lock);
+	arch_spin_lock(&cpu_buffer->lock);
 
 	rb_reset_cpu(cpu_buffer);
 
-	__raw_spin_unlock(&cpu_buffer->lock);
+	arch_spin_unlock(&cpu_buffer->lock);
 
  out:
 	spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
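
In these last two hunks the raw lock nests inside reader_lock: spin_lock_irqsave() on the lockdep-covered reader_lock is taken first, then arch_spin_lock() on the per-cpu raw lock, with unlocking in reverse order. Sketch of the nesting, using the names from the diff:

    unsigned long flags;

    spin_lock_irqsave(&cpu_buffer->reader_lock, flags);	/* outer, lockdep-aware */
    arch_spin_lock(&cpu_buffer->lock);			/* inner, raw */

    rb_reset_cpu(cpu_buffer);				/* reset under both locks */

    arch_spin_unlock(&cpu_buffer->lock);
    spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
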