Diffstat (limited to 'kernel/trace/ring_buffer.c')

 -rw-r--r--  kernel/trace/ring_buffer.c | 40 ++++++++++++++++++++--------------------
 1 file changed, 20 insertions(+), 20 deletions(-)
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index 5dd017fea6f5..f58c9ad15830 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -397,18 +397,21 @@ int ring_buffer_print_page_header(struct trace_seq *s)
 	int ret;

 	ret = trace_seq_printf(s, "\tfield: u64 timestamp;\t"
-			       "offset:0;\tsize:%u;\n",
-			       (unsigned int)sizeof(field.time_stamp));
+			       "offset:0;\tsize:%u;\tsigned:%u;\n",
+			       (unsigned int)sizeof(field.time_stamp),
+			       (unsigned int)is_signed_type(u64));

 	ret = trace_seq_printf(s, "\tfield: local_t commit;\t"
-			       "offset:%u;\tsize:%u;\n",
+			       "offset:%u;\tsize:%u;\tsigned:%u;\n",
 			       (unsigned int)offsetof(typeof(field), commit),
-			       (unsigned int)sizeof(field.commit));
+			       (unsigned int)sizeof(field.commit),
+			       (unsigned int)is_signed_type(long));

 	ret = trace_seq_printf(s, "\tfield: char data;\t"
-			       "offset:%u;\tsize:%u;\n",
+			       "offset:%u;\tsize:%u;\tsigned:%u;\n",
 			       (unsigned int)offsetof(typeof(field), data),
-			       (unsigned int)BUF_PAGE_SIZE);
+			       (unsigned int)BUF_PAGE_SIZE,
+			       (unsigned int)is_signed_type(char));

 	return ret;
 }
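
The new signed:%u column is driven by the kernel's is_signed_type() macro. As a quick aside, a minimal user-space sketch of the idea, assuming the commonly used definition of the macro (the in-tree version may differ in detail):

#include <stdio.h>

typedef unsigned long long u64;	/* stand-in for the kernel typedef */

/* (type)-1 compares below (type)1 only when the type is signed, so
 * this evaluates to 1 for signed types and 0 for unsigned ones. */
#define is_signed_type(type)	(((type)(-1)) < (type)1)

int main(void)
{
	printf("u64  signed:%u\n", (unsigned int)is_signed_type(u64));
	printf("long signed:%u\n", (unsigned int)is_signed_type(long));
	/* char signedness is implementation-defined, so this line
	 * prints 1 or 0 depending on the platform ABI. */
	printf("char signed:%u\n", (unsigned int)is_signed_type(char));
	return 0;
}

This lines up with what the hunk above emits: timestamp (u64) reports signed:0, while commit (local_t, a wrapped long) reports signed:1.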
@@ -420,7 +423,7 @@ struct ring_buffer_per_cpu {
 	int				cpu;
 	struct ring_buffer		*buffer;
 	spinlock_t			reader_lock;	/* serialize readers */
-	raw_spinlock_t			lock;
+	arch_spinlock_t			lock;
 	struct lock_class_key		lock_key;
 	struct list_head		*pages;
 	struct buffer_page		*head_page;	/* read from head */
@@ -995,7 +998,7 @@ rb_allocate_cpu_buffer(struct ring_buffer *buffer, int cpu)
 	cpu_buffer->buffer = buffer;
 	spin_lock_init(&cpu_buffer->reader_lock);
 	lockdep_set_class(&cpu_buffer->reader_lock, buffer->reader_lock_key);
-	cpu_buffer->lock = (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
+	cpu_buffer->lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;

 	bpage = kzalloc_node(ALIGN(sizeof(*bpage), cache_line_size()),
 			    GFP_KERNEL, cpu_to_node(cpu));
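
Aside from the rename, the locking semantics are unchanged: arch_spinlock_t is still the bare architecture-level spinlock that neither masks interrupts nor hooks into lockdep, which is why every call site below pairs it with local_irq_save()/local_irq_restore(). A minimal sketch of the pattern (demo_lock and demo_critical_section are illustrative names, not from this diff):

#include <linux/spinlock.h>

static arch_spinlock_t demo_lock = __ARCH_SPIN_LOCK_UNLOCKED;

static void demo_critical_section(void)
{
	unsigned long flags;

	/* arch locks do not disable interrupts themselves */
	local_irq_save(flags);
	arch_spin_lock(&demo_lock);
	/* ... critical section ... */
	arch_spin_unlock(&demo_lock);
	local_irq_restore(flags);
}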
@@ -1787,9 +1790,9 @@ rb_reset_tail(struct ring_buffer_per_cpu *cpu_buffer,
 static struct ring_buffer_event *
 rb_move_tail(struct ring_buffer_per_cpu *cpu_buffer,
 	     unsigned long length, unsigned long tail,
-	     struct buffer_page *commit_page,
 	     struct buffer_page *tail_page, u64 *ts)
 {
+	struct buffer_page *commit_page = cpu_buffer->commit_page;
 	struct ring_buffer *buffer = cpu_buffer->buffer;
 	struct buffer_page *next_page;
 	int ret;
@@ -1892,13 +1895,10 @@ static struct ring_buffer_event *
 __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
 		  unsigned type, unsigned long length, u64 *ts)
 {
-	struct buffer_page *tail_page, *commit_page;
+	struct buffer_page *tail_page;
 	struct ring_buffer_event *event;
 	unsigned long tail, write;

-	commit_page = cpu_buffer->commit_page;
-	/* we just need to protect against interrupts */
-	barrier();
 	tail_page = cpu_buffer->tail_page;
 	write = local_add_return(length, &tail_page->write);

@@ -1909,7 +1909,7 @@ __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
 	/* See if we shot pass the end of this buffer page */
 	if (write > BUF_PAGE_SIZE)
 		return rb_move_tail(cpu_buffer, length, tail,
-				    commit_page, tail_page, ts);
+				    tail_page, ts);

 	/* We reserved something on the buffer */

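
With commit_page now read inside rb_move_tail() (the page-overflow slow path), the fast path above no longer reads commit_page at all, so the barrier() that ordered that load against the tail_page load goes away with it. A user-space sketch of the ordering concern the deleted barrier addressed; the struct and function names here are illustrative, not the kernel's:

/* compiler barrier, as in the kernel: the compiler may not reorder
 * memory accesses across this point */
#define barrier()	__asm__ __volatile__("" ::: "memory")

struct pages {
	void *commit_page;	/* stand-ins for the ring_buffer_per_cpu */
	void *tail_page;	/* fields read by the old fast path      */
};

static void snapshot(struct pages *p, void **commit, void **tail)
{
	*commit = p->commit_page;
	/* without this, the compiler could reorder the two loads, and an
	 * interrupt landing between them could advance both pages and
	 * leave the snapshot mixing old and new values */
	barrier();
	*tail = p->tail_page;
}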
@@ -2834,7 +2834,7 @@ rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
 	int ret;

 	local_irq_save(flags);
-	__raw_spin_lock(&cpu_buffer->lock);
+	arch_spin_lock(&cpu_buffer->lock);

 again:
 	/*
@@ -2923,7 +2923,7 @@ rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
 		goto again;

 out:
-	__raw_spin_unlock(&cpu_buffer->lock);
+	arch_spin_unlock(&cpu_buffer->lock);
 	local_irq_restore(flags);

 	return reader;
@@ -3286,9 +3286,9 @@ ring_buffer_read_start(struct ring_buffer *buffer, int cpu)
 	synchronize_sched();

 	spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
-	__raw_spin_lock(&cpu_buffer->lock);
+	arch_spin_lock(&cpu_buffer->lock);
 	rb_iter_reset(iter);
-	__raw_spin_unlock(&cpu_buffer->lock);
+	arch_spin_unlock(&cpu_buffer->lock);
 	spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);

 	return iter;
@@ -3408,11 +3408,11 @@ void ring_buffer_reset_cpu(struct ring_buffer *buffer, int cpu)
 	if (RB_WARN_ON(cpu_buffer, local_read(&cpu_buffer->committing)))
 		goto out;

-	__raw_spin_lock(&cpu_buffer->lock);
+	arch_spin_lock(&cpu_buffer->lock);

 	rb_reset_cpu(cpu_buffer);

-	__raw_spin_unlock(&cpu_buffer->lock);
+	arch_spin_unlock(&cpu_buffer->lock);

 out:
 	spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);