Diffstat (limited to 'kernel/trace/ring_buffer.c')
-rw-r--r--  kernel/trace/ring_buffer.c  16
1 file changed, 8 insertions, 8 deletions
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index a1ca4956ab5e..f58c9ad15830 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -423,7 +423,7 @@ struct ring_buffer_per_cpu {
 	int				cpu;
 	struct ring_buffer		*buffer;
 	spinlock_t			reader_lock;	/* serialize readers */
-	raw_spinlock_t			lock;
+	arch_spinlock_t			lock;
 	struct lock_class_key		lock_key;
 	struct list_head		*pages;
 	struct buffer_page		*head_page;	/* read from head */
@@ -998,7 +998,7 @@ rb_allocate_cpu_buffer(struct ring_buffer *buffer, int cpu)
 	cpu_buffer->buffer = buffer;
 	spin_lock_init(&cpu_buffer->reader_lock);
 	lockdep_set_class(&cpu_buffer->reader_lock, buffer->reader_lock_key);
-	cpu_buffer->lock = (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
+	cpu_buffer->lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
 
 	bpage = kzalloc_node(ALIGN(sizeof(*bpage), cache_line_size()),
 			     GFP_KERNEL, cpu_to_node(cpu));
@@ -2834,7 +2834,7 @@ rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
 	int ret;
 
 	local_irq_save(flags);
-	__raw_spin_lock(&cpu_buffer->lock);
+	arch_spin_lock(&cpu_buffer->lock);
 
  again:
 	/*
@@ -2923,7 +2923,7 @@ rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
 		goto again;
 
  out:
-	__raw_spin_unlock(&cpu_buffer->lock);
+	arch_spin_unlock(&cpu_buffer->lock);
 	local_irq_restore(flags);
 
 	return reader;
@@ -3286,9 +3286,9 @@ ring_buffer_read_start(struct ring_buffer *buffer, int cpu)
 	synchronize_sched();
 
 	spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
-	__raw_spin_lock(&cpu_buffer->lock);
+	arch_spin_lock(&cpu_buffer->lock);
 	rb_iter_reset(iter);
-	__raw_spin_unlock(&cpu_buffer->lock);
+	arch_spin_unlock(&cpu_buffer->lock);
 	spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
 
 	return iter;
@@ -3408,11 +3408,11 @@ void ring_buffer_reset_cpu(struct ring_buffer *buffer, int cpu)
 	if (RB_WARN_ON(cpu_buffer, local_read(&cpu_buffer->committing)))
 		goto out;
 
-	__raw_spin_lock(&cpu_buffer->lock);
+	arch_spin_lock(&cpu_buffer->lock);
 
 	rb_reset_cpu(cpu_buffer);
 
-	__raw_spin_unlock(&cpu_buffer->lock);
+	arch_spin_unlock(&cpu_buffer->lock);
 
  out:
 	spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
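
Every hunk above makes the same mechanical change: the per-CPU buffer's low-level lock goes from raw_spinlock_t to arch_spinlock_t, and the matching __raw_spin_lock()/__raw_spin_unlock() calls become arch_spin_lock()/arch_spin_unlock(). The locking discipline itself is untouched: the arch-level spinlock does no IRQ or preemption handling of its own, so each acquisition stays bracketed by local_irq_save()/local_irq_restore(), or sits under the already IRQ-disabling reader_lock. Below is a minimal sketch of that pattern; demo_buffer and its functions are hypothetical names for illustration, not part of ring_buffer.c, and the sketch assumes an SMP kernel where the arch_spin_*() primitives are defined.

/*
 * Hypothetical sketch of the arch_spinlock_t pattern used in the diff
 * above; demo_buffer and the demo_* functions are illustrative only.
 */
#include <linux/irqflags.h>
#include <linux/spinlock.h>

struct demo_buffer {
	arch_spinlock_t lock;	/* was raw_spinlock_t before the rename */
};

static void demo_buffer_init(struct demo_buffer *b)
{
	/* Same initializer style as rb_allocate_cpu_buffer() above. */
	b->lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
}

static void demo_buffer_work(struct demo_buffer *b)
{
	unsigned long flags;

	/*
	 * arch_spin_lock() disables neither interrupts nor preemption,
	 * so the caller must, exactly as rb_get_reader_page() does.
	 */
	local_irq_save(flags);
	arch_spin_lock(&b->lock);

	/* ... critical section ... */

	arch_spin_unlock(&b->lock);
	local_irq_restore(flags);
}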