diff options
author | Robert Richter <robert.richter@amd.com> | 2009-07-31 08:58:04 -0400 |
---|---|---|
committer | Steven Rostedt <rostedt@goodmis.org> | 2009-09-09 23:54:02 -0400 |
commit | d8eeb2d3b26d25c44c10f28430e2157a2d20bd1d (patch) | |
tree | 8308f1be1706a6605387289683e7b11dc40ecda6 | |
parent | d28daf923ac5e4a0d7cecebae56f3e339189366b (diff) |
ring-buffer: consolidate interface of rb_buffer_peek()
rb_buffer_peek() operates with struct ring_buffer_per_cpu *cpu_buffer
only. Thus, instead of passing the variables buffer and cpu, it is better
to use cpu_buffer directly. This also reduces the risk of races since
cpu_buffer is not calculated twice.
Signed-off-by: Robert Richter <robert.richter@amd.com>
LKML-Reference: <1249045084-3028-1-git-send-email-robert.richter@amd.com>
Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
-rw-r--r-- | kernel/trace/ring_buffer.c | 11 |
1 file changed, 4 insertions, 7 deletions
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c index 454e74e718cf..8786c350b4ca 100644 --- a/kernel/trace/ring_buffer.c +++ b/kernel/trace/ring_buffer.c | |||
@@ -2997,15 +2997,12 @@ static void rb_advance_iter(struct ring_buffer_iter *iter) | |||
2997 | } | 2997 | } |
2998 | 2998 | ||
2999 | static struct ring_buffer_event * | 2999 | static struct ring_buffer_event * |
3000 | rb_buffer_peek(struct ring_buffer *buffer, int cpu, u64 *ts) | 3000 | rb_buffer_peek(struct ring_buffer_per_cpu *cpu_buffer, u64 *ts) |
3001 | { | 3001 | { |
3002 | struct ring_buffer_per_cpu *cpu_buffer; | ||
3003 | struct ring_buffer_event *event; | 3002 | struct ring_buffer_event *event; |
3004 | struct buffer_page *reader; | 3003 | struct buffer_page *reader; |
3005 | int nr_loops = 0; | 3004 | int nr_loops = 0; |
3006 | 3005 | ||
3007 | cpu_buffer = buffer->buffers[cpu]; | ||
3008 | |||
3009 | again: | 3006 | again: |
3010 | /* | 3007 | /* |
3011 | * We repeat when a timestamp is encountered. It is possible | 3008 | * We repeat when a timestamp is encountered. It is possible |
@@ -3049,7 +3046,7 @@ rb_buffer_peek(struct ring_buffer *buffer, int cpu, u64 *ts) | |||
3049 | case RINGBUF_TYPE_DATA: | 3046 | case RINGBUF_TYPE_DATA: |
3050 | if (ts) { | 3047 | if (ts) { |
3051 | *ts = cpu_buffer->read_stamp + event->time_delta; | 3048 | *ts = cpu_buffer->read_stamp + event->time_delta; |
3052 | ring_buffer_normalize_time_stamp(buffer, | 3049 | ring_buffer_normalize_time_stamp(cpu_buffer->buffer, |
3053 | cpu_buffer->cpu, ts); | 3050 | cpu_buffer->cpu, ts); |
3054 | } | 3051 | } |
3055 | return event; | 3052 | return event; |
@@ -3168,7 +3165,7 @@ ring_buffer_peek(struct ring_buffer *buffer, int cpu, u64 *ts) | |||
3168 | local_irq_save(flags); | 3165 | local_irq_save(flags); |
3169 | if (dolock) | 3166 | if (dolock) |
3170 | spin_lock(&cpu_buffer->reader_lock); | 3167 | spin_lock(&cpu_buffer->reader_lock); |
3171 | event = rb_buffer_peek(buffer, cpu, ts); | 3168 | event = rb_buffer_peek(cpu_buffer, ts); |
3172 | if (event && event->type_len == RINGBUF_TYPE_PADDING) | 3169 | if (event && event->type_len == RINGBUF_TYPE_PADDING) |
3173 | rb_advance_reader(cpu_buffer); | 3170 | rb_advance_reader(cpu_buffer); |
3174 | if (dolock) | 3171 | if (dolock) |
@@ -3237,7 +3234,7 @@ ring_buffer_consume(struct ring_buffer *buffer, int cpu, u64 *ts) | |||
3237 | if (dolock) | 3234 | if (dolock) |
3238 | spin_lock(&cpu_buffer->reader_lock); | 3235 | spin_lock(&cpu_buffer->reader_lock); |
3239 | 3236 | ||
3240 | event = rb_buffer_peek(buffer, cpu, ts); | 3237 | event = rb_buffer_peek(cpu_buffer, ts); |
3241 | if (event) | 3238 | if (event) |
3242 | rb_advance_reader(cpu_buffer); | 3239 | rb_advance_reader(cpu_buffer); |
3243 | 3240 | ||