author	Steven Rostedt <srostedt@redhat.com>	2009-05-11 16:28:23 -0400
committer	Steven Rostedt <rostedt@goodmis.org>	2009-05-11 23:14:03 -0400
commit	88eb0125362f2ab272cbaf84252cf101ddc2dec9 (patch)
tree	acf8bac454d61874f35bf5e20cd30b448811608a /kernel
parent	0f0c85fc80adbbd2265d89867d743f929d516805 (diff)
ring-buffer: use internal time stamp function
The exported ring_buffer_time_stamp() adds a little more overhead than is
needed when the ring buffer uses it internally. This patch adds an internal
timestamp function that can be inlined (a single-line function) and used
internally for the ring buffer.

[ Impact: a little less overhead to the ring buffer ]

Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
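For readers skimming the change, the shape of the patch is the familiar pattern of an inlinable internal helper that does only the raw clock read, with the exported accessor keeping the extra safety work for outside callers. Below is a minimal, self-contained userspace sketch of that pattern, not the kernel code itself; the struct layout, the demo_clock() callback, and main() are assumptions made for illustration, and the kernel's preempt_disable_notrace()/preempt_enable_no_resched_notrace() calls are only noted in comments.

#include <stdint.h>
#include <time.h>

typedef uint64_t u64;

/* Up this to test timestamp normalization, as in ring_buffer.c */
#define DEBUG_SHIFT 0

/* Simplified stand-in for struct ring_buffer: only the clock callback */
struct ring_buffer {
	u64 (*clock)(void);
};

/* Internal helper: a single line, so the compiler can inline it at every
 * internal call site without the cost of the exported wrapper. */
static inline u64 rb_time_stamp(struct ring_buffer *buffer, int cpu)
{
	(void)cpu;	/* unused in this sketch */
	return buffer->clock() << DEBUG_SHIFT;
}

/* Exported accessor: keeps the extra work for callers outside the ring
 * buffer (in the kernel this is where preemption is disabled). */
u64 ring_buffer_time_stamp(struct ring_buffer *buffer, int cpu)
{
	u64 time;

	/* preempt_disable_notrace() would go here in the kernel */
	time = rb_time_stamp(buffer, cpu);
	/* preempt_enable_no_resched_notrace() would go here */

	return time;
}

/* Example clock callback for the sketch: monotonic time in nanoseconds */
static u64 demo_clock(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return (u64)ts.tv_sec * 1000000000ULL + ts.tv_nsec;
}

int main(void)
{
	struct ring_buffer rb = { .clock = demo_clock };

	u64 internal = rb_time_stamp(&rb, 0);		/* cheap internal path */
	u64 external = ring_buffer_time_stamp(&rb, 0);	/* exported wrapper */

	return internal <= external ? 0 : 1;
}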
Diffstat (limited to 'kernel')
-rw-r--r--	kernel/trace/ring_buffer.c	13
1 file changed, 9 insertions(+), 4 deletions(-)
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index f452de2ce490..a9e645a5bc10 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -454,13 +454,18 @@ struct ring_buffer_iter {
 /* Up this if you want to test the TIME_EXTENTS and normalization */
 #define DEBUG_SHIFT 0
 
+static inline u64 rb_time_stamp(struct ring_buffer *buffer, int cpu)
+{
+	/* shift to debug/test normalization and TIME_EXTENTS */
+	return buffer->clock() << DEBUG_SHIFT;
+}
+
 u64 ring_buffer_time_stamp(struct ring_buffer *buffer, int cpu)
 {
 	u64 time;
 
 	preempt_disable_notrace();
-	/* shift to debug/test normalization and TIME_EXTENTS */
-	time = buffer->clock() << DEBUG_SHIFT;
+	time = rb_time_stamp(buffer, cpu);
 	preempt_enable_no_resched_notrace();
 
 	return time;
@@ -1247,7 +1252,7 @@ rb_move_tail(struct ring_buffer_per_cpu *cpu_buffer,
 		cpu_buffer->tail_page = next_page;
 
 		/* reread the time stamp */
-		*ts = ring_buffer_time_stamp(buffer, cpu_buffer->cpu);
+		*ts = rb_time_stamp(buffer, cpu_buffer->cpu);
 		cpu_buffer->tail_page->page->time_stamp = *ts;
 	}
 
@@ -1413,7 +1418,7 @@ rb_reserve_next_event(struct ring_buffer_per_cpu *cpu_buffer,
 	if (RB_WARN_ON(cpu_buffer, ++nr_loops > 1000))
 		return NULL;
 
-	ts = ring_buffer_time_stamp(cpu_buffer->buffer, cpu_buffer->cpu);
+	ts = rb_time_stamp(cpu_buffer->buffer, cpu_buffer->cpu);
 
 	/*
 	 * Only the first commit can update the timestamp.