author		Steven Rostedt <srostedt@redhat.com>	2009-05-11 22:11:05 -0400
committer	Steven Rostedt <rostedt@goodmis.org>	2009-05-11 23:33:06 -0400
commit		168b6b1d0594c7866caa73b12f3b8d91075695f2 (patch)
tree		2dd8d0e8b6ffc87360180c878a87b3632625b3f0 /kernel/trace/ring_buffer.c
parent		88eb0125362f2ab272cbaf84252cf101ddc2dec9 (diff)
ring-buffer: move code around to remove some branches
This is a bit of micro-optimization. But since the ring buffer is used
in tracing every function call, it is an extreme hot path. Every
nanosecond counts.

This change shows over 5% improvement in the ring-buffer-benchmark.

[ Impact: more efficient code ]

Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
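As a purely illustrative aside, the branch restructuring can be sketched as a
stand-alone C program. The names below are hypothetical, test_time_stamp() is
reduced to a plain threshold check, and the time-extend event that
rb_add_time_stamp() inserts in the real code is elided:

/*
 * Sketch only: contrasts the branch layout before and after the patch.
 * TIME_DELTA_MAX stands in for the kernel's test_time_stamp() check.
 */
#include <stdint.h>
#include <stdio.h>

#define TIME_DELTA_MAX	(1ULL << 27)

/* Before: non-commit events take a branch just to zero the delta. */
static uint64_t delta_before(int commit_path, uint64_t ts, uint64_t write_stamp)
{
	uint64_t delta;

	if (commit_path) {
		delta = ts - write_stamp;

		/* Did the write stamp get updated already? */
		if (ts < write_stamp)
			delta = 0;

		else if (delta > TIME_DELTA_MAX)
			delta = 0;	/* real code adds a time extend here */
	} else
		/* Non commits have zero deltas */
		delta = 0;

	return delta;
}

/*
 * After: delta starts at 0, so the else arm disappears and the rare
 * "stamp already updated" case jumps straight to the reservation.
 */
static uint64_t delta_after(int commit_path, uint64_t ts, uint64_t write_stamp)
{
	uint64_t delta = 0;

	if (commit_path) {
		uint64_t diff = ts - write_stamp;

		if (ts < write_stamp)
			goto get_event;

		delta = diff;
		if (delta > TIME_DELTA_MAX)
			delta = 0;	/* real code adds a time extend here */
	}
 get_event:
	return delta;
}

int main(void)
{
	/* Same result either way; only the branch layout differs. */
	printf("before=%llu after=%llu\n",
	       (unsigned long long)delta_before(1, 100, 40),
	       (unsigned long long)delta_after(1, 100, 40));
	return 0;
}

Initializing delta to 0 at its declaration makes zero the default for both the
non-commit path and the rare "write stamp moved underneath us" path, so
neither needs a dedicated branch on the hot path.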
Diffstat (limited to 'kernel/trace/ring_buffer.c')
-rw-r--r--	kernel/trace/ring_buffer.c	20	++++++++++----------
1 file changed, 10 insertions(+), 10 deletions(-)
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index a9e645a5bc10..16b24d49604c 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -1400,7 +1400,7 @@ rb_reserve_next_event(struct ring_buffer_per_cpu *cpu_buffer,
 		      unsigned long length)
 {
 	struct ring_buffer_event *event;
-	u64 ts, delta;
+	u64 ts, delta = 0;
 	int commit = 0;
 	int nr_loops = 0;
 
@@ -1431,20 +1431,21 @@ rb_reserve_next_event(struct ring_buffer_per_cpu *cpu_buffer,
 	if (likely(cpu_buffer->tail_page == cpu_buffer->commit_page &&
 		   rb_page_write(cpu_buffer->tail_page) ==
 		   rb_commit_index(cpu_buffer))) {
+		u64 diff;
 
-		delta = ts - cpu_buffer->write_stamp;
+		diff = ts - cpu_buffer->write_stamp;
 
-		/* make sure this delta is calculated here */
+		/* make sure this diff is calculated here */
 		barrier();
 
 		/* Did the write stamp get updated already? */
 		if (unlikely(ts < cpu_buffer->write_stamp))
-			delta = 0;
+			goto get_event;
 
-		else if (unlikely(test_time_stamp(delta))) {
+		delta = diff;
+		if (unlikely(test_time_stamp(delta))) {
 
 			commit = rb_add_time_stamp(cpu_buffer, &ts, &delta);
-
 			if (commit == -EBUSY)
 				return NULL;
 
@@ -1453,12 +1454,11 @@ rb_reserve_next_event(struct ring_buffer_per_cpu *cpu_buffer,
 
 		RB_WARN_ON(cpu_buffer, commit < 0);
 	}
-	} else
-		/* Non commits have zero deltas */
-		delta = 0;
+	}
 
+ get_event:
 	event = __rb_reserve_next(cpu_buffer, 0, length, &ts);
-	if (PTR_ERR(event) == -EAGAIN)
+	if (unlikely(PTR_ERR(event) == -EAGAIN))
 		goto again;
 
 	if (!event) {
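As background for the annotations in these hunks: likely() and unlikely() are
branch-prediction hints defined in include/linux/compiler.h on top of GCC's
__builtin_expect(), essentially:

#define likely(x)	__builtin_expect(!!(x), 1)
#define unlikely(x)	__builtin_expect(!!(x), 0)

Marking the -EAGAIN retry check unlikely() lets the compiler keep the common
no-retry path as straight-line code, in the same spirit as the branch removal
above.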