author     Steven Rostedt <srostedt@redhat.com>   2009-05-06 10:26:45 -0400
committer  Steven Rostedt <rostedt@goodmis.org>   2009-05-06 12:49:19 -0400
commit     8e7abf1c62941ebb7a1416cbc62392c8a0902625 (patch)
tree       72b62a8ce2cecee2ed0cff586fb8bed10cf0eb2d /kernel/trace/ring_buffer.c
parent     35cf723e99c0e26ddf51f037dffaa4ff2c2c9106 (diff)
ring-buffer: remove unneeded conditional in rb_reserve_next
The code in __rb_reserve_next checks on page overflow whether it is the
original committer and then resets the page back to the original
setting. Although this is fine, and the code is correct, it is a bit
fragile. Some experimental work I did breaks it easily.

The better and more robust solution is to have all committers that
overflow the page simply subtract what they added.

[ Impact: more robust ring buffer accounting management ]

Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
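For readers skimming the patch, here is a minimal user-space sketch of the
accounting idea (an illustration only, not the kernel code: reserve(),
page_write, the BUF_PAGE_SIZE value, and the use of C11 atomic_long in place
of the kernel's per-cpu local_t are all stand-ins), contrasting the old
conditional reset with the new unconditional subtraction:

#include <stdatomic.h>
#include <stdio.h>

#define BUF_PAGE_SIZE 4096	/* illustrative size, not the kernel's */

/* Stand-in for tail_page->write; the kernel uses a per-cpu local_t. */
static atomic_long page_write;

/*
 * Reserve 'length' bytes, mirroring the
 *	write = local_add_return(length, &tail_page->write);
 *	tail  = write - length;
 * step in __rb_reserve_next().  Returns the old offset, or -1 after
 * undoing the reservation if it ran past the end of the page.
 */
static long reserve(long length)
{
	long tail  = atomic_fetch_add(&page_write, length);
	long write = tail + length;

	if (write <= BUF_PAGE_SIZE)
		return tail;		/* event fits on the page */

	/*
	 * Overflow.  The old scheme restored a saved value, but only
	 * for the committer whose tail was still on the page:
	 *
	 *	if (tail <= BUF_PAGE_SIZE)
	 *		local_set(&tail_page->write, tail);
	 *
	 * The new scheme: every overflowing writer subtracts exactly
	 * what it added, with no need to know whether it was the one
	 * that crossed the page boundary.
	 */
	atomic_fetch_sub(&page_write, length);
	return -1;
}

int main(void)
{
	atomic_store(&page_write, 4090);	/* page nearly full */
	printf("reserve(100) = %ld\n", reserve(100));	/* -1: overflow */
	printf("write back to %ld\n", atomic_load(&page_write)); /* 4090 */
	return 0;
}

The design point is that a local_set() of a saved tail is only valid for
the one writer that originally pushed write past the page, while a relative
local_sub() is correct for any writer regardless of how commits nest, which
is what makes the accounting more robust.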
Diffstat (limited to 'kernel/trace/ring_buffer.c')
-rw-r--r--  kernel/trace/ring_buffer.c | 8
1 file changed, 3 insertions(+), 5 deletions(-)
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index 424129eb20a4..03ed52b67db3 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -1290,9 +1290,8 @@ __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
 			rb_event_set_padding(event);
 		}
 
-		if (tail <= BUF_PAGE_SIZE)
-			/* Set the write back to the previous setting */
-			local_set(&tail_page->write, tail);
+		/* Set the write back to the previous setting */
+		local_sub(length, &tail_page->write);
 
 		/*
 		 * If this was a commit entry that failed,
@@ -1311,8 +1310,7 @@ __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
 
  out_reset:
 	/* reset write */
-	if (tail <= BUF_PAGE_SIZE)
-		local_set(&tail_page->write, tail);
+	local_sub(length, &tail_page->write);
 
 	if (likely(lock_taken))
 		__raw_spin_unlock(&cpu_buffer->lock);