aboutsummaryrefslogtreecommitdiffstats
path: root/kernel/trace/ring_buffer.c
diff options
context:
space:
mode:
authorSteven Rostedt <srostedt@redhat.com>2009-05-06 15:30:07 -0400
committerSteven Rostedt <rostedt@goodmis.org>2009-05-06 15:30:07 -0400
commit6634ff26cce2da04e5c2a5481bcb8888e7d01786 (patch)
tree64efca734e6cb10ff7cb19086e8f83b92eb40fd1 /kernel/trace/ring_buffer.c
parent00c81a58c5b4e0de14ee33bfbc3d71c90f69f9ea (diff)
ring-buffer: make moving the tail page a separate function
Ingo Molnar thought the code would be cleaner if we used a function call instead of a goto for moving the tail page. After implementing this, it seems that gcc still inlines the result and the output is pretty much the same. Since this is considered a cleaner approach, might as well implement it. [ Impact: code clean up ] Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
Diffstat (limited to 'kernel/trace/ring_buffer.c')
-rw-r--r--  kernel/trace/ring_buffer.c  89
1 files changed, 49 insertions, 40 deletions
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index 03ed52b67db3..3ae5ccf2c0fc 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -1154,51 +1154,18 @@ static unsigned rb_calculate_event_length(unsigned length)
 	return length;
 }
 
+
 static struct ring_buffer_event *
-__rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
-		  unsigned type, unsigned long length, u64 *ts)
+rb_move_tail(struct ring_buffer_per_cpu *cpu_buffer,
+	     unsigned long length, unsigned long tail,
+	     struct buffer_page *commit_page,
+	     struct buffer_page *tail_page, u64 *ts)
 {
-	struct buffer_page *tail_page, *head_page, *reader_page, *commit_page;
-	struct buffer_page *next_page;
-	unsigned long tail, write;
+	struct buffer_page *next_page, *head_page, *reader_page;
 	struct ring_buffer *buffer = cpu_buffer->buffer;
 	struct ring_buffer_event *event;
-	unsigned long flags;
 	bool lock_taken = false;
-
-	commit_page = cpu_buffer->commit_page;
-	/* we just need to protect against interrupts */
-	barrier();
-	tail_page = cpu_buffer->tail_page;
-	write = local_add_return(length, &tail_page->write);
-	tail = write - length;
-
-	/* See if we shot pass the end of this buffer page */
-	if (write > BUF_PAGE_SIZE)
-		goto next_page;
-
-	/* We reserved something on the buffer */
-
-	if (RB_WARN_ON(cpu_buffer, write > BUF_PAGE_SIZE))
-		return NULL;
-
-	event = __rb_page_index(tail_page, tail);
-	rb_update_event(event, type, length);
-
-	/* The passed in type is zero for DATA */
-	if (likely(!type))
-		local_inc(&tail_page->entries);
-
-	/*
-	 * If this is a commit and the tail is zero, then update
-	 * this page's time stamp.
-	 */
-	if (!tail && rb_is_commit(cpu_buffer, event))
-		cpu_buffer->commit_page->page->time_stamp = *ts;
-
-	return event;
-
- next_page:
+	unsigned long flags;
 
 	next_page = tail_page;
 
@@ -1318,6 +1285,48 @@ __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
 	return NULL;
 }
 
+static struct ring_buffer_event *
+__rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
+		  unsigned type, unsigned long length, u64 *ts)
+{
+	struct buffer_page *tail_page, *commit_page;
+	struct ring_buffer_event *event;
+	unsigned long tail, write;
+
+	commit_page = cpu_buffer->commit_page;
+	/* we just need to protect against interrupts */
+	barrier();
+	tail_page = cpu_buffer->tail_page;
+	write = local_add_return(length, &tail_page->write);
+	tail = write - length;
+
+	/* See if we shot pass the end of this buffer page */
+	if (write > BUF_PAGE_SIZE)
+		return rb_move_tail(cpu_buffer, length, tail,
+				    commit_page, tail_page, ts);
+
+	/* We reserved something on the buffer */
+
+	if (RB_WARN_ON(cpu_buffer, write > BUF_PAGE_SIZE))
+		return NULL;
+
+	event = __rb_page_index(tail_page, tail);
+	rb_update_event(event, type, length);
+
+	/* The passed in type is zero for DATA */
+	if (likely(!type))
+		local_inc(&tail_page->entries);
+
+	/*
+	 * If this is a commit and the tail is zero, then update
+	 * this page's time stamp.
+	 */
+	if (!tail && rb_is_commit(cpu_buffer, event))
+		cpu_buffer->commit_page->page->time_stamp = *ts;
+
+	return event;
+}
+
 static int
 rb_add_time_stamp(struct ring_buffer_per_cpu *cpu_buffer,
 		  u64 *ts, u64 *delta)