author		Steven Rostedt <srostedt@redhat.com>	2009-05-05 21:16:11 -0400
committer	Steven Rostedt <rostedt@goodmis.org>	2009-05-05 21:16:11 -0400
commit		aa20ae8444fc6c318272c643f856d8d8ad3e198d (patch)
tree		662d8f33c284a43a41d5c9e9edfe13238bd3535e /kernel/trace
parent		94487d6d53af5acae10cf9fd52f74498994d46b1 (diff)
ring-buffer: move big if statement down
In the hot path of the ring buffer, "__rb_reserve_next" has a big
if statement whose body does not even return back to the work flow:
	code;

	if (cross to next page) {
		[ lots of code ]
		return;
	}

	more code;
The condition is actually the unlikely path, although we do not annotate it
with unlikely() because gcc handles it fine as it is. The condition is true when
the write crosses a page boundary, and we need to start on a new page.
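For reference, the annotation in question is the kernel's unlikely() macro, a
thin wrapper around GCC's __builtin_expect() hint. A minimal stand-alone sketch
(not the kernel header itself) of how the branch would look if it were annotated:

	/* sketch only: mirrors the kernel's definition of unlikely() */
	#define unlikely(x)	__builtin_expect(!!(x), 0)

	if (unlikely(write > BUF_PAGE_SIZE))
		goto next_page;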
Having this big if statement inline makes the code hard to read, but calling
another function to do the work is not appropriate either, because we use a lot
of variables that were set up before the if statement, and we do not want to
pass them all as parameters.
This patch changes it to a goto:
	code;

	if (cross to next page)
		goto next_page;

	more code;

	return;

next_page:

	[ lots of code ]
This makes the code easier to understand, and a bit more obvious.
The output from gcc is practically identical. For some reason, gcc decided
to use different registers when I switched it to a goto. But other than that,
the logic is the same.
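To illustrate the pattern outside the ring-buffer code, here is a small
stand-alone C sketch (hypothetical names, not the kernel source): the rarely
taken page-crossing work moves behind a goto label so the hot path reads
straight through, while both paths still share the locals computed before the
branch.

	/* Hypothetical sketch of the same refactoring pattern. */
	#define PAGE_SZ 4096

	static int reserve(unsigned long *head, unsigned long len)
	{
		unsigned long write = *head + len;	/* locals shared by both paths */
		unsigned long tail  = write - len;

		if (write > PAGE_SZ)
			goto next_page;		/* rare: record crosses the page */

		*head = write;			/* hot path reads straight through */
		return (int)tail;

	 next_page:
		/* slow path: reuses write/tail/len without passing them around */
		*head = len;
		return 0;
	}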
[ Impact: easier to read code ]
Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
Diffstat (limited to 'kernel/trace')
-rw-r--r--	kernel/trace/ring_buffer.c	218
1 file changed, 111 insertions(+), 107 deletions(-)
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index 7876df00695f..424129eb20a4 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -1159,6 +1159,7 @@ __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
 		  unsigned type, unsigned long length, u64 *ts)
 {
 	struct buffer_page *tail_page, *head_page, *reader_page, *commit_page;
+	struct buffer_page *next_page;
 	unsigned long tail, write;
 	struct ring_buffer *buffer = cpu_buffer->buffer;
 	struct ring_buffer_event *event;
@@ -1173,137 +1174,140 @@ __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
 	tail = write - length;
 
 	/* See if we shot pass the end of this buffer page */
-	if (write > BUF_PAGE_SIZE) {
-		struct buffer_page *next_page = tail_page;
+	if (write > BUF_PAGE_SIZE)
+		goto next_page;
 
-		local_irq_save(flags);
-		/*
-		 * Since the write to the buffer is still not
-		 * fully lockless, we must be careful with NMIs.
-		 * The locks in the writers are taken when a write
-		 * crosses to a new page. The locks protect against
-		 * races with the readers (this will soon be fixed
-		 * with a lockless solution).
-		 *
-		 * Because we can not protect against NMIs, and we
-		 * want to keep traces reentrant, we need to manage
-		 * what happens when we are in an NMI.
-		 *
-		 * NMIs can happen after we take the lock.
-		 * If we are in an NMI, only take the lock
-		 * if it is not already taken. Otherwise
-		 * simply fail.
-		 */
-		if (unlikely(in_nmi())) {
-			if (!__raw_spin_trylock(&cpu_buffer->lock)) {
-				cpu_buffer->nmi_dropped++;
-				goto out_reset;
-			}
-		} else
-			__raw_spin_lock(&cpu_buffer->lock);
-
-		lock_taken = true;
+	/* We reserved something on the buffer */
 
-		rb_inc_page(cpu_buffer, &next_page);
+	if (RB_WARN_ON(cpu_buffer, write > BUF_PAGE_SIZE))
+		return NULL;
 
-		head_page = cpu_buffer->head_page;
-		reader_page = cpu_buffer->reader_page;
+	event = __rb_page_index(tail_page, tail);
+	rb_update_event(event, type, length);
 
-		/* we grabbed the lock before incrementing */
-		if (RB_WARN_ON(cpu_buffer, next_page == reader_page))
-			goto out_reset;
+	/* The passed in type is zero for DATA */
+	if (likely(!type))
+		local_inc(&tail_page->entries);
 
-		/*
-		 * If for some reason, we had an interrupt storm that made
-		 * it all the way around the buffer, bail, and warn
-		 * about it.
-		 */
-		if (unlikely(next_page == commit_page)) {
-			cpu_buffer->commit_overrun++;
-			goto out_reset;
-		}
+	/*
+	 * If this is a commit and the tail is zero, then update
+	 * this page's time stamp.
+	 */
+	if (!tail && rb_is_commit(cpu_buffer, event))
+		cpu_buffer->commit_page->page->time_stamp = *ts;
 
-		if (next_page == head_page) {
-			if (!(buffer->flags & RB_FL_OVERWRITE))
-				goto out_reset;
+	return event;
 
-			/* tail_page has not moved yet? */
-			if (tail_page == cpu_buffer->tail_page) {
-				/* count overflows */
-				cpu_buffer->overrun +=
-					local_read(&head_page->entries);
+ next_page:
 
-				rb_inc_page(cpu_buffer, &head_page);
-				cpu_buffer->head_page = head_page;
-				cpu_buffer->head_page->read = 0;
-			}
-		}
+	next_page = tail_page;
 
-		/*
-		 * If the tail page is still the same as what we think
-		 * it is, then it is up to us to update the tail
-		 * pointer.
-		 */
-		if (tail_page == cpu_buffer->tail_page) {
-			local_set(&next_page->write, 0);
-			local_set(&next_page->entries, 0);
-			local_set(&next_page->page->commit, 0);
-			cpu_buffer->tail_page = next_page;
-
-			/* reread the time stamp */
-			*ts = ring_buffer_time_stamp(buffer, cpu_buffer->cpu);
-			cpu_buffer->tail_page->page->time_stamp = *ts;
+	local_irq_save(flags);
+	/*
+	 * Since the write to the buffer is still not
+	 * fully lockless, we must be careful with NMIs.
+	 * The locks in the writers are taken when a write
+	 * crosses to a new page. The locks protect against
+	 * races with the readers (this will soon be fixed
+	 * with a lockless solution).
+	 *
+	 * Because we can not protect against NMIs, and we
+	 * want to keep traces reentrant, we need to manage
+	 * what happens when we are in an NMI.
+	 *
+	 * NMIs can happen after we take the lock.
+	 * If we are in an NMI, only take the lock
+	 * if it is not already taken. Otherwise
+	 * simply fail.
+	 */
+	if (unlikely(in_nmi())) {
+		if (!__raw_spin_trylock(&cpu_buffer->lock)) {
+			cpu_buffer->nmi_dropped++;
+			goto out_reset;
 		}
+	} else
+		__raw_spin_lock(&cpu_buffer->lock);
 
-		/*
-		 * The actual tail page has moved forward.
-		 */
-		if (tail < BUF_PAGE_SIZE) {
-			/* Mark the rest of the page with padding */
-			event = __rb_page_index(tail_page, tail);
-			rb_event_set_padding(event);
-		}
+	lock_taken = true;
 
-		if (tail <= BUF_PAGE_SIZE)
-			/* Set the write back to the previous setting */
-			local_set(&tail_page->write, tail);
+	rb_inc_page(cpu_buffer, &next_page);
 
-		/*
-		 * If this was a commit entry that failed,
-		 * increment that too
-		 */
-		if (tail_page == cpu_buffer->commit_page &&
-		    tail == rb_commit_index(cpu_buffer)) {
-			rb_set_commit_to_write(cpu_buffer);
-		}
+	head_page = cpu_buffer->head_page;
+	reader_page = cpu_buffer->reader_page;
 
-		__raw_spin_unlock(&cpu_buffer->lock);
-		local_irq_restore(flags);
+	/* we grabbed the lock before incrementing */
+	if (RB_WARN_ON(cpu_buffer, next_page == reader_page))
+		goto out_reset;
 
-		/* fail and let the caller try again */
-		return ERR_PTR(-EAGAIN);
+	/*
+	 * If for some reason, we had an interrupt storm that made
+	 * it all the way around the buffer, bail, and warn
+	 * about it.
+	 */
+	if (unlikely(next_page == commit_page)) {
+		cpu_buffer->commit_overrun++;
+		goto out_reset;
 	}
 
-	/* We reserved something on the buffer */
+	if (next_page == head_page) {
+		if (!(buffer->flags & RB_FL_OVERWRITE))
+			goto out_reset;
 
-	if (RB_WARN_ON(cpu_buffer, write > BUF_PAGE_SIZE))
-		return NULL;
+		/* tail_page has not moved yet? */
+		if (tail_page == cpu_buffer->tail_page) {
+			/* count overflows */
+			cpu_buffer->overrun +=
+				local_read(&head_page->entries);
 
-	event = __rb_page_index(tail_page, tail);
-	rb_update_event(event, type, length);
+			rb_inc_page(cpu_buffer, &head_page);
+			cpu_buffer->head_page = head_page;
+			cpu_buffer->head_page->read = 0;
+		}
+	}
 
-	/* The passed in type is zero for DATA */
-	if (likely(!type))
-		local_inc(&tail_page->entries);
+	/*
+	 * If the tail page is still the same as what we think
+	 * it is, then it is up to us to update the tail
+	 * pointer.
+	 */
+	if (tail_page == cpu_buffer->tail_page) {
+		local_set(&next_page->write, 0);
+		local_set(&next_page->entries, 0);
+		local_set(&next_page->page->commit, 0);
+		cpu_buffer->tail_page = next_page;
+
+		/* reread the time stamp */
+		*ts = ring_buffer_time_stamp(buffer, cpu_buffer->cpu);
+		cpu_buffer->tail_page->page->time_stamp = *ts;
+	}
 
 	/*
-	 * If this is a commit and the tail is zero, then update
-	 * this page's time stamp.
+	 * The actual tail page has moved forward.
 	 */
-	if (!tail && rb_is_commit(cpu_buffer, event))
-		cpu_buffer->commit_page->page->time_stamp = *ts;
+	if (tail < BUF_PAGE_SIZE) {
+		/* Mark the rest of the page with padding */
+		event = __rb_page_index(tail_page, tail);
+		rb_event_set_padding(event);
+	}
 
-	return event;
+	if (tail <= BUF_PAGE_SIZE)
+		/* Set the write back to the previous setting */
+		local_set(&tail_page->write, tail);
+
+	/*
+	 * If this was a commit entry that failed,
+	 * increment that too
+	 */
+	if (tail_page == cpu_buffer->commit_page &&
+	    tail == rb_commit_index(cpu_buffer)) {
+		rb_set_commit_to_write(cpu_buffer);
+	}
+
+	__raw_spin_unlock(&cpu_buffer->lock);
+	local_irq_restore(flags);
+
+	/* fail and let the caller try again */
+	return ERR_PTR(-EAGAIN);
 
  out_reset:
 	/* reset write */