author	Steven Rostedt <srostedt@redhat.com>	2009-06-11 11:12:00 -0400
committer	Steven Rostedt <rostedt@goodmis.org>	2009-06-15 11:37:19 -0400
commit	c7b0930857e2278f2e7714db6294e94c57f623b0 (patch)
tree	b43f113aa02063362d2e0f984d29c78afae87a02 /kernel/trace/ring_buffer.c
parent	0ac2058f686a19fe8ab25c4f3104fc1580dce7cf (diff)
ring-buffer: prevent adding write in discarded area
This is a very tight race where an interrupt could come in and not
have enough data to put into the end of a buffer page, so that
it would fail the write and need to go to the next page.

But if this happened when another writer was about to reserve
its data, and that writer had smaller data to reserve, then
it could succeed even though the interrupt moved the tail page.

To prevent that, if we fail to store data, and after subtracting the
amount we reserved we still have room for smaller data, we need
to fill that space with "discarded" data.
[ Impact: prevent race where buffer data may be lost ]
Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
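
To make the arithmetic concrete, here is a minimal user-space sketch of
the decision the new rb_reset_tail() makes, under assumed sizes (a
4096-byte BUF_PAGE_SIZE and a 4-byte event header; the real code
operates on a struct buffer_page with local_t counters): a writer at
offset 4080 reserving 32 bytes overshoots the page, and rather than
handing the 16-byte tail back, the write counter is parked at the page
boundary behind a discarded padding event.

/*
 * Sketch only: illustrative values, not the kernel implementation.
 * A writer reserves space by adding its length to the page's write
 * counter; if the result crosses the page boundary, the reservation
 * failed and must be backed out.  Simply subtracting the length would
 * reopen the tail of the page to a smaller writer, so the fix leaves
 * write parked at the boundary and covers the gap with padding.
 */
#include <stdio.h>

#define BUF_PAGE_SIZE	 4096UL	/* assumed page payload size */
#define RB_EVNT_HDR_SIZE    4UL	/* assumed event header size */
#define RB_EVNT_MIN_SIZE    8UL	/* two 32bit words */

int main(void)
{
	unsigned long tail = 4080;	/* page offset before the reserve */
	unsigned long length = 32;	/* size the interrupt tried to reserve */
	unsigned long write = tail + length;	/* the failed add_return */

	if (write <= BUF_PAGE_SIZE)
		return 0;		/* fits; nothing to back out */

	if (tail > BUF_PAGE_SIZE - RB_EVNT_MIN_SIZE) {
		/* no event can fit in the gap: a full reset is safe */
		write -= length;
	} else {
		/* pad bytes [tail, BUF_PAGE_SIZE) with a discarded event */
		unsigned long pad = (BUF_PAGE_SIZE - tail) - RB_EVNT_HDR_SIZE;
		/* ... event->array[0] = pad; type_len = PADDING ... */
		write -= (tail + length) - BUF_PAGE_SIZE;
		printf("padding payload: %lu bytes\n", pad);
	}

	/* write now sits at the boundary: no smaller writer can slip in */
	printf("write = %lu (BUF_PAGE_SIZE = %lu)\n", write, BUF_PAGE_SIZE);
	return 0;
}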
Diffstat (limited to 'kernel/trace/ring_buffer.c')
-rw-r--r--	kernel/trace/ring_buffer.c	68
1 file changed, 56 insertions, 12 deletions
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index 9c31c9f6b93f..dbc0f93396aa 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -205,6 +205,7 @@ EXPORT_SYMBOL_GPL(tracing_is_on);
 #define RB_EVNT_HDR_SIZE	(offsetof(struct ring_buffer_event, array))
 #define RB_ALIGNMENT		4U
 #define RB_MAX_SMALL_DATA	(RB_ALIGNMENT * RINGBUF_TYPE_DATA_TYPE_LEN_MAX)
+#define RB_EVNT_MIN_SIZE	8U	/* two 32bit words */
 
 /* define RINGBUF_TYPE_DATA for 'case RINGBUF_TYPE_DATA:' */
 #define RINGBUF_TYPE_DATA 0 ... RINGBUF_TYPE_DATA_TYPE_LEN_MAX
@@ -1170,6 +1171,59 @@ static unsigned rb_calculate_event_length(unsigned length)
 	return length;
 }
 
+static inline void
+rb_reset_tail(struct ring_buffer_per_cpu *cpu_buffer,
+	      struct buffer_page *tail_page,
+	      unsigned long tail, unsigned long length)
+{
+	struct ring_buffer_event *event;
+
+	/*
+	 * Only the event that crossed the page boundary
+	 * must fill the old tail_page with padding.
+	 */
+	if (tail >= BUF_PAGE_SIZE) {
+		local_sub(length, &tail_page->write);
+		return;
+	}
+
+	event = __rb_page_index(tail_page, tail);
+
+	/*
+	 * If this event is bigger than the minimum size, then
+	 * we need to be careful that we don't subtract the
+	 * write counter enough to allow another writer to slip
+	 * in on this page.
+	 * We put in a discarded commit instead, to make sure
+	 * that this space is not used again.
+	 *
+	 * If we are less than the minimum size, we don't need to
+	 * worry about it.
+	 */
+	if (tail > (BUF_PAGE_SIZE - RB_EVNT_MIN_SIZE)) {
+		/* No room for any events */
+
+		/* Mark the rest of the page with padding */
+		rb_event_set_padding(event);
+
+		/* Set the write back to the previous setting */
+		local_sub(length, &tail_page->write);
+		return;
+	}
+
+	/* Put in a discarded event */
+	event->array[0] = (BUF_PAGE_SIZE - tail) - RB_EVNT_HDR_SIZE;
+	event->type_len = RINGBUF_TYPE_PADDING;
+	/* time delta must be non zero */
+	event->time_delta = 1;
+	/* Account for this as an entry */
+	local_inc(&tail_page->entries);
+	local_inc(&cpu_buffer->entries);
+
+	/* Set write to end of buffer */
+	length = (tail + length) - BUF_PAGE_SIZE;
+	local_sub(length, &tail_page->write);
+}
 
 static struct ring_buffer_event *
 rb_move_tail(struct ring_buffer_per_cpu *cpu_buffer,
@@ -1264,17 +1318,7 @@ rb_move_tail(struct ring_buffer_per_cpu *cpu_buffer,
 		cpu_buffer->tail_page->page->time_stamp = *ts;
 	}
 
-	/*
-	 * The actual tail page has moved forward.
-	 */
-	if (tail < BUF_PAGE_SIZE) {
-		/* Mark the rest of the page with padding */
-		event = __rb_page_index(tail_page, tail);
-		rb_event_set_padding(event);
-	}
-
-	/* Set the write back to the previous setting */
-	local_sub(length, &tail_page->write);
+	rb_reset_tail(cpu_buffer, tail_page, tail, length);
 
 	/*
 	 * If this was a commit entry that failed,
@@ -1293,7 +1337,7 @@ rb_move_tail(struct ring_buffer_per_cpu *cpu_buffer,
 
  out_reset:
 	/* reset write */
-	local_sub(length, &tail_page->write);
+	rb_reset_tail(cpu_buffer, tail_page, tail, length);
 
 	if (likely(lock_taken))
 		__raw_spin_unlock(&cpu_buffer->lock);
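
And this is why the parked write counter is enough to close the race: a
simplified user-space model of the reserve fast path (try_reserve() is a
stand-in name here; the kernel uses local_add_return() on
tail_page->write, modeled below with a GCC __atomic builtin). Any writer
that races in after rb_reset_tail() overshoots BUF_PAGE_SIZE and is
pushed to the move-tail slow path instead of landing in the discarded
area.

/*
 * Sketch only: a user-space model, not the kernel's reserve path.
 * After rb_reset_tail() leaves write at BUF_PAGE_SIZE, every racing
 * reservation on this page fails, so the padded tail is never reused.
 */
#include <stdio.h>

#define BUF_PAGE_SIZE 4096UL

static unsigned long page_write;	/* stands in for tail_page->write */

/* returns the offset to write at, or -1 if the page is full */
static long try_reserve(unsigned long length)
{
	unsigned long write =
		__atomic_add_fetch(&page_write, length, __ATOMIC_RELAXED);

	if (write > BUF_PAGE_SIZE)
		return -1;	/* crossed into (or past) the padded tail */
	return (long)(write - length);
}

int main(void)
{
	page_write = BUF_PAGE_SIZE;	/* state rb_reset_tail() leaves behind */

	/* even a minimal event now fails to reserve on this page */
	printf("reserve 8 bytes: %ld\n", try_reserve(8));	/* -> -1 */
	return 0;
}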