author     Steven Rostedt <srostedt@redhat.com>   2011-07-14 16:36:53 -0400
committer  Steven Rostedt <rostedt@goodmis.org>   2011-07-14 16:36:53 -0400
commit     4a9bd3f134decd6d16ead8d288342d57aad486be
tree       ca9eca5fcbe93dc8a433d839221505ad8fba5296
parent     259032bfe379281bf7cba512b7705bdb4ce41db5
tracing: Have dynamic size event stack traces
Currently the stack trace per event in ftrace is only 8 frames.
This can be quite limiting and sometimes useless, especially when
the "ignore frames" count is wrong and we also use up stack frames
for the event processing itself.

Change this to be dynamic by adding a percpu buffer that we can
write a large stack trace into and then copy into the ring buffer.

Interrupts and NMIs that come in while another event is being
processed will only get to use the 8-frame stack. That should be
enough, as the task they interrupted will have the full stack
trace anyway.
Requested-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
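For readers unfamiliar with the trick, below is a minimal, stand-alone C sketch of the reservation scheme described above: a per-CPU nesting counter guarded only by compiler barriers decides whether the current caller may use the one large scratch buffer or must fall back to the small fixed-size path. All names here (record_stack, big_buf, reserve, compiler_barrier) are illustrative and not taken from the kernel, which instead uses the per-CPU ftrace_stack/ftrace_stack_reserve variables, preempt_disable_notrace() and barrier().

#include <stdio.h>
#include <string.h>

#define BIG_ENTRIES   512       /* stands in for FTRACE_STACK_MAX_ENTRIES */
#define SMALL_ENTRIES   8       /* stands in for FTRACE_STACK_ENTRIES */

/* One big scratch buffer and a nesting counter per "CPU". */
static unsigned long big_buf[BIG_ENTRIES];
static int reserve;

/* Keep the compiler from reordering accesses around the counter. */
#define compiler_barrier() __asm__ __volatile__("" ::: "memory")

static void record_stack(void)
{
        unsigned long small_buf[SMALL_ENTRIES];
        unsigned long *buf;
        int max;
        int nesting;

        /* The outermost caller on this CPU claims the big buffer. */
        nesting = ++reserve;
        compiler_barrier();

        if (nesting == 1) {
                buf = big_buf;
                max = BIG_ENTRIES;
        } else {
                /* Nested (interrupt/NMI) callers take the small path;
                 * the real code writes those directly into the
                 * fixed-size ring-buffer entry instead. */
                buf = small_buf;
                max = SMALL_ENTRIES;
        }

        /* ... save up to 'max' return addresses into buf, then copy
         * the used portion into the ring buffer, as the patch does ... */
        memset(buf, 0, max * sizeof(buf[0]));
        printf("using %s buffer (%d entries)\n",
               nesting == 1 ? "big" : "small", max);

        compiler_barrier();
        reserve--;
}

int main(void)
{
        record_stack();
        return 0;
}

The key point, mirrored from the patch, is that no atomics are needed: an interrupt or NMI that nests inside record_stack() increments and decrements the counter symmetrically before the outer caller ever looks at it again.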
-rw-r--r--  include/linux/ftrace_event.h |  1
-rw-r--r--  kernel/trace/trace.c         | 92
-rw-r--r--  kernel/trace/trace_entries.h |  3
-rw-r--r--  kernel/trace/trace_output.c  | 11

4 files changed, 88 insertions(+), 19 deletions(-)
diff --git a/include/linux/ftrace_event.h b/include/linux/ftrace_event.h
index b1e69eefc203..96efa6794ea5 100644
--- a/include/linux/ftrace_event.h
+++ b/include/linux/ftrace_event.h
@@ -76,6 +76,7 @@ struct trace_iterator {
         struct trace_entry      *ent;
         unsigned long           lost_events;
         int                     leftover;
+        int                     ent_size;
         int                     cpu;
         u64                     ts;
 
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index d9c16123f6e2..e5df02c69b1d 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -1248,6 +1248,15 @@ ftrace(struct trace_array *tr, struct trace_array_cpu *data,
 }
 
 #ifdef CONFIG_STACKTRACE
+
+#define FTRACE_STACK_MAX_ENTRIES (PAGE_SIZE / sizeof(unsigned long))
+struct ftrace_stack {
+        unsigned long           calls[FTRACE_STACK_MAX_ENTRIES];
+};
+
+static DEFINE_PER_CPU(struct ftrace_stack, ftrace_stack);
+static DEFINE_PER_CPU(int, ftrace_stack_reserve);
+
 static void __ftrace_trace_stack(struct ring_buffer *buffer,
                                  unsigned long flags,
                                  int skip, int pc, struct pt_regs *regs)
@@ -1256,25 +1265,77 @@ static void __ftrace_trace_stack(struct ring_buffer *buffer,
         struct ring_buffer_event *event;
         struct stack_entry *entry;
         struct stack_trace trace;
+        int use_stack;
+        int size = FTRACE_STACK_ENTRIES;
+
+        trace.nr_entries        = 0;
+        trace.skip              = skip;
+
+        /*
+         * Since events can happen in NMIs there's no safe way to
+         * use the per cpu ftrace_stacks. We reserve it and if an interrupt
+         * or NMI comes in, it will just have to use the default
+         * FTRACE_STACK_SIZE.
+         */
+        preempt_disable_notrace();
+
+        use_stack = ++__get_cpu_var(ftrace_stack_reserve);
+        /*
+         * We don't need any atomic variables, just a barrier.
+         * If an interrupt comes in, we don't care, because it would
+         * have exited and put the counter back to what we want.
+         * We just need a barrier to keep gcc from moving things
+         * around.
+         */
+        barrier();
+        if (use_stack == 1) {
+                trace.entries           = &__get_cpu_var(ftrace_stack).calls[0];
+                trace.max_entries       = FTRACE_STACK_MAX_ENTRIES;
+
+                if (regs)
+                        save_stack_trace_regs(regs, &trace);
+                else
+                        save_stack_trace(&trace);
+
+                if (trace.nr_entries > size)
+                        size = trace.nr_entries;
+        } else
+                /* From now on, use_stack is a boolean */
+                use_stack = 0;
+
+        size *= sizeof(unsigned long);
 
         event = trace_buffer_lock_reserve(buffer, TRACE_STACK,
-                                          sizeof(*entry), flags, pc);
+                                          sizeof(*entry) + size, flags, pc);
         if (!event)
-                return;
+                goto out;
         entry = ring_buffer_event_data(event);
-        memset(&entry->caller, 0, sizeof(entry->caller));
 
-        trace.nr_entries        = 0;
-        trace.max_entries       = FTRACE_STACK_ENTRIES;
-        trace.skip              = skip;
-        trace.entries           = entry->caller;
+        memset(&entry->caller, 0, size);
+
+        if (use_stack)
+                memcpy(&entry->caller, trace.entries,
+                       trace.nr_entries * sizeof(unsigned long));
+        else {
+                trace.max_entries       = FTRACE_STACK_ENTRIES;
+                trace.entries           = entry->caller;
+                if (regs)
+                        save_stack_trace_regs(regs, &trace);
+                else
+                        save_stack_trace(&trace);
+        }
+
+        entry->size = trace.nr_entries;
 
-        if (regs)
-                save_stack_trace_regs(regs, &trace);
-        else
-                save_stack_trace(&trace);
         if (!filter_check_discard(call, entry, buffer, event))
                 ring_buffer_unlock_commit(buffer, event);
+
+ out:
+        /* Again, don't let gcc optimize things here */
+        barrier();
+        __get_cpu_var(ftrace_stack_reserve)--;
+        preempt_enable_notrace();
+
 }
 
 void ftrace_trace_stack_regs(struct ring_buffer *buffer, unsigned long flags,
@@ -1562,7 +1623,12 @@ peek_next_entry(struct trace_iterator *iter, int cpu, u64 *ts,
 
         ftrace_enable_cpu();
 
-        return event ? ring_buffer_event_data(event) : NULL;
+        if (event) {
+                iter->ent_size = ring_buffer_event_length(event);
+                return ring_buffer_event_data(event);
+        }
+        iter->ent_size = 0;
+        return NULL;
 }
 
 static struct trace_entry *
diff --git a/kernel/trace/trace_entries.h b/kernel/trace/trace_entries.h
index e32744c84d94..93365907f219 100644
--- a/kernel/trace/trace_entries.h
+++ b/kernel/trace/trace_entries.h
@@ -161,7 +161,8 @@ FTRACE_ENTRY(kernel_stack, stack_entry,
         TRACE_STACK,
 
         F_STRUCT(
-                __array( unsigned long, caller, FTRACE_STACK_ENTRIES )
+                __field( int, size )
+                __dynamic_array(unsigned long, caller )
         ),
 
         F_printk("\t=> (%08lx)\n\t=> (%08lx)\n\t=> (%08lx)\n\t=> (%08lx)\n"
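The F_STRUCT()/FTRACE_ENTRY() macros generate the actual C type, so the struct below is only a hand-written approximation of the record layout after this change; it is not the generated code and it omits the common struct trace_entry header that precedes every event.

/* Hand-written approximation of the new stack_entry payload layout
 * (illustrative only; the real type is generated by FTRACE_ENTRY()). */
struct stack_entry_approx {
        int             size;           /* number of saved frames */
        unsigned long   caller[];       /* flexible array, sized per event */
};

This is why __ftrace_trace_stack() above reserves sizeof(*entry) + size bytes instead of a fixed-size record, and why the printer needs iter->ent_size to know where caller[] ends.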
diff --git a/kernel/trace/trace_output.c b/kernel/trace/trace_output.c
index e37de492a9e1..51999309a6cf 100644
--- a/kernel/trace/trace_output.c
+++ b/kernel/trace/trace_output.c
@@ -1107,19 +1107,20 @@ static enum print_line_t trace_stack_print(struct trace_iterator *iter,
 {
         struct stack_entry *field;
         struct trace_seq *s = &iter->seq;
-        int i;
+        unsigned long *p;
+        unsigned long *end;
 
         trace_assign_type(field, iter->ent);
+        end = (unsigned long *)((long)iter->ent + iter->ent_size);
 
         if (!trace_seq_puts(s, "<stack trace>\n"))
                 goto partial;
-        for (i = 0; i < FTRACE_STACK_ENTRIES; i++) {
-                if (!field->caller[i] || (field->caller[i] == ULONG_MAX))
-                        break;
+
+        for (p = field->caller; p && *p != ULONG_MAX && p < end; p++) {
                 if (!trace_seq_puts(s, " => "))
                         goto partial;
 
-                if (!seq_print_ip_sym(s, field->caller[i], flags))
+                if (!seq_print_ip_sym(s, *p, flags))
                         goto partial;
                 if (!trace_seq_puts(s, "\n"))
                         goto partial;
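As a stand-alone illustration of the walk trace_stack_print() now performs, the sketch below computes the end pointer from the record's total byte length and stops at either the record end or a ULONG_MAX terminator. The names (fake_stack_record, print_callers) are made up for the example, and the bounds check is done before the dereference, slightly more defensively than the kernel loop.

#include <limits.h>
#include <stddef.h>
#include <stdio.h>

/* Made-up stand-in for the variable-size stack_entry payload. */
struct fake_stack_record {
        int             size;
        unsigned long   caller[16];
};

static void print_callers(const void *ent, int ent_size)
{
        const struct fake_stack_record *field = ent;
        const unsigned long *end =
                (const unsigned long *)((const char *)ent + ent_size);
        const unsigned long *p;

        /* Walk caller[] until the record ends or a terminator is hit. */
        for (p = field->caller; p < end && *p && *p != ULONG_MAX; p++)
                printf(" => %#lx\n", *p);
}

int main(void)
{
        struct fake_stack_record rec = {
                .size   = 3,
                .caller = { 0x1000, 0x2000, 0x3000, ULONG_MAX },
        };

        /* Pretend only the header plus four entries were written. */
        print_callers(&rec, offsetof(struct fake_stack_record, caller) +
                            4 * sizeof(unsigned long));
        return 0;
}

In the kernel, end is derived from iter->ent_size, which peek_next_entry() fills in from ring_buffer_event_length() as shown in the trace.c hunk above.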