Diffstat (limited to 'kernel/trace/trace.c')
-rw-r--r--	kernel/trace/trace.c	92
1 file changed, 79 insertions(+), 13 deletions(-)
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index d9c16123f6e2..e5df02c69b1d 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -1248,6 +1248,15 @@ ftrace(struct trace_array *tr, struct trace_array_cpu *data,
 }
 
 #ifdef CONFIG_STACKTRACE
+
+#define FTRACE_STACK_MAX_ENTRIES (PAGE_SIZE / sizeof(unsigned long))
+struct ftrace_stack {
+	unsigned long		calls[FTRACE_STACK_MAX_ENTRIES];
+};
+
+static DEFINE_PER_CPU(struct ftrace_stack, ftrace_stack);
+static DEFINE_PER_CPU(int, ftrace_stack_reserve);
+
 static void __ftrace_trace_stack(struct ring_buffer *buffer,
 				 unsigned long flags,
 				 int skip, int pc, struct pt_regs *regs)
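For scale: the new per-CPU scratch buffer spans one page, so it holds PAGE_SIZE / sizeof(unsigned long) return addresses (512 on a 64-bit machine with 4 KB pages) instead of the small fixed FTRACE_STACK_ENTRIES limit the old code wrote directly into each event. A minimal user-space sketch of that arithmetic follows; the PAGE_SIZE and FTRACE_STACK_ENTRIES values are hard-coded assumptions here, not taken from kernel headers.

/* Sizing sketch only; assumes 4 KB pages, 8-byte unsigned long, and a
 * fixed per-event limit of 8 as a stand-in for FTRACE_STACK_ENTRIES. */
#include <stdio.h>

#define PAGE_SIZE                4096UL  /* assumption */
#define FTRACE_STACK_ENTRIES     8       /* assumption: old fixed limit */
#define FTRACE_STACK_MAX_ENTRIES (PAGE_SIZE / sizeof(unsigned long))

int main(void)
{
	printf("per-cpu scratch entries: %lu\n",
	       (unsigned long)FTRACE_STACK_MAX_ENTRIES);
	printf("old fixed limit        : %d entries\n", FTRACE_STACK_ENTRIES);
	printf("stack payload per event: %lu to %lu bytes\n",
	       (unsigned long)(FTRACE_STACK_ENTRIES * sizeof(unsigned long)),
	       (unsigned long)(FTRACE_STACK_MAX_ENTRIES * sizeof(unsigned long)));
	return 0;
}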
@@ -1256,25 +1265,77 @@ static void __ftrace_trace_stack(struct ring_buffer *buffer,
 	struct ring_buffer_event *event;
 	struct stack_entry *entry;
 	struct stack_trace trace;
+	int use_stack;
+	int size = FTRACE_STACK_ENTRIES;
+
+	trace.nr_entries	= 0;
+	trace.skip		= skip;
+
+	/*
+	 * Since events can happen in NMIs there's no safe way to
+	 * use the per cpu ftrace_stacks. We reserve it and if an interrupt
+	 * or NMI comes in, it will just have to use the default
+	 * FTRACE_STACK_SIZE.
+	 */
+	preempt_disable_notrace();
+
+	use_stack = ++__get_cpu_var(ftrace_stack_reserve);
+	/*
+	 * We don't need any atomic variables, just a barrier.
+	 * If an interrupt comes in, we don't care, because it would
+	 * have exited and put the counter back to what we want.
+	 * We just need a barrier to keep gcc from moving things
+	 * around.
+	 */
+	barrier();
+	if (use_stack == 1) {
+		trace.entries		= &__get_cpu_var(ftrace_stack).calls[0];
+		trace.max_entries	= FTRACE_STACK_MAX_ENTRIES;
+
+		if (regs)
+			save_stack_trace_regs(regs, &trace);
+		else
+			save_stack_trace(&trace);
+
+		if (trace.nr_entries > size)
+			size = trace.nr_entries;
+	} else
+		/* From now on, use_stack is a boolean */
+		use_stack = 0;
+
+	size *= sizeof(unsigned long);
 
 	event = trace_buffer_lock_reserve(buffer, TRACE_STACK,
-					  sizeof(*entry), flags, pc);
+					  sizeof(*entry) + size, flags, pc);
 	if (!event)
-		return;
+		goto out;
 	entry = ring_buffer_event_data(event);
-	memset(&entry->caller, 0, sizeof(entry->caller));
 
-	trace.nr_entries	= 0;
-	trace.max_entries	= FTRACE_STACK_ENTRIES;
-	trace.skip		= skip;
-	trace.entries		= entry->caller;
+	memset(&entry->caller, 0, size);
+
+	if (use_stack)
+		memcpy(&entry->caller, trace.entries,
+		       trace.nr_entries * sizeof(unsigned long));
+	else {
+		trace.max_entries	= FTRACE_STACK_ENTRIES;
+		trace.entries		= entry->caller;
+		if (regs)
+			save_stack_trace_regs(regs, &trace);
+		else
+			save_stack_trace(&trace);
+	}
+
+	entry->size = trace.nr_entries;
 
-	if (regs)
-		save_stack_trace_regs(regs, &trace);
-	else
-		save_stack_trace(&trace);
 	if (!filter_check_discard(call, entry, buffer, event))
 		ring_buffer_unlock_commit(buffer, event);
+
+ out:
+	/* Again, don't let gcc optimize things here */
+	barrier();
+	__get_cpu_var(ftrace_stack_reserve)--;
+	preempt_enable_notrace();
+
 }
 
 void ftrace_trace_stack_regs(struct ring_buffer *buffer, unsigned long flags,
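The reserve/release logic above is a reusable pattern: bump a per-CPU nesting counter with preemption disabled, let only the outermost caller use the large per-CPU scratch page, and make nested interrupt or NMI callers fall back to the small fixed-size path; a compiler barrier is enough because any nested user restores the counter before returning. The following is a stripped-down, single-threaded user-space sketch of that pattern, not kernel code; record_stack, stack_reserve, big_scratch and capture are hypothetical names.

/* User-space sketch of the reserve/fallback pattern shown above; not kernel code. */
#include <stdio.h>

#define SMALL_ENTRIES 8
#define BIG_ENTRIES   512

static unsigned long big_scratch[BIG_ENTRIES]; /* stand-in for the per-CPU page */
static int stack_reserve;                      /* stand-in for ftrace_stack_reserve */

/* Keep the compiler from reordering around the counter, like barrier(). */
#define barrier() __asm__ __volatile__("" ::: "memory")

/* Pretend to capture a call stack: fill up to 'max' slots with fake addresses. */
static int capture(unsigned long *buf, int max, int depth)
{
	int n = depth < max ? depth : max;
	for (int i = 0; i < n; i++)
		buf[i] = 0x1000 + (unsigned long)i;
	return n;
}

static void record_stack(int depth)
{
	unsigned long small[SMALL_ENTRIES];
	unsigned long *entries;
	int max, nr, use_big;

	/* Only the outermost caller gets the big buffer; nested callers fall back. */
	use_big = ++stack_reserve == 1;
	barrier();

	if (use_big) {
		entries = big_scratch;
		max = BIG_ENTRIES;
	} else {
		entries = small;
		max = SMALL_ENTRIES;
	}

	nr = capture(entries, max, depth);
	printf("captured %d of %d frames (big buffer: %s)\n",
	       nr, depth, use_big ? "yes" : "no");

	barrier();
	stack_reserve--;
}

int main(void)
{
	record_stack(40);     /* outermost call: all 40 frames fit */

	stack_reserve = 1;    /* simulate running nested inside another capture */
	record_stack(40);     /* nested call: truncated to the small buffer */
	stack_reserve = 0;

	return 0;
}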
@@ -1562,7 +1623,12 @@ peek_next_entry(struct trace_iterator *iter, int cpu, u64 *ts,
 
 	ftrace_enable_cpu();
 
-	return event ? ring_buffer_event_data(event) : NULL;
+	if (event) {
+		iter->ent_size = ring_buffer_event_length(event);
+		return ring_buffer_event_data(event);
+	}
+	iter->ent_size = 0;
+	return NULL;
 }
 
 static struct trace_entry *
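The final hunk makes peek_next_entry() record the ring buffer event's length in iter->ent_size, since a stack entry no longer has a fixed-size caller array and later output code needs to know how much payload actually follows the header. A hedged user-space illustration of that reader-side bookkeeping, using a made-up fake_stack_entry layout rather than the kernel's struct stack_entry:

/*
 * Hypothetical reader-side arithmetic: given the payload length stored in
 * iter->ent_size, how many saved addresses does a variable-sized entry hold?
 * fake_stack_entry is illustrative only, not the kernel's struct stack_entry.
 */
#include <stdio.h>
#include <stddef.h>

struct fake_stack_entry {
	int           size;      /* number of addresses recorded */
	unsigned long caller[];  /* variable-length tail of return addresses */
};

static int addrs_in_event(size_t ent_size)
{
	if (ent_size < offsetof(struct fake_stack_entry, caller))
		return 0;
	return (int)((ent_size - offsetof(struct fake_stack_entry, caller))
		     / sizeof(unsigned long));
}

int main(void)
{
	/* e.g. a header plus 12 saved return addresses */
	size_t ent_size = offsetof(struct fake_stack_entry, caller)
			  + 12 * sizeof(unsigned long);

	printf("event payload of %zu bytes holds %d addresses\n",
	       ent_size, addrs_in_event(ent_size));
	return 0;
}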