Diffstat (limited to 'kernel/trace/trace.c'):
 kernel/trace/trace.c | 103
 1 file changed, 85 insertions(+), 18 deletions(-)
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 192c1354a7e0..b4b1b4fe99fd 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -28,6 +28,8 @@
 #include <linux/gfp.h>
 #include <linux/fs.h>
 
+#include <linux/stacktrace.h>
+
 #include "trace.h"
 
 unsigned long __read_mostly tracing_max_latency = (cycle_t)ULONG_MAX;
@@ -88,6 +90,7 @@ enum trace_type {
         TRACE_FN,
         TRACE_CTX,
         TRACE_WAKE,
+        TRACE_STACK,
         TRACE_SPECIAL,
 
         __TRACE_LAST_TYPE
@@ -109,6 +112,7 @@ enum trace_iterator_flags {
         TRACE_ITER_HEX          = 0x20,
         TRACE_ITER_BIN          = 0x40,
         TRACE_ITER_BLOCK        = 0x80,
+        TRACE_ITER_STACKTRACE   = 0x100,
 };
 
 #define TRACE_ITER_SYM_MASK \
@@ -124,10 +128,11 @@ static const char *trace_options[] = {
         "hex",
         "bin",
         "block",
+        "stacktrace",
         NULL
 };
 
-static unsigned trace_flags;
+static unsigned trace_flags = TRACE_ITER_PRINT_PARENT | TRACE_ITER_STACKTRACE;
 
 static DEFINE_SPINLOCK(ftrace_max_lock);
 
@@ -657,7 +662,7 @@ trace_function(struct trace_array *tr, struct trace_array_cpu *data,
         spin_unlock_irqrestore(&data->lock, irq_flags);
 
         if (!(trace_flags & TRACE_ITER_BLOCK))
-                wake_up (&trace_wait);
+                wake_up(&trace_wait);
 }
 
 void
@@ -685,13 +690,39 @@ trace_special(struct trace_array *tr, struct trace_array_cpu *data,
         spin_unlock_irqrestore(&data->lock, irq_flags);
 
         if (!(trace_flags & TRACE_ITER_BLOCK))
-                wake_up (&trace_wait);
+                wake_up(&trace_wait);
+}
+
+void __trace_stack(struct trace_array *tr,
+                   struct trace_array_cpu *data,
+                   unsigned long flags,
+                   int skip)
+{
+        struct trace_entry *entry;
+        struct stack_trace trace;
+
+        if (!(trace_flags & TRACE_ITER_STACKTRACE))
+                return;
+
+        entry = tracing_get_trace_entry(tr, data);
+        tracing_generic_entry_update(entry, flags);
+        entry->type = TRACE_STACK;
+
+        memset(&entry->stack, 0, sizeof(entry->stack));
+
+        trace.nr_entries        = 0;
+        trace.max_entries       = FTRACE_STACK_ENTRIES;
+        trace.skip              = skip;
+        trace.entries           = entry->stack.caller;
+
+        save_stack_trace(&trace);
 }
 
 void
 tracing_sched_switch_trace(struct trace_array *tr,
                            struct trace_array_cpu *data,
-                           struct task_struct *prev, struct task_struct *next,
+                           struct task_struct *prev,
+                           struct task_struct *next,
                            unsigned long flags)
 {
         struct trace_entry *entry;
@@ -706,16 +737,18 @@ tracing_sched_switch_trace(struct trace_array *tr,
         entry->ctx.prev_state   = prev->state;
         entry->ctx.next_pid     = next->pid;
         entry->ctx.next_prio    = next->prio;
+        __trace_stack(tr, data, flags, 4);
         spin_unlock_irqrestore(&data->lock, irq_flags);
 
         if (!(trace_flags & TRACE_ITER_BLOCK))
-                wake_up (&trace_wait);
+                wake_up(&trace_wait);
 }
 
 void
 tracing_sched_wakeup_trace(struct trace_array *tr,
                            struct trace_array_cpu *data,
-                           struct task_struct *wakee, struct task_struct *curr,
+                           struct task_struct *wakee,
+                           struct task_struct *curr,
                            unsigned long flags)
 {
         struct trace_entry *entry;
@@ -730,6 +763,7 @@ tracing_sched_wakeup_trace(struct trace_array *tr,
         entry->ctx.prev_state   = curr->state;
         entry->ctx.next_pid     = wakee->pid;
         entry->ctx.next_prio    = wakee->prio;
+        __trace_stack(tr, data, flags, 5);
         spin_unlock_irqrestore(&data->lock, irq_flags);
 
         if (!(trace_flags & TRACE_ITER_BLOCK))
@@ -1179,6 +1213,7 @@ print_lat_fmt(struct trace_iterator *iter, unsigned int trace_idx, int cpu)
         unsigned long rel_usecs;
         char *comm;
         int S;
+        int i;
 
         if (!next_entry)
                 next_entry = entry;
@@ -1197,8 +1232,10 @@ print_lat_fmt(struct trace_iterator *iter, unsigned int trace_idx, int cpu)
                                  abs_usecs % 1000, rel_usecs/1000,
                                  rel_usecs % 1000);
         } else {
-                lat_print_generic(s, entry, cpu);
-                lat_print_timestamp(s, abs_usecs, rel_usecs);
+                if (entry->type != TRACE_STACK) {
+                        lat_print_generic(s, entry, cpu);
+                        lat_print_timestamp(s, abs_usecs, rel_usecs);
+                }
         }
         switch (entry->type) {
         case TRACE_FN:
@@ -1226,6 +1263,14 @@ print_lat_fmt(struct trace_iterator *iter, unsigned int trace_idx, int cpu)
                                  entry->special.arg2,
                                  entry->special.arg3);
                 break;
+        case TRACE_STACK:
+                for (i = 0; i < FTRACE_STACK_ENTRIES; i++) {
+                        if (i)
+                                trace_seq_puts(s, " <= ");
+                        seq_print_ip_sym(s, entry->stack.caller[i], sym_flags);
+                }
+                trace_seq_puts(s, "\n");
+                break;
         default:
                 trace_seq_printf(s, "Unknown type %d\n", entry->type);
         }
@@ -1241,8 +1286,9 @@ static int print_trace_fmt(struct trace_iterator *iter)
         unsigned long long t;
         unsigned long secs;
         char *comm;
-        int S;
         int ret;
+        int S;
+        int i;
 
         entry = iter->ent;
 
@@ -1252,15 +1298,17 @@ static int print_trace_fmt(struct trace_iterator *iter)
         usec_rem = do_div(t, 1000000ULL);
         secs = (unsigned long)t;
 
-        ret = trace_seq_printf(s, "%16s-%-5d ", comm, entry->pid);
-        if (!ret)
-                return 0;
-        ret = trace_seq_printf(s, "[%02d] ", iter->cpu);
-        if (!ret)
-                return 0;
-        ret = trace_seq_printf(s, "%5lu.%06lu: ", secs, usec_rem);
-        if (!ret)
-                return 0;
+        if (entry->type != TRACE_STACK) {
+                ret = trace_seq_printf(s, "%16s-%-5d ", comm, entry->pid);
+                if (!ret)
+                        return 0;
+                ret = trace_seq_printf(s, "[%02d] ", iter->cpu);
+                if (!ret)
+                        return 0;
+                ret = trace_seq_printf(s, "%5lu.%06lu: ", secs, usec_rem);
+                if (!ret)
+                        return 0;
+        }
 
         switch (entry->type) {
         case TRACE_FN:
@@ -1303,6 +1351,22 @@ static int print_trace_fmt(struct trace_iterator *iter)
                 if (!ret)
                         return 0;
                 break;
+        case TRACE_STACK:
+                for (i = 0; i < FTRACE_STACK_ENTRIES; i++) {
+                        if (i) {
+                                ret = trace_seq_puts(s, " <= ");
+                                if (!ret)
+                                        return 0;
+                        }
+                        ret = seq_print_ip_sym(s, entry->stack.caller[i],
+                                               sym_flags);
+                        if (!ret)
+                                return 0;
+                }
+                ret = trace_seq_puts(s, "\n");
+                if (!ret)
+                        return 0;
+                break;
         }
         return 1;
 }
@@ -1344,6 +1408,7 @@ static int print_raw_fmt(struct trace_iterator *iter)
                         return 0;
                 break;
         case TRACE_SPECIAL:
+        case TRACE_STACK:
                 ret = trace_seq_printf(s, " %lx %lx %lx\n",
                                        entry->special.arg1,
                                        entry->special.arg2,
@@ -1399,6 +1464,7 @@ static int print_hex_fmt(struct trace_iterator *iter)
                 SEQ_PUT_HEX_FIELD_RET(s, entry->fn.parent_ip);
                 break;
         case TRACE_SPECIAL:
+        case TRACE_STACK:
                 SEQ_PUT_HEX_FIELD_RET(s, entry->special.arg1);
                 SEQ_PUT_HEX_FIELD_RET(s, entry->special.arg2);
                 SEQ_PUT_HEX_FIELD_RET(s, entry->special.arg3);
@@ -1433,6 +1499,7 @@ static int print_bin_fmt(struct trace_iterator *iter)
                 SEQ_PUT_FIELD_RET(s, entry->ctx.next_prio);
                 break;
         case TRACE_SPECIAL:
+        case TRACE_STACK:
                 SEQ_PUT_FIELD_RET(s, entry->special.arg1);
                 SEQ_PUT_FIELD_RET(s, entry->special.arg2);
                 SEQ_PUT_FIELD_RET(s, entry->special.arg3);
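
For reference, the __trace_stack() helper added above drives the kernel's older stacktrace interface from <linux/stacktrace.h>: the caller supplies the storage and save_stack_trace() fills it with return addresses, skipping the requested number of frames. Below is a minimal sketch of that pattern under the same API assumptions this patch makes; the MY_STACK_DEPTH, my_entries, and my_capture_stack names are illustrative only and are not part of the patch (later kernels replaced this interface with stack_trace_save()).

    #include <linux/stacktrace.h>

    #define MY_STACK_DEPTH 8                /* hypothetical depth for this sketch */

    static unsigned long my_entries[MY_STACK_DEPTH];

    static void my_capture_stack(void)
    {
            struct stack_trace trace = {
                    .nr_entries     = 0,
                    .max_entries    = MY_STACK_DEPTH,
                    .entries        = my_entries,
                    .skip           = 1,    /* skip my_capture_stack() itself */
            };

            save_stack_trace(&trace);
            /* my_entries[0 .. trace.nr_entries - 1] now hold return addresses */
    }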