author		Török Edwin <edwintorok@gmail.com>	2008-11-22 06:28:47 -0500
committer	Ingo Molnar <mingo@elte.hu>	2008-11-23 03:25:15 -0500
commit		02b67518e2b1c490787dac7f35e1204e74fe21ba (patch)
tree		a3d92846e1a09a829f300ab15726ee9c288cb49e /kernel
parent		a0a70c735ef714fe1b6777b571630c3d50c7b008 (diff)
tracing: add support for userspace stacktraces in tracing/iter_ctrl
Impact: add new (default-off) tracing visualization feature
Usage example:
mount -t debugfs nodev /sys/kernel/debug
cd /sys/kernel/debug/tracing
echo userstacktrace >iter_ctrl
echo sched_switch >current_tracer
echo 1 >tracing_enabled
.... run application ...
echo 0 >tracing_enabled
Then read one of 'trace','latency_trace','trace_pipe'.
To get the best output, compile your userspace programs with frame
pointers (at least glibc and the application you are tracing); with GCC
that typically means building with -fno-omit-frame-pointer.
Signed-off-by: Török Edwin <edwintorok@gmail.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'kernel')
 kernel/trace/trace.c | 93
 kernel/trace/trace.h |  9
 2 files changed, 102 insertions(+), 0 deletions(-)
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 4ee6f0375222..ced8b4fa9f51 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -275,6 +275,7 @@ static const char *trace_options[] = {
 	"ftrace_preempt",
 	"branch",
 	"annotate",
+	"userstacktrace",
 	NULL
 };
 
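Note: each string in trace_options[] pairs positionally with a TRACE_ITER_* bit, so the new "userstacktrace" entry selects the TRACE_ITER_USERSTACKTRACE flag (0x4000) added to trace.h below. A minimal sketch of that lookup, assuming the names used in this file (this helper is illustrative, not code from the patch):

/*
 * Illustrative sketch only (not from the patch): a token written to
 * iter_ctrl is matched against trace_options[], and a match at index i
 * sets bit (1 << i) in trace_flags.  "userstacktrace" therefore maps to
 * TRACE_ITER_USERSTACKTRACE, which ftrace_trace_userstack() tests below.
 */
static int trace_option_to_flag(const char *token)
{
	int i;

	for (i = 0; trace_options[i]; i++) {
		if (strcmp(token, trace_options[i]) == 0)
			return 1 << i;
	}
	return 0;		/* unknown option token */
}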
@@ -918,6 +919,44 @@ void __trace_stack(struct trace_array *tr,
 	ftrace_trace_stack(tr, data, flags, skip, preempt_count());
 }
 
+static void ftrace_trace_userstack(struct trace_array *tr,
+				   struct trace_array_cpu *data,
+				   unsigned long flags, int pc)
+{
+	struct userstack_entry *entry;
+	struct stack_trace trace;
+	struct ring_buffer_event *event;
+	unsigned long irq_flags;
+
+	if (!(trace_flags & TRACE_ITER_USERSTACKTRACE))
+		return;
+
+	event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry),
+					 &irq_flags);
+	if (!event)
+		return;
+	entry = ring_buffer_event_data(event);
+	tracing_generic_entry_update(&entry->ent, flags, pc);
+	entry->ent.type = TRACE_USER_STACK;
+
+	memset(&entry->caller, 0, sizeof(entry->caller));
+
+	trace.nr_entries	= 0;
+	trace.max_entries	= FTRACE_STACK_ENTRIES;
+	trace.skip		= 0;
+	trace.entries		= entry->caller;
+
+	save_stack_trace_user(&trace);
+	ring_buffer_unlock_commit(tr->buffer, event, irq_flags);
+}
+
+void __trace_userstack(struct trace_array *tr,
+		       struct trace_array_cpu *data,
+		       unsigned long flags)
+{
+	ftrace_trace_userstack(tr, data, flags, preempt_count());
+}
+
 static void
 ftrace_trace_special(void *__tr, void *__data,
 		     unsigned long arg1, unsigned long arg2, unsigned long arg3,
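ftrace_trace_userstack() delegates the actual unwinding to save_stack_trace_user(), which this patch does not define; it has to be provided by the architecture. As a rough, hypothetical sketch of what a frame-pointer based implementation looks like on an x86-style ABI (struct and register names here are illustrative, and a real version must use a non-faulting copy such as __copy_from_user_inatomic() since this runs with preemption and possibly interrupts disabled):

/*
 * Hypothetical arch-side sketch, not the implementation from this series:
 * walk the user frame-pointer chain and record return addresses into
 * trace->entries, terminating with ULONG_MAX so the printer below knows
 * where the stack ends.  trace->skip handling is omitted for brevity.
 */
struct user_frame {
	const void __user	*next_fp;	/* caller's saved frame pointer */
	unsigned long		ret_addr;	/* return address in that frame */
};

void save_stack_trace_user(struct stack_trace *trace)
{
	const struct pt_regs *regs = task_pt_regs(current);
	const void __user *fp = (const void __user *)regs->bp;

	if (trace->nr_entries < trace->max_entries)
		trace->entries[trace->nr_entries++] = regs->ip;

	while (trace->nr_entries < trace->max_entries) {
		struct user_frame frame;

		if (copy_from_user(&frame, fp, sizeof(frame)))
			break;			/* unreadable user memory */
		if (!frame.ret_addr)
			break;			/* end of the chain */
		trace->entries[trace->nr_entries++] = frame.ret_addr;
		fp = frame.next_fp;
	}
	if (trace->nr_entries < trace->max_entries)
		trace->entries[trace->nr_entries++] = ULONG_MAX;
}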
@@ -941,6 +980,7 @@ ftrace_trace_special(void *__tr, void *__data,
 	entry->arg3 = arg3;
 	ring_buffer_unlock_commit(tr->buffer, event, irq_flags);
 	ftrace_trace_stack(tr, data, irq_flags, 4, pc);
+	ftrace_trace_userstack(tr, data, irq_flags, pc);
 
 	trace_wake_up();
 }
@@ -979,6 +1019,7 @@ tracing_sched_switch_trace(struct trace_array *tr,
 	entry->next_cpu = task_cpu(next);
 	ring_buffer_unlock_commit(tr->buffer, event, irq_flags);
 	ftrace_trace_stack(tr, data, flags, 5, pc);
+	ftrace_trace_userstack(tr, data, flags, pc);
 }
 
 void
@@ -1008,6 +1049,7 @@ tracing_sched_wakeup_trace(struct trace_array *tr,
 	entry->next_cpu = task_cpu(wakee);
 	ring_buffer_unlock_commit(tr->buffer, event, irq_flags);
 	ftrace_trace_stack(tr, data, flags, 6, pc);
+	ftrace_trace_userstack(tr, data, flags, pc);
 
 	trace_wake_up();
 }
@@ -1387,6 +1429,31 @@ seq_print_ip_sym(struct trace_seq *s, unsigned long ip, unsigned long sym_flags)
 	return ret;
 }
 
+static int
+seq_print_userip_objs(const struct userstack_entry *entry, struct trace_seq *s,
+		      unsigned long sym_flags)
+{
+	int ret = 1;
+	unsigned i;
+
+	for (i = 0; i < FTRACE_STACK_ENTRIES; i++) {
+		unsigned long ip = entry->caller[i];
+
+		if (ip == ULONG_MAX || !ret)
+			break;
+		if (i)
+			ret = trace_seq_puts(s, " <- ");
+		if (!ip) {
+			ret = trace_seq_puts(s, "??");
+			continue;
+		}
+		if (ret /*&& (sym_flags & TRACE_ITER_SYM_ADDR)*/)
+			ret = trace_seq_printf(s, " <" IP_FMT ">", ip);
+	}
+
+	return ret;
+}
+
 static void print_lat_help_header(struct seq_file *m)
 {
 	seq_puts(m, "# _------=> CPU# \n");
@@ -1702,6 +1769,16 @@ print_lat_fmt(struct trace_iterator *iter, unsigned int trace_idx, int cpu)
 			 field->line);
 		break;
 	}
+	case TRACE_USER_STACK: {
+		struct userstack_entry *field;
+
+		trace_assign_type(field, entry);
+
+		seq_print_userip_objs(field, s, sym_flags);
+		if (entry->flags & TRACE_FLAG_CONT)
+			trace_seq_print_cont(s, iter);
+		break;
+	}
 	default:
 		trace_seq_printf(s, "Unknown type %d\n", entry->type);
 	}
@@ -1853,6 +1930,19 @@ static enum print_line_t print_trace_fmt(struct trace_iterator *iter)
 			 field->line);
 		break;
 	}
+	case TRACE_USER_STACK: {
+		struct userstack_entry *field;
+
+		trace_assign_type(field, entry);
+
+		ret = seq_print_userip_objs(field, s, sym_flags);
+		if (!ret)
+			return TRACE_TYPE_PARTIAL_LINE;
+		ret = trace_seq_putc(s, '\n');
+		if (!ret)
+			return TRACE_TYPE_PARTIAL_LINE;
+		break;
+	}
 	}
 	return TRACE_TYPE_HANDLED;
 }
@@ -1912,6 +2002,7 @@ static enum print_line_t print_raw_fmt(struct trace_iterator *iter)
 		break;
 	}
 	case TRACE_SPECIAL:
+	case TRACE_USER_STACK:
 	case TRACE_STACK: {
 		struct special_entry *field;
 
@@ -2000,6 +2091,7 @@ static enum print_line_t print_hex_fmt(struct trace_iterator *iter)
 		break;
 	}
 	case TRACE_SPECIAL:
+	case TRACE_USER_STACK:
 	case TRACE_STACK: {
 		struct special_entry *field;
 
@@ -2054,6 +2146,7 @@ static enum print_line_t print_bin_fmt(struct trace_iterator *iter)
 		break;
 	}
 	case TRACE_SPECIAL:
+	case TRACE_USER_STACK:
 	case TRACE_STACK: {
 		struct special_entry *field;
 
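The user IPs printed by seq_print_userip_objs() are raw addresses in the traced process; resolving them to a binary and source line is left to userspace. A small, hypothetical helper along these lines (not part of the patch) can look a traced address up in /proc/<pid>/maps and print the containing mapping plus file offset, which tools such as addr2line or objdump can then help translate when debug info is available:

/*
 * Hypothetical user-space helper, not part of this patch: map a traced
 * user IP to the backing file and file offset via /proc/<pid>/maps.
 */
#include <stdio.h>
#include <stdlib.h>
#include <sys/types.h>

static void resolve_user_ip(pid_t pid, unsigned long ip)
{
	char path[64], line[512];
	FILE *maps;

	snprintf(path, sizeof(path), "/proc/%d/maps", (int)pid);
	maps = fopen(path, "r");
	if (!maps) {
		perror("fopen");
		return;
	}
	while (fgets(line, sizeof(line), maps)) {
		unsigned long start, end, offset;
		char perms[8], file[256];

		file[0] = '\0';
		if (sscanf(line, "%lx-%lx %7s %lx %*s %*s %255s",
			   &start, &end, perms, &offset, file) < 4)
			continue;
		if (ip >= start && ip < end) {
			/* print mapping name and file offset of the IP */
			printf("%#lx -> %s+%#lx\n", ip,
			       file[0] ? file : "[anon]",
			       ip - start + offset);
			break;
		}
	}
	fclose(maps);
}

int main(int argc, char **argv)
{
	if (argc != 3) {
		fprintf(stderr, "usage: %s <pid> <hex-ip>\n", argv[0]);
		return 1;
	}
	resolve_user_ip((pid_t)atoi(argv[1]), strtoul(argv[2], NULL, 16));
	return 0;
}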
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
index 2cb12fd98f6b..17bb4c830b01 100644
--- a/kernel/trace/trace.h
+++ b/kernel/trace/trace.h
@@ -26,6 +26,7 @@ enum trace_type {
 	TRACE_BOOT_CALL,
 	TRACE_BOOT_RET,
 	TRACE_FN_RET,
+	TRACE_USER_STACK,
 
 	__TRACE_LAST_TYPE
 };
@@ -42,6 +43,7 @@ struct trace_entry {
 	unsigned char		flags;
 	unsigned char		preempt_count;
 	int			pid;
+	int			tgid;
 };
 
 /*
@@ -99,6 +101,11 @@ struct stack_entry {
 	unsigned long		caller[FTRACE_STACK_ENTRIES];
 };
 
+struct userstack_entry {
+	struct trace_entry	ent;
+	unsigned long		caller[FTRACE_STACK_ENTRIES];
+};
+
 /*
  * ftrace_printk entry:
  */
@@ -240,6 +247,7 @@ extern void __ftrace_bad_type(void);
 		IF_ASSIGN(var, ent, struct ctx_switch_entry, 0);	\
 		IF_ASSIGN(var, ent, struct trace_field_cont, TRACE_CONT); \
 		IF_ASSIGN(var, ent, struct stack_entry, TRACE_STACK);	\
+		IF_ASSIGN(var, ent, struct userstack_entry, TRACE_USER_STACK);\
 		IF_ASSIGN(var, ent, struct print_entry, TRACE_PRINT);	\
 		IF_ASSIGN(var, ent, struct special_entry, 0);		\
 		IF_ASSIGN(var, ent, struct trace_mmiotrace_rw,		\
@@ -500,6 +508,7 @@ enum trace_iterator_flags {
 	TRACE_ITER_PREEMPTONLY		= 0x800,
 	TRACE_ITER_BRANCH		= 0x1000,
 	TRACE_ITER_ANNOTATE		= 0x2000,
+	TRACE_ITER_USERSTACKTRACE	= 0x4000
 };
 
 /*