Diffstat (limited to 'kernel/trace/trace.c')
 -rw-r--r--  kernel/trace/trace.c | 79
 1 file changed, 61 insertions(+), 18 deletions(-)
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 8df8fdd69c95..5811e0a5f732 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -804,7 +804,7 @@ static void trace_save_cmdline(struct task_struct *tsk)
 	spin_unlock(&trace_cmdline_lock);
 }
 
-static char *trace_find_cmdline(int pid)
+char *trace_find_cmdline(int pid)
 {
 	char *cmdline = "<...>";
 	unsigned map;
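The only change in this hunk drops the static qualifier from trace_find_cmdline(), so the PID-to-comm lookup becomes callable from other tracing code. A hedged sketch of what a caller might look like; the helper name print_task and the printer context are assumptions for illustration, only trace_find_cmdline() itself comes from this file:

/* Hypothetical caller (not in the patch): print "comm-pid" for an event.
 * trace_find_cmdline() falls back to "<...>" when no cmdline was saved
 * for the PID, so the result is always printable. */
static void print_task(struct trace_seq *s, struct trace_entry *ent)
{
	char *comm = trace_find_cmdline(ent->pid);

	trace_seq_printf(s, "%s-%d", comm, ent->pid);
}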
@@ -878,15 +878,15 @@ trace_function(struct trace_array *tr, struct trace_array_cpu *data,
 	ring_buffer_unlock_commit(tr->buffer, event, irq_flags);
 }
 
-#ifdef CONFIG_FUNCTION_RET_TRACER
-static void __trace_function_return(struct trace_array *tr,
-				struct trace_array_cpu *data,
-				struct ftrace_retfunc *trace,
-				unsigned long flags,
-				int pc)
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+static void __trace_graph_entry(struct trace_array *tr,
+				struct trace_array_cpu *data,
+				struct ftrace_graph_ent *trace,
+				unsigned long flags,
+				int pc)
 {
 	struct ring_buffer_event *event;
-	struct ftrace_ret_entry *entry;
+	struct ftrace_graph_ent_entry *entry;
 	unsigned long irq_flags;
 
 	if (unlikely(local_read(&__get_cpu_var(ftrace_cpu_disabled))))
@@ -898,12 +898,32 @@ static void __trace_function_return(struct trace_array *tr,
 		return;
 	entry = ring_buffer_event_data(event);
 	tracing_generic_entry_update(&entry->ent, flags, pc);
-	entry->ent.type = TRACE_FN_RET;
-	entry->ip = trace->func;
-	entry->parent_ip = trace->ret;
-	entry->rettime = trace->rettime;
-	entry->calltime = trace->calltime;
-	entry->overrun = trace->overrun;
+	entry->ent.type = TRACE_GRAPH_ENT;
+	entry->graph_ent = *trace;
+	ring_buffer_unlock_commit(global_trace.buffer, event, irq_flags);
+}
+
+static void __trace_graph_return(struct trace_array *tr,
+				struct trace_array_cpu *data,
+				struct ftrace_graph_ret *trace,
+				unsigned long flags,
+				int pc)
+{
+	struct ring_buffer_event *event;
+	struct ftrace_graph_ret_entry *entry;
+	unsigned long irq_flags;
+
+	if (unlikely(local_read(&__get_cpu_var(ftrace_cpu_disabled))))
+		return;
+
+	event = ring_buffer_lock_reserve(global_trace.buffer, sizeof(*entry),
+					 &irq_flags);
+	if (!event)
+		return;
+	entry = ring_buffer_event_data(event);
+	tracing_generic_entry_update(&entry->ent, flags, pc);
+	entry->ent.type = TRACE_GRAPH_RET;
+	entry->ret = *trace;
 	ring_buffer_unlock_commit(global_trace.buffer, event, irq_flags);
 }
 #endif
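Where the old code logged a single TRACE_FN_RET record carrying func, ret, calltime, rettime and overrun, the new scheme logs two events per call: a TRACE_GRAPH_ENT at function entry and a TRACE_GRAPH_RET at exit, each copying its whole struct into the reserved ring-buffer slot. A consumer can still recover a call's duration from the return record alone; a minimal sketch, assuming ftrace_graph_ret carries over the calltime/rettime fields visible on the old ftrace_retfunc in the deleted lines above:

/* Hedged sketch, not part of the patch: duration of one call from its
 * return record. calltime/rettime are assumed to survive the rename
 * from ftrace_retfunc to ftrace_graph_ret; units are whatever the
 * trace clock uses. */
static unsigned long long graph_call_duration(struct ftrace_graph_ret *ret)
{
	return ret->rettime - ret->calltime;
}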
@@ -1177,8 +1197,29 @@ function_trace_call(unsigned long ip, unsigned long parent_ip)
 	local_irq_restore(flags);
 }
 
-#ifdef CONFIG_FUNCTION_RET_TRACER
-void trace_function_return(struct ftrace_retfunc *trace)
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+void trace_graph_entry(struct ftrace_graph_ent *trace)
+{
+	struct trace_array *tr = &global_trace;
+	struct trace_array_cpu *data;
+	unsigned long flags;
+	long disabled;
+	int cpu;
+	int pc;
+
+	raw_local_irq_save(flags);
+	cpu = raw_smp_processor_id();
+	data = tr->data[cpu];
+	disabled = atomic_inc_return(&data->disabled);
+	if (likely(disabled == 1)) {
+		pc = preempt_count();
+		__trace_graph_entry(tr, data, trace, flags, pc);
+	}
+	atomic_dec(&data->disabled);
+	raw_local_irq_restore(flags);
+}
+
+void trace_graph_return(struct ftrace_graph_ret *trace)
 {
 	struct trace_array *tr = &global_trace;
 	struct trace_array_cpu *data;
@@ -1193,12 +1234,12 @@ void trace_function_return(struct ftrace_retfunc *trace)
 	disabled = atomic_inc_return(&data->disabled);
 	if (likely(disabled == 1)) {
 		pc = preempt_count();
-		__trace_function_return(tr, data, trace, flags, pc);
+		__trace_graph_return(tr, data, trace, flags, pc);
 	}
 	atomic_dec(&data->disabled);
 	raw_local_irq_restore(flags);
 }
-#endif /* CONFIG_FUNCTION_RET_TRACER */
+#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
 
 static struct ftrace_ops trace_ops __read_mostly =
 {
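Both trace_graph_entry() and trace_graph_return() wrap the buffer write in the same reentrancy guard: interrupts are disabled so the CPU cannot change under us and an interrupt handler cannot re-enter the tracer, then a per-CPU counter is atomically incremented and only the outermost caller records. The pattern from the two hunks above, condensed with comments explaining each step (all names come from the patch itself):

/* Per-CPU reentrancy guard shared by the two entry points above. */
raw_local_irq_save(flags);		/* pin the CPU, block reentry from irqs */
cpu = raw_smp_processor_id();
data = tr->data[cpu];
disabled = atomic_inc_return(&data->disabled);
if (likely(disabled == 1)) {		/* only the outermost caller records */
	pc = preempt_count();
	__trace_graph_entry(tr, data, trace, flags, pc);
}
atomic_dec(&data->disabled);		/* re-arm tracing for this CPU */
raw_local_irq_restore(flags);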
@@ -2000,9 +2041,11 @@ static enum print_line_t print_trace_fmt(struct trace_iterator *iter)
 		trace_seq_print_cont(s, iter);
 		break;
 	}
-	case TRACE_FN_RET: {
-		return print_return_function(iter);
-		break;
+	case TRACE_GRAPH_RET: {
+		return print_graph_function(iter);
+	}
+	case TRACE_GRAPH_ENT: {
+		return print_graph_function(iter);
 	}
 	case TRACE_BRANCH: {
 		struct trace_branch *field;
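Both new record types route to print_graph_function(), whose print_line_t result is returned directly, which is also why the unreachable break after the old return could be dropped. Since the two cases share one body, the dispatch is equivalent to a fallthrough; a sketch for illustration only, the patch itself keeps the cases separate:

/* Equivalent dispatch, sketched for reading: entry and return events
 * share one printer because they must be rendered together to form
 * the call graph. */
switch (entry->type) {
case TRACE_GRAPH_ENT:
case TRACE_GRAPH_RET:
	return print_graph_function(iter);
}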
