Diffstat (limited to 'kernel/trace/trace.c')
-rw-r--r--  kernel/trace/trace.c  67
1 file changed, 55 insertions(+), 12 deletions(-)
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index f21ab2c68fd4..9d5f7c94f251 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -879,14 +879,38 @@ trace_function(struct trace_array *tr, struct trace_array_cpu *data,
 }
 
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
-static void __trace_function_graph(struct trace_array *tr,
+static void __trace_graph_entry(struct trace_array *tr,
+				struct trace_array_cpu *data,
+				struct ftrace_graph_ent *trace,
+				unsigned long flags,
+				int pc)
+{
+	struct ring_buffer_event *event;
+	struct ftrace_graph_ent_entry *entry;
+	unsigned long irq_flags;
+
+	if (unlikely(local_read(&__get_cpu_var(ftrace_cpu_disabled))))
+		return;
+
+	event = ring_buffer_lock_reserve(global_trace.buffer, sizeof(*entry),
+					 &irq_flags);
+	if (!event)
+		return;
+	entry	= ring_buffer_event_data(event);
+	tracing_generic_entry_update(&entry->ent, flags, pc);
+	entry->ent.type		= TRACE_GRAPH_ENT;
+	entry->graph_ent	= *trace;
+	ring_buffer_unlock_commit(global_trace.buffer, event, irq_flags);
+}
+
+static void __trace_graph_return(struct trace_array *tr,
 				struct trace_array_cpu *data,
 				struct ftrace_graph_ret *trace,
 				unsigned long flags,
 				int pc)
 {
 	struct ring_buffer_event *event;
-	struct ftrace_graph_entry *entry;
+	struct ftrace_graph_ret_entry *entry;
 	unsigned long irq_flags;
 
 	if (unlikely(local_read(&__get_cpu_var(ftrace_cpu_disabled))))
@@ -898,12 +922,8 @@ static void __trace_function_graph(struct trace_array *tr,
 		return;
 	entry	= ring_buffer_event_data(event);
 	tracing_generic_entry_update(&entry->ent, flags, pc);
-	entry->ent.type		= TRACE_FN_RET;
-	entry->ip		= trace->func;
-	entry->parent_ip	= trace->ret;
-	entry->rettime		= trace->rettime;
-	entry->calltime		= trace->calltime;
-	entry->overrun		= trace->overrun;
+	entry->ent.type		= TRACE_GRAPH_RET;
+	entry->ret		= *trace;
 	ring_buffer_unlock_commit(global_trace.buffer, event, irq_flags);
 }
 #endif
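
[Note] The two hunks above split the old single return-tracer record into two ring-buffer record types: TRACE_GRAPH_ENT, written on function entry, and TRACE_GRAPH_RET, written on function exit. The record definitions themselves are outside this file; the following is a minimal sketch of what they plausibly contain, inferred from the fields the old code copied (trace->func, trace->rettime, trace->calltime, trace->overrun) and the two whole-struct assignments the new code makes. Any field not visible in this diff is an assumption, not a quote of the real headers.

/*
 * Sketch only -- the real definitions live in the ftrace/trace headers.
 * Fields beyond those visible in this diff are assumptions.
 */
struct ftrace_graph_ent {
	unsigned long func;		/* address of the entered function */
};

struct ftrace_graph_ret {
	unsigned long func;		/* address of the returning function */
	unsigned long long calltime;	/* timestamp at entry */
	unsigned long long rettime;	/* timestamp at return */
	unsigned long overrun;		/* missed returns (stack overrun) */
};

/* Ring-buffer records written by __trace_graph_entry()/__trace_graph_return();
 * the diff confirms the .graph_ent and .ret members. */
struct ftrace_graph_ent_entry {
	struct trace_entry	ent;
	struct ftrace_graph_ent	graph_ent;
};

struct ftrace_graph_ret_entry {
	struct trace_entry	ent;
	struct ftrace_graph_ret	ret;
};
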
@@ -1178,7 +1198,7 @@ function_trace_call(unsigned long ip, unsigned long parent_ip)
 }
 
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
-void trace_function_graph(struct ftrace_graph_ret *trace)
+void trace_graph_entry(struct ftrace_graph_ent *trace)
 {
 	struct trace_array *tr = &global_trace;
 	struct trace_array_cpu *data;
@@ -1193,7 +1213,28 @@ void trace_function_graph(struct ftrace_graph_ret *trace)
 	disabled = atomic_inc_return(&data->disabled);
 	if (likely(disabled == 1)) {
 		pc = preempt_count();
-		__trace_function_graph(tr, data, trace, flags, pc);
+		__trace_graph_entry(tr, data, trace, flags, pc);
+	}
+	atomic_dec(&data->disabled);
+	raw_local_irq_restore(flags);
+}
+
+void trace_graph_return(struct ftrace_graph_ret *trace)
+{
+	struct trace_array *tr = &global_trace;
+	struct trace_array_cpu *data;
+	unsigned long flags;
+	long disabled;
+	int cpu;
+	int pc;
+
+	raw_local_irq_save(flags);
+	cpu = raw_smp_processor_id();
+	data = tr->data[cpu];
+	disabled = atomic_inc_return(&data->disabled);
+	if (likely(disabled == 1)) {
+		pc = preempt_count();
+		__trace_graph_return(tr, data, trace, flags, pc);
 	}
 	atomic_dec(&data->disabled);
 	raw_local_irq_restore(flags);
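
[Note] trace_graph_entry() and trace_graph_return() are the two callbacks the graph tracer plugin hands to the function-graph infrastructure, replacing the single trace_function_graph() hook. Below is a minimal sketch of how the plugin's init is expected to wire them in, assuming the register_ftrace_graph(retfunc, entryfunc) interface of this kernel generation; the function name and body here are illustrative, and the real init in kernel/trace/trace_functions_graph.c also resets the per-cpu buffers.

/*
 * Sketch only: registering both callbacks with the function-graph
 * infrastructure.  Assumes register_ftrace_graph() takes the return
 * handler first and the entry handler second.
 */
static int graph_trace_init(struct trace_array *tr)
{
	return register_ftrace_graph(&trace_graph_return,
				     &trace_graph_entry);
}
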
@@ -2000,9 +2041,11 @@ static enum print_line_t print_trace_fmt(struct trace_iterator *iter)
 		trace_seq_print_cont(s, iter);
 		break;
 	}
-	case TRACE_FN_RET: {
+	case TRACE_GRAPH_RET: {
+		return print_graph_function(iter);
+	}
+	case TRACE_GRAPH_ENT: {
 		return print_graph_function(iter);
-		break;
 	}
 	case TRACE_BRANCH: {
 		struct trace_branch *field;