author		Frederic Weisbecker <fweisbec@gmail.com>	2009-07-29 12:59:58 -0400
committer	Frederic Weisbecker <fweisbec@gmail.com>	2009-08-06 01:28:06 -0400
commit		1a0799a8fef5acc6503f9c5e79b2cd003317826c (patch)
tree		8aec6e623981cd8505de53752234d9f6b5d94843 /kernel/trace/trace.c
parent		82e04af498a85ba425efe77580b7ba08234411df (diff)
tracing/function-graph-tracer: Move graph event insertion helpers in the graph tracer file
The function graph event helpers which insert the function entry and return
events into the ring buffer currently reside in trace.c, but that file is
quite overloaded and the right place for these helpers is the function graph
tracer file. Move them to trace_functions_graph.c.

Signed-off-by: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Steven Rostedt <rostedt@goodmis.org>
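For reference, here is a sketch of how the entry-side helper reads after the
move into kernel/trace/trace_functions_graph.c, assuming it is carried over
verbatim from the removed code below; the additions to the receiving file are
outside this trace.c-limited diffstat, and the comments are added here only
for orientation:

static int __trace_graph_entry(struct trace_array *tr,
				struct ftrace_graph_ent *trace,
				unsigned long flags,
				int pc)
{
	struct ftrace_event_call *call = &event_funcgraph_entry;
	struct ring_buffer_event *event;
	struct ftrace_graph_ent_entry *entry;

	/* Skip the event entirely if tracing is disabled on this CPU. */
	if (unlikely(local_read(&__get_cpu_var(ftrace_cpu_disabled))))
		return 0;

	/* Reserve room for one function-entry event in the ring buffer. */
	event = trace_buffer_lock_reserve(&global_trace, TRACE_GRAPH_ENT,
					  sizeof(*entry), flags, pc);
	if (!event)
		return 0;
	entry = ring_buffer_event_data(event);
	entry->graph_ent = *trace;
	/* Commit the event unless an event filter discards it. */
	if (!filter_current_check_discard(call, entry, event))
		ring_buffer_unlock_commit(global_trace.buffer, event);

	return 1;
}

The exit-side helper __trace_graph_return() follows the same pattern with
TRACE_GRAPH_RET and event_funcgraph_exit, and the public trace_graph_entry()
and trace_graph_return() callbacks removed in the second hunk move along with
them.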
Diffstat (limited to 'kernel/trace/trace.c')
-rw-r--r--	kernel/trace/trace.c	110
1 file changed, 0 insertions(+), 110 deletions(-)
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 1b73acb40e56..0cfd1a62def1 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -942,54 +942,6 @@ trace_function(struct trace_array *tr,
 	ring_buffer_unlock_commit(tr->buffer, event);
 }
 
-#ifdef CONFIG_FUNCTION_GRAPH_TRACER
-static int __trace_graph_entry(struct trace_array *tr,
-				struct ftrace_graph_ent *trace,
-				unsigned long flags,
-				int pc)
-{
-	struct ftrace_event_call *call = &event_funcgraph_entry;
-	struct ring_buffer_event *event;
-	struct ftrace_graph_ent_entry *entry;
-
-	if (unlikely(local_read(&__get_cpu_var(ftrace_cpu_disabled))))
-		return 0;
-
-	event = trace_buffer_lock_reserve(&global_trace, TRACE_GRAPH_ENT,
-					  sizeof(*entry), flags, pc);
-	if (!event)
-		return 0;
-	entry = ring_buffer_event_data(event);
-	entry->graph_ent = *trace;
-	if (!filter_current_check_discard(call, entry, event))
-		ring_buffer_unlock_commit(global_trace.buffer, event);
-
-	return 1;
-}
-
-static void __trace_graph_return(struct trace_array *tr,
-				struct ftrace_graph_ret *trace,
-				unsigned long flags,
-				int pc)
-{
-	struct ftrace_event_call *call = &event_funcgraph_exit;
-	struct ring_buffer_event *event;
-	struct ftrace_graph_ret_entry *entry;
-
-	if (unlikely(local_read(&__get_cpu_var(ftrace_cpu_disabled))))
-		return;
-
-	event = trace_buffer_lock_reserve(&global_trace, TRACE_GRAPH_RET,
-					  sizeof(*entry), flags, pc);
-	if (!event)
-		return;
-	entry = ring_buffer_event_data(event);
-	entry->ret = *trace;
-	if (!filter_current_check_discard(call, entry, event))
-		ring_buffer_unlock_commit(global_trace.buffer, event);
-}
-#endif
-
 void
 ftrace(struct trace_array *tr, struct trace_array_cpu *data,
 	unsigned long ip, unsigned long parent_ip, unsigned long flags,
@@ -1129,68 +1081,6 @@ ftrace_special(unsigned long arg1, unsigned long arg2, unsigned long arg3)
 	local_irq_restore(flags);
 }
 
-#ifdef CONFIG_FUNCTION_GRAPH_TRACER
-int trace_graph_entry(struct ftrace_graph_ent *trace)
-{
-	struct trace_array *tr = &global_trace;
-	struct trace_array_cpu *data;
-	unsigned long flags;
-	long disabled;
-	int ret;
-	int cpu;
-	int pc;
-
-	if (!ftrace_trace_task(current))
-		return 0;
-
-	if (!ftrace_graph_addr(trace->func))
-		return 0;
-
-	local_irq_save(flags);
-	cpu = raw_smp_processor_id();
-	data = tr->data[cpu];
-	disabled = atomic_inc_return(&data->disabled);
-	if (likely(disabled == 1)) {
-		pc = preempt_count();
-		ret = __trace_graph_entry(tr, trace, flags, pc);
-	} else {
-		ret = 0;
-	}
-	/* Only do the atomic if it is not already set */
-	if (!test_tsk_trace_graph(current))
-		set_tsk_trace_graph(current);
-
-	atomic_dec(&data->disabled);
-	local_irq_restore(flags);
-
-	return ret;
-}
-
-void trace_graph_return(struct ftrace_graph_ret *trace)
-{
-	struct trace_array *tr = &global_trace;
-	struct trace_array_cpu *data;
-	unsigned long flags;
-	long disabled;
-	int cpu;
-	int pc;
-
-	local_irq_save(flags);
-	cpu = raw_smp_processor_id();
-	data = tr->data[cpu];
-	disabled = atomic_inc_return(&data->disabled);
-	if (likely(disabled == 1)) {
-		pc = preempt_count();
-		__trace_graph_return(tr, trace, flags, pc);
-	}
-	if (!trace->depth)
-		clear_tsk_trace_graph(current);
-	atomic_dec(&data->disabled);
-	local_irq_restore(flags);
-}
-#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
-
-
 /**
  * trace_vbprintk - write binary msg to tracing buffer
  *
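Because the moved helpers still reference symbols defined in trace.c, the
move only links if those symbols are shared through kernel/trace/trace.h. A
minimal sketch of the declarations involved, as a hypothetical reconstruction
since the header changes fall outside this trace.c-limited diffstat:

/* kernel/trace/trace.h -- declarations the moved helpers rely on
 * (hypothetical reconstruction; the real header diff is not shown here). */

/* The global trace array the helpers write into. */
extern struct trace_array	global_trace;

/* Per-CPU kill switch checked before emitting any event. */
DECLARE_PER_CPU(local_t, ftrace_cpu_disabled);

/* Ring-buffer reservation helper, still defined in trace.c. */
struct ring_buffer_event *trace_buffer_lock_reserve(struct trace_array *tr,
						    int type,
						    unsigned long len,
						    unsigned long flags,
						    int pc);

With these visible, trace_functions_graph.c can hand trace_graph_entry() and
trace_graph_return() to register_ftrace_graph() without reaching back into
trace.c.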