diff options
Diffstat (limited to 'kernel/trace/trace.c')
-rw-r--r-- | kernel/trace/trace.c | 84 |
1 file changed, 0 insertions, 84 deletions
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c index 3c54cb125228..2585ffb6c6b5 100644 --- a/kernel/trace/trace.c +++ b/kernel/trace/trace.c | |||
@@ -1046,65 +1046,6 @@ ftrace_special(unsigned long arg1, unsigned long arg2, unsigned long arg3) | |||
1046 | local_irq_restore(flags); | 1046 | local_irq_restore(flags); |
1047 | } | 1047 | } |
1048 | 1048 | ||
1049 | #ifdef CONFIG_FUNCTION_TRACER | ||
1050 | static void | ||
1051 | function_trace_call_preempt_only(unsigned long ip, unsigned long parent_ip) | ||
1052 | { | ||
1053 | struct trace_array *tr = &global_trace; | ||
1054 | struct trace_array_cpu *data; | ||
1055 | unsigned long flags; | ||
1056 | long disabled; | ||
1057 | int cpu, resched; | ||
1058 | int pc; | ||
1059 | |||
1060 | if (unlikely(!ftrace_function_enabled)) | ||
1061 | return; | ||
1062 | |||
1063 | pc = preempt_count(); | ||
1064 | resched = ftrace_preempt_disable(); | ||
1065 | local_save_flags(flags); | ||
1066 | cpu = raw_smp_processor_id(); | ||
1067 | data = tr->data[cpu]; | ||
1068 | disabled = atomic_inc_return(&data->disabled); | ||
1069 | |||
1070 | if (likely(disabled == 1)) | ||
1071 | trace_function(tr, data, ip, parent_ip, flags, pc); | ||
1072 | |||
1073 | atomic_dec(&data->disabled); | ||
1074 | ftrace_preempt_enable(resched); | ||
1075 | } | ||
1076 | |||
1077 | static void | ||
1078 | function_trace_call(unsigned long ip, unsigned long parent_ip) | ||
1079 | { | ||
1080 | struct trace_array *tr = &global_trace; | ||
1081 | struct trace_array_cpu *data; | ||
1082 | unsigned long flags; | ||
1083 | long disabled; | ||
1084 | int cpu; | ||
1085 | int pc; | ||
1086 | |||
1087 | if (unlikely(!ftrace_function_enabled)) | ||
1088 | return; | ||
1089 | |||
1090 | /* | ||
1091 | * Need to use raw, since this must be called before the | ||
1092 | * recursive protection is performed. | ||
1093 | */ | ||
1094 | local_irq_save(flags); | ||
1095 | cpu = raw_smp_processor_id(); | ||
1096 | data = tr->data[cpu]; | ||
1097 | disabled = atomic_inc_return(&data->disabled); | ||
1098 | |||
1099 | if (likely(disabled == 1)) { | ||
1100 | pc = preempt_count(); | ||
1101 | trace_function(tr, data, ip, parent_ip, flags, pc); | ||
1102 | } | ||
1103 | |||
1104 | atomic_dec(&data->disabled); | ||
1105 | local_irq_restore(flags); | ||
1106 | } | ||
1107 | |||
1108 | #ifdef CONFIG_FUNCTION_GRAPH_TRACER | 1049 | #ifdef CONFIG_FUNCTION_GRAPH_TRACER |
1109 | int trace_graph_entry(struct ftrace_graph_ent *trace) | 1050 | int trace_graph_entry(struct ftrace_graph_ent *trace) |
1110 | { | 1051 | { |
@@ -1162,31 +1103,6 @@ void trace_graph_return(struct ftrace_graph_ret *trace) | |||
1162 | } | 1103 | } |
1163 | #endif /* CONFIG_FUNCTION_GRAPH_TRACER */ | 1104 | #endif /* CONFIG_FUNCTION_GRAPH_TRACER */ |
1164 | 1105 | ||
1165 | static struct ftrace_ops trace_ops __read_mostly = | ||
1166 | { | ||
1167 | .func = function_trace_call, | ||
1168 | }; | ||
1169 | |||
1170 | void tracing_start_function_trace(void) | ||
1171 | { | ||
1172 | ftrace_function_enabled = 0; | ||
1173 | |||
1174 | if (trace_flags & TRACE_ITER_PREEMPTONLY) | ||
1175 | trace_ops.func = function_trace_call_preempt_only; | ||
1176 | else | ||
1177 | trace_ops.func = function_trace_call; | ||
1178 | |||
1179 | register_ftrace_function(&trace_ops); | ||
1180 | ftrace_function_enabled = 1; | ||
1181 | } | ||
1182 | |||
1183 | void tracing_stop_function_trace(void) | ||
1184 | { | ||
1185 | ftrace_function_enabled = 0; | ||
1186 | unregister_ftrace_function(&trace_ops); | ||
1187 | } | ||
1188 | #endif | ||
1189 | |||
1190 | enum trace_file_type { | 1106 | enum trace_file_type { |
1191 | TRACE_FILE_LAT_FMT = 1, | 1107 | TRACE_FILE_LAT_FMT = 1, |
1192 | TRACE_FILE_ANNOTATE = 2, | 1108 | TRACE_FILE_ANNOTATE = 2, |