Diffstat (limited to 'kernel')
 kernel/trace/trace.c | 40
 kernel/trace/trace.h |  1
 2 files changed, 40 insertions(+), 1 deletion(-)
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 3e7bf5eb9007..d576dbd6defe 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -244,6 +244,7 @@ static const char *trace_options[] = {
 	"stacktrace",
 	"sched-tree",
 	"ftrace_printk",
+	"ftrace_preempt",
 	NULL
 };
 
@@ -891,7 +892,7 @@ ftrace_special(unsigned long arg1, unsigned long arg2, unsigned long arg3)
 
 #ifdef CONFIG_FUNCTION_TRACER
 static void
-function_trace_call(unsigned long ip, unsigned long parent_ip)
+function_trace_call_preempt_only(unsigned long ip, unsigned long parent_ip)
 {
 	struct trace_array *tr = &global_trace;
 	struct trace_array_cpu *data;
@@ -917,6 +918,37 @@ function_trace_call(unsigned long ip, unsigned long parent_ip)
 	ftrace_preempt_enable(resched);
 }
 
+static void
+function_trace_call(unsigned long ip, unsigned long parent_ip)
+{
+	struct trace_array *tr = &global_trace;
+	struct trace_array_cpu *data;
+	unsigned long flags;
+	long disabled;
+	int cpu;
+	int pc;
+
+	if (unlikely(!ftrace_function_enabled))
+		return;
+
+	/*
+	 * Need to use raw, since this must be called before the
+	 * recursive protection is performed.
+	 */
+	raw_local_irq_save(flags);
+	cpu = raw_smp_processor_id();
+	data = tr->data[cpu];
+	disabled = atomic_inc_return(&data->disabled);
+
+	if (likely(disabled == 1)) {
+		pc = preempt_count();
+		trace_function(tr, data, ip, parent_ip, flags, pc);
+	}
+
+	atomic_dec(&data->disabled);
+	raw_local_irq_restore(flags);
+}
+
 static struct ftrace_ops trace_ops __read_mostly =
 {
 	.func = function_trace_call,
@@ -925,6 +957,12 @@ static struct ftrace_ops trace_ops __read_mostly =
 void tracing_start_function_trace(void)
 {
 	ftrace_function_enabled = 0;
+
+	if (trace_flags & TRACE_ITER_PREEMPTONLY)
+		trace_ops.func = function_trace_call_preempt_only;
+	else
+		trace_ops.func = function_trace_call;
+
 	register_ftrace_function(&trace_ops);
 	if (tracer_enabled)
 		ftrace_function_enabled = 1;
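
For context, not part of the commit itself: the renamed function_trace_call_preempt_only() keeps the original behaviour of guarding the per-CPU buffer with only preemption disabled, via the ftrace_preempt_disable()/ftrace_preempt_enable() pair visible in the hunk above, while the new function_trace_call() disables interrupts outright with raw_local_irq_save() so the callback is usable even before the tracer's recursive protection is performed. A minimal sketch of what such a preempt-only guard amounts to follows; the helper bodies are an assumption inferred from the calls shown in the hunk, not code taken from this diff.

/*
 * Illustrative sketch, not part of this commit: a preempt-only guard
 * of the kind function_trace_call_preempt_only() relies on.  It
 * records whether a reschedule was already pending, disables
 * preemption with the notrace variants so the tracer does not trace
 * itself, and on the enable side avoids forcing a reschedule from
 * inside the tracer.
 */
static inline int ftrace_preempt_disable(void)
{
	int resched = need_resched();

	preempt_disable_notrace();
	return resched;
}

static inline void ftrace_preempt_enable(int resched)
{
	if (resched)
		preempt_enable_no_resched_notrace();
	else
		preempt_enable_notrace();
}

The interrupt-disabling default is heavier per traced call, but it keeps trace_function() safe in contexts where only the raw primitives may be used; the lighter preempt-only path remains selectable through the new ftrace_preempt option.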
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
index 10c6dae76894..bb547e933af7 100644
--- a/kernel/trace/trace.h
+++ b/kernel/trace/trace.h
@@ -415,6 +415,7 @@ enum trace_iterator_flags {
 	TRACE_ITER_STACKTRACE	= 0x100,
 	TRACE_ITER_SCHED_TREE	= 0x200,
 	TRACE_ITER_PRINTK	= 0x400,
+	TRACE_ITER_PREEMPTONLY	= 0x800,
 };
 
 extern struct tracer nop_trace;
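
A note on how the two files tie together: the string added to trace_options[] and the new flag in enum trace_iterator_flags must stay at the same position, because the options parser in trace.c sets trace_flags by the entry's index in the array. "ftrace_preempt" is entry 11 (counting from zero), and 1 << 11 is 0x800, which is exactly TRACE_ITER_PREEMPTONLY. The helper below is only a hypothetical sketch of that convention, assuming a parser that walks trace_options[]; it is not code from this commit.

/*
 * Hypothetical helper, for illustration only: map an option name to
 * its flag bit by its index in trace_options[].  With this patch,
 * "ftrace_preempt" sits at index 11, so it toggles
 * 1 << 11 == 0x800 == TRACE_ITER_PREEMPTONLY.
 */
static int set_trace_option(const char *cmp, int neg)
{
	int i;

	for (i = 0; trace_options[i]; i++) {
		if (strcmp(cmp, trace_options[i]) != 0)
			continue;
		if (neg)
			trace_flags &= ~(1 << i);
		else
			trace_flags |= (1 << i);
		return 0;
	}
	return -EINVAL;
}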