author	Steven Rostedt <rostedt@goodmis.org>	2008-11-03 23:15:57 -0500
committer	Ingo Molnar <mingo@elte.hu>	2008-11-04 04:09:50 -0500
commit	b2a866f9344cb30d7ddf5d67b5b8393daf8bef07 (patch)
tree	5265b5bcc3e6daca52f2951a85394bbc58f789fd /kernel
parent	182e9f5f704ed6b9175142fe8da33c9ce0c52b52 (diff)
ftrace: function tracer with irqs disabled
Impact: disable interrupts during trace entry creation (as opposed to preempt)

To help with performance, I set the ftracer to not disable interrupts,
and only to disable preemption.  If an interrupt occurred, it would not
be traced, because the function tracer protects itself from recursion.
This may be faster, but the trace output might miss some traces.

This patch makes the function tracer disable interrupts, but it also
adds a runtime feature to disable preemption instead.  It does this by
having two different tracer functions.  When the function tracer is
enabled, it will check to see which version is requested (irqs disabled
or preemption disabled).  Then it will use the corresponding function
as the tracer.

Irq disabling is the default behaviour, but if the user wants better
performance, with the chance of missing traces, then they can choose
the preempt-disabled version.

Running hackbench 3 times with the irqs disabled and 3 times with the
preempt disabled function tracer yielded:

tracing type       times        entries recorded
------------      --------      ----------------
irq disabled       43.393        166433066
                   43.282        166172618
                   43.298        166256704

preempt disabled   38.969        159871710
                   38.943        159972935
                   39.325        161056510

Average:

   irqs disabled:  43.324        166287462
preempt disabled:  39.079        160300385

preempt is 10.8 percent faster than irqs disabled.

I wrote a patch to count function trace recursion and reran hackbench.

With irq disabled: 1,150 times the function tracer did not trace due to
recursion.  With preempt disabled: 5,117,718 times.

The thousand times with irq disabled could be due to NMIs, or simply a
case where it called a function that was not protected by notrace.

But we also see that a large amount of the trace is lost with the
preempt version.

Signed-off-by: Steven Rostedt <srostedt@redhat.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Steven Rostedt <srostedt@redhat.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
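The recursion protection the message refers to is the per-CPU "disabled" counter visible in the hunks below: each tracer callback bumps the counter and records an entry only when it is the sole active caller, so a nested invocation is dropped rather than re-entering the trace machinery. A minimal stand-alone sketch of that counting pattern, as plain user-space C with made-up names (tracer_callback, trace_event), not the in-kernel code:

#include <stdatomic.h>
#include <stdio.h>

static atomic_long disabled;		/* per-CPU data->disabled in the real tracer */
static long events_recorded;
static long events_dropped;

static void trace_event(unsigned long ip);

static void tracer_callback(unsigned long ip)
{
	/* Claim the counter; only the first (non-nested) entry records. */
	long count = atomic_fetch_add(&disabled, 1) + 1;

	if (count == 1)
		trace_event(ip);
	else
		events_dropped++;	/* nested call: silently dropped */

	atomic_fetch_sub(&disabled, 1);
}

static void trace_event(unsigned long ip)
{
	(void)ip;
	events_recorded++;
}

int main(void)
{
	unsigned long ip;

	for (ip = 1; ip <= 5; ip++)
		tracer_callback(ip);

	/* Simulate a nested caller that already holds the counter. */
	atomic_fetch_add(&disabled, 1);
	tracer_callback(42);
	atomic_fetch_sub(&disabled, 1);

	printf("recorded=%ld dropped=%ld\n", events_recorded, events_dropped);
	return 0;
}

The counter stands in for the per-CPU data->disabled field; what the patch changes is only whether interrupts, or just preemption, are disabled around that window, which is exactly the irq-disabled vs preempt-disabled trade-off measured above.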
Diffstat (limited to 'kernel')
-rw-r--r--	kernel/trace/trace.c	40
-rw-r--r--	kernel/trace/trace.h	1
2 files changed, 40 insertions, 1 deletion
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 3e7bf5eb9007..d576dbd6defe 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -244,6 +244,7 @@ static const char *trace_options[] = {
244 "stacktrace", 244 "stacktrace",
245 "sched-tree", 245 "sched-tree",
246 "ftrace_printk", 246 "ftrace_printk",
247 "ftrace_preempt",
247 NULL 248 NULL
248}; 249};
249 250
@@ -891,7 +892,7 @@ ftrace_special(unsigned long arg1, unsigned long arg2, unsigned long arg3)
 
 #ifdef CONFIG_FUNCTION_TRACER
 static void
-function_trace_call(unsigned long ip, unsigned long parent_ip)
+function_trace_call_preempt_only(unsigned long ip, unsigned long parent_ip)
 {
 	struct trace_array *tr = &global_trace;
 	struct trace_array_cpu *data;
@@ -917,6 +918,37 @@ function_trace_call(unsigned long ip, unsigned long parent_ip)
 	ftrace_preempt_enable(resched);
 }
 
+static void
+function_trace_call(unsigned long ip, unsigned long parent_ip)
+{
+	struct trace_array *tr = &global_trace;
+	struct trace_array_cpu *data;
+	unsigned long flags;
+	long disabled;
+	int cpu;
+	int pc;
+
+	if (unlikely(!ftrace_function_enabled))
+		return;
+
+	/*
+	 * Need to use raw, since this must be called before the
+	 * recursive protection is performed.
+	 */
+	raw_local_irq_save(flags);
+	cpu = raw_smp_processor_id();
+	data = tr->data[cpu];
+	disabled = atomic_inc_return(&data->disabled);
+
+	if (likely(disabled == 1)) {
+		pc = preempt_count();
+		trace_function(tr, data, ip, parent_ip, flags, pc);
+	}
+
+	atomic_dec(&data->disabled);
+	raw_local_irq_restore(flags);
+}
+
 static struct ftrace_ops trace_ops __read_mostly =
 {
 	.func = function_trace_call,
@@ -925,6 +957,12 @@ static struct ftrace_ops trace_ops __read_mostly =
 void tracing_start_function_trace(void)
 {
 	ftrace_function_enabled = 0;
+
+	if (trace_flags & TRACE_ITER_PREEMPTONLY)
+		trace_ops.func = function_trace_call_preempt_only;
+	else
+		trace_ops.func = function_trace_call;
+
 	register_ftrace_function(&trace_ops);
 	if (tracer_enabled)
 		ftrace_function_enabled = 1;
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
index 10c6dae76894..bb547e933af7 100644
--- a/kernel/trace/trace.h
+++ b/kernel/trace/trace.h
@@ -415,6 +415,7 @@ enum trace_iterator_flags {
 	TRACE_ITER_STACKTRACE	= 0x100,
 	TRACE_ITER_SCHED_TREE	= 0x200,
 	TRACE_ITER_PRINTK	= 0x400,
+	TRACE_ITER_PREEMPTONLY	= 0x800,
 };
 
 extern struct tracer nop_trace;
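For reference, the selection in tracing_start_function_trace() above is a plain function-pointer swap keyed off the new TRACE_ITER_PREEMPTONLY bit; since the choice is made when the tracer is started, the option has to be set before the function tracer is enabled. A stand-alone sketch of that dispatch pattern, using hypothetical names (start_function_trace, call_irqs_off, call_preempt_only) rather than the kernel API:

#include <stdio.h>

#define TRACE_ITER_PREEMPTONLY	0x800	/* mirrors the new flag bit above */

typedef void (*trace_func_t)(unsigned long ip, unsigned long parent_ip);

static void call_irqs_off(unsigned long ip, unsigned long parent_ip)
{
	printf("%#lx <- %#lx  (irqs-disabled variant)\n", ip, parent_ip);
}

static void call_preempt_only(unsigned long ip, unsigned long parent_ip)
{
	printf("%#lx <- %#lx  (preempt-disabled variant)\n", ip, parent_ip);
}

static trace_func_t tracer_func;

static void start_function_trace(unsigned long trace_flags)
{
	/* Default to the irqs-off callback; opt in to preempt-only. */
	if (trace_flags & TRACE_ITER_PREEMPTONLY)
		tracer_func = call_preempt_only;
	else
		tracer_func = call_irqs_off;
}

int main(void)
{
	start_function_trace(0);
	tracer_func(0x1000UL, 0x2000UL);

	start_function_trace(TRACE_ITER_PREEMPTONLY);
	tracer_func(0x1000UL, 0x2000UL);
	return 0;
}

Since the patch also adds "ftrace_preempt" to trace_options[], the flag is presumably toggled through the tracing trace_options interface before the function tracer is started; that last point is an inference from the option string, not something stated in the commit.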