author     Arnaldo Carvalho de Melo <acme@ghostprotocols.net>   2008-05-12 15:20:42 -0400
committer  Thomas Gleixner <tglx@linutronix.de>                 2008-05-23 14:31:58 -0400
commit     16444a8a40d4c7b4f6de34af0cae1f76a4f6c901 (patch)
tree       9c290bcdbdc1ecf8f578c30b3b36914e14fdaacc /kernel
parent     6e766410c4babd37bc7cd5e25009c179781742c8 (diff)
ftrace: add basic support for gcc profiler instrumentation
If CONFIG_FTRACE is selected and /proc/sys/kernel/ftrace_enabled is
set to a non-zero value, the ftrace routine will be called every time
we enter a kernel function that is not marked with the "notrace"
attribute.
The ftrace routine will then call a registered function, if one
happens to be registered.
[ This code has been highly hacked by Steven Rostedt and Ingo Molnar,
so don't blame Arnaldo for all of this ;-) ]
Update:
It is now possible to register more than one ftrace function.
If only one ftrace function is registered, that will be the
function that ftrace calls directly. If more than one function
is registered, then ftrace will call a function that will loop
through the functions to call.
Signed-off-by: Arnaldo Carvalho de Melo <acme@ghostprotocols.net>
Signed-off-by: Steven Rostedt <srostedt@redhat.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
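
For illustration, here is a minimal sketch of how a tracer might use the interface added below. The module and callback names are hypothetical and not part of this commit; only struct ftrace_ops, register_ftrace_function() and unregister_ftrace_function() come from the patch:

    #include <linux/ftrace.h>
    #include <linux/init.h>
    #include <linux/module.h>

    /* Callbacks (and everything they call) must be notrace, otherwise
     * they would recurse into themselves through the mcount hook. */
    static notrace void example_trace_a(unsigned long ip, unsigned long parent_ip)
    {
    	/* ip is the traced function, parent_ip is its caller */
    }

    static notrace void example_trace_b(unsigned long ip, unsigned long parent_ip)
    {
    }

    static struct ftrace_ops example_ops_a = { .func = example_trace_a };
    static struct ftrace_ops example_ops_b = { .func = example_trace_b };

    static int __init example_tracer_init(void)
    {
    	register_ftrace_function(&example_ops_a);	/* direct call path */
    	register_ftrace_function(&example_ops_b);	/* switches to list walk */
    	return 0;
    }

    static void __exit example_tracer_exit(void)
    {
    	unregister_ftrace_function(&example_ops_b);
    	unregister_ftrace_function(&example_ops_a);
    }

    module_init(example_tracer_init);
    module_exit(example_tracer_exit);
    MODULE_LICENSE("GPL");

With only example_ops_a registered, ftrace_trace_function points straight at example_trace_a; once example_ops_b is added, it is switched to ftrace_list_func, which walks the list and calls both callbacks.
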
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/Makefile       |   1
-rw-r--r--  kernel/trace/Kconfig  |   5
-rw-r--r--  kernel/trace/Makefile |   3
-rw-r--r--  kernel/trace/ftrace.c | 138
4 files changed, 147 insertions(+), 0 deletions(-)
diff --git a/kernel/Makefile b/kernel/Makefile
index 1c9938addb9d..fa05f6d8bdbf 100644
--- a/kernel/Makefile
+++ b/kernel/Makefile
@@ -69,6 +69,7 @@ obj-$(CONFIG_TASK_DELAY_ACCT) += delayacct.o
 obj-$(CONFIG_TASKSTATS) += taskstats.o tsacct.o
 obj-$(CONFIG_MARKERS) += marker.o
 obj-$(CONFIG_LATENCYTOP) += latencytop.o
+obj-$(CONFIG_FTRACE) += trace/

 ifneq ($(CONFIG_SCHED_NO_NO_OMIT_FRAME_POINTER),y)
 # According to Alan Modra <alan@linuxcare.com.au>, the -fno-omit-frame-pointer is
diff --git a/kernel/trace/Kconfig b/kernel/trace/Kconfig
new file mode 100644
index 000000000000..8185c91417bc
--- /dev/null
+++ b/kernel/trace/Kconfig
@@ -0,0 +1,5 @@
+#
+# Architectures that offer an FTRACE implementation should select HAVE_FTRACE:
+#
+config HAVE_FTRACE
+	bool
diff --git a/kernel/trace/Makefile b/kernel/trace/Makefile
new file mode 100644
index 000000000000..bf4fd215a6a9
--- /dev/null
+++ b/kernel/trace/Makefile
@@ -0,0 +1,3 @@
+obj-$(CONFIG_FTRACE) += libftrace.o
+
+libftrace-y := ftrace.o
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
new file mode 100644
index 000000000000..b6a80b98a3fb
--- /dev/null
+++ b/kernel/trace/ftrace.c
@@ -0,0 +1,138 @@
+/*
+ * Infrastructure for profiling code inserted by 'gcc -pg'.
+ *
+ * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
+ * Copyright (C) 2004-2008 Ingo Molnar <mingo@redhat.com>
+ *
+ * Originally ported from the -rt patch by:
+ *   Copyright (C) 2007 Arnaldo Carvalho de Melo <acme@redhat.com>
+ *
+ * Based on code in the latency_tracer, that is:
+ *
+ *  Copyright (C) 2004-2006 Ingo Molnar
+ *  Copyright (C) 2004 William Lee Irwin III
+ */
+
+#include <linux/module.h>
+#include <linux/ftrace.h>
+
+static DEFINE_SPINLOCK(ftrace_func_lock);
+static struct ftrace_ops ftrace_list_end __read_mostly =
+{
+	.func = ftrace_stub,
+};
+
+static struct ftrace_ops *ftrace_list __read_mostly = &ftrace_list_end;
+ftrace_func_t ftrace_trace_function __read_mostly = ftrace_stub;
+
+/* mcount is defined per arch in assembly */
+EXPORT_SYMBOL(mcount);
+
+notrace void ftrace_list_func(unsigned long ip, unsigned long parent_ip)
+{
+	struct ftrace_ops *op = ftrace_list;
+
+	/* in case someone actually ports this to alpha! */
+	read_barrier_depends();
+
+	while (op != &ftrace_list_end) {
+		/* silly alpha */
+		read_barrier_depends();
+		op->func(ip, parent_ip);
+		op = op->next;
+	}
+}
+
+/**
+ * register_ftrace_function - register a function for profiling
+ * @ops - ops structure that holds the function for profiling.
+ *
+ * Register a function to be called by all functions in the
+ * kernel.
+ *
+ * Note: @ops->func and all the functions it calls must be labeled
+ *       with "notrace", otherwise it will go into a
+ *       recursive loop.
+ */
+int register_ftrace_function(struct ftrace_ops *ops)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&ftrace_func_lock, flags);
+	ops->next = ftrace_list;
+	/*
+	 * We are entering ops into the ftrace_list but another
+	 * CPU might be walking that list. We need to make sure
+	 * the ops->next pointer is valid before another CPU sees
+	 * the ops pointer included into the ftrace_list.
+	 */
+	smp_wmb();
+	ftrace_list = ops;
+	/*
+	 * For one func, simply call it directly.
+	 * For more than one func, call the chain.
+	 */
+	if (ops->next == &ftrace_list_end)
+		ftrace_trace_function = ops->func;
+	else
+		ftrace_trace_function = ftrace_list_func;
+	spin_unlock_irqrestore(&ftrace_func_lock, flags);
+
+	return 0;
+}
+
+/**
+ * unregister_ftrace_function - unregister a function for profiling.
+ * @ops - ops structure that holds the function to unregister
+ *
+ * Unregister a function that was added to be called by ftrace profiling.
+ */
+int unregister_ftrace_function(struct ftrace_ops *ops)
+{
+	unsigned long flags;
+	struct ftrace_ops **p;
+	int ret = 0;
+
+	spin_lock_irqsave(&ftrace_func_lock, flags);
+
+	/*
+	 * If we are the only function, then the ftrace pointer is
+	 * pointing directly to that function.
+	 */
+	if (ftrace_list == ops && ops->next == &ftrace_list_end) {
+		ftrace_trace_function = ftrace_stub;
+		ftrace_list = &ftrace_list_end;
+		goto out;
+	}
+
+	for (p = &ftrace_list; *p != &ftrace_list_end; p = &(*p)->next)
+		if (*p == ops)
+			break;
+
+	if (*p != ops) {
+		ret = -1;
+		goto out;
+	}
+
+	*p = (*p)->next;
+
+	/* If we only have one func left, then call that directly */
+	if (ftrace_list->next == &ftrace_list_end)
+		ftrace_trace_function = ftrace_list->func;
+
+ out:
+	spin_unlock_irqrestore(&ftrace_func_lock, flags);
+
+	return ret;
+}
+
+/**
+ * clear_ftrace_function - reset the ftrace function
+ *
+ * This NULLs the ftrace function and in essence stops
+ * tracing. There may be lag before other CPUs see the change.
+ */
+void clear_ftrace_function(void)
+{
+	ftrace_trace_function = ftrace_stub;
+}
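
The hook that actually invokes ftrace_trace_function is the per-architecture mcount stub emitted by 'gcc -pg'; that assembly is outside this kernel-only diff and is added by other patches in the series. Purely as a hedged, conceptual C sketch of the dispatch (the real hook is architecture assembly, and the /proc/sys/kernel/ftrace_enabled gating mentioned in the changelog also lives outside this file):

    #include <linux/ftrace.h>

    /* Conceptual C sketch only: the real mcount hook is per-arch assembly. */
    static notrace void mcount_sketch(unsigned long ip, unsigned long parent_ip)
    {
    	/*
    	 * With nothing registered, ftrace_trace_function is ftrace_stub,
    	 * so tracing costs one indirect call per function entry.
    	 */
    	ftrace_trace_function(ip, parent_ip);
    }
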