Diffstat (limited to 'kernel/trace/ftrace.c')
-rw-r--r-- | kernel/trace/ftrace.c | 138
1 file changed, 138 insertions, 0 deletions
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
new file mode 100644
index 000000000000..b6a80b98a3fb
--- /dev/null
+++ b/kernel/trace/ftrace.c
@@ -0,0 +1,138 @@
/*
 * Infrastructure for profiling code inserted by 'gcc -pg'.
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2004-2008 Ingo Molnar <mingo@redhat.com>
 *
 * Originally ported from the -rt patch by:
 *   Copyright (C) 2007 Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Based on code in the latency_tracer, that is:
 *
 *  Copyright (C) 2004-2006 Ingo Molnar
 *  Copyright (C) 2004 William Lee Irwin III
 */

#include <linux/module.h>
#include <linux/ftrace.h>

static DEFINE_SPINLOCK(ftrace_func_lock);
static struct ftrace_ops ftrace_list_end __read_mostly = {
	.func = ftrace_stub,
};

static struct ftrace_ops *ftrace_list __read_mostly = &ftrace_list_end;
ftrace_func_t ftrace_trace_function __read_mostly = ftrace_stub;

/* mcount is defined per arch in assembly */
EXPORT_SYMBOL(mcount);

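/*
 * Illustrative sketch (editor's assumption, not part of this file): in C
 * terms the per-arch assembly mcount stub behaves roughly as below.
 * 'gcc -pg' plants a call to mcount() at the entry of every function not
 * marked notrace, and the stub forwards to ftrace_trace_function.
 */
static notrace void mcount_sketch(void)
{
	/* address inside the traced function (mcount's return address) */
	unsigned long ip = (unsigned long)__builtin_return_address(0);
	/*
	 * The traced function's own caller.  Level 1 is best-effort in C;
	 * the real stub reads this straight off the stack frame.
	 */
	unsigned long parent_ip = (unsigned long)__builtin_return_address(1);

	if (ftrace_trace_function != ftrace_stub)
		ftrace_trace_function(ip, parent_ip);
}
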
notrace void ftrace_list_func(unsigned long ip, unsigned long parent_ip)
{
	struct ftrace_ops *op = ftrace_list;

	/* in case someone actually ports this to alpha! */
	read_barrier_depends();

	while (op != &ftrace_list_end) {
		/* silly alpha */
		read_barrier_depends();
		op->func(ip, parent_ip);
		op = op->next;
	}
}

/**
 * register_ftrace_function - register a function for profiling
 * @ops: ops structure that holds the function for profiling.
 *
 * Register a function to be called at the entry of every traced
 * function in the kernel.
 *
 * Note: @ops->func and all the functions it calls must be labeled
 * with "notrace", otherwise the callback will recurse into itself
 * through mcount.
 */
int register_ftrace_function(struct ftrace_ops *ops)
{
	unsigned long flags;

	spin_lock_irqsave(&ftrace_func_lock, flags);
	ops->next = ftrace_list;
	/*
	 * We are entering ops into the ftrace_list but another
	 * CPU might be walking that list. We need to make sure
	 * the ops->next pointer is valid before another CPU sees
	 * the ops pointer included into the ftrace_list.
	 */
	smp_wmb();
	ftrace_list = ops;
	/*
	 * For one func, simply call it directly.
	 * For more than one func, call the chain.
	 */
	if (ops->next == &ftrace_list_end)
		ftrace_trace_function = ops->func;
	else
		ftrace_trace_function = ftrace_list_func;
	spin_unlock_irqrestore(&ftrace_func_lock, flags);

	return 0;
}
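
/*
 * Usage sketch (editor's illustration; the example_* names are
 * hypothetical, not part of this file): a minimal callback and its
 * ftrace_ops.  Note the notrace annotation required by the comment
 * above.  Registration is shown after unregister_ftrace_function()
 * below.
 */
static notrace void example_trace_func(unsigned long ip,
				       unsigned long parent_ip)
{
	/* runs at every traced function entry; must not call traceable code */
}

static struct ftrace_ops example_ops __read_mostly = {
	.func = example_trace_func,
	/* .next is filled in by register_ftrace_function() */
};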

/**
 * unregister_ftrace_function - unregister a function for profiling.
 * @ops: ops structure that holds the function to unregister
 *
 * Unregister a function that was added to be called by ftrace profiling.
 */
int unregister_ftrace_function(struct ftrace_ops *ops)
{
	unsigned long flags;
	struct ftrace_ops **p;
	int ret = 0;

	spin_lock_irqsave(&ftrace_func_lock, flags);

	/*
	 * If we are the only function, then the ftrace pointer is
	 * pointing directly to that function.
	 */
	if (ftrace_list == ops && ops->next == &ftrace_list_end) {
		ftrace_trace_function = ftrace_stub;
		ftrace_list = &ftrace_list_end;
		goto out;
	}

	for (p = &ftrace_list; *p != &ftrace_list_end; p = &(*p)->next)
		if (*p == ops)
			break;

	if (*p != ops) {
		ret = -1;
		goto out;
	}

	*p = (*p)->next;

	/* If we only have one func left, then call that directly */
	if (ftrace_list->next == &ftrace_list_end)
		ftrace_trace_function = ftrace_list->func;

 out:
	spin_unlock_irqrestore(&ftrace_func_lock, flags);

	return ret;
}
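
/*
 * Usage sketch, continued (editor's illustration): registering and
 * unregistering the hypothetical example_ops over a module lifetime,
 * as it might appear in a separate test module.
 */
static int __init example_init(void)
{
	return register_ftrace_function(&example_ops);
}

static void __exit example_exit(void)
{
	unregister_ftrace_function(&example_ops);
}

module_init(example_init);
module_exit(example_exit);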

/**
 * clear_ftrace_function - reset the ftrace function
 *
 * This resets the ftrace function to the stub and in essence stops
 * tracing.  Since the update is a plain pointer store with no
 * synchronization, other CPUs may keep calling the old function
 * for a short time.
 */
void clear_ftrace_function(void)
{
	ftrace_trace_function = ftrace_stub;
}