author		Steven Rostedt <srostedt@redhat.com>	2011-05-03 22:49:52 -0400
committer	Steven Rostedt <rostedt@goodmis.org>	2011-05-18 15:29:49 -0400
commit		2b499381bc50ede01b3d8eab164ca2fad00655f0 (patch)
tree		3140c277582b03b1645fffcb829763d62e2f01fa /kernel/trace
parent		bd69c30b1d08032d97ab0dabd7a1eb7fb73ca2b2 (diff)
ftrace: Have global_ops store the functions that are to be traced
This is a step towards each ops structure defining its own set of functions to trace. As the current code with pids and such is specific to the global_ops, it is restructured to be used with the global ops.

Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
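For orientation, below is a minimal user-space sketch (not the kernel code itself) of the shape this patch moves toward: an ops list terminated by a sentinel, add/remove helpers that take the list head as a parameter instead of hard-coding the global list, and a global_ops entry that caches the currently resolved callback. The simplified types, the plain pointer assignment standing in for rcu_assign_pointer(), the omission of the ftrace_enabled/ftrace_disabled checks, and the main() driver are all illustrative assumptions.

/*
 * Minimal sketch of the restructured ops-list handling.  Names mirror the
 * patch; everything else is simplified for illustration.
 */
#include <stdio.h>

typedef void (*ftrace_func_t)(unsigned long ip, unsigned long parent_ip);

struct ftrace_ops {
	ftrace_func_t func;
	struct ftrace_ops *next;
};

static void ftrace_stub(unsigned long ip, unsigned long parent_ip) { }

/* Sentinel marking the end of the list, as in the kernel. */
static struct ftrace_ops ftrace_list_end = { .func = ftrace_stub };
static struct ftrace_ops *ftrace_list = &ftrace_list_end;

/* global_ops caches whichever callback is currently resolved. */
static struct ftrace_ops global_ops = { .func = ftrace_stub };

/* Generic helpers: they operate on a list head passed by the caller. */
static void add_ftrace_ops(struct ftrace_ops **list, struct ftrace_ops *ops)
{
	ops->next = *list;
	*list = ops;		/* the kernel uses rcu_assign_pointer() here */
}

static int remove_ftrace_ops(struct ftrace_ops **list, struct ftrace_ops *ops)
{
	struct ftrace_ops **p;

	for (p = list; *p != &ftrace_list_end; p = &(*p)->next)
		if (*p == ops)
			break;

	if (*p != ops)
		return -1;

	*p = (*p)->next;	/* unlink ops from the list */
	return 0;
}

static void my_tracer(unsigned long ip, unsigned long parent_ip)
{
	printf("traced ip=%lx called from %lx\n", ip, parent_ip);
}

int main(void)
{
	struct ftrace_ops my_ops = { .func = my_tracer };

	add_ftrace_ops(&ftrace_list, &my_ops);
	global_ops.func = my_tracer;	/* stands in for update_global_ops() */

	global_ops.func(0x1000, 0x2000);

	remove_ftrace_ops(&ftrace_list, &my_ops);
	global_ops.func = ftrace_stub;
	return 0;
}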
Diffstat (limited to 'kernel/trace')
-rw-r--r--	kernel/trace/ftrace.c	69
1 file changed, 53 insertions(+), 16 deletions(-)
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index 8fef1d99bbbf..dcce0bf9c84d 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -91,6 +91,7 @@ static struct ftrace_ops *ftrace_list __read_mostly = &ftrace_list_end;
 ftrace_func_t ftrace_trace_function __read_mostly = ftrace_stub;
 ftrace_func_t __ftrace_trace_function __read_mostly = ftrace_stub;
 ftrace_func_t ftrace_pid_function __read_mostly = ftrace_stub;
+static struct ftrace_ops global_ops;
 
 /*
  * Traverse the ftrace_list, invoking all entries. The reason that we
@@ -153,7 +154,7 @@ static void ftrace_test_stop_func(unsigned long ip, unsigned long parent_ip)
 }
 #endif
 
-static void update_ftrace_function(void)
+static void update_global_ops(void)
 {
 	ftrace_func_t func;
 
@@ -173,6 +174,18 @@ static void update_ftrace_function(void)
 		set_ftrace_pid_function(func);
 		func = ftrace_pid_func;
 	}
+
+	global_ops.func = func;
+}
+
+static void update_ftrace_function(void)
+{
+	ftrace_func_t func;
+
+	update_global_ops();
+
+	func = global_ops.func;
+
 #ifdef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
 	ftrace_trace_function = func;
 #else
@@ -181,24 +194,19 @@ static void update_ftrace_function(void)
 #endif
 }
 
-static int __register_ftrace_function(struct ftrace_ops *ops)
+static void add_ftrace_ops(struct ftrace_ops **list, struct ftrace_ops *ops)
 {
-	ops->next = ftrace_list;
+	ops->next = *list;
 	/*
 	 * We are entering ops into the ftrace_list but another
 	 * CPU might be walking that list. We need to make sure
 	 * the ops->next pointer is valid before another CPU sees
 	 * the ops pointer included into the ftrace_list.
 	 */
-	rcu_assign_pointer(ftrace_list, ops);
-
-	if (ftrace_enabled)
-		update_ftrace_function();
-
-	return 0;
+	rcu_assign_pointer(*list, ops);
 }
 
-static int __unregister_ftrace_function(struct ftrace_ops *ops)
+static int remove_ftrace_ops(struct ftrace_ops **list, struct ftrace_ops *ops)
 {
 	struct ftrace_ops **p;
 
@@ -206,13 +214,12 @@ static int __unregister_ftrace_function(struct ftrace_ops *ops)
 	 * If we are removing the last function, then simply point
 	 * to the ftrace_stub.
 	 */
-	if (ftrace_list == ops && ops->next == &ftrace_list_end) {
-		ftrace_trace_function = ftrace_stub;
-		ftrace_list = &ftrace_list_end;
+	if (*list == ops && ops->next == &ftrace_list_end) {
+		*list = &ftrace_list_end;
 		return 0;
 	}
 
-	for (p = &ftrace_list; *p != &ftrace_list_end; p = &(*p)->next)
+	for (p = list; *p != &ftrace_list_end; p = &(*p)->next)
 		if (*p == ops)
 			break;
 
@@ -220,7 +227,37 @@ static int __unregister_ftrace_function(struct ftrace_ops *ops)
 		return -1;
 
 	*p = (*p)->next;
+	return 0;
+}
+
+static int __register_ftrace_function(struct ftrace_ops *ops)
+{
+	if (ftrace_disabled)
+		return -ENODEV;
+
+	if (FTRACE_WARN_ON(ops == &global_ops))
+		return -EINVAL;
+
+	add_ftrace_ops(&ftrace_list, ops);
+	if (ftrace_enabled)
+		update_ftrace_function();
+
+	return 0;
+}
 
+static int __unregister_ftrace_function(struct ftrace_ops *ops)
+{
+	int ret;
+
+	if (ftrace_disabled)
+		return -ENODEV;
+
+	if (FTRACE_WARN_ON(ops == &global_ops))
+		return -EINVAL;
+
+	ret = remove_ftrace_ops(&ftrace_list, ops);
+	if (ret < 0)
+		return ret;
 	if (ftrace_enabled)
 		update_ftrace_function();
 
@@ -894,7 +931,7 @@ enum {
 	FTRACE_OPS_FL_ENABLED = 1,
 };
 
-struct ftrace_ops global_ops = {
+static struct ftrace_ops global_ops = {
 	.func = ftrace_stub,
 	.notrace_hash = EMPTY_HASH,
 	.filter_hash = EMPTY_HASH,
@@ -3263,7 +3300,7 @@ void __init ftrace_init(void)
 
 #else
 
-struct ftrace_ops global_ops = {
+static struct ftrace_ops global_ops = {
 	.func = ftrace_stub,
 };
 