Diffstat (limited to 'kernel/trace/ftrace.c')
-rw-r--r--	kernel/trace/ftrace.c	41
1 file changed, 27 insertions(+), 14 deletions(-)
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index 53f6b6401cf0..02004ae91860 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -113,7 +113,7 @@ static int ftrace_disabled __read_mostly;
 
 static DEFINE_MUTEX(ftrace_lock);
 
-static struct ftrace_ops *ftrace_ops_list __read_mostly = &ftrace_list_end;
+static struct ftrace_ops __rcu *ftrace_ops_list __read_mostly = &ftrace_list_end;
 ftrace_func_t ftrace_trace_function __read_mostly = ftrace_stub;
 static struct ftrace_ops global_ops;
 
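The hunk above is the heart of the patch: marking ftrace_ops_list (and, further down, the ops->next links and the hash pointers) as __rcu. The annotation is consumed by sparse (make C=1), not by the compiler; once it is in place, any access that bypasses the RCU accessors shows up as an address-space warning. A minimal sketch of the pattern, using an invented counter structure rather than ftrace code:

	#include <linux/rcupdate.h>

	struct counter {
		int val;
	};

	static struct counter __rcu *active_counter;	/* hypothetical */

	/* Readers go through rcu_dereference() inside a read-side
	 * critical section; a plain read of active_counter would now
	 * draw a sparse warning. */
	static int read_val(void)
	{
		struct counter *c;
		int ret = 0;

		rcu_read_lock();
		c = rcu_dereference(active_counter);
		if (c)
			ret = c->val;
		rcu_read_unlock();
		return ret;
	}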
@@ -169,8 +169,11 @@ int ftrace_nr_registered_ops(void)
 
 	mutex_lock(&ftrace_lock);
 
-	for (ops = ftrace_ops_list;
-	     ops != &ftrace_list_end; ops = ops->next)
+	for (ops = rcu_dereference_protected(ftrace_ops_list,
+					     lockdep_is_held(&ftrace_lock));
+	     ops != &ftrace_list_end;
+	     ops = rcu_dereference_protected(ops->next,
+					     lockdep_is_held(&ftrace_lock)))
 		cnt++;
 
 	mutex_unlock(&ftrace_lock);
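ftrace_nr_registered_ops() walks the list with ftrace_lock held, so it uses rcu_dereference_protected() rather than rcu_dereference(): the lockdep expression documents, and under CONFIG_PROVE_RCU verifies, that the caller already holds the lock that excludes updaters, so no read-side critical section or barrier is needed. Continuing the illustrative counter sketch from above, with a hypothetical counter_lock:

	static DEFINE_MUTEX(counter_lock);	/* hypothetical writer lock */

	static void update_val(int v)
	{
		struct counter *c;

		mutex_lock(&counter_lock);
		/* No rcu_read_lock() needed: counter_lock serializes
		 * writers, and lockdep_is_held() lets PROVE_RCU check
		 * that claim at runtime. */
		c = rcu_dereference_protected(active_counter,
					      lockdep_is_held(&counter_lock));
		if (c)
			c->val = v;
		mutex_unlock(&counter_lock);
	}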
@@ -275,10 +278,11 @@ static void update_ftrace_function(void)
 	 * If there's only one ftrace_ops registered, the ftrace_ops_list
 	 * will point to the ops we want.
 	 */
-	set_function_trace_op = ftrace_ops_list;
+	set_function_trace_op = rcu_dereference_protected(ftrace_ops_list,
+						lockdep_is_held(&ftrace_lock));
 
 	/* If there's no ftrace_ops registered, just call the stub function */
-	if (ftrace_ops_list == &ftrace_list_end) {
+	if (set_function_trace_op == &ftrace_list_end) {
 		func = ftrace_stub;
 
 		/*
@@ -286,7 +290,8 @@ static void update_ftrace_function(void)
 	 * recursion safe and not dynamic and the arch supports passing ops,
 	 * then have the mcount trampoline call the function directly.
 	 */
-	} else if (ftrace_ops_list->next == &ftrace_list_end) {
+	} else if (rcu_dereference_protected(ftrace_ops_list->next,
+			lockdep_is_held(&ftrace_lock)) == &ftrace_list_end) {
 		func = ftrace_ops_get_list_func(ftrace_ops_list);
 
 	} else {
@@ -348,9 +353,11 @@ int using_ftrace_ops_list_func(void)
 	return ftrace_trace_function == ftrace_ops_list_func;
 }
 
-static void add_ftrace_ops(struct ftrace_ops **list, struct ftrace_ops *ops)
+static void add_ftrace_ops(struct ftrace_ops __rcu **list,
+			   struct ftrace_ops *ops)
 {
-	ops->next = *list;
+	rcu_assign_pointer(ops->next, *list);
+
 	/*
 	 * We are entering ops into the list but another
 	 * CPU might be walking that list. We need to make sure
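Both stores in add_ftrace_ops() are now rcu_assign_pointer(). The publication store on the list head, rcu_assign_pointer(*list, ops), visible at the top of the next hunk, is the one that matters for ordering: its release semantics guarantee that a lockless walker which observes the new head also observes a fully initialized ops->next. The new call on ops->next mainly satisfies sparse now that the field is __rcu. The insertion pattern reduced to its core, again with invented names and a hypothetical list_lock:

	struct node {
		struct node __rcu *next;
		int data;
	};

	static struct node __rcu *list_head;	/* hypothetical */
	static DEFINE_MUTEX(list_lock);		/* hypothetical */

	static void push(struct node *n, int data)
	{
		n->data = data;
		mutex_lock(&list_lock);
		/* Link the new node to the current head first... */
		rcu_assign_pointer(n->next,
				   rcu_dereference_protected(list_head,
						lockdep_is_held(&list_lock)));
		/* ...then publish it: the release barrier ensures readers
		 * that see the new head also see n->data and n->next. */
		rcu_assign_pointer(list_head, n);
		mutex_unlock(&list_lock);
	}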
@@ -360,7 +367,8 @@ static void add_ftrace_ops(struct ftrace_ops **list, struct ftrace_ops *ops)
 	rcu_assign_pointer(*list, ops);
 }
 
-static int remove_ftrace_ops(struct ftrace_ops **list, struct ftrace_ops *ops)
+static int remove_ftrace_ops(struct ftrace_ops __rcu **list,
+			     struct ftrace_ops *ops)
 {
 	struct ftrace_ops **p;
 
@@ -368,7 +376,10 @@ static int remove_ftrace_ops(struct ftrace_ops **list, struct ftrace_ops *ops)
 	 * If we are removing the last function, then simply point
 	 * to the ftrace_stub.
 	 */
-	if (*list == ops && ops->next == &ftrace_list_end) {
+	if (rcu_dereference_protected(*list,
+			lockdep_is_held(&ftrace_lock)) == ops &&
+	    rcu_dereference_protected(ops->next,
+			lockdep_is_held(&ftrace_lock)) == &ftrace_list_end) {
 		*list = &ftrace_list_end;
 		return 0;
 	}
@@ -1569,8 +1580,8 @@ ftrace_ops_test(struct ftrace_ops *ops, unsigned long ip, void *regs)
 		return 0;
 #endif
 
-	hash.filter_hash = rcu_dereference_raw_notrace(ops->func_hash->filter_hash);
-	hash.notrace_hash = rcu_dereference_raw_notrace(ops->func_hash->notrace_hash);
+	rcu_assign_pointer(hash.filter_hash, ops->func_hash->filter_hash);
+	rcu_assign_pointer(hash.notrace_hash, ops->func_hash->notrace_hash);
 
 	if (hash_contains_ip(ip, &hash))
 		ret = 1;
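Here, hash is a stack-local copy whose filter_hash/notrace_hash members are themselves __rcu pointers, so the old plain assignments (via rcu_dereference_raw_notrace(), which returns a plain pointer) now trip sparse. Storing through rcu_assign_pointer() keeps the address spaces consistent; the release barrier it implies is unnecessary for a stack-local copy but harmless. A reduced sketch of copying one __rcu pointer into another without warnings, with types invented for illustration:

	struct bucket;

	struct hash_pair {
		struct bucket __rcu *filter;	/* hypothetical */
	};

	static void snapshot(struct hash_pair *dst, struct hash_pair *src)
	{
		/* rcu_access_pointer() reads the __rcu source without
		 * requiring a read-side critical section, and
		 * rcu_assign_pointer() stores it back through an __rcu
		 * lvalue, so sparse sees matching address spaces. */
		rcu_assign_pointer(dst->filter,
				   rcu_access_pointer(src->filter));
	}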
@@ -2840,7 +2851,8 @@ static int ftrace_shutdown(struct ftrace_ops *ops, int command)
 	 * If there's no more ops registered with ftrace, run a
 	 * sanity check to make sure all rec flags are cleared.
 	 */
-	if (ftrace_ops_list == &ftrace_list_end) {
+	if (rcu_dereference_protected(ftrace_ops_list,
+			lockdep_is_held(&ftrace_lock)) == &ftrace_list_end) {
 		struct ftrace_page *pg;
 		struct dyn_ftrace *rec;
 
@@ -6453,7 +6465,8 @@ ftrace_enable_sysctl(struct ctl_table *table, int write,
 	if (ftrace_enabled) {
 
 		/* we are starting ftrace again */
-		if (ftrace_ops_list != &ftrace_list_end)
+		if (rcu_dereference_protected(ftrace_ops_list,
+				lockdep_is_held(&ftrace_lock)) != &ftrace_list_end)
 			update_ftrace_function();
 
 		ftrace_startup_sysctl();