aboutsummaryrefslogtreecommitdiffstats
path: root/kernel/trace
diff options
context:
space:
mode:
authorSteven Rostedt (Red Hat) <rostedt@goodmis.org>2013-03-13 12:42:58 -0400
committerSteven Rostedt <rostedt@goodmis.org>2013-03-15 00:36:03 -0400
commit7818b3886545f89549185e4023743e2df91d1fa1 (patch)
tree1037f3bb76e18594a9267d27afc27bbdf02b8e65 /kernel/trace
parente67efb93f0e9130174293ffaa5975f87b301b531 (diff)
ftrace: Use manual free after synchronize_sched() not call_rcu_sched()
The entries to the probe hash must be freed after a synchronize_sched() after the entry has been removed from the hash. As the entries are registered with ops that may have their own callbacks, and these callbacks may sleep, we can not use call_rcu_sched() because the rcu callbacks registered with that are called from a softirq context. Instead of using call_rcu_sched(), manually save the entries on a free_list and at the end of the loop that removes the entries, do a synchronize_sched() and then go through the free_list, freeing the entries. Cc: Paul McKenney <paulmck@linux.vnet.ibm.com> Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
Diffstat (limited to 'kernel/trace')
-rw-r--r--kernel/trace/ftrace.c19
1 file changed, 13 insertions, 6 deletions
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index ff0ef41c6d93..25770824598f 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -1068,7 +1068,7 @@ struct ftrace_func_probe {
1068 unsigned long flags; 1068 unsigned long flags;
1069 unsigned long ip; 1069 unsigned long ip;
1070 void *data; 1070 void *data;
1071 struct rcu_head rcu; 1071 struct list_head free_list;
1072}; 1072};
1073 1073
1074struct ftrace_func_entry { 1074struct ftrace_func_entry {
@@ -2978,11 +2978,8 @@ static void __disable_ftrace_function_probe(void)
2978} 2978}
2979 2979
2980 2980
2981static void ftrace_free_entry_rcu(struct rcu_head *rhp) 2981static void ftrace_free_entry(struct ftrace_func_probe *entry)
2982{ 2982{
2983 struct ftrace_func_probe *entry =
2984 container_of(rhp, struct ftrace_func_probe, rcu);
2985
2986 if (entry->ops->free) 2983 if (entry->ops->free)
2987 entry->ops->free(entry->ops, entry->ip, &entry->data); 2984 entry->ops->free(entry->ops, entry->ip, &entry->data);
2988 kfree(entry); 2985 kfree(entry);
@@ -3092,7 +3089,9 @@ __unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
3092{ 3089{
3093 struct ftrace_func_entry *rec_entry; 3090 struct ftrace_func_entry *rec_entry;
3094 struct ftrace_func_probe *entry; 3091 struct ftrace_func_probe *entry;
3092 struct ftrace_func_probe *p;
3095 struct ftrace_hash **orig_hash = &trace_probe_ops.filter_hash; 3093 struct ftrace_hash **orig_hash = &trace_probe_ops.filter_hash;
3094 struct list_head free_list;
3096 struct ftrace_hash *hash; 3095 struct ftrace_hash *hash;
3097 struct hlist_node *n, *tmp; 3096 struct hlist_node *n, *tmp;
3098 char str[KSYM_SYMBOL_LEN]; 3097 char str[KSYM_SYMBOL_LEN];
@@ -3120,6 +3119,8 @@ __unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
3120 /* Hmm, should report this somehow */ 3119 /* Hmm, should report this somehow */
3121 goto out_unlock; 3120 goto out_unlock;
3122 3121
3122 INIT_LIST_HEAD(&free_list);
3123
3123 for (i = 0; i < FTRACE_FUNC_HASHSIZE; i++) { 3124 for (i = 0; i < FTRACE_FUNC_HASHSIZE; i++) {
3124 struct hlist_head *hhd = &ftrace_func_hash[i]; 3125 struct hlist_head *hhd = &ftrace_func_hash[i];
3125 3126
@@ -3146,7 +3147,7 @@ __unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
3146 free_hash_entry(hash, rec_entry); 3147 free_hash_entry(hash, rec_entry);
3147 3148
3148 hlist_del_rcu(&entry->node); 3149 hlist_del_rcu(&entry->node);
3149 call_rcu_sched(&entry->rcu, ftrace_free_entry_rcu); 3150 list_add(&entry->free_list, &free_list);
3150 } 3151 }
3151 } 3152 }
3152 __disable_ftrace_function_probe(); 3153 __disable_ftrace_function_probe();
@@ -3155,6 +3156,12 @@ __unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
3155 * probe is removed, a null hash means *all enabled*. 3156 * probe is removed, a null hash means *all enabled*.
3156 */ 3157 */
3157 ftrace_hash_move(&trace_probe_ops, 1, orig_hash, hash); 3158 ftrace_hash_move(&trace_probe_ops, 1, orig_hash, hash);
3159 synchronize_sched();
3160 list_for_each_entry_safe(entry, p, &free_list, free_list) {
3161 list_del(&entry->free_list);
3162 ftrace_free_entry(entry);
3163 }
3164
3158 out_unlock: 3165 out_unlock:
3159 mutex_unlock(&ftrace_lock); 3166 mutex_unlock(&ftrace_lock);
3160 free_ftrace_hash(hash); 3167 free_ftrace_hash(hash);