aboutsummaryrefslogtreecommitdiffstats
path: root/kernel
diff options
context:
space:
mode:
authorSteven Rostedt <srostedt@redhat.com>2011-07-13 15:03:44 -0400
committerHerton Ronaldo Krzesinski <herton.krzesinski@canonical.com>2012-02-13 15:15:02 -0500
commitc05d4534a98930ec3fc1a14ad8c7ccdc0813dddd (patch)
treee65ea4f3743e3352c9d36675c72966057fdb987d /kernel
parent72918109166d5d3a58f3d4a1e61669ff2ac96829 (diff)
ftrace: Balance records when updating the hash
BugLink: http://bugs.launchpad.net/bugs/926309 commit 41fb61c2d08107ce96a5dcb3a6289b2afd3e135c upstream. Whenever the hash of the ftrace_ops is updated, the record counts must be balanced. This requires disabling the records that are set in the original hash, and then enabling the records that are set in the updated hash. Moving the update into ftrace_hash_move() removes the bug where the hash was updated but the records were not, which results in ftrace triggering a warning and disabling itself because the ftrace_ops filter is updated while the ftrace_ops was registered, and then the failure happens when the ftrace_ops is unregistered. The current code will not trigger this bug, but new code will. Signed-off-by: Steven Rostedt <rostedt@goodmis.org> Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org> Signed-off-by: Tim Gardner <tim.gardner@canonical.com>
Diffstat (limited to 'kernel')
-rw-r--r--kernel/trace/ftrace.c49
1 files changed, 33 insertions, 16 deletions
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index ef9271b69b4..1eef6cf38d5 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -1182,8 +1182,14 @@ alloc_and_copy_ftrace_hash(int size_bits, struct ftrace_hash *hash)
1182 return NULL; 1182 return NULL;
1183} 1183}
1184 1184
1185static void
1186ftrace_hash_rec_disable(struct ftrace_ops *ops, int filter_hash);
1187static void
1188ftrace_hash_rec_enable(struct ftrace_ops *ops, int filter_hash);
1189
1185static int 1190static int
1186ftrace_hash_move(struct ftrace_hash **dst, struct ftrace_hash *src) 1191ftrace_hash_move(struct ftrace_ops *ops, int enable,
1192 struct ftrace_hash **dst, struct ftrace_hash *src)
1187{ 1193{
1188 struct ftrace_func_entry *entry; 1194 struct ftrace_func_entry *entry;
1189 struct hlist_node *tp, *tn; 1195 struct hlist_node *tp, *tn;
@@ -1193,9 +1199,16 @@ ftrace_hash_move(struct ftrace_hash **dst, struct ftrace_hash *src)
1193 unsigned long key; 1199 unsigned long key;
1194 int size = src->count; 1200 int size = src->count;
1195 int bits = 0; 1201 int bits = 0;
1202 int ret;
1196 int i; 1203 int i;
1197 1204
1198 /* 1205 /*
1206 * Remove the current set, update the hash and add
1207 * them back.
1208 */
1209 ftrace_hash_rec_disable(ops, enable);
1210
1211 /*
1199 * If the new source is empty, just free dst and assign it 1212 * If the new source is empty, just free dst and assign it
1200 * the empty_hash. 1213 * the empty_hash.
1201 */ 1214 */
@@ -1215,9 +1228,10 @@ ftrace_hash_move(struct ftrace_hash **dst, struct ftrace_hash *src)
1215 if (bits > FTRACE_HASH_MAX_BITS) 1228 if (bits > FTRACE_HASH_MAX_BITS)
1216 bits = FTRACE_HASH_MAX_BITS; 1229 bits = FTRACE_HASH_MAX_BITS;
1217 1230
1231 ret = -ENOMEM;
1218 new_hash = alloc_ftrace_hash(bits); 1232 new_hash = alloc_ftrace_hash(bits);
1219 if (!new_hash) 1233 if (!new_hash)
1220 return -ENOMEM; 1234 goto out;
1221 1235
1222 size = 1 << src->size_bits; 1236 size = 1 << src->size_bits;
1223 for (i = 0; i < size; i++) { 1237 for (i = 0; i < size; i++) {
@@ -1236,7 +1250,16 @@ ftrace_hash_move(struct ftrace_hash **dst, struct ftrace_hash *src)
1236 rcu_assign_pointer(*dst, new_hash); 1250 rcu_assign_pointer(*dst, new_hash);
1237 free_ftrace_hash_rcu(old_hash); 1251 free_ftrace_hash_rcu(old_hash);
1238 1252
1239 return 0; 1253 ret = 0;
1254 out:
1255 /*
1256 * Enable regardless of ret:
1257 * On success, we enable the new hash.
1258 * On failure, we re-enable the original hash.
1259 */
1260 ftrace_hash_rec_enable(ops, enable);
1261
1262 return ret;
1240} 1263}
1241 1264
1242/* 1265/*
@@ -2877,7 +2900,7 @@ ftrace_set_regex(struct ftrace_ops *ops, unsigned char *buf, int len,
2877 ftrace_match_records(hash, buf, len); 2900 ftrace_match_records(hash, buf, len);
2878 2901
2879 mutex_lock(&ftrace_lock); 2902 mutex_lock(&ftrace_lock);
2880 ret = ftrace_hash_move(orig_hash, hash); 2903 ret = ftrace_hash_move(ops, enable, orig_hash, hash);
2881 mutex_unlock(&ftrace_lock); 2904 mutex_unlock(&ftrace_lock);
2882 2905
2883 mutex_unlock(&ftrace_regex_lock); 2906 mutex_unlock(&ftrace_regex_lock);
@@ -3060,18 +3083,12 @@ ftrace_regex_release(struct inode *inode, struct file *file)
3060 orig_hash = &iter->ops->notrace_hash; 3083 orig_hash = &iter->ops->notrace_hash;
3061 3084
3062 mutex_lock(&ftrace_lock); 3085 mutex_lock(&ftrace_lock);
3063 /* 3086 ret = ftrace_hash_move(iter->ops, filter_hash,
3064 * Remove the current set, update the hash and add 3087 orig_hash, iter->hash);
3065 * them back. 3088 if (!ret && (iter->ops->flags & FTRACE_OPS_FL_ENABLED)
3066 */ 3089 && ftrace_enabled)
3067 ftrace_hash_rec_disable(iter->ops, filter_hash); 3090 ftrace_run_update_code(FTRACE_ENABLE_CALLS);
3068 ret = ftrace_hash_move(orig_hash, iter->hash); 3091
3069 if (!ret) {
3070 ftrace_hash_rec_enable(iter->ops, filter_hash);
3071 if (iter->ops->flags & FTRACE_OPS_FL_ENABLED
3072 && ftrace_enabled)
3073 ftrace_run_update_code(FTRACE_ENABLE_CALLS);
3074 }
3075 mutex_unlock(&ftrace_lock); 3092 mutex_unlock(&ftrace_lock);
3076 } 3093 }
3077 free_ftrace_hash(iter->hash); 3094 free_ftrace_hash(iter->hash);