author		Steven Rostedt <srostedt@redhat.com>	2011-07-13 15:03:44 -0400
committer	Steven Rostedt <rostedt@goodmis.org>	2011-07-13 22:00:50 -0400
commit		41fb61c2d08107ce96a5dcb3a6289b2afd3e135c (patch)
tree		ca3ae1796ca1d3f01f2bb62eba1fbc93db1affae /kernel/trace/ftrace.c
parent		4376cac66778b25e599be3f5d54f33f58ba8ead7 (diff)
ftrace: Balance records when updating the hash
Whenever the hash of the ftrace_ops is updated, the record counts must be balanced. This requires disabling the records that are set in the original hash, and then enabling the records that are set in the updated hash.

Moving the update into ftrace_hash_move() removes the bug where the hash was updated but the records were not. That bug makes ftrace trigger a warning and disable itself when the filter of an ftrace_ops is updated while the ftrace_ops is registered, with the failure showing up when the ftrace_ops is later unregistered.

The current code will not trigger this bug, but new code will.

Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
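For illustration, below is a minimal userspace sketch (not kernel code) of the balancing pattern this commit folds into ftrace_hash_move(): drop the per-record counts held by the old hash, publish the new hash, then add the counts for the new hash, so register/unregister accounting always returns to zero. All names in it (struct record, hash_rec_update, hash_move, func_a..func_d) are hypothetical stand-ins for ftrace's dyn_ftrace records, ftrace_hash, and ftrace_hash_rec_enable()/ftrace_hash_rec_disable().

/*
 * Hypothetical userspace model of the balance requirement; not ftrace
 * code.  Each "record" stands in for a dyn_ftrace entry and refcount
 * stands in for its enabled count.
 */
#include <stdio.h>

#define NRECS 4

struct record {
	const char *name;
	int refcount;
};

static struct record recs[NRECS] = {
	{ "func_a", 0 }, { "func_b", 0 }, { "func_c", 0 }, { "func_d", 0 },
};

/* A "hash" here is just a bitmask selecting which records are filtered. */
static void hash_rec_update(unsigned int hash, int delta)
{
	int i;

	for (i = 0; i < NRECS; i++)
		if (hash & (1u << i))
			recs[i].refcount += delta;
}

/*
 * Swap the active hash while keeping the counts balanced: disable the
 * records selected by the old hash, publish the new hash, enable the
 * records selected by the new hash.
 */
static unsigned int hash_move(unsigned int old_hash, unsigned int new_hash)
{
	hash_rec_update(old_hash, -1);	/* cf. ftrace_hash_rec_disable() */
	/* ...the real code rebuilds and RCU-publishes the hash here... */
	hash_rec_update(new_hash, +1);	/* cf. ftrace_hash_rec_enable() */
	return new_hash;
}

int main(void)
{
	unsigned int active = 0x3;		/* filter func_a and func_b */
	int i;

	hash_rec_update(active, +1);		/* "register" the ops */
	active = hash_move(active, 0x6);	/* refilter to func_b, func_c */
	hash_rec_update(active, -1);		/* "unregister" the ops */

	/*
	 * Every count must be back at 0; a nonzero count is the kind of
	 * imbalance that makes ftrace warn and disable itself.
	 */
	for (i = 0; i < NRECS; i++)
		printf("%s: %d\n", recs[i].name, recs[i].refcount);
	return 0;
}

If a caller updated the hash without performing both halves of the update (as could happen when the disable/enable calls lived outside ftrace_hash_move()), the final counts would not return to zero, which is the imbalance the commit message describes.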
Diffstat (limited to 'kernel/trace/ftrace.c')
-rw-r--r--	kernel/trace/ftrace.c	49
1 file changed, 33 insertions(+), 16 deletions(-)
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index df93392aad89..853f6f0a4b4a 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -1170,8 +1170,14 @@ alloc_and_copy_ftrace_hash(int size_bits, struct ftrace_hash *hash)
 	return NULL;
 }
 
+static void
+ftrace_hash_rec_disable(struct ftrace_ops *ops, int filter_hash);
+static void
+ftrace_hash_rec_enable(struct ftrace_ops *ops, int filter_hash);
+
 static int
-ftrace_hash_move(struct ftrace_hash **dst, struct ftrace_hash *src)
+ftrace_hash_move(struct ftrace_ops *ops, int enable,
+		 struct ftrace_hash **dst, struct ftrace_hash *src)
 {
 	struct ftrace_func_entry *entry;
 	struct hlist_node *tp, *tn;
@@ -1181,9 +1187,16 @@ ftrace_hash_move(struct ftrace_hash **dst, struct ftrace_hash *src)
 	unsigned long key;
 	int size = src->count;
 	int bits = 0;
+	int ret;
 	int i;
 
 	/*
+	 * Remove the current set, update the hash and add
+	 * them back.
+	 */
+	ftrace_hash_rec_disable(ops, enable);
+
+	/*
 	 * If the new source is empty, just free dst and assign it
 	 * the empty_hash.
 	 */
@@ -1203,9 +1216,10 @@ ftrace_hash_move(struct ftrace_hash **dst, struct ftrace_hash *src)
 	if (bits > FTRACE_HASH_MAX_BITS)
 		bits = FTRACE_HASH_MAX_BITS;
 
+	ret = -ENOMEM;
 	new_hash = alloc_ftrace_hash(bits);
 	if (!new_hash)
-		return -ENOMEM;
+		goto out;
 
 	size = 1 << src->size_bits;
 	for (i = 0; i < size; i++) {
@@ -1224,7 +1238,16 @@ ftrace_hash_move(struct ftrace_hash **dst, struct ftrace_hash *src)
 	rcu_assign_pointer(*dst, new_hash);
 	free_ftrace_hash_rcu(old_hash);
 
-	return 0;
+	ret = 0;
+ out:
+	/*
+	 * Enable regardless of ret:
+	 *  On success, we enable the new hash.
+	 *  On failure, we re-enable the original hash.
+	 */
+	ftrace_hash_rec_enable(ops, enable);
+
+	return ret;
 }
 
 /*
@@ -2845,7 +2868,7 @@ ftrace_set_regex(struct ftrace_ops *ops, unsigned char *buf, int len,
 	ftrace_match_records(hash, buf, len);
 
 	mutex_lock(&ftrace_lock);
-	ret = ftrace_hash_move(orig_hash, hash);
+	ret = ftrace_hash_move(ops, enable, orig_hash, hash);
 	mutex_unlock(&ftrace_lock);
 
 	mutex_unlock(&ftrace_regex_lock);
@@ -3028,18 +3051,12 @@ ftrace_regex_release(struct inode *inode, struct file *file)
 			orig_hash = &iter->ops->notrace_hash;
 
 		mutex_lock(&ftrace_lock);
-		/*
-		 * Remove the current set, update the hash and add
-		 * them back.
-		 */
-		ftrace_hash_rec_disable(iter->ops, filter_hash);
-		ret = ftrace_hash_move(orig_hash, iter->hash);
-		if (!ret) {
-			ftrace_hash_rec_enable(iter->ops, filter_hash);
-			if (iter->ops->flags & FTRACE_OPS_FL_ENABLED
-			    && ftrace_enabled)
-				ftrace_run_update_code(FTRACE_ENABLE_CALLS);
-		}
+		ret = ftrace_hash_move(iter->ops, filter_hash,
+				       orig_hash, iter->hash);
+		if (!ret && (iter->ops->flags & FTRACE_OPS_FL_ENABLED)
+		    && ftrace_enabled)
+			ftrace_run_update_code(FTRACE_ENABLE_CALLS);
+
 		mutex_unlock(&ftrace_lock);
 	}
 	free_ftrace_hash(iter->hash);