path: root/kernel/trace/ftrace.c
author    Ingo Molnar <mingo@elte.hu>  2011-07-21 03:32:40 -0400
committer Ingo Molnar <mingo@elte.hu>  2011-07-21 03:32:40 -0400
commit    40bcea7bbe8fe452a2d272e2ffd3dea281eec9ff (patch)
tree      aedb6d02e53e3cf84cc32fd81db84032cee205e1 /kernel/trace/ftrace.c
parent    492f73a303b488ffd67097b2351d54aa6e6c7c73 (diff)
parent    14a8fd7ceea6915c613746203d6e9a2bf273f16c (diff)
Merge branch 'tip/perf/core' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-2.6-trace into perf/core
Diffstat (limited to 'kernel/trace/ftrace.c')
-rw-r--r--  kernel/trace/ftrace.c  95
1 file changed, 74 insertions(+), 21 deletions(-)
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index a0e246e2cee..c3e4575e782 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -88,6 +88,7 @@ static struct ftrace_ops ftrace_list_end __read_mostly = {
 static struct ftrace_ops *ftrace_global_list __read_mostly = &ftrace_list_end;
 static struct ftrace_ops *ftrace_ops_list __read_mostly = &ftrace_list_end;
 ftrace_func_t ftrace_trace_function __read_mostly = ftrace_stub;
+static ftrace_func_t __ftrace_trace_function_delay __read_mostly = ftrace_stub;
 ftrace_func_t __ftrace_trace_function __read_mostly = ftrace_stub;
 ftrace_func_t ftrace_pid_function __read_mostly = ftrace_stub;
 static struct ftrace_ops global_ops;
@@ -146,9 +147,11 @@ void clear_ftrace_function(void)
 {
 	ftrace_trace_function = ftrace_stub;
 	__ftrace_trace_function = ftrace_stub;
+	__ftrace_trace_function_delay = ftrace_stub;
 	ftrace_pid_function = ftrace_stub;
 }
 
+#undef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
 #ifndef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
 /*
  * For those archs that do not test ftrace_trace_stop in their
@@ -208,7 +211,12 @@ static void update_ftrace_function(void)
 #ifdef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
 	ftrace_trace_function = func;
 #else
+#ifdef CONFIG_DYNAMIC_FTRACE
+	/* do not update till all functions have been modified */
+	__ftrace_trace_function_delay = func;
+#else
 	__ftrace_trace_function = func;
+#endif
 	ftrace_trace_function = ftrace_test_stop_func;
 #endif
 }
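
The two preceding hunks stage the callback instead of publishing it: with dynamic ftrace on an arch that lacks CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST, update_ftrace_function() now parks the new function in __ftrace_trace_function_delay, and the live pointer is only switched after all call sites have been modified. A minimal userspace sketch of that two-stage publish (all demo_* names are illustrative, not kernel API):

#include <stdio.h>

/* Stand-in for ftrace_func_t. */
typedef void (*trace_fn)(unsigned long ip);

static void trace_stub(unsigned long ip) { (void)ip; }
static void my_tracer(unsigned long ip) { printf("traced %#lx\n", ip); }

static trace_fn live_fn  = trace_stub;	/* what call sites invoke now */
static trace_fn delay_fn = trace_stub;	/* staged, not yet published  */

/* Stage the new callback without exposing it to running call sites. */
static void demo_update_function(trace_fn func)
{
	delay_fn = func;
}

/* Publish the staged callback only after every call site is patched. */
static void demo_modify_code(void)
{
	/* ... all mcount call sites are rewritten here ... */
	live_fn = delay_fn;
}

int main(void)
{
	demo_update_function(my_tracer);	/* staged: live_fn unchanged */
	demo_modify_code();			/* published only here       */
	live_fn(0x1234);
	return 0;
}
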
@@ -1170,8 +1178,14 @@ alloc_and_copy_ftrace_hash(int size_bits, struct ftrace_hash *hash)
 	return NULL;
 }
 
+static void
+ftrace_hash_rec_disable(struct ftrace_ops *ops, int filter_hash);
+static void
+ftrace_hash_rec_enable(struct ftrace_ops *ops, int filter_hash);
+
 static int
-ftrace_hash_move(struct ftrace_hash **dst, struct ftrace_hash *src)
+ftrace_hash_move(struct ftrace_ops *ops, int enable,
+		 struct ftrace_hash **dst, struct ftrace_hash *src)
 {
 	struct ftrace_func_entry *entry;
 	struct hlist_node *tp, *tn;
@@ -1181,9 +1195,16 @@ ftrace_hash_move(struct ftrace_hash **dst, struct ftrace_hash *src)
 	unsigned long key;
 	int size = src->count;
 	int bits = 0;
+	int ret;
 	int i;
 
 	/*
+	 * Remove the current set, update the hash and add
+	 * them back.
+	 */
+	ftrace_hash_rec_disable(ops, enable);
+
+	/*
 	 * If the new source is empty, just free dst and assign it
 	 * the empty_hash.
 	 */
@@ -1203,9 +1224,10 @@ ftrace_hash_move(struct ftrace_hash **dst, struct ftrace_hash *src)
 	if (bits > FTRACE_HASH_MAX_BITS)
 		bits = FTRACE_HASH_MAX_BITS;
 
+	ret = -ENOMEM;
 	new_hash = alloc_ftrace_hash(bits);
 	if (!new_hash)
-		return -ENOMEM;
+		goto out;
 
 	size = 1 << src->size_bits;
 	for (i = 0; i < size; i++) {
@@ -1224,7 +1246,16 @@ ftrace_hash_move(struct ftrace_hash **dst, struct ftrace_hash *src)
 	rcu_assign_pointer(*dst, new_hash);
 	free_ftrace_hash_rcu(old_hash);
 
-	return 0;
+	ret = 0;
+ out:
+	/*
+	 * Enable regardless of ret:
+	 *  On success, we enable the new hash.
+	 *  On failure, we re-enable the original hash.
+	 */
+	ftrace_hash_rec_enable(ops, enable);
+
+	return ret;
 }
 
 /*
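
After these hunks, ftrace_hash_move() owns the whole disable/move/enable bracket: record accounting is dropped up front and re-added on every exit path, covering the new hash on success and the untouched original on failure. A self-contained sketch of that control flow, using toy demo_* types rather than the kernel's:

#include <errno.h>
#include <stdlib.h>

/* Toy stand-ins for struct ftrace_ops / struct ftrace_hash. */
struct demo_hash { int count; };
struct demo_ops  { int enabled; };

static void demo_rec_disable(struct demo_ops *ops) { (void)ops; /* drop accounting   */ }
static void demo_rec_enable(struct demo_ops *ops)  { (void)ops; /* re-add accounting */ }

/*
 * Same shape as the reworked ftrace_hash_move(): drop record
 * accounting first, attempt the swap, and re-enable on every exit
 * path -- for the new hash on success, for the original on failure.
 */
static int demo_hash_move(struct demo_ops *ops,
			  struct demo_hash **dst, struct demo_hash *src)
{
	struct demo_hash *new_hash;
	int ret = -ENOMEM;

	demo_rec_disable(ops);		/* remove the current set */

	new_hash = malloc(sizeof(*new_hash));
	if (!new_hash)
		goto out;		/* *dst still holds the old hash */

	new_hash->count = src->count;	/* "move" the entries */
	free(*dst);
	*dst = new_hash;
	ret = 0;
out:
	demo_rec_enable(ops);		/* new set on success, old on error */
	return ret;
}

int main(void)
{
	struct demo_ops ops = { .enabled = 1 };
	struct demo_hash src = { .count = 3 };
	struct demo_hash *dst = calloc(1, sizeof(*dst));
	int ret;

	if (!dst)
		return 1;
	ret = demo_hash_move(&ops, &dst, &src);
	free(dst);
	return ret ? 1 : 0;
}
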
@@ -1584,6 +1615,12 @@ static int __ftrace_modify_code(void *data)
 {
 	int *command = data;
 
+	/*
+	 * Do not call function tracer while we update the code.
+	 * We are in stop machine, no worrying about races.
+	 */
+	function_trace_stop++;
+
 	if (*command & FTRACE_ENABLE_CALLS)
 		ftrace_replace_code(1);
 	else if (*command & FTRACE_DISABLE_CALLS)
@@ -1597,6 +1634,18 @@ static int __ftrace_modify_code(void *data)
 	else if (*command & FTRACE_STOP_FUNC_RET)
 		ftrace_disable_ftrace_graph_caller();
 
+#ifndef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
+	/*
+	 * For archs that call ftrace_test_stop_func(), we must
+	 * wait till after we update all the function callers
+	 * before we update the callback. This keeps different
+	 * ops that record different functions from corrupting
+	 * each other.
+	 */
+	__ftrace_trace_function = __ftrace_trace_function_delay;
+#endif
+	function_trace_stop--;
+
 	return 0;
 }
 
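
__ftrace_modify_code() now raises function_trace_stop for the duration of the rewrite and publishes the delayed callback only after ftrace_replace_code() has touched every call site, so an ops never sees functions registered to a different ops. A stand-alone sketch of that ordering (demo_* names are invented for illustration):

#include <stdio.h>

typedef void (*trace_fn)(unsigned long ip);

static void trace_stub(unsigned long ip) { (void)ip; }
static void my_tracer(unsigned long ip) { printf("traced %#lx\n", ip); }

static int demo_trace_stop;		/* function_trace_stop stand-in */
static trace_fn live_fn  = trace_stub;
static trace_fn delay_fn = trace_stub;

/* Equivalent of ftrace_test_stop_func(): every hit checks the flag. */
static void demo_test_stop_func(unsigned long ip)
{
	if (demo_trace_stop)
		return;			/* tracing suppressed mid-update */
	live_fn(ip);
}

static void demo_modify_code(void)
{
	demo_trace_stop++;		/* no callbacks while text is patched */
	/* ... ftrace_replace_code() work happens here ... */
	live_fn = delay_fn;		/* publish the staged callback last */
	demo_trace_stop--;
}

int main(void)
{
	delay_fn = my_tracer;		/* staged by the update step         */
	demo_test_stop_func(0x1);	/* still the stub: nothing printed   */
	demo_modify_code();
	demo_test_stop_func(0x2);	/* now reaches my_tracer             */
	return 0;
}
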
@@ -2865,7 +2914,11 @@ ftrace_set_regex(struct ftrace_ops *ops, unsigned char *buf, int len,
 	ftrace_match_records(hash, buf, len);
 
 	mutex_lock(&ftrace_lock);
-	ret = ftrace_hash_move(orig_hash, hash);
+	ret = ftrace_hash_move(ops, enable, orig_hash, hash);
+	if (!ret && ops->flags & FTRACE_OPS_FL_ENABLED
+	    && ftrace_enabled)
+		ftrace_run_update_code(FTRACE_ENABLE_CALLS);
+
 	mutex_unlock(&ftrace_lock);
 
 	mutex_unlock(&ftrace_regex_lock);
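
ftrace_set_regex() services writes to the filter files, so a successful hash move now also re-runs ftrace_run_update_code() when the ops is already enabled, letting a new filter take effect immediately. For context, a plausible userspace path into this code (assuming the usual 2011-era debugfs mount point; the snippet is an illustration, not part of the patch):

#include <fcntl.h>
#include <unistd.h>

int main(void)
{
	/* Path assumes debugfs mounted at /sys/kernel/debug (typical). */
	int fd = open("/sys/kernel/debug/tracing/set_ftrace_filter",
		      O_WRONLY | O_TRUNC);
	if (fd < 0)
		return 1;

	/* The write lands in ftrace_set_regex() -> ftrace_hash_move();
	 * with this patch it also triggers ftrace_run_update_code()
	 * when the ops is live. */
	if (write(fd, "schedule*", 9) != 9) {
		close(fd);
		return 1;
	}
	close(fd);
	return 0;
}
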
@@ -3048,18 +3101,12 @@ ftrace_regex_release(struct inode *inode, struct file *file)
 			orig_hash = &iter->ops->notrace_hash;
 
 		mutex_lock(&ftrace_lock);
-		/*
-		 * Remove the current set, update the hash and add
-		 * them back.
-		 */
-		ftrace_hash_rec_disable(iter->ops, filter_hash);
-		ret = ftrace_hash_move(orig_hash, iter->hash);
-		if (!ret) {
-			ftrace_hash_rec_enable(iter->ops, filter_hash);
-			if (iter->ops->flags & FTRACE_OPS_FL_ENABLED
-			    && ftrace_enabled)
-				ftrace_run_update_code(FTRACE_ENABLE_CALLS);
-		}
+		ret = ftrace_hash_move(iter->ops, filter_hash,
+				       orig_hash, iter->hash);
+		if (!ret && (iter->ops->flags & FTRACE_OPS_FL_ENABLED)
+		    && ftrace_enabled)
+			ftrace_run_update_code(FTRACE_ENABLE_CALLS);
+
 		mutex_unlock(&ftrace_lock);
 	}
 	free_ftrace_hash(iter->hash);
@@ -3338,7 +3385,7 @@ static int ftrace_process_locs(struct module *mod,
 {
 	unsigned long *p;
 	unsigned long addr;
-	unsigned long flags;
+	unsigned long flags = 0; /* Shut up gcc */
 
 	mutex_lock(&ftrace_lock);
 	p = start;
@@ -3356,12 +3403,18 @@ static int ftrace_process_locs(struct module *mod,
 	}
 
 	/*
-	 * Disable interrupts to prevent interrupts from executing
-	 * code that is being modified.
+	 * We only need to disable interrupts on start up
+	 * because we are modifying code that an interrupt
+	 * may execute, and the modification is not atomic.
+	 * But for modules, nothing runs the code we modify
+	 * until we are finished with it, and there's no
+	 * reason to cause large interrupt latencies while we do it.
 	 */
-	local_irq_save(flags);
+	if (!mod)
+		local_irq_save(flags);
 	ftrace_update_code(mod);
-	local_irq_restore(flags);
+	if (!mod)
+		local_irq_restore(flags);
 	mutex_unlock(&ftrace_lock);
 
 	return 0;
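
The closing hunks make the IRQ-off window conditional: at boot the kernel patches text that an interrupt may execute, so interrupts must be masked, but module text cannot run until loading completes, and masking there would only add latency. A userspace sketch of the bracket (local_irq_save()/restore() are stubbed; demo_* names are invented):

#include <stdio.h>

/* Userspace stand-ins for the kernel's IRQ bracket. */
#define local_irq_save(flags)    do { (flags) = 1; puts("irqs off"); } while (0)
#define local_irq_restore(flags) do { (void)(flags); puts("irqs on"); } while (0)

static void demo_update_code(const char *what)
{
	printf("patching %s call sites\n", what);
}

/*
 * Mirrors the new ftrace_process_locs() bracket: only boot-time
 * patching of live kernel text masks interrupts; module text is not
 * executable yet, so the bracket is skipped to avoid long IRQ-off
 * windows during module load.
 */
static void demo_process_locs(int is_module)
{
	unsigned long flags = 0;	/* "shut up gcc", as in the patch */

	if (!is_module)
		local_irq_save(flags);
	demo_update_code(is_module ? "module" : "core kernel");
	if (!is_module)
		local_irq_restore(flags);
}

int main(void)
{
	demo_process_locs(0);	/* boot: IRQs masked around the patch */
	demo_process_locs(1);	/* module load: no masking needed     */
	return 0;
}
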