aboutsummaryrefslogtreecommitdiffstats
path: root/kernel/trace
diff options
context:
space:
mode:
authorLinus Torvalds <torvalds@linux-foundation.org>2014-10-12 07:27:19 -0400
committerLinus Torvalds <torvalds@linux-foundation.org>2014-10-12 07:27:19 -0400
commit9837acff77f51e40ab21521e914aa19f85beb312 (patch)
tree76e9363fd72afea51e6634fb38cc76e6d4be8767 /kernel/trace
parentca321885b0511a85e2d1cd40caafedbeb18f4af6 (diff)
parent3ddee63a099ebbdc8f84697fe46730b58240c09d (diff)
Merge tag 'trace-3.18' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-trace
Pull tracing updates from Steven Rostedt: "This set has a few minor updates, but the big change is the redesign of the trampoline logic. The trampoline logic of 3.17 required a descriptor for every function that is registered to be traced and uses a trampoline. Currently, only the function graph tracer uses a trampoline, but if you were to trace all 32,000 (give or take a few thousand) functions with the function graph tracer, it would create 32,000 descriptors to let us know that there's a trampoline associated with it. This takes up a bit of memory when there's a better way to do it. The redesign now reuses the ftrace_ops' (what registers the function graph tracer) hash tables. The hash tables tell ftrace what the tracer wants to trace or doesn't want to trace. There's two of them: one that tells us what to trace, the other tells us what not to trace. If the first one is empty, it means all functions should be traced, otherwise only the ones that are listed should be. The second hash table tells us what not to trace, and if it is empty, all functions may be traced, and if there's any listed, then those should not be traced even if they exist in the first hash table. It took a bit of massaging, but now these hashes can be used to keep track of what has a trampoline and what does not, and allows the ftrace accounting to work. 
Now we can trace all functions when using the function graph trampoline, and avoid needing to create any special descriptors to hold all the functions that are being traced" * tag 'trace-3.18' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-trace: ftrace: Only disable ftrace_enabled to test buffer in selftest ftrace: Add sanity check when unregistering last ftrace_ops kernel: trace_syscalls: Replace rcu_assign_pointer() with RCU_INIT_POINTER() tracing: generate RCU warnings even when tracepoints are disabled ftrace: Replace tramp_hash with old_*_hash to save space ftrace: Annotate the ops operation on update ftrace: Grab any ops for a rec for enabled_functions output ftrace: Remove freeing of old_hash from ftrace_hash_move() ftrace: Set callback to ftrace_stub when no ops are registered ftrace: Add helper function ftrace_ops_get_func() ftrace: Add separate function for non recursive callbacks
Diffstat (limited to 'kernel/trace')
-rw-r--r--kernel/trace/ftrace.c416
-rw-r--r--kernel/trace/trace_selftest.c4
-rw-r--r--kernel/trace/trace_syscalls.c4
3 files changed, 247 insertions, 177 deletions
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index 5916a8e59e87..fb186b9ddf51 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -113,6 +113,9 @@ ftrace_func_t ftrace_pid_function __read_mostly = ftrace_stub;
113static struct ftrace_ops global_ops; 113static struct ftrace_ops global_ops;
114static struct ftrace_ops control_ops; 114static struct ftrace_ops control_ops;
115 115
116static void ftrace_ops_recurs_func(unsigned long ip, unsigned long parent_ip,
117 struct ftrace_ops *op, struct pt_regs *regs);
118
116#if ARCH_SUPPORTS_FTRACE_OPS 119#if ARCH_SUPPORTS_FTRACE_OPS
117static void ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip, 120static void ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip,
118 struct ftrace_ops *op, struct pt_regs *regs); 121 struct ftrace_ops *op, struct pt_regs *regs);
@@ -251,18 +254,24 @@ static void update_ftrace_function(void)
251 ftrace_func_t func; 254 ftrace_func_t func;
252 255
253 /* 256 /*
257 * Prepare the ftrace_ops that the arch callback will use.
258 * If there's only one ftrace_ops registered, the ftrace_ops_list
259 * will point to the ops we want.
260 */
261 set_function_trace_op = ftrace_ops_list;
262
263 /* If there's no ftrace_ops registered, just call the stub function */
264 if (ftrace_ops_list == &ftrace_list_end) {
265 func = ftrace_stub;
266
267 /*
254 * If we are at the end of the list and this ops is 268 * If we are at the end of the list and this ops is
255 * recursion safe and not dynamic and the arch supports passing ops, 269 * recursion safe and not dynamic and the arch supports passing ops,
256 * then have the mcount trampoline call the function directly. 270 * then have the mcount trampoline call the function directly.
257 */ 271 */
258 if (ftrace_ops_list == &ftrace_list_end || 272 } else if (ftrace_ops_list->next == &ftrace_list_end) {
259 (ftrace_ops_list->next == &ftrace_list_end && 273 func = ftrace_ops_get_func(ftrace_ops_list);
260 !(ftrace_ops_list->flags & FTRACE_OPS_FL_DYNAMIC) && 274
261 (ftrace_ops_list->flags & FTRACE_OPS_FL_RECURSION_SAFE) &&
262 !FTRACE_FORCE_LIST_FUNC)) {
263 /* Set the ftrace_ops that the arch callback uses */
264 set_function_trace_op = ftrace_ops_list;
265 func = ftrace_ops_list->func;
266 } else { 275 } else {
267 /* Just use the default ftrace_ops */ 276 /* Just use the default ftrace_ops */
268 set_function_trace_op = &ftrace_list_end; 277 set_function_trace_op = &ftrace_list_end;
@@ -1048,6 +1057,12 @@ static struct pid * const ftrace_swapper_pid = &init_struct_pid;
1048 1057
1049static struct ftrace_ops *removed_ops; 1058static struct ftrace_ops *removed_ops;
1050 1059
1060/*
1061 * Set when doing a global update, like enabling all recs or disabling them.
1062 * It is not set when just updating a single ftrace_ops.
1063 */
1064static bool update_all_ops;
1065
1051#ifndef CONFIG_FTRACE_MCOUNT_RECORD 1066#ifndef CONFIG_FTRACE_MCOUNT_RECORD
1052# error Dynamic ftrace depends on MCOUNT_RECORD 1067# error Dynamic ftrace depends on MCOUNT_RECORD
1053#endif 1068#endif
@@ -1307,7 +1322,6 @@ ftrace_hash_move(struct ftrace_ops *ops, int enable,
1307 struct ftrace_func_entry *entry; 1322 struct ftrace_func_entry *entry;
1308 struct hlist_node *tn; 1323 struct hlist_node *tn;
1309 struct hlist_head *hhd; 1324 struct hlist_head *hhd;
1310 struct ftrace_hash *old_hash;
1311 struct ftrace_hash *new_hash; 1325 struct ftrace_hash *new_hash;
1312 int size = src->count; 1326 int size = src->count;
1313 int bits = 0; 1327 int bits = 0;
@@ -1352,15 +1366,28 @@ update:
1352 */ 1366 */
1353 ftrace_hash_rec_disable_modify(ops, enable); 1367 ftrace_hash_rec_disable_modify(ops, enable);
1354 1368
1355 old_hash = *dst;
1356 rcu_assign_pointer(*dst, new_hash); 1369 rcu_assign_pointer(*dst, new_hash);
1357 free_ftrace_hash_rcu(old_hash);
1358 1370
1359 ftrace_hash_rec_enable_modify(ops, enable); 1371 ftrace_hash_rec_enable_modify(ops, enable);
1360 1372
1361 return 0; 1373 return 0;
1362} 1374}
1363 1375
1376static bool hash_contains_ip(unsigned long ip,
1377 struct ftrace_ops_hash *hash)
1378{
1379 /*
1380 * The function record is a match if it exists in the filter
1381 * hash and not in the notrace hash. Note, an empty hash is
1382 * considered a match for the filter hash, but an empty
1383 * notrace hash is considered not in the notrace hash.
1384 */
1385 return (ftrace_hash_empty(hash->filter_hash) ||
1386 ftrace_lookup_ip(hash->filter_hash, ip)) &&
1387 (ftrace_hash_empty(hash->notrace_hash) ||
1388 !ftrace_lookup_ip(hash->notrace_hash, ip));
1389}
1390
1364/* 1391/*
1365 * Test the hashes for this ops to see if we want to call 1392 * Test the hashes for this ops to see if we want to call
1366 * the ops->func or not. 1393 * the ops->func or not.
@@ -1376,8 +1403,7 @@ update:
1376static int 1403static int
1377ftrace_ops_test(struct ftrace_ops *ops, unsigned long ip, void *regs) 1404ftrace_ops_test(struct ftrace_ops *ops, unsigned long ip, void *regs)
1378{ 1405{
1379 struct ftrace_hash *filter_hash; 1406 struct ftrace_ops_hash hash;
1380 struct ftrace_hash *notrace_hash;
1381 int ret; 1407 int ret;
1382 1408
1383#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS 1409#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
@@ -1390,13 +1416,10 @@ ftrace_ops_test(struct ftrace_ops *ops, unsigned long ip, void *regs)
1390 return 0; 1416 return 0;
1391#endif 1417#endif
1392 1418
1393 filter_hash = rcu_dereference_raw_notrace(ops->func_hash->filter_hash); 1419 hash.filter_hash = rcu_dereference_raw_notrace(ops->func_hash->filter_hash);
1394 notrace_hash = rcu_dereference_raw_notrace(ops->func_hash->notrace_hash); 1420 hash.notrace_hash = rcu_dereference_raw_notrace(ops->func_hash->notrace_hash);
1395 1421
1396 if ((ftrace_hash_empty(filter_hash) || 1422 if (hash_contains_ip(ip, &hash))
1397 ftrace_lookup_ip(filter_hash, ip)) &&
1398 (ftrace_hash_empty(notrace_hash) ||
1399 !ftrace_lookup_ip(notrace_hash, ip)))
1400 ret = 1; 1423 ret = 1;
1401 else 1424 else
1402 ret = 0; 1425 ret = 0;
@@ -1508,46 +1531,6 @@ static bool test_rec_ops_needs_regs(struct dyn_ftrace *rec)
1508 return keep_regs; 1531 return keep_regs;
1509} 1532}
1510 1533
1511static void ftrace_remove_tramp(struct ftrace_ops *ops,
1512 struct dyn_ftrace *rec)
1513{
1514 /* If TRAMP is not set, no ops should have a trampoline for this */
1515 if (!(rec->flags & FTRACE_FL_TRAMP))
1516 return;
1517
1518 rec->flags &= ~FTRACE_FL_TRAMP;
1519
1520 if ((!ftrace_hash_empty(ops->func_hash->filter_hash) &&
1521 !ftrace_lookup_ip(ops->func_hash->filter_hash, rec->ip)) ||
1522 ftrace_lookup_ip(ops->func_hash->notrace_hash, rec->ip))
1523 return;
1524 /*
1525 * The tramp_hash entry will be removed at time
1526 * of update.
1527 */
1528 ops->nr_trampolines--;
1529}
1530
1531static void ftrace_clear_tramps(struct dyn_ftrace *rec, struct ftrace_ops *ops)
1532{
1533 struct ftrace_ops *op;
1534
1535 /* If TRAMP is not set, no ops should have a trampoline for this */
1536 if (!(rec->flags & FTRACE_FL_TRAMP))
1537 return;
1538
1539 do_for_each_ftrace_op(op, ftrace_ops_list) {
1540 /*
1541 * This function is called to clear other tramps
1542 * not the one that is being updated.
1543 */
1544 if (op == ops)
1545 continue;
1546 if (op->nr_trampolines)
1547 ftrace_remove_tramp(op, rec);
1548 } while_for_each_ftrace_op(op);
1549}
1550
1551static void __ftrace_hash_rec_update(struct ftrace_ops *ops, 1534static void __ftrace_hash_rec_update(struct ftrace_ops *ops,
1552 int filter_hash, 1535 int filter_hash,
1553 bool inc) 1536 bool inc)
@@ -1636,18 +1619,16 @@ static void __ftrace_hash_rec_update(struct ftrace_ops *ops,
1636 * function, and the ops has a trampoline registered 1619 * function, and the ops has a trampoline registered
1637 * for it, then we can call it directly. 1620 * for it, then we can call it directly.
1638 */ 1621 */
1639 if (ftrace_rec_count(rec) == 1 && ops->trampoline) { 1622 if (ftrace_rec_count(rec) == 1 && ops->trampoline)
1640 rec->flags |= FTRACE_FL_TRAMP; 1623 rec->flags |= FTRACE_FL_TRAMP;
1641 ops->nr_trampolines++; 1624 else
1642 } else {
1643 /* 1625 /*
1644 * If we are adding another function callback 1626 * If we are adding another function callback
1645 * to this function, and the previous had a 1627 * to this function, and the previous had a
1646 * custom trampoline in use, then we need to go 1628 * custom trampoline in use, then we need to go
1647 * back to the default trampoline. 1629 * back to the default trampoline.
1648 */ 1630 */
1649 ftrace_clear_tramps(rec, ops); 1631 rec->flags &= ~FTRACE_FL_TRAMP;
1650 }
1651 1632
1652 /* 1633 /*
1653 * If any ops wants regs saved for this function 1634 * If any ops wants regs saved for this function
@@ -1660,9 +1641,6 @@ static void __ftrace_hash_rec_update(struct ftrace_ops *ops,
1660 return; 1641 return;
1661 rec->flags--; 1642 rec->flags--;
1662 1643
1663 if (ops->trampoline && !ftrace_rec_count(rec))
1664 ftrace_remove_tramp(ops, rec);
1665
1666 /* 1644 /*
1667 * If the rec had REGS enabled and the ops that is 1645 * If the rec had REGS enabled and the ops that is
1668 * being removed had REGS set, then see if there is 1646 * being removed had REGS set, then see if there is
@@ -1677,6 +1655,17 @@ static void __ftrace_hash_rec_update(struct ftrace_ops *ops,
1677 } 1655 }
1678 1656
1679 /* 1657 /*
1658 * If the rec had TRAMP enabled, then it needs to
1659 * be cleared. As TRAMP can only be enabled iff
1660 * there is only a single ops attached to it.
1661 * In other words, always disable it on decrementing.
1662 * In the future, we may set it if rec count is
1663 * decremented to one, and the ops that is left
1664 * has a trampoline.
1665 */
1666 rec->flags &= ~FTRACE_FL_TRAMP;
1667
1668 /*
1680 * flags will be cleared in ftrace_check_record() 1669 * flags will be cleared in ftrace_check_record()
1681 * if rec count is zero. 1670 * if rec count is zero.
1682 */ 1671 */
@@ -1895,21 +1884,72 @@ int ftrace_test_record(struct dyn_ftrace *rec, int enable)
1895} 1884}
1896 1885
1897static struct ftrace_ops * 1886static struct ftrace_ops *
1887ftrace_find_tramp_ops_any(struct dyn_ftrace *rec)
1888{
1889 struct ftrace_ops *op;
1890 unsigned long ip = rec->ip;
1891
1892 do_for_each_ftrace_op(op, ftrace_ops_list) {
1893
1894 if (!op->trampoline)
1895 continue;
1896
1897 if (hash_contains_ip(ip, op->func_hash))
1898 return op;
1899 } while_for_each_ftrace_op(op);
1900
1901 return NULL;
1902}
1903
1904static struct ftrace_ops *
1898ftrace_find_tramp_ops_curr(struct dyn_ftrace *rec) 1905ftrace_find_tramp_ops_curr(struct dyn_ftrace *rec)
1899{ 1906{
1900 struct ftrace_ops *op; 1907 struct ftrace_ops *op;
1908 unsigned long ip = rec->ip;
1901 1909
1902 /* Removed ops need to be tested first */ 1910 /*
1903 if (removed_ops && removed_ops->tramp_hash) { 1911 * Need to check removed ops first.
1904 if (ftrace_lookup_ip(removed_ops->tramp_hash, rec->ip)) 1912 * If they are being removed, and this rec has a tramp,
1913 * and this rec is in the ops list, then it would be the
1914 * one with the tramp.
1915 */
1916 if (removed_ops) {
1917 if (hash_contains_ip(ip, &removed_ops->old_hash))
1905 return removed_ops; 1918 return removed_ops;
1906 } 1919 }
1907 1920
1921 /*
1922 * Need to find the current trampoline for a rec.
1923 * Now, a trampoline is only attached to a rec if there
1924 * was a single 'ops' attached to it. But this can be called
1925 * when we are adding another op to the rec or removing the
1926 * current one. Thus, if the op is being added, we can
1927 * ignore it because it hasn't attached itself to the rec
1928 * yet. That means we just need to find the op that has a
1929 * trampoline and is not being added.
1930 */
1908 do_for_each_ftrace_op(op, ftrace_ops_list) { 1931 do_for_each_ftrace_op(op, ftrace_ops_list) {
1909 if (!op->tramp_hash) 1932
1933 if (!op->trampoline)
1910 continue; 1934 continue;
1911 1935
1912 if (ftrace_lookup_ip(op->tramp_hash, rec->ip)) 1936 /*
1937 * If the ops is being added, it hasn't gotten to
1938 * the point to be removed from this tree yet.
1939 */
1940 if (op->flags & FTRACE_OPS_FL_ADDING)
1941 continue;
1942
1943 /*
1944 * If the ops is not being added and has a trampoline,
1945 * then it must be the one that we want!
1946 */
1947 if (hash_contains_ip(ip, op->func_hash))
1948 return op;
1949
1950 /* If the ops is being modified, it may be in the old hash. */
1951 if ((op->flags & FTRACE_OPS_FL_MODIFYING) &&
1952 hash_contains_ip(ip, &op->old_hash))
1913 return op; 1953 return op;
1914 1954
1915 } while_for_each_ftrace_op(op); 1955 } while_for_each_ftrace_op(op);
@@ -1921,10 +1961,11 @@ static struct ftrace_ops *
1921ftrace_find_tramp_ops_new(struct dyn_ftrace *rec) 1961ftrace_find_tramp_ops_new(struct dyn_ftrace *rec)
1922{ 1962{
1923 struct ftrace_ops *op; 1963 struct ftrace_ops *op;
1964 unsigned long ip = rec->ip;
1924 1965
1925 do_for_each_ftrace_op(op, ftrace_ops_list) { 1966 do_for_each_ftrace_op(op, ftrace_ops_list) {
1926 /* pass rec in as regs to have non-NULL val */ 1967 /* pass rec in as regs to have non-NULL val */
1927 if (ftrace_ops_test(op, rec->ip, rec)) 1968 if (hash_contains_ip(ip, op->func_hash))
1928 return op; 1969 return op;
1929 } while_for_each_ftrace_op(op); 1970 } while_for_each_ftrace_op(op);
1930 1971
@@ -2231,92 +2272,6 @@ void __weak arch_ftrace_update_code(int command)
2231 ftrace_run_stop_machine(command); 2272 ftrace_run_stop_machine(command);
2232} 2273}
2233 2274
2234static int ftrace_save_ops_tramp_hash(struct ftrace_ops *ops)
2235{
2236 struct ftrace_page *pg;
2237 struct dyn_ftrace *rec;
2238 int size, bits;
2239 int ret;
2240
2241 size = ops->nr_trampolines;
2242 bits = 0;
2243 /*
2244 * Make the hash size about 1/2 the # found
2245 */
2246 for (size /= 2; size; size >>= 1)
2247 bits++;
2248
2249 ops->tramp_hash = alloc_ftrace_hash(bits);
2250 /*
2251 * TODO: a failed allocation is going to screw up
2252 * the accounting of what needs to be modified
2253 * and not. For now, we kill ftrace if we fail
2254 * to allocate here. But there are ways around this,
2255 * but that will take a little more work.
2256 */
2257 if (!ops->tramp_hash)
2258 return -ENOMEM;
2259
2260 do_for_each_ftrace_rec(pg, rec) {
2261 if (ftrace_rec_count(rec) == 1 &&
2262 ftrace_ops_test(ops, rec->ip, rec)) {
2263
2264 /*
2265 * If another ops adds to a rec, the rec will
2266 * lose its trampoline and never get it back
2267 * until all ops are off of it.
2268 */
2269 if (!(rec->flags & FTRACE_FL_TRAMP))
2270 continue;
2271
2272 /* This record had better have a trampoline */
2273 if (FTRACE_WARN_ON(!(rec->flags & FTRACE_FL_TRAMP_EN)))
2274 return -1;
2275
2276 ret = add_hash_entry(ops->tramp_hash, rec->ip);
2277 if (ret < 0)
2278 return ret;
2279 }
2280 } while_for_each_ftrace_rec();
2281
2282 /* The number of recs in the hash must match nr_trampolines */
2283 if (FTRACE_WARN_ON(ops->tramp_hash->count != ops->nr_trampolines))
2284 pr_warn("count=%ld trampolines=%d\n",
2285 ops->tramp_hash->count,
2286 ops->nr_trampolines);
2287
2288 return 0;
2289}
2290
2291static int ftrace_save_tramp_hashes(void)
2292{
2293 struct ftrace_ops *op;
2294 int ret;
2295
2296 /*
2297 * Now that any trampoline is being used, we need to save the
2298 * hashes for the ops that have them. This allows the mapping
2299 * back from the record to the ops that has the trampoline to
2300 * know what code is being replaced. Modifying code must always
2301 * verify what it is changing.
2302 */
2303 do_for_each_ftrace_op(op, ftrace_ops_list) {
2304
2305 /* The tramp_hash is recreated each time. */
2306 free_ftrace_hash(op->tramp_hash);
2307 op->tramp_hash = NULL;
2308
2309 if (op->nr_trampolines) {
2310 ret = ftrace_save_ops_tramp_hash(op);
2311 if (ret)
2312 return ret;
2313 }
2314
2315 } while_for_each_ftrace_op(op);
2316
2317 return 0;
2318}
2319
2320static void ftrace_run_update_code(int command) 2275static void ftrace_run_update_code(int command)
2321{ 2276{
2322 int ret; 2277 int ret;
@@ -2336,9 +2291,13 @@ static void ftrace_run_update_code(int command)
2336 2291
2337 ret = ftrace_arch_code_modify_post_process(); 2292 ret = ftrace_arch_code_modify_post_process();
2338 FTRACE_WARN_ON(ret); 2293 FTRACE_WARN_ON(ret);
2294}
2339 2295
2340 ret = ftrace_save_tramp_hashes(); 2296static void ftrace_run_modify_code(struct ftrace_ops *ops, int command)
2341 FTRACE_WARN_ON(ret); 2297{
2298 ops->flags |= FTRACE_OPS_FL_MODIFYING;
2299 ftrace_run_update_code(command);
2300 ops->flags &= ~FTRACE_OPS_FL_MODIFYING;
2342} 2301}
2343 2302
2344static ftrace_func_t saved_ftrace_func; 2303static ftrace_func_t saved_ftrace_func;
@@ -2362,6 +2321,13 @@ static void ftrace_startup_enable(int command)
2362 ftrace_run_update_code(command); 2321 ftrace_run_update_code(command);
2363} 2322}
2364 2323
2324static void ftrace_startup_all(int command)
2325{
2326 update_all_ops = true;
2327 ftrace_startup_enable(command);
2328 update_all_ops = false;
2329}
2330
2365static int ftrace_startup(struct ftrace_ops *ops, int command) 2331static int ftrace_startup(struct ftrace_ops *ops, int command)
2366{ 2332{
2367 int ret; 2333 int ret;
@@ -2376,12 +2342,22 @@ static int ftrace_startup(struct ftrace_ops *ops, int command)
2376 ftrace_start_up++; 2342 ftrace_start_up++;
2377 command |= FTRACE_UPDATE_CALLS; 2343 command |= FTRACE_UPDATE_CALLS;
2378 2344
2379 ops->flags |= FTRACE_OPS_FL_ENABLED; 2345 /*
2346 * Note that ftrace probes uses this to start up
2347 * and modify functions it will probe. But we still
2348 * set the ADDING flag for modification, as probes
2349 * do not have trampolines. If they add them in the
2350 * future, then the probes will need to distinguish
2351 * between adding and updating probes.
2352 */
2353 ops->flags |= FTRACE_OPS_FL_ENABLED | FTRACE_OPS_FL_ADDING;
2380 2354
2381 ftrace_hash_rec_enable(ops, 1); 2355 ftrace_hash_rec_enable(ops, 1);
2382 2356
2383 ftrace_startup_enable(command); 2357 ftrace_startup_enable(command);
2384 2358
2359 ops->flags &= ~FTRACE_OPS_FL_ADDING;
2360
2385 return 0; 2361 return 0;
2386} 2362}
2387 2363
@@ -2431,11 +2407,35 @@ static int ftrace_shutdown(struct ftrace_ops *ops, int command)
2431 * If the ops uses a trampoline, then it needs to be 2407 * If the ops uses a trampoline, then it needs to be
2432 * tested first on update. 2408 * tested first on update.
2433 */ 2409 */
2410 ops->flags |= FTRACE_OPS_FL_REMOVING;
2434 removed_ops = ops; 2411 removed_ops = ops;
2435 2412
2413 /* The trampoline logic checks the old hashes */
2414 ops->old_hash.filter_hash = ops->func_hash->filter_hash;
2415 ops->old_hash.notrace_hash = ops->func_hash->notrace_hash;
2416
2436 ftrace_run_update_code(command); 2417 ftrace_run_update_code(command);
2437 2418
2419 /*
2420 * If there's no more ops registered with ftrace, run a
2421 * sanity check to make sure all rec flags are cleared.
2422 */
2423 if (ftrace_ops_list == &ftrace_list_end) {
2424 struct ftrace_page *pg;
2425 struct dyn_ftrace *rec;
2426
2427 do_for_each_ftrace_rec(pg, rec) {
2428 if (FTRACE_WARN_ON_ONCE(rec->flags))
2429 pr_warn(" %pS flags:%lx\n",
2430 (void *)rec->ip, rec->flags);
2431 } while_for_each_ftrace_rec();
2432 }
2433
2434 ops->old_hash.filter_hash = NULL;
2435 ops->old_hash.notrace_hash = NULL;
2436
2438 removed_ops = NULL; 2437 removed_ops = NULL;
2438 ops->flags &= ~FTRACE_OPS_FL_REMOVING;
2439 2439
2440 /* 2440 /*
2441 * Dynamic ops may be freed, we must make sure that all 2441 * Dynamic ops may be freed, we must make sure that all
@@ -2960,8 +2960,8 @@ static int t_show(struct seq_file *m, void *v)
2960 if (rec->flags & FTRACE_FL_TRAMP_EN) { 2960 if (rec->flags & FTRACE_FL_TRAMP_EN) {
2961 struct ftrace_ops *ops; 2961 struct ftrace_ops *ops;
2962 2962
2963 ops = ftrace_find_tramp_ops_curr(rec); 2963 ops = ftrace_find_tramp_ops_any(rec);
2964 if (ops && ops->trampoline) 2964 if (ops)
2965 seq_printf(m, "\ttramp: %pS", 2965 seq_printf(m, "\ttramp: %pS",
2966 (void *)ops->trampoline); 2966 (void *)ops->trampoline);
2967 else 2967 else
@@ -3348,7 +3348,7 @@ static void __enable_ftrace_function_probe(void)
3348 if (ftrace_probe_registered) { 3348 if (ftrace_probe_registered) {
3349 /* still need to update the function call sites */ 3349 /* still need to update the function call sites */
3350 if (ftrace_enabled) 3350 if (ftrace_enabled)
3351 ftrace_run_update_code(FTRACE_UPDATE_CALLS); 3351 ftrace_run_modify_code(&trace_probe_ops, FTRACE_UPDATE_CALLS);
3352 return; 3352 return;
3353 } 3353 }
3354 3354
@@ -3399,6 +3399,7 @@ register_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
3399{ 3399{
3400 struct ftrace_func_probe *entry; 3400 struct ftrace_func_probe *entry;
3401 struct ftrace_hash **orig_hash = &trace_probe_ops.func_hash->filter_hash; 3401 struct ftrace_hash **orig_hash = &trace_probe_ops.func_hash->filter_hash;
3402 struct ftrace_hash *old_hash = *orig_hash;
3402 struct ftrace_hash *hash; 3403 struct ftrace_hash *hash;
3403 struct ftrace_page *pg; 3404 struct ftrace_page *pg;
3404 struct dyn_ftrace *rec; 3405 struct dyn_ftrace *rec;
@@ -3417,7 +3418,7 @@ register_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
3417 3418
3418 mutex_lock(&trace_probe_ops.func_hash->regex_lock); 3419 mutex_lock(&trace_probe_ops.func_hash->regex_lock);
3419 3420
3420 hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, *orig_hash); 3421 hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, old_hash);
3421 if (!hash) { 3422 if (!hash) {
3422 count = -ENOMEM; 3423 count = -ENOMEM;
3423 goto out; 3424 goto out;
@@ -3476,7 +3477,9 @@ register_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
3476 } while_for_each_ftrace_rec(); 3477 } while_for_each_ftrace_rec();
3477 3478
3478 ret = ftrace_hash_move(&trace_probe_ops, 1, orig_hash, hash); 3479 ret = ftrace_hash_move(&trace_probe_ops, 1, orig_hash, hash);
3479 if (ret < 0) 3480 if (!ret)
3481 free_ftrace_hash_rcu(old_hash);
3482 else
3480 count = ret; 3483 count = ret;
3481 3484
3482 __enable_ftrace_function_probe(); 3485 __enable_ftrace_function_probe();
@@ -3503,6 +3506,7 @@ __unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
3503 struct ftrace_func_probe *entry; 3506 struct ftrace_func_probe *entry;
3504 struct ftrace_func_probe *p; 3507 struct ftrace_func_probe *p;
3505 struct ftrace_hash **orig_hash = &trace_probe_ops.func_hash->filter_hash; 3508 struct ftrace_hash **orig_hash = &trace_probe_ops.func_hash->filter_hash;
3509 struct ftrace_hash *old_hash = *orig_hash;
3506 struct list_head free_list; 3510 struct list_head free_list;
3507 struct ftrace_hash *hash; 3511 struct ftrace_hash *hash;
3508 struct hlist_node *tmp; 3512 struct hlist_node *tmp;
@@ -3510,6 +3514,7 @@ __unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
3510 int type = MATCH_FULL; 3514 int type = MATCH_FULL;
3511 int i, len = 0; 3515 int i, len = 0;
3512 char *search; 3516 char *search;
3517 int ret;
3513 3518
3514 if (glob && (strcmp(glob, "*") == 0 || !strlen(glob))) 3519 if (glob && (strcmp(glob, "*") == 0 || !strlen(glob)))
3515 glob = NULL; 3520 glob = NULL;
@@ -3568,8 +3573,11 @@ __unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
3568 * Remove after the disable is called. Otherwise, if the last 3573 * Remove after the disable is called. Otherwise, if the last
3569 * probe is removed, a null hash means *all enabled*. 3574 * probe is removed, a null hash means *all enabled*.
3570 */ 3575 */
3571 ftrace_hash_move(&trace_probe_ops, 1, orig_hash, hash); 3576 ret = ftrace_hash_move(&trace_probe_ops, 1, orig_hash, hash);
3572 synchronize_sched(); 3577 synchronize_sched();
3578 if (!ret)
3579 free_ftrace_hash_rcu(old_hash);
3580
3573 list_for_each_entry_safe(entry, p, &free_list, free_list) { 3581 list_for_each_entry_safe(entry, p, &free_list, free_list) {
3574 list_del(&entry->free_list); 3582 list_del(&entry->free_list);
3575 ftrace_free_entry(entry); 3583 ftrace_free_entry(entry);
@@ -3759,7 +3767,7 @@ ftrace_match_addr(struct ftrace_hash *hash, unsigned long ip, int remove)
3759static void ftrace_ops_update_code(struct ftrace_ops *ops) 3767static void ftrace_ops_update_code(struct ftrace_ops *ops)
3760{ 3768{
3761 if (ops->flags & FTRACE_OPS_FL_ENABLED && ftrace_enabled) 3769 if (ops->flags & FTRACE_OPS_FL_ENABLED && ftrace_enabled)
3762 ftrace_run_update_code(FTRACE_UPDATE_CALLS); 3770 ftrace_run_modify_code(ops, FTRACE_UPDATE_CALLS);
3763} 3771}
3764 3772
3765static int 3773static int
@@ -3767,6 +3775,7 @@ ftrace_set_hash(struct ftrace_ops *ops, unsigned char *buf, int len,
3767 unsigned long ip, int remove, int reset, int enable) 3775 unsigned long ip, int remove, int reset, int enable)
3768{ 3776{
3769 struct ftrace_hash **orig_hash; 3777 struct ftrace_hash **orig_hash;
3778 struct ftrace_hash *old_hash;
3770 struct ftrace_hash *hash; 3779 struct ftrace_hash *hash;
3771 int ret; 3780 int ret;
3772 3781
@@ -3801,10 +3810,12 @@ ftrace_set_hash(struct ftrace_ops *ops, unsigned char *buf, int len,
3801 } 3810 }
3802 3811
3803 mutex_lock(&ftrace_lock); 3812 mutex_lock(&ftrace_lock);
3813 old_hash = *orig_hash;
3804 ret = ftrace_hash_move(ops, enable, orig_hash, hash); 3814 ret = ftrace_hash_move(ops, enable, orig_hash, hash);
3805 if (!ret) 3815 if (!ret) {
3806 ftrace_ops_update_code(ops); 3816 ftrace_ops_update_code(ops);
3807 3817 free_ftrace_hash_rcu(old_hash);
3818 }
3808 mutex_unlock(&ftrace_lock); 3819 mutex_unlock(&ftrace_lock);
3809 3820
3810 out_regex_unlock: 3821 out_regex_unlock:
@@ -4013,6 +4024,7 @@ int ftrace_regex_release(struct inode *inode, struct file *file)
4013 struct seq_file *m = (struct seq_file *)file->private_data; 4024 struct seq_file *m = (struct seq_file *)file->private_data;
4014 struct ftrace_iterator *iter; 4025 struct ftrace_iterator *iter;
4015 struct ftrace_hash **orig_hash; 4026 struct ftrace_hash **orig_hash;
4027 struct ftrace_hash *old_hash;
4016 struct trace_parser *parser; 4028 struct trace_parser *parser;
4017 int filter_hash; 4029 int filter_hash;
4018 int ret; 4030 int ret;
@@ -4042,11 +4054,13 @@ int ftrace_regex_release(struct inode *inode, struct file *file)
4042 orig_hash = &iter->ops->func_hash->notrace_hash; 4054 orig_hash = &iter->ops->func_hash->notrace_hash;
4043 4055
4044 mutex_lock(&ftrace_lock); 4056 mutex_lock(&ftrace_lock);
4057 old_hash = *orig_hash;
4045 ret = ftrace_hash_move(iter->ops, filter_hash, 4058 ret = ftrace_hash_move(iter->ops, filter_hash,
4046 orig_hash, iter->hash); 4059 orig_hash, iter->hash);
4047 if (!ret) 4060 if (!ret) {
4048 ftrace_ops_update_code(iter->ops); 4061 ftrace_ops_update_code(iter->ops);
4049 4062 free_ftrace_hash_rcu(old_hash);
4063 }
4050 mutex_unlock(&ftrace_lock); 4064 mutex_unlock(&ftrace_lock);
4051 } 4065 }
4052 4066
@@ -4678,6 +4692,7 @@ core_initcall(ftrace_nodyn_init);
4678 4692
4679static inline int ftrace_init_dyn_debugfs(struct dentry *d_tracer) { return 0; } 4693static inline int ftrace_init_dyn_debugfs(struct dentry *d_tracer) { return 0; }
4680static inline void ftrace_startup_enable(int command) { } 4694static inline void ftrace_startup_enable(int command) { }
4695static inline void ftrace_startup_all(int command) { }
4681/* Keep as macros so we do not need to define the commands */ 4696/* Keep as macros so we do not need to define the commands */
4682# define ftrace_startup(ops, command) \ 4697# define ftrace_startup(ops, command) \
4683 ({ \ 4698 ({ \
@@ -4827,6 +4842,56 @@ static void ftrace_ops_no_ops(unsigned long ip, unsigned long parent_ip)
4827} 4842}
4828#endif 4843#endif
4829 4844
4845/*
4846 * If there's only one function registered but it does not support
4847 * recursion, this function will be called by the mcount trampoline.
4848 * This function will handle recursion protection.
4849 */
4850static void ftrace_ops_recurs_func(unsigned long ip, unsigned long parent_ip,
4851 struct ftrace_ops *op, struct pt_regs *regs)
4852{
4853 int bit;
4854
4855 bit = trace_test_and_set_recursion(TRACE_LIST_START, TRACE_LIST_MAX);
4856 if (bit < 0)
4857 return;
4858
4859 op->func(ip, parent_ip, op, regs);
4860
4861 trace_clear_recursion(bit);
4862}
4863
4864/**
4865 * ftrace_ops_get_func - get the function a trampoline should call
4866 * @ops: the ops to get the function for
4867 *
4868 * Normally the mcount trampoline will call the ops->func, but there
4869 * are times that it should not. For example, if the ops does not
4870 * have its own recursion protection, then it should call the
4871 * ftrace_ops_recurs_func() instead.
4872 *
4873 * Returns the function that the trampoline should call for @ops.
4874 */
4875ftrace_func_t ftrace_ops_get_func(struct ftrace_ops *ops)
4876{
4877 /*
4878 * If this is a dynamic ops or we force list func,
4879 * then it needs to call the list anyway.
4880 */
4881 if (ops->flags & FTRACE_OPS_FL_DYNAMIC || FTRACE_FORCE_LIST_FUNC)
4882 return ftrace_ops_list_func;
4883
4884 /*
4885 * If the func handles its own recursion, call it directly.
4886 * Otherwise call the recursion protected function that
4887 * will call the ftrace ops function.
4888 */
4889 if (!(ops->flags & FTRACE_OPS_FL_RECURSION_SAFE))
4890 return ftrace_ops_recurs_func;
4891
4892 return ops->func;
4893}
4894
4830static void clear_ftrace_swapper(void) 4895static void clear_ftrace_swapper(void)
4831{ 4896{
4832 struct task_struct *p; 4897 struct task_struct *p;
@@ -4927,7 +4992,8 @@ static int ftrace_pid_add(int p)
4927 set_ftrace_pid_task(pid); 4992 set_ftrace_pid_task(pid);
4928 4993
4929 ftrace_update_pid_func(); 4994 ftrace_update_pid_func();
4930 ftrace_startup_enable(0); 4995
4996 ftrace_startup_all(0);
4931 4997
4932 mutex_unlock(&ftrace_lock); 4998 mutex_unlock(&ftrace_lock);
4933 return 0; 4999 return 0;
@@ -4956,7 +5022,7 @@ static void ftrace_pid_reset(void)
4956 } 5022 }
4957 5023
4958 ftrace_update_pid_func(); 5024 ftrace_update_pid_func();
4959 ftrace_startup_enable(0); 5025 ftrace_startup_all(0);
4960 5026
4961 mutex_unlock(&ftrace_lock); 5027 mutex_unlock(&ftrace_lock);
4962} 5028}
diff --git a/kernel/trace/trace_selftest.c b/kernel/trace/trace_selftest.c
index 5ef60499dc8e..61a6acd6025d 100644
--- a/kernel/trace/trace_selftest.c
+++ b/kernel/trace/trace_selftest.c
@@ -382,6 +382,8 @@ static int trace_selftest_startup_dynamic_tracing(struct tracer *trace,
382 382
383 /* check the trace buffer */ 383 /* check the trace buffer */
384 ret = trace_test_buffer(&tr->trace_buffer, &count); 384 ret = trace_test_buffer(&tr->trace_buffer, &count);
385
386 ftrace_enabled = 1;
385 tracing_start(); 387 tracing_start();
386 388
387 /* we should only have one item */ 389 /* we should only have one item */
@@ -679,6 +681,8 @@ trace_selftest_startup_function(struct tracer *trace, struct trace_array *tr)
679 681
680 /* check the trace buffer */ 682 /* check the trace buffer */
681 ret = trace_test_buffer(&tr->trace_buffer, &count); 683 ret = trace_test_buffer(&tr->trace_buffer, &count);
684
685 ftrace_enabled = 1;
682 trace->reset(tr); 686 trace->reset(tr);
683 tracing_start(); 687 tracing_start();
684 688
diff --git a/kernel/trace/trace_syscalls.c b/kernel/trace/trace_syscalls.c
index 759d5e004517..4dc8b79c5f75 100644
--- a/kernel/trace/trace_syscalls.c
+++ b/kernel/trace/trace_syscalls.c
@@ -425,7 +425,7 @@ static void unreg_event_syscall_enter(struct ftrace_event_file *file,
425 return; 425 return;
426 mutex_lock(&syscall_trace_lock); 426 mutex_lock(&syscall_trace_lock);
427 tr->sys_refcount_enter--; 427 tr->sys_refcount_enter--;
428 rcu_assign_pointer(tr->enter_syscall_files[num], NULL); 428 RCU_INIT_POINTER(tr->enter_syscall_files[num], NULL);
429 if (!tr->sys_refcount_enter) 429 if (!tr->sys_refcount_enter)
430 unregister_trace_sys_enter(ftrace_syscall_enter, tr); 430 unregister_trace_sys_enter(ftrace_syscall_enter, tr);
431 mutex_unlock(&syscall_trace_lock); 431 mutex_unlock(&syscall_trace_lock);
@@ -463,7 +463,7 @@ static void unreg_event_syscall_exit(struct ftrace_event_file *file,
463 return; 463 return;
464 mutex_lock(&syscall_trace_lock); 464 mutex_lock(&syscall_trace_lock);
465 tr->sys_refcount_exit--; 465 tr->sys_refcount_exit--;
466 rcu_assign_pointer(tr->exit_syscall_files[num], NULL); 466 RCU_INIT_POINTER(tr->exit_syscall_files[num], NULL);
467 if (!tr->sys_refcount_exit) 467 if (!tr->sys_refcount_exit)
468 unregister_trace_sys_exit(ftrace_syscall_exit, tr); 468 unregister_trace_sys_exit(ftrace_syscall_exit, tr);
469 mutex_unlock(&syscall_trace_lock); 469 mutex_unlock(&syscall_trace_lock);