-rw-r--r--   include/linux/ftrace.h |  14
-rw-r--r--   kernel/trace/ftrace.c  | 246
2 files changed, 168 insertions, 92 deletions
diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h
index 6bb5e3f2a3b4..f0b0edbf55a9 100644
--- a/include/linux/ftrace.h
+++ b/include/linux/ftrace.h
@@ -102,6 +102,15 @@ enum {
 	FTRACE_OPS_FL_DELETED		= 1 << 8,
 };
 
+#ifdef CONFIG_DYNAMIC_FTRACE
+/* The hash used to know what functions callbacks trace */
+struct ftrace_ops_hash {
+	struct ftrace_hash		*notrace_hash;
+	struct ftrace_hash		*filter_hash;
+	struct mutex			regex_lock;
+};
+#endif
+
 /*
  * Note, ftrace_ops can be referenced outside of RCU protection.
  * (Although, for perf, the control ops prevent that). If ftrace_ops is
@@ -121,10 +130,9 @@ struct ftrace_ops {
 	int __percpu			*disabled;
 #ifdef CONFIG_DYNAMIC_FTRACE
 	int				nr_trampolines;
-	struct ftrace_hash		*notrace_hash;
-	struct ftrace_hash		*filter_hash;
+	struct ftrace_ops_hash		local_hash;
+	struct ftrace_ops_hash		*func_hash;
 	struct ftrace_hash		*tramp_hash;
-	struct mutex			regex_lock;
 	unsigned long			trampoline;
 #endif
 };
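
The header change above is the core of the patch: each ftrace_ops now owns a local_hash and reaches its hashes only through the func_hash pointer, which may be redirected at another ops' hashes. A minimal standalone C sketch of that indirection follows; the type and field names mirror the struct above, everything else (the toy types, main, the printed values) is illustrative and not kernel code:

/* toy model of the local_hash/func_hash indirection, not kernel code */
#include <stdio.h>

struct ftrace_hash { int count; };

struct ftrace_ops_hash {
	struct ftrace_hash *notrace_hash;
	struct ftrace_hash *filter_hash;
};

struct ftrace_ops {
	struct ftrace_ops_hash local_hash;	/* storage owned by this ops */
	struct ftrace_ops_hash *func_hash;	/* hashes actually consulted */
};

int main(void)
{
	struct ftrace_hash filter = { .count = 3 };
	struct ftrace_ops global = { .local_hash = { .filter_hash = &filter } };
	struct ftrace_ops graph = { 0 };

	/* an ordinary ops uses its own hashes */
	global.func_hash = &global.local_hash;

	/* graph_ops-style sharing: point func_hash at global's local_hash */
	graph.func_hash = &global.local_hash;

	/* both ops now consult the same filter without copying it */
	printf("%d %d\n", global.func_hash->filter_hash->count,
	       graph.func_hash->filter_hash->count);	/* prints "3 3" */
	return 0;
}
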
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index 1654b12c891a..5916a8e59e87 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -65,15 +65,21 @@
 #define FL_GLOBAL_CONTROL_MASK (FTRACE_OPS_FL_CONTROL)
 
 #ifdef CONFIG_DYNAMIC_FTRACE
-#define INIT_REGEX_LOCK(opsname)	\
-	.regex_lock	= __MUTEX_INITIALIZER(opsname.regex_lock),
+#define INIT_OPS_HASH(opsname)	\
+	.func_hash		= &opsname.local_hash,			\
+	.local_hash.regex_lock	= __MUTEX_INITIALIZER(opsname.local_hash.regex_lock),
+#define ASSIGN_OPS_HASH(opsname, val) \
+	.func_hash		= val, \
+	.local_hash.regex_lock	= __MUTEX_INITIALIZER(opsname.local_hash.regex_lock),
 #else
-#define INIT_REGEX_LOCK(opsname)
+#define INIT_OPS_HASH(opsname)
+#define ASSIGN_OPS_HASH(opsname, val)
 #endif
 
 static struct ftrace_ops ftrace_list_end __read_mostly = {
 	.func		= ftrace_stub,
 	.flags		= FTRACE_OPS_FL_RECURSION_SAFE | FTRACE_OPS_FL_STUB,
+	INIT_OPS_HASH(ftrace_list_end)
 };
 
 /* ftrace_enabled is a method to turn ftrace on or off */
@@ -140,7 +146,8 @@ static inline void ftrace_ops_init(struct ftrace_ops *ops)
 {
 #ifdef CONFIG_DYNAMIC_FTRACE
 	if (!(ops->flags & FTRACE_OPS_FL_INITIALIZED)) {
-		mutex_init(&ops->regex_lock);
+		mutex_init(&ops->local_hash.regex_lock);
+		ops->func_hash = &ops->local_hash;
 		ops->flags |= FTRACE_OPS_FL_INITIALIZED;
 	}
 #endif
@@ -899,7 +906,7 @@ static void unregister_ftrace_profiler(void)
 static struct ftrace_ops ftrace_profile_ops __read_mostly = {
 	.func		= function_profile_call,
 	.flags		= FTRACE_OPS_FL_RECURSION_SAFE | FTRACE_OPS_FL_INITIALIZED,
-	INIT_REGEX_LOCK(ftrace_profile_ops)
+	INIT_OPS_HASH(ftrace_profile_ops)
 };
 
 static int register_ftrace_profiler(void)
@@ -1081,11 +1088,12 @@ static const struct ftrace_hash empty_hash = {
 #define EMPTY_HASH	((struct ftrace_hash *)&empty_hash)
 
 static struct ftrace_ops global_ops = {
 	.func				= ftrace_stub,
-	.notrace_hash			= EMPTY_HASH,
-	.filter_hash			= EMPTY_HASH,
-	.flags				= FTRACE_OPS_FL_RECURSION_SAFE | FTRACE_OPS_FL_INITIALIZED,
-	INIT_REGEX_LOCK(global_ops)
+	.local_hash.notrace_hash	= EMPTY_HASH,
+	.local_hash.filter_hash		= EMPTY_HASH,
+	INIT_OPS_HASH(global_ops)
+	.flags				= FTRACE_OPS_FL_RECURSION_SAFE |
+					  FTRACE_OPS_FL_INITIALIZED,
 };
 
 struct ftrace_page {
@@ -1226,8 +1234,8 @@ static void free_ftrace_hash_rcu(struct ftrace_hash *hash)
 void ftrace_free_filter(struct ftrace_ops *ops)
 {
 	ftrace_ops_init(ops);
-	free_ftrace_hash(ops->filter_hash);
-	free_ftrace_hash(ops->notrace_hash);
+	free_ftrace_hash(ops->func_hash->filter_hash);
+	free_ftrace_hash(ops->func_hash->notrace_hash);
 }
 
 static struct ftrace_hash *alloc_ftrace_hash(int size_bits)
@@ -1288,9 +1296,9 @@ alloc_and_copy_ftrace_hash(int size_bits, struct ftrace_hash *hash)
 }
 
 static void
-ftrace_hash_rec_disable(struct ftrace_ops *ops, int filter_hash);
+ftrace_hash_rec_disable_modify(struct ftrace_ops *ops, int filter_hash);
 static void
-ftrace_hash_rec_enable(struct ftrace_ops *ops, int filter_hash);
+ftrace_hash_rec_enable_modify(struct ftrace_ops *ops, int filter_hash);
 
 static int
 ftrace_hash_move(struct ftrace_ops *ops, int enable,
@@ -1342,13 +1350,13 @@ update:
 	 * Remove the current set, update the hash and add
 	 * them back.
 	 */
-	ftrace_hash_rec_disable(ops, enable);
+	ftrace_hash_rec_disable_modify(ops, enable);
 
 	old_hash = *dst;
 	rcu_assign_pointer(*dst, new_hash);
 	free_ftrace_hash_rcu(old_hash);
 
-	ftrace_hash_rec_enable(ops, enable);
+	ftrace_hash_rec_enable_modify(ops, enable);
 
 	return 0;
 }
@@ -1382,8 +1390,8 @@ ftrace_ops_test(struct ftrace_ops *ops, unsigned long ip, void *regs)
 		return 0;
 #endif
 
-	filter_hash = rcu_dereference_raw_notrace(ops->filter_hash);
-	notrace_hash = rcu_dereference_raw_notrace(ops->notrace_hash);
+	filter_hash = rcu_dereference_raw_notrace(ops->func_hash->filter_hash);
+	notrace_hash = rcu_dereference_raw_notrace(ops->func_hash->notrace_hash);
 
 	if ((ftrace_hash_empty(filter_hash) ||
 	     ftrace_lookup_ip(filter_hash, ip)) &&
@@ -1503,25 +1511,38 @@ static bool test_rec_ops_needs_regs(struct dyn_ftrace *rec)
 static void ftrace_remove_tramp(struct ftrace_ops *ops,
 				struct dyn_ftrace *rec)
 {
-	struct ftrace_func_entry *entry;
-
-	entry = ftrace_lookup_ip(ops->tramp_hash, rec->ip);
-	if (!entry)
+	/* If TRAMP is not set, no ops should have a trampoline for this */
+	if (!(rec->flags & FTRACE_FL_TRAMP))
 		return;
 
+	rec->flags &= ~FTRACE_FL_TRAMP;
+
+	if ((!ftrace_hash_empty(ops->func_hash->filter_hash) &&
+	     !ftrace_lookup_ip(ops->func_hash->filter_hash, rec->ip)) ||
+	    ftrace_lookup_ip(ops->func_hash->notrace_hash, rec->ip))
+		return;
 	/*
 	 * The tramp_hash entry will be removed at time
 	 * of update.
 	 */
 	ops->nr_trampolines--;
-	rec->flags &= ~FTRACE_FL_TRAMP;
 }
 
-static void ftrace_clear_tramps(struct dyn_ftrace *rec)
+static void ftrace_clear_tramps(struct dyn_ftrace *rec, struct ftrace_ops *ops)
 {
 	struct ftrace_ops *op;
 
+	/* If TRAMP is not set, no ops should have a trampoline for this */
+	if (!(rec->flags & FTRACE_FL_TRAMP))
+		return;
+
 	do_for_each_ftrace_op(op, ftrace_ops_list) {
+		/*
+		 * This function is called to clear other tramps
+		 * not the one that is being updated.
+		 */
+		if (op == ops)
+			continue;
 		if (op->nr_trampolines)
 			ftrace_remove_tramp(op, rec);
 	} while_for_each_ftrace_op(op);
@@ -1554,14 +1575,14 @@ static void __ftrace_hash_rec_update(struct ftrace_ops *ops,
 	 * gets inversed.
 	 */
 	if (filter_hash) {
-		hash = ops->filter_hash;
-		other_hash = ops->notrace_hash;
+		hash = ops->func_hash->filter_hash;
+		other_hash = ops->func_hash->notrace_hash;
 		if (ftrace_hash_empty(hash))
 			all = 1;
 	} else {
 		inc = !inc;
-		hash = ops->notrace_hash;
-		other_hash = ops->filter_hash;
+		hash = ops->func_hash->notrace_hash;
+		other_hash = ops->func_hash->filter_hash;
 		/*
 		 * If the notrace hash has no items,
 		 * then there's nothing to do.
@@ -1622,13 +1643,10 @@ static void __ftrace_hash_rec_update(struct ftrace_ops *ops,
 			/*
 			 * If we are adding another function callback
 			 * to this function, and the previous had a
-			 * trampoline used, then we need to go back to
-			 * the default trampoline.
+			 * custom trampoline in use, then we need to go
+			 * back to the default trampoline.
 			 */
-			rec->flags &= ~FTRACE_FL_TRAMP;
-
-			/* remove trampolines from any ops for this rec */
-			ftrace_clear_tramps(rec);
+			ftrace_clear_tramps(rec, ops);
 		}
 
 		/*
@@ -1682,6 +1700,41 @@ static void ftrace_hash_rec_enable(struct ftrace_ops *ops,
 	__ftrace_hash_rec_update(ops, filter_hash, 1);
 }
 
+static void ftrace_hash_rec_update_modify(struct ftrace_ops *ops,
+					  int filter_hash, int inc)
+{
+	struct ftrace_ops *op;
+
+	__ftrace_hash_rec_update(ops, filter_hash, inc);
+
+	if (ops->func_hash != &global_ops.local_hash)
+		return;
+
+	/*
+	 * If the ops shares the global_ops hash, then we need to update
+	 * all ops that are enabled and use this hash.
+	 */
+	do_for_each_ftrace_op(op, ftrace_ops_list) {
+		/* Already done */
+		if (op == ops)
+			continue;
+		if (op->func_hash == &global_ops.local_hash)
+			__ftrace_hash_rec_update(op, filter_hash, inc);
+	} while_for_each_ftrace_op(op);
+}
+
+static void ftrace_hash_rec_disable_modify(struct ftrace_ops *ops,
+					   int filter_hash)
+{
+	ftrace_hash_rec_update_modify(ops, filter_hash, 0);
+}
+
+static void ftrace_hash_rec_enable_modify(struct ftrace_ops *ops,
+					  int filter_hash)
+{
+	ftrace_hash_rec_update_modify(ops, filter_hash, 1);
+}
+
 static void print_ip_ins(const char *fmt, unsigned char *p)
 {
 	int i;
@@ -1896,8 +1949,8 @@ unsigned long ftrace_get_addr_new(struct dyn_ftrace *rec)
 	if (rec->flags & FTRACE_FL_TRAMP) {
 		ops = ftrace_find_tramp_ops_new(rec);
 		if (FTRACE_WARN_ON(!ops || !ops->trampoline)) {
-			pr_warning("Bad trampoline accounting at: %p (%pS)\n",
-				    (void *)rec->ip, (void *)rec->ip);
+			pr_warn("Bad trampoline accounting at: %p (%pS) (%lx)\n",
+				(void *)rec->ip, (void *)rec->ip, rec->flags);
 			/* Ftrace is shutting down, return anything */
 			return (unsigned long)FTRACE_ADDR;
 		}
@@ -1964,7 +2017,7 @@ __ftrace_replace_code(struct dyn_ftrace *rec, int enable)
 		return ftrace_make_call(rec, ftrace_addr);
 
 	case FTRACE_UPDATE_MAKE_NOP:
-		return ftrace_make_nop(NULL, rec, ftrace_addr);
+		return ftrace_make_nop(NULL, rec, ftrace_old_addr);
 
 	case FTRACE_UPDATE_MODIFY_CALL:
 		return ftrace_modify_call(rec, ftrace_old_addr, ftrace_addr);
@@ -2227,7 +2280,10 @@ static int ftrace_save_ops_tramp_hash(struct ftrace_ops *ops)
 	} while_for_each_ftrace_rec();
 
 	/* The number of recs in the hash must match nr_trampolines */
-	FTRACE_WARN_ON(ops->tramp_hash->count != ops->nr_trampolines);
+	if (FTRACE_WARN_ON(ops->tramp_hash->count != ops->nr_trampolines))
+		pr_warn("count=%ld trampolines=%d\n",
+			ops->tramp_hash->count,
+			ops->nr_trampolines);
 
 	return 0;
 }
@@ -2436,8 +2492,8 @@ static inline int ops_traces_mod(struct ftrace_ops *ops)
 	 * Filter_hash being empty will default to trace module.
 	 * But notrace hash requires a test of individual module functions.
 	 */
-	return ftrace_hash_empty(ops->filter_hash) &&
-		ftrace_hash_empty(ops->notrace_hash);
+	return ftrace_hash_empty(ops->func_hash->filter_hash) &&
+		ftrace_hash_empty(ops->func_hash->notrace_hash);
 }
 
 /*
@@ -2459,12 +2515,12 @@ ops_references_rec(struct ftrace_ops *ops, struct dyn_ftrace *rec)
 		return 0;
 
 	/* The function must be in the filter */
-	if (!ftrace_hash_empty(ops->filter_hash) &&
-	    !ftrace_lookup_ip(ops->filter_hash, rec->ip))
+	if (!ftrace_hash_empty(ops->func_hash->filter_hash) &&
+	    !ftrace_lookup_ip(ops->func_hash->filter_hash, rec->ip))
 		return 0;
 
 	/* If in notrace hash, we ignore it too */
-	if (ftrace_lookup_ip(ops->notrace_hash, rec->ip))
+	if (ftrace_lookup_ip(ops->func_hash->notrace_hash, rec->ip))
 		return 0;
 
 	return 1;
@@ -2785,10 +2841,10 @@ t_next(struct seq_file *m, void *v, loff_t *pos)
 	} else {
 		rec = &iter->pg->records[iter->idx++];
 		if (((iter->flags & FTRACE_ITER_FILTER) &&
-		     !(ftrace_lookup_ip(ops->filter_hash, rec->ip))) ||
+		     !(ftrace_lookup_ip(ops->func_hash->filter_hash, rec->ip))) ||
 
 		    ((iter->flags & FTRACE_ITER_NOTRACE) &&
-		     !ftrace_lookup_ip(ops->notrace_hash, rec->ip)) ||
+		     !ftrace_lookup_ip(ops->func_hash->notrace_hash, rec->ip)) ||
 
 		    ((iter->flags & FTRACE_ITER_ENABLED) &&
 		     !(rec->flags & FTRACE_FL_ENABLED))) {
@@ -2837,9 +2893,9 @@ static void *t_start(struct seq_file *m, loff_t *pos)
 	 * functions are enabled.
 	 */
 	if ((iter->flags & FTRACE_ITER_FILTER &&
-	     ftrace_hash_empty(ops->filter_hash)) ||
+	     ftrace_hash_empty(ops->func_hash->filter_hash)) ||
 	    (iter->flags & FTRACE_ITER_NOTRACE &&
-	     ftrace_hash_empty(ops->notrace_hash))) {
+	     ftrace_hash_empty(ops->func_hash->notrace_hash))) {
 		if (*pos > 0)
 			return t_hash_start(m, pos);
 		iter->flags |= FTRACE_ITER_PRINTALL;
@@ -3001,12 +3057,12 @@ ftrace_regex_open(struct ftrace_ops *ops, int flag,
 	iter->ops = ops;
 	iter->flags = flag;
 
-	mutex_lock(&ops->regex_lock);
+	mutex_lock(&ops->func_hash->regex_lock);
 
 	if (flag & FTRACE_ITER_NOTRACE)
-		hash = ops->notrace_hash;
+		hash = ops->func_hash->notrace_hash;
 	else
-		hash = ops->filter_hash;
+		hash = ops->func_hash->filter_hash;
 
 	if (file->f_mode & FMODE_WRITE) {
 		const int size_bits = FTRACE_HASH_DEFAULT_BITS;
@@ -3041,7 +3097,7 @@ ftrace_regex_open(struct ftrace_ops *ops, int flag,
 		file->private_data = iter;
 
  out_unlock:
-	mutex_unlock(&ops->regex_lock);
+	mutex_unlock(&ops->func_hash->regex_lock);
 
 	return ret;
 }
@@ -3279,7 +3335,7 @@ static struct ftrace_ops trace_probe_ops __read_mostly =
 {
 	.func		= function_trace_probe_call,
 	.flags		= FTRACE_OPS_FL_INITIALIZED,
-	INIT_REGEX_LOCK(trace_probe_ops)
+	INIT_OPS_HASH(trace_probe_ops)
 };
 
 static int ftrace_probe_registered;
@@ -3342,7 +3398,7 @@ register_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
 			       void *data)
 {
 	struct ftrace_func_probe *entry;
-	struct ftrace_hash **orig_hash = &trace_probe_ops.filter_hash;
+	struct ftrace_hash **orig_hash = &trace_probe_ops.func_hash->filter_hash;
 	struct ftrace_hash *hash;
 	struct ftrace_page *pg;
 	struct dyn_ftrace *rec;
@@ -3359,7 +3415,7 @@ register_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
 	if (WARN_ON(not))
 		return -EINVAL;
 
-	mutex_lock(&trace_probe_ops.regex_lock);
+	mutex_lock(&trace_probe_ops.func_hash->regex_lock);
 
 	hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, *orig_hash);
 	if (!hash) {
@@ -3428,7 +3484,7 @@ register_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
  out_unlock:
 	mutex_unlock(&ftrace_lock);
  out:
-	mutex_unlock(&trace_probe_ops.regex_lock);
+	mutex_unlock(&trace_probe_ops.func_hash->regex_lock);
 	free_ftrace_hash(hash);
 
 	return count;
@@ -3446,7 +3502,7 @@ __unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
 	struct ftrace_func_entry *rec_entry;
 	struct ftrace_func_probe *entry;
 	struct ftrace_func_probe *p;
-	struct ftrace_hash **orig_hash = &trace_probe_ops.filter_hash;
+	struct ftrace_hash **orig_hash = &trace_probe_ops.func_hash->filter_hash;
 	struct list_head free_list;
 	struct ftrace_hash *hash;
 	struct hlist_node *tmp;
@@ -3468,7 +3524,7 @@ __unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
 		return;
 	}
 
-	mutex_lock(&trace_probe_ops.regex_lock);
+	mutex_lock(&trace_probe_ops.func_hash->regex_lock);
 
 	hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, *orig_hash);
 	if (!hash)
@@ -3521,7 +3577,7 @@ __unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
 	mutex_unlock(&ftrace_lock);
 
  out_unlock:
-	mutex_unlock(&trace_probe_ops.regex_lock);
+	mutex_unlock(&trace_probe_ops.func_hash->regex_lock);
 	free_ftrace_hash(hash);
 }
 
@@ -3717,12 +3773,12 @@ ftrace_set_hash(struct ftrace_ops *ops, unsigned char *buf, int len,
 	if (unlikely(ftrace_disabled))
 		return -ENODEV;
 
-	mutex_lock(&ops->regex_lock);
+	mutex_lock(&ops->func_hash->regex_lock);
 
 	if (enable)
-		orig_hash = &ops->filter_hash;
+		orig_hash = &ops->func_hash->filter_hash;
 	else
-		orig_hash = &ops->notrace_hash;
+		orig_hash = &ops->func_hash->notrace_hash;
 
 	if (reset)
 		hash = alloc_ftrace_hash(FTRACE_HASH_DEFAULT_BITS);
@@ -3752,7 +3808,7 @@ ftrace_set_hash(struct ftrace_ops *ops, unsigned char *buf, int len,
 	mutex_unlock(&ftrace_lock);
 
  out_regex_unlock:
-	mutex_unlock(&ops->regex_lock);
+	mutex_unlock(&ops->func_hash->regex_lock);
 
 	free_ftrace_hash(hash);
 	return ret;
@@ -3975,15 +4031,15 @@ int ftrace_regex_release(struct inode *inode, struct file *file)
 
 	trace_parser_put(parser);
 
-	mutex_lock(&iter->ops->regex_lock);
+	mutex_lock(&iter->ops->func_hash->regex_lock);
 
 	if (file->f_mode & FMODE_WRITE) {
 		filter_hash = !!(iter->flags & FTRACE_ITER_FILTER);
 
 		if (filter_hash)
-			orig_hash = &iter->ops->filter_hash;
+			orig_hash = &iter->ops->func_hash->filter_hash;
 		else
-			orig_hash = &iter->ops->notrace_hash;
+			orig_hash = &iter->ops->func_hash->notrace_hash;
 
 		mutex_lock(&ftrace_lock);
 		ret = ftrace_hash_move(iter->ops, filter_hash,
@@ -3994,7 +4050,7 @@ int ftrace_regex_release(struct inode *inode, struct file *file)
 		mutex_unlock(&ftrace_lock);
 	}
 
-	mutex_unlock(&iter->ops->regex_lock);
+	mutex_unlock(&iter->ops->func_hash->regex_lock);
 	free_ftrace_hash(iter->hash);
 	kfree(iter);
 
@@ -4611,7 +4667,6 @@ void __init ftrace_init(void)
 static struct ftrace_ops global_ops = {
 	.func			= ftrace_stub,
 	.flags			= FTRACE_OPS_FL_RECURSION_SAFE | FTRACE_OPS_FL_INITIALIZED,
-	INIT_REGEX_LOCK(global_ops)
 };
 
 static int __init ftrace_nodyn_init(void)
@@ -4713,7 +4768,7 @@ ftrace_ops_control_func(unsigned long ip, unsigned long parent_ip,
 static struct ftrace_ops control_ops = {
 	.func	= ftrace_ops_control_func,
 	.flags	= FTRACE_OPS_FL_RECURSION_SAFE | FTRACE_OPS_FL_INITIALIZED,
-	INIT_REGEX_LOCK(control_ops)
+	INIT_OPS_HASH(control_ops)
 };
 
 static inline void
@@ -5145,6 +5200,17 @@ ftrace_enable_sysctl(struct ctl_table *table, int write,
 
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
 
+static struct ftrace_ops graph_ops = {
+	.func			= ftrace_stub,
+	.flags			= FTRACE_OPS_FL_RECURSION_SAFE |
+				   FTRACE_OPS_FL_INITIALIZED |
+				   FTRACE_OPS_FL_STUB,
+#ifdef FTRACE_GRAPH_TRAMP_ADDR
+	.trampoline		= FTRACE_GRAPH_TRAMP_ADDR,
+#endif
+	ASSIGN_OPS_HASH(graph_ops, &global_ops.local_hash)
+};
+
 static int ftrace_graph_active;
 
 int ftrace_graph_entry_stub(struct ftrace_graph_ent *trace)
@@ -5307,12 +5373,28 @@ static int ftrace_graph_entry_test(struct ftrace_graph_ent *trace)
  */
 static void update_function_graph_func(void)
 {
-	if (ftrace_ops_list == &ftrace_list_end ||
-	    (ftrace_ops_list == &global_ops &&
-	     global_ops.next == &ftrace_list_end))
-		ftrace_graph_entry = __ftrace_graph_entry;
-	else
+	struct ftrace_ops *op;
+	bool do_test = false;
+
+	/*
+	 * The graph and global ops share the same set of functions
+	 * to test. If any other ops is on the list, then
+	 * the graph tracing needs to test if its the function
+	 * it should call.
+	 */
+	do_for_each_ftrace_op(op, ftrace_ops_list) {
+		if (op != &global_ops && op != &graph_ops &&
+		    op != &ftrace_list_end) {
+			do_test = true;
+			/* in double loop, break out with goto */
+			goto out;
+		}
+	} while_for_each_ftrace_op(op);
+ out:
+	if (do_test)
 		ftrace_graph_entry = ftrace_graph_entry_test;
+	else
+		ftrace_graph_entry = __ftrace_graph_entry;
 }
 
 static struct notifier_block ftrace_suspend_notifier = {
@@ -5353,16 +5435,7 @@ int register_ftrace_graph(trace_func_graph_ret_t retfunc,
 	ftrace_graph_entry = ftrace_graph_entry_test;
 	update_function_graph_func();
 
-	/* Function graph doesn't use the .func field of global_ops */
-	global_ops.flags |= FTRACE_OPS_FL_STUB;
-
-#ifdef CONFIG_DYNAMIC_FTRACE
-	/* Optimize function graph calling (if implemented by arch) */
-	if (FTRACE_GRAPH_TRAMP_ADDR != 0)
-		global_ops.trampoline = FTRACE_GRAPH_TRAMP_ADDR;
-#endif
-
-	ret = ftrace_startup(&global_ops, FTRACE_START_FUNC_RET);
+	ret = ftrace_startup(&graph_ops, FTRACE_START_FUNC_RET);
 
 out:
 	mutex_unlock(&ftrace_lock);
@@ -5380,12 +5453,7 @@ void unregister_ftrace_graph(void)
 	ftrace_graph_return = (trace_func_graph_ret_t)ftrace_stub;
 	ftrace_graph_entry = ftrace_graph_entry_stub;
 	__ftrace_graph_entry = ftrace_graph_entry_stub;
-	ftrace_shutdown(&global_ops, FTRACE_STOP_FUNC_RET);
-	global_ops.flags &= ~FTRACE_OPS_FL_STUB;
-#ifdef CONFIG_DYNAMIC_FTRACE
-	if (FTRACE_GRAPH_TRAMP_ADDR != 0)
-		global_ops.trampoline = 0;
-#endif
+	ftrace_shutdown(&graph_ops, FTRACE_STOP_FUNC_RET);
 	unregister_pm_notifier(&ftrace_suspend_notifier);
 	unregister_trace_sched_switch(ftrace_graph_probe_sched_switch, NULL);
 
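
For reference, a small userspace sketch of how the two initializer macros introduced in this patch are intended to be used for statically defined ops. The mutex initialization that the real macros also perform is omitted here; the struct and macro names mirror the patch, everything else is illustrative:

/* toy demonstration of INIT_OPS_HASH / ASSIGN_OPS_HASH, not kernel code */
#include <stdio.h>

struct ftrace_ops_hash { int dummy; };

struct ftrace_ops {
	struct ftrace_ops_hash local_hash;
	struct ftrace_ops_hash *func_hash;
};

/* simplified: the kernel versions also initialize local_hash.regex_lock */
#define INIT_OPS_HASH(opsname)		.func_hash = &opsname.local_hash,
#define ASSIGN_OPS_HASH(opsname, val)	.func_hash = val,

static struct ftrace_ops global_ops = {
	INIT_OPS_HASH(global_ops)	/* uses its own local_hash */
};

static struct ftrace_ops graph_ops = {
	ASSIGN_OPS_HASH(graph_ops, &global_ops.local_hash)	/* shares global's */
};

int main(void)
{
	printf("graph shares global's hash: %s\n",
	       graph_ops.func_hash == &global_ops.local_hash ? "yes" : "no");
	return 0;
}

This mirrors what the patch does with graph_ops: instead of toggling FTRACE_OPS_FL_STUB and the trampoline on global_ops at register/unregister time, the function graph tracer gets its own ops whose func_hash is statically pointed at global_ops.local_hash, so both ops filter on the same set of functions.
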
