Diffstat (limited to 'kernel/trace/ftrace.c')

-rw-r--r--	kernel/trace/ftrace.c	267
1 file changed, 116 insertions, 151 deletions
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index 4a54a25afa2f..5b372e3ed675 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -62,7 +62,7 @@
 #define FTRACE_HASH_DEFAULT_BITS 10
 #define FTRACE_HASH_MAX_BITS 12
 
-#define FL_GLOBAL_CONTROL_MASK (FTRACE_OPS_FL_GLOBAL | FTRACE_OPS_FL_CONTROL)
+#define FL_GLOBAL_CONTROL_MASK (FTRACE_OPS_FL_CONTROL)
 
 #ifdef CONFIG_DYNAMIC_FTRACE
 #define INIT_REGEX_LOCK(opsname)	\
@@ -103,7 +103,6 @@ static int ftrace_disabled __read_mostly;
 
 static DEFINE_MUTEX(ftrace_lock);
 
-static struct ftrace_ops *ftrace_global_list __read_mostly = &ftrace_list_end;
 static struct ftrace_ops *ftrace_control_list __read_mostly = &ftrace_list_end;
 static struct ftrace_ops *ftrace_ops_list __read_mostly = &ftrace_list_end;
 ftrace_func_t ftrace_trace_function __read_mostly = ftrace_stub;
@@ -171,23 +170,6 @@ int ftrace_nr_registered_ops(void)
 	return cnt;
 }
 
-static void
-ftrace_global_list_func(unsigned long ip, unsigned long parent_ip,
-			struct ftrace_ops *op, struct pt_regs *regs)
-{
-	int bit;
-
-	bit = trace_test_and_set_recursion(TRACE_GLOBAL_START, TRACE_GLOBAL_MAX);
-	if (bit < 0)
-		return;
-
-	do_for_each_ftrace_op(op, ftrace_global_list) {
-		op->func(ip, parent_ip, op, regs);
-	} while_for_each_ftrace_op(op);
-
-	trace_clear_recursion(bit);
-}
-
 static void ftrace_pid_func(unsigned long ip, unsigned long parent_ip,
 			    struct ftrace_ops *op, struct pt_regs *regs)
 {
@@ -237,43 +219,6 @@ static int control_ops_alloc(struct ftrace_ops *ops)
 	return 0;
 }
 
-static void update_global_ops(void)
-{
-	ftrace_func_t func = ftrace_global_list_func;
-	void *private = NULL;
-
-	/* The list has its own recursion protection. */
-	global_ops.flags |= FTRACE_OPS_FL_RECURSION_SAFE;
-
-	/*
-	 * If there's only one function registered, then call that
-	 * function directly. Otherwise, we need to iterate over the
-	 * registered callers.
-	 */
-	if (ftrace_global_list == &ftrace_list_end ||
-	    ftrace_global_list->next == &ftrace_list_end) {
-		func = ftrace_global_list->func;
-		private = ftrace_global_list->private;
-		/*
-		 * As we are calling the function directly.
-		 * If it does not have recursion protection,
-		 * the function_trace_op needs to be updated
-		 * accordingly.
-		 */
-		if (!(ftrace_global_list->flags & FTRACE_OPS_FL_RECURSION_SAFE))
-			global_ops.flags &= ~FTRACE_OPS_FL_RECURSION_SAFE;
-	}
-
-	/* If we filter on pids, update to use the pid function */
-	if (!list_empty(&ftrace_pids)) {
-		set_ftrace_pid_function(func);
-		func = ftrace_pid_func;
-	}
-
-	global_ops.func = func;
-	global_ops.private = private;
-}
-
 static void ftrace_sync(struct work_struct *work)
 {
 	/*
@@ -301,8 +246,6 @@ static void update_ftrace_function(void)
 {
 	ftrace_func_t func;
 
-	update_global_ops();
-
 	/*
 	 * If we are at the end of the list and this ops is
 	 * recursion safe and not dynamic and the arch supports passing ops,
@@ -314,10 +257,7 @@ static void update_ftrace_function(void)
 	     (ftrace_ops_list->flags & FTRACE_OPS_FL_RECURSION_SAFE) &&
 	     !FTRACE_FORCE_LIST_FUNC)) {
 		/* Set the ftrace_ops that the arch callback uses */
-		if (ftrace_ops_list == &global_ops)
-			set_function_trace_op = ftrace_global_list;
-		else
-			set_function_trace_op = ftrace_ops_list;
+		set_function_trace_op = ftrace_ops_list;
 		func = ftrace_ops_list->func;
 	} else {
 		/* Just use the default ftrace_ops */
@@ -373,6 +313,11 @@ static void update_ftrace_function(void)
 	ftrace_trace_function = func;
 }
 
+int using_ftrace_ops_list_func(void)
+{
+	return ftrace_trace_function == ftrace_ops_list_func;
+}
+
 static void add_ftrace_ops(struct ftrace_ops **list, struct ftrace_ops *ops)
 {
 	ops->next = *list;
@@ -434,16 +379,9 @@ static int __register_ftrace_function(struct ftrace_ops *ops)
 	if (ops->flags & FTRACE_OPS_FL_DELETED)
 		return -EINVAL;
 
-	if (FTRACE_WARN_ON(ops == &global_ops))
-		return -EINVAL;
-
 	if (WARN_ON(ops->flags & FTRACE_OPS_FL_ENABLED))
 		return -EBUSY;
 
-	/* We don't support both control and global flags set. */
-	if ((ops->flags & FL_GLOBAL_CONTROL_MASK) == FL_GLOBAL_CONTROL_MASK)
-		return -EINVAL;
-
 #ifndef CONFIG_DYNAMIC_FTRACE_WITH_REGS
 	/*
 	 * If the ftrace_ops specifies SAVE_REGS, then it only can be used
@@ -461,10 +399,7 @@ static int __register_ftrace_function(struct ftrace_ops *ops)
 	if (!core_kernel_data((unsigned long)ops))
 		ops->flags |= FTRACE_OPS_FL_DYNAMIC;
 
-	if (ops->flags & FTRACE_OPS_FL_GLOBAL) {
-		add_ftrace_list_ops(&ftrace_global_list, &global_ops, ops);
-		ops->flags |= FTRACE_OPS_FL_ENABLED;
-	} else if (ops->flags & FTRACE_OPS_FL_CONTROL) {
+	if (ops->flags & FTRACE_OPS_FL_CONTROL) {
 		if (control_ops_alloc(ops))
 			return -ENOMEM;
 		add_ftrace_list_ops(&ftrace_control_list, &control_ops, ops);
@@ -484,15 +419,7 @@ static int __unregister_ftrace_function(struct ftrace_ops *ops)
 	if (WARN_ON(!(ops->flags & FTRACE_OPS_FL_ENABLED)))
 		return -EBUSY;
 
-	if (FTRACE_WARN_ON(ops == &global_ops))
-		return -EINVAL;
-
-	if (ops->flags & FTRACE_OPS_FL_GLOBAL) {
-		ret = remove_ftrace_list_ops(&ftrace_global_list,
-					     &global_ops, ops);
-		if (!ret)
-			ops->flags &= ~FTRACE_OPS_FL_ENABLED;
-	} else if (ops->flags & FTRACE_OPS_FL_CONTROL) {
+	if (ops->flags & FTRACE_OPS_FL_CONTROL) {
 		ret = remove_ftrace_list_ops(&ftrace_control_list,
 					     &control_ops, ops);
 	} else
@@ -895,7 +822,7 @@ function_profile_call(unsigned long ip, unsigned long parent_ip,
 
 	local_irq_save(flags);
 
-	stat = &__get_cpu_var(ftrace_profile_stats);
+	stat = this_cpu_ptr(&ftrace_profile_stats);
 	if (!stat->hash || !ftrace_profile_enabled)
 		goto out;
 
@@ -926,7 +853,7 @@ static void profile_graph_return(struct ftrace_graph_ret *trace)
 	unsigned long flags;
 
 	local_irq_save(flags);
-	stat = &__get_cpu_var(ftrace_profile_stats);
+	stat = this_cpu_ptr(&ftrace_profile_stats);
 	if (!stat->hash || !ftrace_profile_enabled)
 		goto out;
 
@@ -1178,7 +1105,7 @@ struct ftrace_page {
 static struct ftrace_page	*ftrace_pages_start;
 static struct ftrace_page	*ftrace_pages;
 
-static bool ftrace_hash_empty(struct ftrace_hash *hash)
+static bool __always_inline ftrace_hash_empty(struct ftrace_hash *hash)
 {
 	return !hash || !hash->count;
 }
@@ -1625,7 +1552,14 @@ static void __ftrace_hash_rec_update(struct ftrace_ops *ops,
 		in_other_hash = !!ftrace_lookup_ip(other_hash, rec->ip);
 
 		/*
+		 * If filter_hash is set, we want to match all functions
+		 * that are in the hash but not in the other hash.
 		 *
+		 * If filter_hash is not set, then we are decrementing.
+		 * That means we match anything that is in the hash
+		 * and also in the other_hash. That is, we need to turn
+		 * off functions in the other hash because they are disabled
+		 * by this hash.
 		 */
 		if (filter_hash && in_hash && !in_other_hash)
 			match = 1;
@@ -1767,19 +1701,15 @@ static int ftrace_check_record(struct dyn_ftrace *rec, int enable, int update)
		/*
		 * If this record is being updated from a nop, then
		 *   return UPDATE_MAKE_CALL.
-		 * Otherwise, if the EN flag is set, then return
-		 *   UPDATE_MODIFY_CALL_REGS to tell the caller to convert
-		 *   from the non-save regs, to a save regs function.
		 * Otherwise,
		 *   return UPDATE_MODIFY_CALL to tell the caller to convert
-		 *   from the save regs, to a non-save regs function.
+		 *   from the save regs, to a non-save regs function or
+		 *   vice versa.
		 */
		if (flag & FTRACE_FL_ENABLED)
			return FTRACE_UPDATE_MAKE_CALL;
-		else if (rec->flags & FTRACE_FL_REGS_EN)
-			return FTRACE_UPDATE_MODIFY_CALL_REGS;
-		else
-			return FTRACE_UPDATE_MODIFY_CALL;
+
+		return FTRACE_UPDATE_MODIFY_CALL;
	}

	if (update) {
@@ -1821,6 +1751,42 @@ int ftrace_test_record(struct dyn_ftrace *rec, int enable)
	return ftrace_check_record(rec, enable, 0);
 }
 
+/**
+ * ftrace_get_addr_new - Get the call address to set to
+ * @rec: The ftrace record descriptor
+ *
+ * If the record has the FTRACE_FL_REGS set, that means that it
+ * wants to convert to a callback that saves all regs. If FTRACE_FL_REGS
+ * is not set, then it wants to convert to the normal callback.
+ *
+ * Returns the address of the trampoline to set to
+ */
+unsigned long ftrace_get_addr_new(struct dyn_ftrace *rec)
+{
+	if (rec->flags & FTRACE_FL_REGS)
+		return (unsigned long)FTRACE_REGS_ADDR;
+	else
+		return (unsigned long)FTRACE_ADDR;
+}
+
+/**
+ * ftrace_get_addr_curr - Get the call address that is already there
+ * @rec: The ftrace record descriptor
+ *
+ * The FTRACE_FL_REGS_EN is set when the record already points to
+ * a function that saves all the regs. Basically the '_EN' version
+ * represents the current state of the function.
+ *
+ * Returns the address of the trampoline that is currently being called
+ */
+unsigned long ftrace_get_addr_curr(struct dyn_ftrace *rec)
+{
+	if (rec->flags & FTRACE_FL_REGS_EN)
+		return (unsigned long)FTRACE_REGS_ADDR;
+	else
+		return (unsigned long)FTRACE_ADDR;
+}
+
 static int
 __ftrace_replace_code(struct dyn_ftrace *rec, int enable)
 {
@@ -1828,12 +1794,12 @@ __ftrace_replace_code(struct dyn_ftrace *rec, int enable)
	unsigned long ftrace_addr;
	int ret;

-	ret = ftrace_update_record(rec, enable);
+	ftrace_addr = ftrace_get_addr_new(rec);

-	if (rec->flags & FTRACE_FL_REGS)
-		ftrace_addr = (unsigned long)FTRACE_REGS_ADDR;
-	else
-		ftrace_addr = (unsigned long)FTRACE_ADDR;
+	/* This needs to be done before we call ftrace_update_record */
+	ftrace_old_addr = ftrace_get_addr_curr(rec);
+
+	ret = ftrace_update_record(rec, enable);

	switch (ret) {
	case FTRACE_UPDATE_IGNORE:
@@ -1845,13 +1811,7 @@ __ftrace_replace_code(struct dyn_ftrace *rec, int enable)
	case FTRACE_UPDATE_MAKE_NOP:
		return ftrace_make_nop(NULL, rec, ftrace_addr);

-	case FTRACE_UPDATE_MODIFY_CALL_REGS:
	case FTRACE_UPDATE_MODIFY_CALL:
-		if (rec->flags & FTRACE_FL_REGS)
-			ftrace_old_addr = (unsigned long)FTRACE_ADDR;
-		else
-			ftrace_old_addr = (unsigned long)FTRACE_REGS_ADDR;
-
		return ftrace_modify_call(rec, ftrace_old_addr, ftrace_addr);
	}

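Taken together, the two new helpers collapse __ftrace_replace_code() into the form below. This is a sketch of the post-patch function assembled from the hunks above; the ftrace_old_addr declaration and the MAKE_CALL case sit in context lines this diff does not show, so minor details may differ from the tree:

	static int
	__ftrace_replace_code(struct dyn_ftrace *rec, int enable)
	{
		unsigned long ftrace_old_addr;
		unsigned long ftrace_addr;
		int ret;

		/* The address we want the record to call next */
		ftrace_addr = ftrace_get_addr_new(rec);

		/* This needs to be done before we call ftrace_update_record */
		ftrace_old_addr = ftrace_get_addr_curr(rec);

		ret = ftrace_update_record(rec, enable);

		switch (ret) {
		case FTRACE_UPDATE_IGNORE:
			return 0;

		case FTRACE_UPDATE_MAKE_CALL:
			return ftrace_make_call(rec, ftrace_addr);

		case FTRACE_UPDATE_MAKE_NOP:
			return ftrace_make_nop(NULL, rec, ftrace_addr);

		case FTRACE_UPDATE_MODIFY_CALL:
			return ftrace_modify_call(rec, ftrace_old_addr, ftrace_addr);
		}

		return -1; /* unknown ftrace bug */
	}

With ftrace_get_addr_curr() answering "which trampoline is installed now" (FL_REGS_EN) and ftrace_get_addr_new() answering "which one should be installed" (FL_REGS), the separate MODIFY_CALL_REGS case becomes unnecessary.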
@@ -2115,7 +2075,6 @@ static void ftrace_startup_enable(int command)
 
 static int ftrace_startup(struct ftrace_ops *ops, int command)
 {
-	bool hash_enable = true;
	int ret;

	if (unlikely(ftrace_disabled))
@@ -2128,18 +2087,9 @@ static int ftrace_startup(struct ftrace_ops *ops, int command)
	ftrace_start_up++;
	command |= FTRACE_UPDATE_CALLS;

-	/* ops marked global share the filter hashes */
-	if (ops->flags & FTRACE_OPS_FL_GLOBAL) {
-		ops = &global_ops;
-		/* Don't update hash if global is already set */
-		if (global_start_up)
-			hash_enable = false;
-		global_start_up++;
-	}
-
	ops->flags |= FTRACE_OPS_FL_ENABLED;
-	if (hash_enable)
-		ftrace_hash_rec_enable(ops, 1);
+
+	ftrace_hash_rec_enable(ops, 1);

	ftrace_startup_enable(command);

@@ -2148,7 +2098,6 @@ static int ftrace_startup(struct ftrace_ops *ops, int command)
 
 static int ftrace_shutdown(struct ftrace_ops *ops, int command)
 {
-	bool hash_disable = true;
	int ret;

	if (unlikely(ftrace_disabled))
@@ -2166,21 +2115,9 @@ static int ftrace_shutdown(struct ftrace_ops *ops, int command)
	 */
	WARN_ON_ONCE(ftrace_start_up < 0);

-	if (ops->flags & FTRACE_OPS_FL_GLOBAL) {
-		ops = &global_ops;
-		global_start_up--;
-		WARN_ON_ONCE(global_start_up < 0);
-		/* Don't update hash if global still has users */
-		if (global_start_up) {
-			WARN_ON_ONCE(!ftrace_start_up);
-			hash_disable = false;
-		}
-	}
-
-	if (hash_disable)
-		ftrace_hash_rec_disable(ops, 1);
+	ftrace_hash_rec_disable(ops, 1);

-	if (ops != &global_ops || !global_start_up)
+	if (!global_start_up)
		ops->flags &= ~FTRACE_OPS_FL_ENABLED;

	command |= FTRACE_UPDATE_CALLS;
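With the global bookkeeping gone, ftrace_startup() reduces to a straight-line path. A sketch of the resulting function, with the __register_ftrace_function() call filled in from context outside the hunks (treat it as an approximation of the post-patch tree, not a verbatim quote):

	static int ftrace_startup(struct ftrace_ops *ops, int command)
	{
		int ret;

		if (unlikely(ftrace_disabled))
			return -ENODEV;

		ret = __register_ftrace_function(ops);
		if (ret)
			return ret;

		ftrace_start_up++;
		command |= FTRACE_UPDATE_CALLS;

		/* Every ops now manages its own filter hash; nothing is shared */
		ops->flags |= FTRACE_OPS_FL_ENABLED;

		ftrace_hash_rec_enable(ops, 1);

		ftrace_startup_enable(command);

		return 0;
	}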
@@ -3524,10 +3461,6 @@ ftrace_set_hash(struct ftrace_ops *ops, unsigned char *buf, int len,
	struct ftrace_hash *hash;
	int ret;

-	/* All global ops uses the global ops filters */
-	if (ops->flags & FTRACE_OPS_FL_GLOBAL)
-		ops = &global_ops;
-
	if (unlikely(ftrace_disabled))
		return -ENODEV;

@@ -3639,8 +3572,7 @@ int ftrace_set_notrace(struct ftrace_ops *ops, unsigned char *buf,
 }
 EXPORT_SYMBOL_GPL(ftrace_set_notrace);
 /**
- * ftrace_set_filter - set a function to filter on in ftrace
- * @ops - the ops to set the filter with
+ * ftrace_set_global_filter - set a function to filter on with global tracers
  * @buf - the string that holds the function filter text.
  * @len - the length of the string.
  * @reset - non zero to reset all filters before applying this filter.
@@ -3655,8 +3587,7 @@ void ftrace_set_global_filter(unsigned char *buf, int len, int reset)
 EXPORT_SYMBOL_GPL(ftrace_set_global_filter);
 
 /**
- * ftrace_set_notrace - set a function to not trace in ftrace
- * @ops - the ops to set the notrace filter with
+ * ftrace_set_global_notrace - set a function to not trace with global tracers
  * @buf - the string that holds the function notrace text.
  * @len - the length of the string.
  * @reset - non zero to reset all filters before applying this filter.
@@ -4443,6 +4374,34 @@ ftrace_ops_test(struct ftrace_ops *ops, unsigned long ip, void *regs)
 
 #endif /* CONFIG_DYNAMIC_FTRACE */
 
+__init void ftrace_init_global_array_ops(struct trace_array *tr)
+{
+	tr->ops = &global_ops;
+	tr->ops->private = tr;
+}
+
+void ftrace_init_array_ops(struct trace_array *tr, ftrace_func_t func)
+{
+	/* If we filter on pids, update to use the pid function */
+	if (tr->flags & TRACE_ARRAY_FL_GLOBAL) {
+		if (WARN_ON(tr->ops->func != ftrace_stub))
+			printk("ftrace ops had %pS for function\n",
+			       tr->ops->func);
+		/* Only the top level instance does pid tracing */
+		if (!list_empty(&ftrace_pids)) {
+			set_ftrace_pid_function(func);
+			func = ftrace_pid_func;
+		}
+	}
+	tr->ops->func = func;
+	tr->ops->private = tr;
+}
+
+void ftrace_reset_array_ops(struct trace_array *tr)
+{
+	tr->ops->func = ftrace_stub;
+}
+
 static void
 ftrace_ops_control_func(unsigned long ip, unsigned long parent_ip,
			struct ftrace_ops *op, struct pt_regs *regs)
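The three helpers added above are consumed by the tracer side (the companion changes to kernel/trace/trace_functions.c in the same series, not shown in this diff). A rough caller-side sketch; function_trace_call and the exact call sites are illustrative stand-ins, not code from this patch:

	/* Sketch only: how a tracer instance attaches its callback now that
	 * FTRACE_OPS_FL_GLOBAL is gone. Each trace_array carries its own
	 * ftrace_ops instead of sharing one global list.
	 */
	static int function_trace_init(struct trace_array *tr)
	{
		/* Point tr->ops->func at this tracer's callback */
		ftrace_init_array_ops(tr, function_trace_call);

		/* A plain ops registration; no GLOBAL flag required */
		return register_ftrace_function(tr->ops);
	}

	static void function_trace_reset(struct trace_array *tr)
	{
		unregister_ftrace_function(tr->ops);
		ftrace_reset_array_ops(tr);	/* back to ftrace_stub */
	}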
@@ -4501,9 +4460,16 @@ __ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip,
	 */
	preempt_disable_notrace();
	do_for_each_ftrace_op(op, ftrace_ops_list) {
-		if (ftrace_ops_test(op, ip, regs))
+		if (ftrace_ops_test(op, ip, regs)) {
+			if (WARN_ON(!op->func)) {
+				function_trace_stop = 1;
+				printk("op=%p %pS\n", op, op);
+				goto out;
+			}
			op->func(ip, parent_ip, op, regs);
+		}
	} while_for_each_ftrace_op(op);
+out:
	preempt_enable_notrace();
	trace_clear_recursion(bit);
 }
@@ -4908,7 +4874,6 @@ ftrace_enable_sysctl(struct ctl_table *table, int write,
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
 
 static int ftrace_graph_active;
-static struct notifier_block ftrace_suspend_notifier;
 
 int ftrace_graph_entry_stub(struct ftrace_graph_ent *trace)
 {
@@ -5054,13 +5019,6 @@ ftrace_suspend_notifier_call(struct notifier_block *bl, unsigned long state,
	return NOTIFY_DONE;
 }
 
-/* Just a place holder for function graph */
-static struct ftrace_ops fgraph_ops __read_mostly = {
-	.func = ftrace_stub,
-	.flags = FTRACE_OPS_FL_STUB | FTRACE_OPS_FL_GLOBAL |
-		  FTRACE_OPS_FL_RECURSION_SAFE,
-};
-
 static int ftrace_graph_entry_test(struct ftrace_graph_ent *trace)
 {
	if (!ftrace_ops_test(&global_ops, trace->func, NULL))
@@ -5085,6 +5043,10 @@ static void update_function_graph_func(void)
		ftrace_graph_entry = ftrace_graph_entry_test;
 }
 
+static struct notifier_block ftrace_suspend_notifier = {
+	.notifier_call = ftrace_suspend_notifier_call,
+};
+
 int register_ftrace_graph(trace_func_graph_ret_t retfunc,
			trace_func_graph_ent_t entryfunc)
 {
@@ -5098,7 +5060,6 @@ int register_ftrace_graph(trace_func_graph_ret_t retfunc,
		goto out;
	}

-	ftrace_suspend_notifier.notifier_call = ftrace_suspend_notifier_call;
	register_pm_notifier(&ftrace_suspend_notifier);

	ftrace_graph_active++;
@@ -5120,7 +5081,10 @@ int register_ftrace_graph(trace_func_graph_ret_t retfunc,
	ftrace_graph_entry = ftrace_graph_entry_test;
	update_function_graph_func();

-	ret = ftrace_startup(&fgraph_ops, FTRACE_START_FUNC_RET);
+	/* Function graph doesn't use the .func field of global_ops */
+	global_ops.flags |= FTRACE_OPS_FL_STUB;
+
+	ret = ftrace_startup(&global_ops, FTRACE_START_FUNC_RET);

 out:
	mutex_unlock(&ftrace_lock);
@@ -5138,7 +5102,8 @@ void unregister_ftrace_graph(void)
	ftrace_graph_return = (trace_func_graph_ret_t)ftrace_stub;
	ftrace_graph_entry = ftrace_graph_entry_stub;
	__ftrace_graph_entry = ftrace_graph_entry_stub;
-	ftrace_shutdown(&fgraph_ops, FTRACE_STOP_FUNC_RET);
+	ftrace_shutdown(&global_ops, FTRACE_STOP_FUNC_RET);
+	global_ops.flags &= ~FTRACE_OPS_FL_STUB;
	unregister_pm_notifier(&ftrace_suspend_notifier);
	unregister_trace_sched_switch(ftrace_graph_probe_sched_switch, NULL);
