Diffstat (limited to 'kernel/trace/ftrace.c')
 -rw-r--r--  kernel/trace/ftrace.c | 322
 1 file changed, 244 insertions(+), 78 deletions(-)

diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index b4f20fba09fc..9dcf15d38380 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -64,12 +64,20 @@
 
 #define FL_GLOBAL_CONTROL_MASK (FTRACE_OPS_FL_GLOBAL | FTRACE_OPS_FL_CONTROL)
 
+static struct ftrace_ops ftrace_list_end __read_mostly = {
+	.func = ftrace_stub,
+	.flags = FTRACE_OPS_FL_RECURSION_SAFE,
+};
+
 /* ftrace_enabled is a method to turn ftrace on or off */
 int ftrace_enabled __read_mostly;
 static int last_ftrace_enabled;
 
 /* Quick disabling of function tracer. */
-int function_trace_stop;
+int function_trace_stop __read_mostly;
+
+/* Current function tracing op */
+struct ftrace_ops *function_trace_op __read_mostly = &ftrace_list_end;
 
 /* List for set_ftrace_pid's pids. */
 LIST_HEAD(ftrace_pids);
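
Worth noting about the hunk above: ftrace_list_end moves to the top of the file because callback lists are terminated by a stub entry rather than by NULL, so function_trace_op always points at something callable and list walkers never need a NULL check. A standalone sketch of the pattern, with illustrative names only:

struct ops {
	void (*func)(unsigned long ip);
	struct ops *next;
};

static void stub(unsigned long ip) { }	/* harmless if ever called */

/* The terminator is a real, callable element, not NULL. */
static struct ops list_end = { .func = stub };
static struct ops *list = &list_end;

static void call_all(unsigned long ip)
{
	struct ops *op;

	/* Compare against the terminator; nothing here can be NULL. */
	for (op = list; op != &list_end; op = op->next)
		op->func(ip);
}

Even a reader that races with a list update and lands on the terminator merely invokes the stub, which is the same trick ftrace_stub plays in the kernel proper.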
@@ -86,22 +94,43 @@ static int ftrace_disabled __read_mostly;
 
 static DEFINE_MUTEX(ftrace_lock);
 
-static struct ftrace_ops ftrace_list_end __read_mostly = {
-	.func = ftrace_stub,
-};
-
 static struct ftrace_ops *ftrace_global_list __read_mostly = &ftrace_list_end;
 static struct ftrace_ops *ftrace_control_list __read_mostly = &ftrace_list_end;
 static struct ftrace_ops *ftrace_ops_list __read_mostly = &ftrace_list_end;
 ftrace_func_t ftrace_trace_function __read_mostly = ftrace_stub;
-static ftrace_func_t __ftrace_trace_function_delay __read_mostly = ftrace_stub;
-ftrace_func_t __ftrace_trace_function __read_mostly = ftrace_stub;
 ftrace_func_t ftrace_pid_function __read_mostly = ftrace_stub;
 static struct ftrace_ops global_ops;
 static struct ftrace_ops control_ops;
 
-static void
-ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip);
+#if ARCH_SUPPORTS_FTRACE_OPS
+static void ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip,
+				 struct ftrace_ops *op, struct pt_regs *regs);
+#else
+/* See comment below, where ftrace_ops_list_func is defined */
+static void ftrace_ops_no_ops(unsigned long ip, unsigned long parent_ip);
+#define ftrace_ops_list_func ((ftrace_func_t)ftrace_ops_no_ops)
+#endif
+
+/**
+ * ftrace_nr_registered_ops - return number of ops registered
+ *
+ * Returns the number of ftrace_ops registered and tracing functions
+ */
+int ftrace_nr_registered_ops(void)
+{
+	struct ftrace_ops *ops;
+	int cnt = 0;
+
+	mutex_lock(&ftrace_lock);
+
+	for (ops = ftrace_ops_list;
+	     ops != &ftrace_list_end; ops = ops->next)
+		cnt++;
+
+	mutex_unlock(&ftrace_lock);
+
+	return cnt;
+}
 
 /*
  * Traverse the ftrace_global_list, invoking all entries. The reason that we
@@ -112,29 +141,29 @@ ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip);
  *
  * Silly Alpha and silly pointer-speculation compiler optimizations!
  */
-static void ftrace_global_list_func(unsigned long ip,
-				    unsigned long parent_ip)
+static void
+ftrace_global_list_func(unsigned long ip, unsigned long parent_ip,
+			struct ftrace_ops *op, struct pt_regs *regs)
 {
-	struct ftrace_ops *op;
-
 	if (unlikely(trace_recursion_test(TRACE_GLOBAL_BIT)))
 		return;
 
 	trace_recursion_set(TRACE_GLOBAL_BIT);
 	op = rcu_dereference_raw(ftrace_global_list); /*see above*/
 	while (op != &ftrace_list_end) {
-		op->func(ip, parent_ip);
+		op->func(ip, parent_ip, op, regs);
 		op = rcu_dereference_raw(op->next); /*see above*/
 	};
 	trace_recursion_clear(TRACE_GLOBAL_BIT);
 }
 
-static void ftrace_pid_func(unsigned long ip, unsigned long parent_ip)
+static void ftrace_pid_func(unsigned long ip, unsigned long parent_ip,
+			    struct ftrace_ops *op, struct pt_regs *regs)
 {
 	if (!test_tsk_trace_trace(current))
 		return;
 
-	ftrace_pid_function(ip, parent_ip);
+	ftrace_pid_function(ip, parent_ip, op, regs);
 }
 
 static void set_ftrace_pid_function(ftrace_func_t func)
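
For orientation, every ftrace callback in this file now takes the same four arguments. A registration against the new prototype would look roughly like this; my_callback and my_ops are illustrative names, not part of the patch:

#include <linux/ftrace.h>

static void notrace my_callback(unsigned long ip, unsigned long parent_ip,
				struct ftrace_ops *op, struct pt_regs *regs)
{
	/*
	 * op identifies which registration fired, so one callback body
	 * can serve several ftrace_ops; regs is NULL unless the arch
	 * saved registers for this call (see the SAVE_REGS flags below).
	 */
}

static struct ftrace_ops my_ops = {
	.func	= my_callback,
	.flags	= FTRACE_OPS_FL_RECURSION_SAFE,
};

/* register_ftrace_function(&my_ops) then enables it as usual. */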
@@ -153,25 +182,9 @@ static void set_ftrace_pid_function(ftrace_func_t func)
 void clear_ftrace_function(void)
 {
 	ftrace_trace_function = ftrace_stub;
-	__ftrace_trace_function = ftrace_stub;
-	__ftrace_trace_function_delay = ftrace_stub;
 	ftrace_pid_function = ftrace_stub;
 }
 
-#ifndef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
-/*
- * For those archs that do not test ftrace_trace_stop in their
- * mcount call site, we need to do it from C.
- */
-static void ftrace_test_stop_func(unsigned long ip, unsigned long parent_ip)
-{
-	if (function_trace_stop)
-		return;
-
-	__ftrace_trace_function(ip, parent_ip);
-}
-#endif
-
 static void control_ops_disable_all(struct ftrace_ops *ops)
 {
 	int cpu;
@@ -230,28 +243,27 @@ static void update_ftrace_function(void)
 
 	/*
 	 * If we are at the end of the list and this ops is
-	 * not dynamic, then have the mcount trampoline call
-	 * the function directly
+	 * recursion safe and not dynamic and the arch supports passing ops,
+	 * then have the mcount trampoline call the function directly.
 	 */
 	if (ftrace_ops_list == &ftrace_list_end ||
 	    (ftrace_ops_list->next == &ftrace_list_end &&
-	     !(ftrace_ops_list->flags & FTRACE_OPS_FL_DYNAMIC)))
+	     !(ftrace_ops_list->flags & FTRACE_OPS_FL_DYNAMIC) &&
+	     (ftrace_ops_list->flags & FTRACE_OPS_FL_RECURSION_SAFE) &&
+	     !FTRACE_FORCE_LIST_FUNC)) {
+		/* Set the ftrace_ops that the arch callback uses */
+		if (ftrace_ops_list == &global_ops)
+			function_trace_op = ftrace_global_list;
+		else
+			function_trace_op = ftrace_ops_list;
 		func = ftrace_ops_list->func;
-	else
+	} else {
+		/* Just use the default ftrace_ops */
+		function_trace_op = &ftrace_list_end;
 		func = ftrace_ops_list_func;
+	}
 
-#ifndef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
 	ftrace_trace_function = func;
-#else
-#ifdef CONFIG_DYNAMIC_FTRACE
-	/* do not update till all functions have been modified */
-	__ftrace_trace_function_delay = func;
-#else
-	__ftrace_trace_function = func;
-#endif
-	ftrace_trace_function =
-		(func == ftrace_stub) ? func : ftrace_test_stop_func;
-#endif
 }
 
 static void add_ftrace_ops(struct ftrace_ops **list, struct ftrace_ops *ops)
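
The new FTRACE_FORCE_LIST_FUNC test above comes from include/linux/ftrace.h in this same series. The intent, sketched here rather than quoted verbatim: with dynamic ftrace on an arch that cannot pass the ops argument, a directly-called callback could not tell which ftrace_ops it serves, so the list function and its per-ops hash filtering must always be used.

/*
 * Sketch of the guard the direct-call condition relies on
 * (paraphrased from the ftrace.h side of this series).
 */
#if defined(CONFIG_DYNAMIC_FTRACE) && !ARCH_SUPPORTS_FTRACE_OPS
# define FTRACE_FORCE_LIST_FUNC 1	/* never call an ops directly */
#else
# define FTRACE_FORCE_LIST_FUNC 0
#endif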
@@ -325,6 +337,20 @@ static int __register_ftrace_function(struct ftrace_ops *ops)
 	if ((ops->flags & FL_GLOBAL_CONTROL_MASK) == FL_GLOBAL_CONTROL_MASK)
 		return -EINVAL;
 
+#ifndef ARCH_SUPPORTS_FTRACE_SAVE_REGS
+	/*
+	 * If the ftrace_ops specifies SAVE_REGS, then it only can be used
+	 * if the arch supports it, or SAVE_REGS_IF_SUPPORTED is also set.
+	 * Setting SAVE_REGS_IF_SUPPORTED makes SAVE_REGS irrelevant.
+	 */
+	if (ops->flags & FTRACE_OPS_FL_SAVE_REGS &&
+	    !(ops->flags & FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED))
+		return -EINVAL;
+
+	if (ops->flags & FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED)
+		ops->flags |= FTRACE_OPS_FL_SAVE_REGS;
+#endif
+
 	if (!core_kernel_data((unsigned long)ops))
 		ops->flags |= FTRACE_OPS_FL_DYNAMIC;
 
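
The practical rule for users following from this hunk: set SAVE_REGS only when the tracer is useless without registers, otherwise ask with SAVE_REGS_IF_SUPPORTED. A hedged sketch of the two registration styles (my_callback as in the earlier example):

/*
 * Hard requirement: on an arch without ARCH_SUPPORTS_FTRACE_SAVE_REGS
 * this registration is rejected with -EINVAL.
 */
static struct ftrace_ops regs_ops = {
	.func	= my_callback,
	.flags	= FTRACE_OPS_FL_RECURSION_SAFE |
		  FTRACE_OPS_FL_SAVE_REGS,
};

/*
 * Soft request: always registers, but the callback must cope with
 * regs being NULL (or partial) where full saving is unavailable.
 */
static struct ftrace_ops opportunistic_ops = {
	.func	= my_callback,
	.flags	= FTRACE_OPS_FL_RECURSION_SAFE |
		  FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED,
};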
@@ -773,7 +799,8 @@ ftrace_profile_alloc(struct ftrace_profile_stat *stat, unsigned long ip)
 }
 
 static void
-function_profile_call(unsigned long ip, unsigned long parent_ip)
+function_profile_call(unsigned long ip, unsigned long parent_ip,
+		      struct ftrace_ops *ops, struct pt_regs *regs)
 {
 	struct ftrace_profile_stat *stat;
 	struct ftrace_profile *rec;
@@ -803,7 +830,7 @@ function_profile_call(unsigned long ip, unsigned long parent_ip)
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
 static int profile_graph_entry(struct ftrace_graph_ent *trace)
 {
-	function_profile_call(trace->func, 0);
+	function_profile_call(trace->func, 0, NULL, NULL);
 	return 1;
 }
 
@@ -863,6 +890,7 @@ static void unregister_ftrace_profiler(void)
 #else
 static struct ftrace_ops ftrace_profile_ops __read_mostly = {
 	.func = function_profile_call,
+	.flags = FTRACE_OPS_FL_RECURSION_SAFE,
 };
 
 static int register_ftrace_profiler(void)
@@ -1045,6 +1073,7 @@ static struct ftrace_ops global_ops = {
 	.func = ftrace_stub,
 	.notrace_hash = EMPTY_HASH,
 	.filter_hash = EMPTY_HASH,
+	.flags = FTRACE_OPS_FL_RECURSION_SAFE,
 };
 
 static DEFINE_MUTEX(ftrace_regex_lock);
@@ -1525,6 +1554,12 @@ static void __ftrace_hash_rec_update(struct ftrace_ops *ops,
 			rec->flags++;
 			if (FTRACE_WARN_ON((rec->flags & ~FTRACE_FL_MASK) == FTRACE_REF_MAX))
 				return;
+			/*
+			 * If any ops wants regs saved for this function
+			 * then all ops will get saved regs.
+			 */
+			if (ops->flags & FTRACE_OPS_FL_SAVE_REGS)
+				rec->flags |= FTRACE_FL_REGS;
 		} else {
 			if (FTRACE_WARN_ON((rec->flags & ~FTRACE_FL_MASK) == 0))
 				return;
@@ -1616,18 +1651,59 @@ static int ftrace_check_record(struct dyn_ftrace *rec, int enable, int update)
 	if (enable && (rec->flags & ~FTRACE_FL_MASK))
 		flag = FTRACE_FL_ENABLED;
 
+	/*
+	 * If enabling and the REGS flag does not match the REGS_EN, then
+	 * do not ignore this record. Set flags to fail the compare against
+	 * ENABLED.
+	 */
+	if (flag &&
+	    (!(rec->flags & FTRACE_FL_REGS) != !(rec->flags & FTRACE_FL_REGS_EN)))
+		flag |= FTRACE_FL_REGS;
+
 	/* If the state of this record hasn't changed, then do nothing */
 	if ((rec->flags & FTRACE_FL_ENABLED) == flag)
 		return FTRACE_UPDATE_IGNORE;
 
 	if (flag) {
-		if (update)
+		/* Save off if rec is being enabled (for return value) */
+		flag ^= rec->flags & FTRACE_FL_ENABLED;
+
+		if (update) {
 			rec->flags |= FTRACE_FL_ENABLED;
-		return FTRACE_UPDATE_MAKE_CALL;
+			if (flag & FTRACE_FL_REGS) {
+				if (rec->flags & FTRACE_FL_REGS)
+					rec->flags |= FTRACE_FL_REGS_EN;
+				else
+					rec->flags &= ~FTRACE_FL_REGS_EN;
+			}
+		}
+
+		/*
+		 * If this record is being updated from a nop, then
+		 *   return UPDATE_MAKE_CALL.
+		 * Otherwise, if the EN flag is set, then return
+		 *   UPDATE_MODIFY_CALL_REGS to tell the caller to convert
+		 *   from the non-save regs, to a save regs function.
+		 * Otherwise,
+		 *   return UPDATE_MODIFY_CALL to tell the caller to convert
+		 *   from the save regs, to a non-save regs function.
+		 */
+		if (flag & FTRACE_FL_ENABLED)
+			return FTRACE_UPDATE_MAKE_CALL;
+		else if (rec->flags & FTRACE_FL_REGS_EN)
+			return FTRACE_UPDATE_MODIFY_CALL_REGS;
+		else
+			return FTRACE_UPDATE_MODIFY_CALL;
 	}
 
-	if (update)
-		rec->flags &= ~FTRACE_FL_ENABLED;
+	if (update) {
+		/* If there's no more users, clear all flags */
+		if (!(rec->flags & ~FTRACE_FL_MASK))
+			rec->flags = 0;
+		else
+			/* Just disable the record (keep REGS state) */
+			rec->flags &= ~FTRACE_FL_ENABLED;
+	}
 
 	return FTRACE_UPDATE_MAKE_NOP;
 }
@@ -1662,13 +1738,17 @@ int ftrace_test_record(struct dyn_ftrace *rec, int enable)
 static int
 __ftrace_replace_code(struct dyn_ftrace *rec, int enable)
 {
+	unsigned long ftrace_old_addr;
 	unsigned long ftrace_addr;
 	int ret;
 
-	ftrace_addr = (unsigned long)FTRACE_ADDR;
-
 	ret = ftrace_update_record(rec, enable);
 
+	if (rec->flags & FTRACE_FL_REGS)
+		ftrace_addr = (unsigned long)FTRACE_REGS_ADDR;
+	else
+		ftrace_addr = (unsigned long)FTRACE_ADDR;
+
 	switch (ret) {
 	case FTRACE_UPDATE_IGNORE:
 		return 0;
@@ -1678,6 +1758,15 @@ __ftrace_replace_code(struct dyn_ftrace *rec, int enable)
 
 	case FTRACE_UPDATE_MAKE_NOP:
 		return ftrace_make_nop(NULL, rec, ftrace_addr);
+
+	case FTRACE_UPDATE_MODIFY_CALL_REGS:
+	case FTRACE_UPDATE_MODIFY_CALL:
+		if (rec->flags & FTRACE_FL_REGS)
+			ftrace_old_addr = (unsigned long)FTRACE_ADDR;
+		else
+			ftrace_old_addr = (unsigned long)FTRACE_REGS_ADDR;
+
+		return ftrace_modify_call(rec, ftrace_old_addr, ftrace_addr);
 	}
 
 	return -1; /* unknown ftrace bug */
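
A callsite can now be live-patched not just between nop and call but between two different trampolines. A conceptual summary of the non-ignore outcomes (comment sketch, not kernel code; my understanding is that on archs without save-regs support FTRACE_REGS_ADDR simply falls back to FTRACE_ADDR, and on x86 the two trampolines are ftrace_caller and ftrace_regs_caller):

/*
 *   MAKE_CALL              nop  -> call (one of the two trampolines)
 *   MAKE_NOP               call -> nop
 *   MODIFY_CALL_REGS /
 *   MODIFY_CALL            call -> call, swapping the regs-saving and
 *                          plain trampolines for one another via
 *                          ftrace_modify_call(rec, old_addr, new_addr),
 *                          the arch hook that rewrites the target.
 */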
@@ -1882,16 +1971,6 @@ static void ftrace_run_update_code(int command)
 	 */
 	arch_ftrace_update_code(command);
 
-#ifndef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
-	/*
-	 * For archs that call ftrace_test_stop_func(), we must
-	 * wait till after we update all the function callers
-	 * before we update the callback. This keeps different
-	 * ops that record different functions from corrupting
-	 * each other.
-	 */
-	__ftrace_trace_function = __ftrace_trace_function_delay;
-#endif
 	function_trace_stop--;
 
 	ret = ftrace_arch_code_modify_post_process();
@@ -2441,8 +2520,9 @@ static int t_show(struct seq_file *m, void *v)
 
 	seq_printf(m, "%ps", (void *)rec->ip);
 	if (iter->flags & FTRACE_ITER_ENABLED)
-		seq_printf(m, " (%ld)",
-			   rec->flags & ~FTRACE_FL_MASK);
+		seq_printf(m, " (%ld)%s",
+			   rec->flags & ~FTRACE_FL_MASK,
+			   rec->flags & FTRACE_FL_REGS ? " R" : "");
 	seq_printf(m, "\n");
 
 	return 0;
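
The extra %s shows up in the enabled_functions file as an R marker on records routed through the regs-saving trampoline. Illustrative output only; the function names and counts here are invented:

# cat /sys/kernel/debug/tracing/enabled_functions
sys_nanosleep (1)
schedule (2) R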
@@ -2790,8 +2870,8 @@ static int __init ftrace_mod_cmd_init(void)
 }
 device_initcall(ftrace_mod_cmd_init);
 
-static void
-function_trace_probe_call(unsigned long ip, unsigned long parent_ip)
+static void function_trace_probe_call(unsigned long ip, unsigned long parent_ip,
+				      struct ftrace_ops *op, struct pt_regs *pt_regs)
 {
 	struct ftrace_func_probe *entry;
 	struct hlist_head *hhd;
@@ -3162,8 +3242,27 @@ ftrace_notrace_write(struct file *file, const char __user *ubuf,
 }
 
 static int
-ftrace_set_regex(struct ftrace_ops *ops, unsigned char *buf, int len,
-		 int reset, int enable)
+ftrace_match_addr(struct ftrace_hash *hash, unsigned long ip, int remove)
+{
+	struct ftrace_func_entry *entry;
+
+	if (!ftrace_location(ip))
+		return -EINVAL;
+
+	if (remove) {
+		entry = ftrace_lookup_ip(hash, ip);
+		if (!entry)
+			return -ENOENT;
+		free_hash_entry(hash, entry);
+		return 0;
+	}
+
+	return add_hash_entry(hash, ip);
+}
+
+static int
+ftrace_set_hash(struct ftrace_ops *ops, unsigned char *buf, int len,
+		unsigned long ip, int remove, int reset, int enable)
 {
 	struct ftrace_hash **orig_hash;
 	struct ftrace_hash *hash;
@@ -3192,6 +3291,11 @@ ftrace_set_regex(struct ftrace_ops *ops, unsigned char *buf, int len,
 		ret = -EINVAL;
 		goto out_regex_unlock;
 	}
+	if (ip) {
+		ret = ftrace_match_addr(hash, ip, remove);
+		if (ret < 0)
+			goto out_regex_unlock;
+	}
 
 	mutex_lock(&ftrace_lock);
 	ret = ftrace_hash_move(ops, enable, orig_hash, hash);
@@ -3208,6 +3312,37 @@ ftrace_set_regex(struct ftrace_ops *ops, unsigned char *buf, int len,
 	return ret;
 }
 
+static int
+ftrace_set_addr(struct ftrace_ops *ops, unsigned long ip, int remove,
+		int reset, int enable)
+{
+	return ftrace_set_hash(ops, 0, 0, ip, remove, reset, enable);
+}
+
+/**
+ * ftrace_set_filter_ip - set a function to filter on in ftrace by address
+ * @ops - the ops to set the filter with
+ * @ip - the address to add to or remove from the filter.
+ * @remove - non zero to remove the ip from the filter
+ * @reset - non zero to reset all filters before applying this filter.
+ *
+ * Filters denote which functions should be enabled when tracing is enabled
+ * If @ip is NULL, it fails to update filter.
+ */
+int ftrace_set_filter_ip(struct ftrace_ops *ops, unsigned long ip,
+			 int remove, int reset)
+{
+	return ftrace_set_addr(ops, ip, remove, reset, 1);
+}
+EXPORT_SYMBOL_GPL(ftrace_set_filter_ip);
+
+static int
+ftrace_set_regex(struct ftrace_ops *ops, unsigned char *buf, int len,
+		 int reset, int enable)
+{
+	return ftrace_set_hash(ops, buf, len, 0, 0, reset, enable);
+}
+
 /**
  * ftrace_set_filter - set a function to filter on in ftrace
  * @ops - the ops to set the filter with
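
ftrace_set_filter_ip is the first filter interface keyed by a resolved address rather than a name glob, the kind of entry point that address-based users such as kprobes want. A usage sketch with illustrative names and no error handling:

/* Filter my_ops down to a single function, by address. */
unsigned long addr = kallsyms_lookup_name("do_fork");
int ret;

ret = ftrace_set_filter_ip(&my_ops, addr, 0 /* add */, 1 /* reset */);
if (!ret)
	ret = register_ftrace_function(&my_ops);

/* ... trace for a while ... */

unregister_ftrace_function(&my_ops);
ftrace_set_filter_ip(&my_ops, addr, 1 /* remove */, 0);

Note the -EINVAL path in ftrace_match_addr above: ftrace_location() must recognize the address as an ftrace callsite before it can be filtered.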
@@ -3912,6 +4047,7 @@ void __init ftrace_init(void)
 
 static struct ftrace_ops global_ops = {
 	.func = ftrace_stub,
+	.flags = FTRACE_OPS_FL_RECURSION_SAFE,
 };
 
 static int __init ftrace_nodyn_init(void)
@@ -3942,10 +4078,9 @@ ftrace_ops_test(struct ftrace_ops *ops, unsigned long ip)
 #endif /* CONFIG_DYNAMIC_FTRACE */
 
 static void
-ftrace_ops_control_func(unsigned long ip, unsigned long parent_ip)
+ftrace_ops_control_func(unsigned long ip, unsigned long parent_ip,
+			struct ftrace_ops *op, struct pt_regs *regs)
 {
-	struct ftrace_ops *op;
-
 	if (unlikely(trace_recursion_test(TRACE_CONTROL_BIT)))
 		return;
 
@@ -3969,13 +4104,18 @@ ftrace_ops_control_func(unsigned long ip, unsigned long parent_ip)
 
 static struct ftrace_ops control_ops = {
 	.func = ftrace_ops_control_func,
+	.flags = FTRACE_OPS_FL_RECURSION_SAFE,
 };
 
-static void
-ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip)
+static inline void
+__ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip,
+		       struct ftrace_ops *ignored, struct pt_regs *regs)
 {
 	struct ftrace_ops *op;
 
+	if (function_trace_stop)
+		return;
+
 	if (unlikely(trace_recursion_test(TRACE_INTERNAL_BIT)))
 		return;
 
@@ -3988,13 +4128,39 @@ ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip)
 	op = rcu_dereference_raw(ftrace_ops_list);
 	while (op != &ftrace_list_end) {
 		if (ftrace_ops_test(op, ip))
-			op->func(ip, parent_ip);
+			op->func(ip, parent_ip, op, regs);
 		op = rcu_dereference_raw(op->next);
 	};
 	preempt_enable_notrace();
 	trace_recursion_clear(TRACE_INTERNAL_BIT);
 }
 
+/*
+ * Some archs only support passing ip and parent_ip. Even though
+ * the list function ignores the op parameter, we do not want any
+ * C side effects, where a function is called without the caller
+ * sending a third parameter.
+ * Archs are to support both the regs and ftrace_ops at the same time.
+ * If they support ftrace_ops, it is assumed they support regs.
+ * If call backs want to use regs, they must either check for regs
+ * being NULL, or ARCH_SUPPORTS_FTRACE_SAVE_REGS.
+ * Note, ARCH_SUPPORTS_FTRACE_SAVE_REGS expects a full regs to be saved.
+ * An architecture can pass partial regs with ftrace_ops and still
+ * set the ARCH_SUPPORTS_FTRACE_OPS.
+ */
+#if ARCH_SUPPORTS_FTRACE_OPS
+static void ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip,
+				 struct ftrace_ops *op, struct pt_regs *regs)
+{
+	__ftrace_ops_list_func(ip, parent_ip, NULL, regs);
+}
+#else
+static void ftrace_ops_no_ops(unsigned long ip, unsigned long parent_ip)
+{
+	__ftrace_ops_list_func(ip, parent_ip, NULL, NULL);
+}
+#endif
+
 static void clear_ftrace_swapper(void)
 {
 	struct task_struct *p;
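
Closing the loop on the comment above: a callback that actually consumes regs must either depend on ARCH_SUPPORTS_FTRACE_SAVE_REGS at build time or check at runtime. A final illustrative sketch, with made-up names:

static void notrace my_regs_callback(unsigned long ip, unsigned long parent_ip,
				     struct ftrace_ops *op, struct pt_regs *regs)
{
	/* Archs that pass ops without saving registers hand us NULL. */
	if (!regs)
		return;

	/*
	 * When FTRACE_OPS_FL_SAVE_REGS is honored this is a full pt_regs;
	 * regs-based users can inspect (or, as kprobes-style code does,
	 * even rewrite) instruction_pointer(regs).
	 */
	pr_debug("traced %pS (pc=%lx)\n", (void *)ip, instruction_pointer(regs));
}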