Diffstat (limited to 'kernel/trace/ftrace.c')
-rw-r--r--	kernel/trace/ftrace.c	160
1 file changed, 98 insertions, 62 deletions
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index 3ffe4c5ad3f3..ab25b88aae56 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -111,6 +111,26 @@ static void ftrace_ops_no_ops(unsigned long ip, unsigned long parent_ip);
 #define ftrace_ops_list_func ((ftrace_func_t)ftrace_ops_no_ops)
 #endif
 
+/*
+ * Traverse the ftrace_global_list, invoking all entries. The reason that we
+ * can use rcu_dereference_raw() is that elements removed from this list
+ * are simply leaked, so there is no need to interact with a grace-period
+ * mechanism. The rcu_dereference_raw() calls are needed to handle
+ * concurrent insertions into the ftrace_global_list.
+ *
+ * Silly Alpha and silly pointer-speculation compiler optimizations!
+ */
+#define do_for_each_ftrace_op(op, list)			\
+	op = rcu_dereference_raw(list);			\
+	do
+
+/*
+ * Optimized for just a single item in the list (as that is the normal case).
+ */
+#define while_for_each_ftrace_op(op)				\
+	while (likely(op = rcu_dereference_raw((op)->next)) &&	\
+	       unlikely((op) != &ftrace_list_end))
+
 /**
  * ftrace_nr_registered_ops - return number of ops registered
  *
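
The macro pair above is meant to read like an open-coded do/while loop around the RCU list walk. As a rough illustration (my_ops_list is a hypothetical list head, not something added by this patch), a call site such as:

	do_for_each_ftrace_op(op, my_ops_list) {
		op->func(ip, parent_ip, op, regs);
	} while_for_each_ftrace_op(op);

expands to approximately:

	op = rcu_dereference_raw(my_ops_list);
	do {
		op->func(ip, parent_ip, op, regs);
	} while (likely(op = rcu_dereference_raw((op)->next)) &&
		 unlikely((op) != &ftrace_list_end));

The body is guaranteed to run at least once; that is harmless here because an empty list is represented by the ftrace_list_end sentinel, whose callback is a stub.
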
@@ -132,29 +152,21 @@ int ftrace_nr_registered_ops(void)
 	return cnt;
 }
 
-/*
- * Traverse the ftrace_global_list, invoking all entries. The reason that we
- * can use rcu_dereference_raw() is that elements removed from this list
- * are simply leaked, so there is no need to interact with a grace-period
- * mechanism. The rcu_dereference_raw() calls are needed to handle
- * concurrent insertions into the ftrace_global_list.
- *
- * Silly Alpha and silly pointer-speculation compiler optimizations!
- */
 static void
 ftrace_global_list_func(unsigned long ip, unsigned long parent_ip,
 			struct ftrace_ops *op, struct pt_regs *regs)
 {
-	if (unlikely(trace_recursion_test(TRACE_GLOBAL_BIT)))
+	int bit;
+
+	bit = trace_test_and_set_recursion(TRACE_GLOBAL_START, TRACE_GLOBAL_MAX);
+	if (bit < 0)
 		return;
 
-	trace_recursion_set(TRACE_GLOBAL_BIT);
-	op = rcu_dereference_raw(ftrace_global_list); /*see above*/
-	while (op != &ftrace_list_end) {
+	do_for_each_ftrace_op(op, ftrace_global_list) {
 		op->func(ip, parent_ip, op, regs);
-		op = rcu_dereference_raw(op->next); /*see above*/
-	};
-	trace_recursion_clear(TRACE_GLOBAL_BIT);
+	} while_for_each_ftrace_op(op);
+
+	trace_clear_recursion(bit);
 }
 
 static void ftrace_pid_func(unsigned long ip, unsigned long parent_ip,
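
trace_test_and_set_recursion() and trace_clear_recursion() come from the companion change in kernel/trace/trace.h: roughly, the first claims the recursion bit for the current trace context (normal, softirq, irq or NMI) within the given range and returns its index, or a negative value if that context is already inside the tracer; the second releases it. A hedged sketch of the caller-side pattern, with my_callback_list standing in for any ftrace_ops list:

	static void my_list_func(unsigned long ip, unsigned long parent_ip,
				 struct ftrace_ops *op, struct pt_regs *regs)
	{
		int bit;

		/* Refuse to re-enter ourselves from within a callback. */
		bit = trace_test_and_set_recursion(TRACE_GLOBAL_START, TRACE_GLOBAL_MAX);
		if (bit < 0)
			return;

		do_for_each_ftrace_op(op, my_callback_list) {
			op->func(ip, parent_ip, op, regs);
		} while_for_each_ftrace_op(op);

		trace_clear_recursion(bit);
	}
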
@@ -221,10 +233,24 @@ static void update_global_ops(void)
 	 * registered callers.
 	 */
 	if (ftrace_global_list == &ftrace_list_end ||
-	    ftrace_global_list->next == &ftrace_list_end)
+	    ftrace_global_list->next == &ftrace_list_end) {
 		func = ftrace_global_list->func;
-	else
+		/*
+		 * As we are calling the function directly.
+		 * If it does not have recursion protection,
+		 * the function_trace_op needs to be updated
+		 * accordingly.
+		 */
+		if (ftrace_global_list->flags & FTRACE_OPS_FL_RECURSION_SAFE)
+			global_ops.flags |= FTRACE_OPS_FL_RECURSION_SAFE;
+		else
+			global_ops.flags &= ~FTRACE_OPS_FL_RECURSION_SAFE;
+	} else {
 		func = ftrace_global_list_func;
+		/* The list has its own recursion protection. */
+		global_ops.flags |= FTRACE_OPS_FL_RECURSION_SAFE;
+	}
+
 
 	/* If we filter on pids, update to use the pid function */
 	if (!list_empty(&ftrace_pids)) {
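
The FTRACE_OPS_FL_RECURSION_SAFE handling only matters when a single global ops is called directly: a callback that guarantees its own recursion protection can advertise the flag, and update_global_ops() now propagates it, while the list function is always marked safe because it guards itself (see above). An illustrative registration, not part of this patch:

	static void my_trace_callback(unsigned long ip, unsigned long parent_ip,
				      struct ftrace_ops *op, struct pt_regs *regs)
	{
		/* Must not call traced code here, or must guard against it. */
	}

	static struct ftrace_ops my_ops __read_mostly = {
		.func	= my_trace_callback,
		.flags	= FTRACE_OPS_FL_RECURSION_SAFE,
	};

	/* ... later: register_ftrace_function(&my_ops); */
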
@@ -337,7 +363,7 @@ static int __register_ftrace_function(struct ftrace_ops *ops)
 	if ((ops->flags & FL_GLOBAL_CONTROL_MASK) == FL_GLOBAL_CONTROL_MASK)
 		return -EINVAL;
 
-#ifndef ARCH_SUPPORTS_FTRACE_SAVE_REGS
+#ifndef CONFIG_DYNAMIC_FTRACE_WITH_REGS
 	/*
 	 * If the ftrace_ops specifies SAVE_REGS, then it only can be used
 	 * if the arch supports it, or SAVE_REGS_IF_SUPPORTED is also set.
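
The arch #define becomes a Kconfig symbol, but the registration policy is unchanged: without CONFIG_DYNAMIC_FTRACE_WITH_REGS, an ops that demands FTRACE_OPS_FL_SAVE_REGS is rejected with -EINVAL unless it also sets FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED, in which case the request is quietly dropped. A sketch of the two registration styles (my_regs_callback is a placeholder):

	static void my_regs_callback(unsigned long ip, unsigned long parent_ip,
				     struct ftrace_ops *op, struct pt_regs *regs)
	{
	}

	/* Hard requirement: registration fails if regs cannot be provided. */
	static struct ftrace_ops my_strict_ops __read_mostly = {
		.func	= my_regs_callback,
		.flags	= FTRACE_OPS_FL_SAVE_REGS,
	};

	/* Opportunistic: always registers; regs may then be NULL in the callback. */
	static struct ftrace_ops my_relaxed_ops __read_mostly = {
		.func	= my_regs_callback,
		.flags	= FTRACE_OPS_FL_SAVE_REGS |
			  FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED,
	};
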
@@ -736,7 +762,6 @@ ftrace_find_profiled_func(struct ftrace_profile_stat *stat, unsigned long ip)
 {
 	struct ftrace_profile *rec;
 	struct hlist_head *hhd;
-	struct hlist_node *n;
 	unsigned long key;
 
 	key = hash_long(ip, ftrace_profile_bits);
@@ -745,7 +770,7 @@ ftrace_find_profiled_func(struct ftrace_profile_stat *stat, unsigned long ip)
 	if (hlist_empty(hhd))
 		return NULL;
 
-	hlist_for_each_entry_rcu(rec, n, hhd, node) {
+	hlist_for_each_entry_rcu(rec, hhd, node) {
 		if (rec->ip == ip)
 			return rec;
 	}
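
This and the following hlist hunks all track the same tree-wide API change: the hlist_for_each_entry*() iterators now declare their struct hlist_node cursor internally, so callers drop the extra argument and the now-unused local. A self-contained sketch of the new-style lookup (my_entry/my_find are illustrative names only):

	#include <linux/rculist.h>

	struct my_entry {
		unsigned long		ip;
		struct hlist_node	node;
	};

	static struct my_entry *my_find(struct hlist_head *hhd, unsigned long ip)
	{
		struct my_entry *e;

		/* No explicit hlist_node cursor any more. */
		hlist_for_each_entry_rcu(e, hhd, node) {
			if (e->ip == ip)
				return e;
		}
		return NULL;
	}
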
@@ -1107,7 +1132,6 @@ ftrace_lookup_ip(struct ftrace_hash *hash, unsigned long ip)
 	unsigned long key;
 	struct ftrace_func_entry *entry;
 	struct hlist_head *hhd;
-	struct hlist_node *n;
 
 	if (ftrace_hash_empty(hash))
 		return NULL;
@@ -1119,7 +1143,7 @@ ftrace_lookup_ip(struct ftrace_hash *hash, unsigned long ip)
 
 	hhd = &hash->buckets[key];
 
-	hlist_for_each_entry_rcu(entry, n, hhd, hlist) {
+	hlist_for_each_entry_rcu(entry, hhd, hlist) {
 		if (entry->ip == ip)
 			return entry;
 	}
@@ -1176,7 +1200,7 @@ remove_hash_entry(struct ftrace_hash *hash,
 static void ftrace_hash_clear(struct ftrace_hash *hash)
 {
 	struct hlist_head *hhd;
-	struct hlist_node *tp, *tn;
+	struct hlist_node *tn;
 	struct ftrace_func_entry *entry;
 	int size = 1 << hash->size_bits;
 	int i;
@@ -1186,7 +1210,7 @@ static void ftrace_hash_clear(struct ftrace_hash *hash)
 
 	for (i = 0; i < size; i++) {
 		hhd = &hash->buckets[i];
-		hlist_for_each_entry_safe(entry, tp, tn, hhd, hlist)
+		hlist_for_each_entry_safe(entry, tn, hhd, hlist)
 			free_hash_entry(hash, entry);
 	}
 	FTRACE_WARN_ON(hash->count);
@@ -1249,7 +1273,6 @@ alloc_and_copy_ftrace_hash(int size_bits, struct ftrace_hash *hash)
 {
 	struct ftrace_func_entry *entry;
 	struct ftrace_hash *new_hash;
-	struct hlist_node *tp;
 	int size;
 	int ret;
 	int i;
@@ -1264,7 +1287,7 @@ alloc_and_copy_ftrace_hash(int size_bits, struct ftrace_hash *hash)
 
 	size = 1 << hash->size_bits;
 	for (i = 0; i < size; i++) {
-		hlist_for_each_entry(entry, tp, &hash->buckets[i], hlist) {
+		hlist_for_each_entry(entry, &hash->buckets[i], hlist) {
 			ret = add_hash_entry(new_hash, entry->ip);
 			if (ret < 0)
 				goto free_hash;
@@ -1290,7 +1313,7 @@ ftrace_hash_move(struct ftrace_ops *ops, int enable,
 		 struct ftrace_hash **dst, struct ftrace_hash *src)
 {
 	struct ftrace_func_entry *entry;
-	struct hlist_node *tp, *tn;
+	struct hlist_node *tn;
 	struct hlist_head *hhd;
 	struct ftrace_hash *old_hash;
 	struct ftrace_hash *new_hash;
@@ -1336,7 +1359,7 @@ ftrace_hash_move(struct ftrace_ops *ops, int enable,
 	size = 1 << src->size_bits;
 	for (i = 0; i < size; i++) {
 		hhd = &src->buckets[i];
-		hlist_for_each_entry_safe(entry, tp, tn, hhd, hlist) {
+		hlist_for_each_entry_safe(entry, tn, hhd, hlist) {
 			if (bits > 0)
 				key = hash_long(entry->ip, bits);
 			else
@@ -2875,7 +2898,6 @@ static void function_trace_probe_call(unsigned long ip, unsigned long parent_ip,
 {
 	struct ftrace_func_probe *entry;
 	struct hlist_head *hhd;
-	struct hlist_node *n;
 	unsigned long key;
 
 	key = hash_long(ip, FTRACE_HASH_BITS);
@@ -2891,7 +2913,7 @@ static void function_trace_probe_call(unsigned long ip, unsigned long parent_ip,
 	 * on the hash. rcu_read_lock is too dangerous here.
 	 */
 	preempt_disable_notrace();
-	hlist_for_each_entry_rcu(entry, n, hhd, node) {
+	hlist_for_each_entry_rcu(entry, hhd, node) {
 		if (entry->ip == ip)
 			entry->ops->func(ip, parent_ip, &entry->data);
 	}
@@ -3042,7 +3064,7 @@ __unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
 				  void *data, int flags)
 {
 	struct ftrace_func_probe *entry;
-	struct hlist_node *n, *tmp;
+	struct hlist_node *tmp;
 	char str[KSYM_SYMBOL_LEN];
 	int type = MATCH_FULL;
 	int i, len = 0;
@@ -3065,7 +3087,7 @@ __unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
 	for (i = 0; i < FTRACE_FUNC_HASHSIZE; i++) {
 		struct hlist_head *hhd = &ftrace_func_hash[i];
 
-		hlist_for_each_entry_safe(entry, n, tmp, hhd, node) {
+		hlist_for_each_entry_safe(entry, tmp, hhd, node) {
 
 			/* break up if statements for readability */
 			if ((flags & PROBE_TEST_FUNC) && entry->ops != ops)
@@ -3970,35 +3992,49 @@ static void ftrace_init_module(struct module *mod,
 	ftrace_process_locs(mod, start, end);
 }
 
-static int ftrace_module_notify(struct notifier_block *self,
+static int ftrace_module_notify_enter(struct notifier_block *self,
 				unsigned long val, void *data)
 {
 	struct module *mod = data;
 
-	switch (val) {
-	case MODULE_STATE_COMING:
+	if (val == MODULE_STATE_COMING)
 		ftrace_init_module(mod, mod->ftrace_callsites,
 				   mod->ftrace_callsites +
 				   mod->num_ftrace_callsites);
-		break;
-	case MODULE_STATE_GOING:
+	return 0;
+}
+
+static int ftrace_module_notify_exit(struct notifier_block *self,
+				     unsigned long val, void *data)
+{
+	struct module *mod = data;
+
+	if (val == MODULE_STATE_GOING)
 		ftrace_release_mod(mod);
-		break;
-	}
 
 	return 0;
 }
 #else
-static int ftrace_module_notify(struct notifier_block *self,
+static int ftrace_module_notify_enter(struct notifier_block *self,
 				unsigned long val, void *data)
+{
+	return 0;
+}
+static int ftrace_module_notify_exit(struct notifier_block *self,
+				     unsigned long val, void *data)
 {
 	return 0;
 }
 #endif /* CONFIG_MODULES */
 
-struct notifier_block ftrace_module_nb = {
-	.notifier_call = ftrace_module_notify,
-	.priority = 0,
+struct notifier_block ftrace_module_enter_nb = {
+	.notifier_call = ftrace_module_notify_enter,
+	.priority = INT_MAX,	/* Run before anything that can use kprobes */
+};
+
+struct notifier_block ftrace_module_exit_nb = {
+	.notifier_call = ftrace_module_notify_exit,
+	.priority = INT_MIN,	/* Run after anything that can remove kprobes */
 };
 
 extern unsigned long __start_mcount_loc[];
@@ -4032,9 +4068,13 @@ void __init ftrace_init(void)
 				  __start_mcount_loc,
 				  __stop_mcount_loc);
 
-	ret = register_module_notifier(&ftrace_module_nb);
+	ret = register_module_notifier(&ftrace_module_enter_nb);
 	if (ret)
-		pr_warning("Failed to register trace ftrace module notifier\n");
+		pr_warning("Failed to register trace ftrace module enter notifier\n");
+
+	ret = register_module_notifier(&ftrace_module_exit_nb);
+	if (ret)
+		pr_warning("Failed to register trace ftrace module exit notifier\n");
 
 	set_ftrace_early_filters();
 
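
Splitting the single notifier in two lets the priorities differ: the enter notifier runs at INT_MAX so ftrace has processed a loaded module's call sites before anything that may use kprobes on it, and the exit notifier runs at INT_MIN so ftrace releases its records only after such users are gone. A generic sketch of the pattern (my_module_notify is illustrative, not part of this patch):

	static int my_module_notify(struct notifier_block *self,
				    unsigned long val, void *data)
	{
		struct module *mod = data;

		if (val == MODULE_STATE_COMING)
			pr_info("module %s is coming\n", mod->name);
		return 0;
	}

	static struct notifier_block my_module_nb = {
		.notifier_call	= my_module_notify,
		.priority	= INT_MAX,	/* run before lower-priority notifiers */
	};

	/* At init time: register_module_notifier(&my_module_nb); */
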
@@ -4090,14 +4130,11 @@ ftrace_ops_control_func(unsigned long ip, unsigned long parent_ip,
 	 */
 	preempt_disable_notrace();
 	trace_recursion_set(TRACE_CONTROL_BIT);
-	op = rcu_dereference_raw(ftrace_control_list);
-	while (op != &ftrace_list_end) {
+	do_for_each_ftrace_op(op, ftrace_control_list) {
 		if (!ftrace_function_local_disabled(op) &&
 		    ftrace_ops_test(op, ip))
 			op->func(ip, parent_ip, op, regs);
-
-		op = rcu_dereference_raw(op->next);
-	};
+	} while_for_each_ftrace_op(op);
 	trace_recursion_clear(TRACE_CONTROL_BIT);
 	preempt_enable_notrace();
 }
@@ -4112,27 +4149,26 @@ __ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip,
 		       struct ftrace_ops *ignored, struct pt_regs *regs)
 {
 	struct ftrace_ops *op;
+	int bit;
 
 	if (function_trace_stop)
 		return;
 
-	if (unlikely(trace_recursion_test(TRACE_INTERNAL_BIT)))
+	bit = trace_test_and_set_recursion(TRACE_LIST_START, TRACE_LIST_MAX);
+	if (bit < 0)
 		return;
 
-	trace_recursion_set(TRACE_INTERNAL_BIT);
 	/*
 	 * Some of the ops may be dynamically allocated,
 	 * they must be freed after a synchronize_sched().
 	 */
 	preempt_disable_notrace();
-	op = rcu_dereference_raw(ftrace_ops_list);
-	while (op != &ftrace_list_end) {
+	do_for_each_ftrace_op(op, ftrace_ops_list) {
 		if (ftrace_ops_test(op, ip))
 			op->func(ip, parent_ip, op, regs);
-		op = rcu_dereference_raw(op->next);
-	};
+	} while_for_each_ftrace_op(op);
 	preempt_enable_notrace();
-	trace_recursion_clear(TRACE_INTERNAL_BIT);
+	trace_clear_recursion(bit);
 }
 
 /*
@@ -4143,8 +4179,8 @@ __ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip,
  * Archs are to support both the regs and ftrace_ops at the same time.
  * If they support ftrace_ops, it is assumed they support regs.
  * If call backs want to use regs, they must either check for regs
- * being NULL, or ARCH_SUPPORTS_FTRACE_SAVE_REGS.
- * Note, ARCH_SUPPORT_SAVE_REGS expects a full regs to be saved.
+ * being NULL, or CONFIG_DYNAMIC_FTRACE_WITH_REGS.
+ * Note, CONFIG_DYNAMIC_FTRACE_WITH_REGS expects a full regs to be saved.
  * An architecture can pass partial regs with ftrace_ops and still
  * set the ARCH_SUPPORT_FTARCE_OPS.
  */
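
On the callback side, the comment above boils down to a NULL check. A hedged sketch (last_traced_pc is just an illustrative variable, not part of this patch):

	static unsigned long last_traced_pc;

	static void my_regs_callback(unsigned long ip, unsigned long parent_ip,
				     struct ftrace_ops *op, struct pt_regs *regs)
	{
		/* regs may be NULL unless full register saving was requested
		 * and the architecture can provide it.
		 */
		if (regs)
			last_traced_pc = instruction_pointer(regs);
	}
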