author     Ingo Molnar <mingo@elte.hu>   2011-05-19 13:48:03 -0400
committer  Ingo Molnar <mingo@elte.hu>   2011-05-19 13:48:03 -0400
commit     29510ec3b626c86de9707bb8904ff940d430289b (patch)
tree       4e1f579058302cbe7274435a72c64ee54012c192
parent     398995ce7980b03b5803f8f31073b45d87746bc1 (diff)
parent     95950c2ecb31314ef827428e43ff771cf3b037e5 (diff)
Merge branch 'tip/perf/core' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-2.6-trace into perf/core
-rw-r--r--   include/linux/ftrace.h                 |  33
-rw-r--r--   include/linux/kernel.h                 |   1
-rw-r--r--   kernel/extable.c                       |   8
-rw-r--r--   kernel/trace/ftrace.c                  | 992
-rw-r--r--   kernel/trace/trace.h                   |   2
-rw-r--r--   kernel/trace/trace_functions.c         |   2
-rw-r--r--   kernel/trace/trace_irqsoff.c           |   1
-rw-r--r--   kernel/trace/trace_sched_wakeup.c      |   1
-rw-r--r--   kernel/trace/trace_selftest.c          | 214
-rw-r--r--   kernel/trace/trace_selftest_dynamic.c  |   6
-rw-r--r--   kernel/trace/trace_stack.c             |   1
11 files changed, 1116 insertions(+), 145 deletions(-)
diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h
index 32047449b309..9d88e1cb5dbb 100644
--- a/include/linux/ftrace.h
+++ b/include/linux/ftrace.h
@@ -29,9 +29,22 @@ ftrace_enable_sysctl(struct ctl_table *table, int write, | |||
29 | 29 | ||
30 | typedef void (*ftrace_func_t)(unsigned long ip, unsigned long parent_ip); | 30 | typedef void (*ftrace_func_t)(unsigned long ip, unsigned long parent_ip); |
31 | 31 | ||
32 | struct ftrace_hash; | ||
33 | |||
34 | enum { | ||
35 | FTRACE_OPS_FL_ENABLED = 1 << 0, | ||
36 | FTRACE_OPS_FL_GLOBAL = 1 << 1, | ||
37 | FTRACE_OPS_FL_DYNAMIC = 1 << 2, | ||
38 | }; | ||
39 | |||
32 | struct ftrace_ops { | 40 | struct ftrace_ops { |
33 | ftrace_func_t func; | 41 | ftrace_func_t func; |
34 | struct ftrace_ops *next; | 42 | struct ftrace_ops *next; |
43 | unsigned long flags; | ||
44 | #ifdef CONFIG_DYNAMIC_FTRACE | ||
45 | struct ftrace_hash *notrace_hash; | ||
46 | struct ftrace_hash *filter_hash; | ||
47 | #endif | ||
35 | }; | 48 | }; |
36 | 49 | ||
37 | extern int function_trace_stop; | 50 | extern int function_trace_stop; |
@@ -146,12 +159,13 @@ extern void unregister_ftrace_function_probe_all(char *glob); | |||
146 | extern int ftrace_text_reserved(void *start, void *end); | 159 | extern int ftrace_text_reserved(void *start, void *end); |
147 | 160 | ||
148 | enum { | 161 | enum { |
149 | FTRACE_FL_FREE = (1 << 0), | 162 | FTRACE_FL_ENABLED = (1 << 30), |
150 | FTRACE_FL_FILTER = (1 << 1), | 163 | FTRACE_FL_FREE = (1 << 31), |
151 | FTRACE_FL_ENABLED = (1 << 2), | ||
152 | FTRACE_FL_NOTRACE = (1 << 3), | ||
153 | }; | 164 | }; |
154 | 165 | ||
166 | #define FTRACE_FL_MASK (0x3UL << 30) | ||
167 | #define FTRACE_REF_MAX ((1 << 30) - 1) | ||
168 | |||
155 | struct dyn_ftrace { | 169 | struct dyn_ftrace { |
156 | union { | 170 | union { |
157 | unsigned long ip; /* address of mcount call-site */ | 171 | unsigned long ip; /* address of mcount call-site */ |
@@ -165,7 +179,12 @@ struct dyn_ftrace { | |||
165 | }; | 179 | }; |
166 | 180 | ||
167 | int ftrace_force_update(void); | 181 | int ftrace_force_update(void); |
168 | void ftrace_set_filter(unsigned char *buf, int len, int reset); | 182 | void ftrace_set_filter(struct ftrace_ops *ops, unsigned char *buf, |
183 | int len, int reset); | ||
184 | void ftrace_set_notrace(struct ftrace_ops *ops, unsigned char *buf, | ||
185 | int len, int reset); | ||
186 | void ftrace_set_global_filter(unsigned char *buf, int len, int reset); | ||
187 | void ftrace_set_global_notrace(unsigned char *buf, int len, int reset); | ||
169 | 188 | ||
170 | int register_ftrace_command(struct ftrace_func_command *cmd); | 189 | int register_ftrace_command(struct ftrace_func_command *cmd); |
171 | int unregister_ftrace_command(struct ftrace_func_command *cmd); | 190 | int unregister_ftrace_command(struct ftrace_func_command *cmd); |
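With this series each ftrace_ops carries its own filter_hash/notrace_hash, and the filter setters take the ops they apply to; callers that want the old behaviour switch to the new ftrace_set_global_filter()/ftrace_set_global_notrace() wrappers. A minimal, hypothetical sketch of a module using the reworked interface (my_ops, my_trace_func and the "schedule*" pattern are illustrative, not part of this commit):

#include <linux/ftrace.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/string.h>

/* Called for every mcount site that passes my_ops' hashes. */
static void my_trace_func(unsigned long ip, unsigned long parent_ip)
{
}

/* No FTRACE_OPS_FL_GLOBAL flag, so this ops keeps private hashes. */
static struct ftrace_ops my_ops = {
	.func = my_trace_func,
};

static char my_filter[] = "schedule*";

static int __init my_init(void)
{
	/* Restrict this ops to functions matching "schedule*". */
	ftrace_set_filter(&my_ops, (unsigned char *)my_filter,
			  strlen(my_filter), 1);
	return register_ftrace_function(&my_ops);
}

static void __exit my_exit(void)
{
	unregister_ftrace_function(&my_ops);
}

module_init(my_init);
module_exit(my_exit);
MODULE_LICENSE("GPL");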
diff --git a/include/linux/kernel.h b/include/linux/kernel.h
index 00cec4dc0ae2..f37ba716ef8b 100644
--- a/include/linux/kernel.h
+++ b/include/linux/kernel.h
@@ -283,6 +283,7 @@ extern char *get_options(const char *str, int nints, int *ints); | |||
283 | extern unsigned long long memparse(const char *ptr, char **retptr); | 283 | extern unsigned long long memparse(const char *ptr, char **retptr); |
284 | 284 | ||
285 | extern int core_kernel_text(unsigned long addr); | 285 | extern int core_kernel_text(unsigned long addr); |
286 | extern int core_kernel_data(unsigned long addr); | ||
286 | extern int __kernel_text_address(unsigned long addr); | 287 | extern int __kernel_text_address(unsigned long addr); |
287 | extern int kernel_text_address(unsigned long addr); | 288 | extern int kernel_text_address(unsigned long addr); |
288 | extern int func_ptr_is_kernel_text(void *ptr); | 289 | extern int func_ptr_is_kernel_text(void *ptr); |
diff --git a/kernel/extable.c b/kernel/extable.c
index 7f8f263f8524..c2d625fcda77 100644
--- a/kernel/extable.c
+++ b/kernel/extable.c
@@ -72,6 +72,14 @@ int core_kernel_text(unsigned long addr) | |||
72 | return 0; | 72 | return 0; |
73 | } | 73 | } |
74 | 74 | ||
75 | int core_kernel_data(unsigned long addr) | ||
76 | { | ||
77 | if (addr >= (unsigned long)_sdata && | ||
78 | addr < (unsigned long)_edata) | ||
79 | return 1; | ||
80 | return 0; | ||
81 | } | ||
82 | |||
75 | int __kernel_text_address(unsigned long addr) | 83 | int __kernel_text_address(unsigned long addr) |
76 | { | 84 | { |
77 | if (core_kernel_text(addr)) | 85 | if (core_kernel_text(addr)) |
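core_kernel_data() is a plain range check against the kernel's _sdata.._edata section; __register_ftrace_function() (further down) uses it to mark any ftrace_ops that does not live in core kernel data, e.g. one inside a module or allocated at run time, as FTRACE_OPS_FL_DYNAMIC, so that unregistering can wait an RCU-sched grace period before the memory may be reused. A rough userspace analogue of the check, using the classic etext/end symbols from end(3) as stand-ins for _sdata/_edata (illustrative only):

#include <stdio.h>
#include <stdlib.h>

extern char etext, end;		/* linker-provided, see end(3) */

/* Userspace stand-in for core_kernel_data(): is addr inside the
 * program's static data image, roughly [etext, end)? */
static int program_static_data(unsigned long addr)
{
	return addr >= (unsigned long)&etext && addr < (unsigned long)&end;
}

static int static_obj;		/* lives in .bss, inside the range */

int main(void)
{
	int *heap_obj = malloc(sizeof(*heap_obj));	/* outside the range */

	printf("static: %d heap: %d\n",
	       program_static_data((unsigned long)&static_obj),
	       program_static_data((unsigned long)heap_obj));
	free(heap_obj);
	return 0;
}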
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index d3406346ced6..d017c2c82c44 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -57,6 +57,8 @@ | |||
57 | /* hash bits for specific function selection */ | 57 | /* hash bits for specific function selection */ |
58 | #define FTRACE_HASH_BITS 7 | 58 | #define FTRACE_HASH_BITS 7 |
59 | #define FTRACE_FUNC_HASHSIZE (1 << FTRACE_HASH_BITS) | 59 | #define FTRACE_FUNC_HASHSIZE (1 << FTRACE_HASH_BITS) |
60 | #define FTRACE_HASH_DEFAULT_BITS 10 | ||
61 | #define FTRACE_HASH_MAX_BITS 12 | ||
60 | 62 | ||
61 | /* ftrace_enabled is a method to turn ftrace on or off */ | 63 | /* ftrace_enabled is a method to turn ftrace on or off */ |
62 | int ftrace_enabled __read_mostly; | 64 | int ftrace_enabled __read_mostly; |
@@ -85,23 +87,29 @@ static struct ftrace_ops ftrace_list_end __read_mostly = | |||
85 | .func = ftrace_stub, | 87 | .func = ftrace_stub, |
86 | }; | 88 | }; |
87 | 89 | ||
88 | static struct ftrace_ops *ftrace_list __read_mostly = &ftrace_list_end; | 90 | static struct ftrace_ops *ftrace_global_list __read_mostly = &ftrace_list_end; |
91 | static struct ftrace_ops *ftrace_ops_list __read_mostly = &ftrace_list_end; | ||
89 | ftrace_func_t ftrace_trace_function __read_mostly = ftrace_stub; | 92 | ftrace_func_t ftrace_trace_function __read_mostly = ftrace_stub; |
90 | ftrace_func_t __ftrace_trace_function __read_mostly = ftrace_stub; | 93 | ftrace_func_t __ftrace_trace_function __read_mostly = ftrace_stub; |
91 | ftrace_func_t ftrace_pid_function __read_mostly = ftrace_stub; | 94 | ftrace_func_t ftrace_pid_function __read_mostly = ftrace_stub; |
95 | static struct ftrace_ops global_ops; | ||
96 | |||
97 | static void | ||
98 | ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip); | ||
92 | 99 | ||
93 | /* | 100 | /* |
94 | * Traverse the ftrace_list, invoking all entries. The reason that we | 101 | * Traverse the ftrace_global_list, invoking all entries. The reason that we |
95 | * can use rcu_dereference_raw() is that elements removed from this list | 102 | * can use rcu_dereference_raw() is that elements removed from this list |
96 | * are simply leaked, so there is no need to interact with a grace-period | 103 | * are simply leaked, so there is no need to interact with a grace-period |
97 | * mechanism. The rcu_dereference_raw() calls are needed to handle | 104 | * mechanism. The rcu_dereference_raw() calls are needed to handle |
98 | * concurrent insertions into the ftrace_list. | 105 | * concurrent insertions into the ftrace_global_list. |
99 | * | 106 | * |
100 | * Silly Alpha and silly pointer-speculation compiler optimizations! | 107 | * Silly Alpha and silly pointer-speculation compiler optimizations! |
101 | */ | 108 | */ |
102 | static void ftrace_list_func(unsigned long ip, unsigned long parent_ip) | 109 | static void ftrace_global_list_func(unsigned long ip, |
110 | unsigned long parent_ip) | ||
103 | { | 111 | { |
104 | struct ftrace_ops *op = rcu_dereference_raw(ftrace_list); /*see above*/ | 112 | struct ftrace_ops *op = rcu_dereference_raw(ftrace_global_list); /*see above*/ |
105 | 113 | ||
106 | while (op != &ftrace_list_end) { | 114 | while (op != &ftrace_list_end) { |
107 | op->func(ip, parent_ip); | 115 | op->func(ip, parent_ip); |
@@ -151,7 +159,7 @@ static void ftrace_test_stop_func(unsigned long ip, unsigned long parent_ip) | |||
151 | } | 159 | } |
152 | #endif | 160 | #endif |
153 | 161 | ||
154 | static void update_ftrace_function(void) | 162 | static void update_global_ops(void) |
155 | { | 163 | { |
156 | ftrace_func_t func; | 164 | ftrace_func_t func; |
157 | 165 | ||
@@ -160,17 +168,39 @@ static void update_ftrace_function(void) | |||
160 | * function directly. Otherwise, we need to iterate over the | 168 | * function directly. Otherwise, we need to iterate over the |
161 | * registered callers. | 169 | * registered callers. |
162 | */ | 170 | */ |
163 | if (ftrace_list == &ftrace_list_end || | 171 | if (ftrace_global_list == &ftrace_list_end || |
164 | ftrace_list->next == &ftrace_list_end) | 172 | ftrace_global_list->next == &ftrace_list_end) |
165 | func = ftrace_list->func; | 173 | func = ftrace_global_list->func; |
166 | else | 174 | else |
167 | func = ftrace_list_func; | 175 | func = ftrace_global_list_func; |
168 | 176 | ||
169 | /* If we filter on pids, update to use the pid function */ | 177 | /* If we filter on pids, update to use the pid function */ |
170 | if (!list_empty(&ftrace_pids)) { | 178 | if (!list_empty(&ftrace_pids)) { |
171 | set_ftrace_pid_function(func); | 179 | set_ftrace_pid_function(func); |
172 | func = ftrace_pid_func; | 180 | func = ftrace_pid_func; |
173 | } | 181 | } |
182 | |||
183 | global_ops.func = func; | ||
184 | } | ||
185 | |||
186 | static void update_ftrace_function(void) | ||
187 | { | ||
188 | ftrace_func_t func; | ||
189 | |||
190 | update_global_ops(); | ||
191 | |||
192 | /* | ||
193 | * If we are at the end of the list and this ops is | ||
194 | * not dynamic, then have the mcount trampoline call | ||
195 | * the function directly | ||
196 | */ | ||
197 | if (ftrace_ops_list == &ftrace_list_end || | ||
198 | (ftrace_ops_list->next == &ftrace_list_end && | ||
199 | !(ftrace_ops_list->flags & FTRACE_OPS_FL_DYNAMIC))) | ||
200 | func = ftrace_ops_list->func; | ||
201 | else | ||
202 | func = ftrace_ops_list_func; | ||
203 | |||
174 | #ifdef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST | 204 | #ifdef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST |
175 | ftrace_trace_function = func; | 205 | ftrace_trace_function = func; |
176 | #else | 206 | #else |
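The split into update_global_ops() and update_ftrace_function() encodes one rule: the mcount trampoline may jump straight to a single callback only when exactly one ops sits on ftrace_ops_list and that ops cannot go away (it is not FTRACE_OPS_FL_DYNAMIC); in every other case it must go through the list iterator. A condensed, illustrative model of that choice (not kernel code):

typedef void (*func_t)(unsigned long ip, unsigned long parent_ip);

struct ops {
	func_t func;
	struct ops *next;
	unsigned long flags;
};

#define FL_DYNAMIC	(1UL << 2)

static void stub(unsigned long ip, unsigned long pip) { }
static void list_func(unsigned long ip, unsigned long pip) { /* walk all ops */ }

static struct ops list_end = { .func = stub };
static struct ops *ops_list = &list_end;

/* Mirror of update_ftrace_function()'s choice of trampoline target. */
static func_t pick_trace_function(void)
{
	/* Empty list hits the stub; a single static ops is called directly. */
	if (ops_list == &list_end ||
	    (ops_list->next == &list_end && !(ops_list->flags & FL_DYNAMIC)))
		return ops_list->func;
	return list_func;		/* otherwise iterate the whole list */
}

int main(void)
{
	static struct ops one = { .func = stub };

	one.next = ops_list;
	ops_list = &one;
	return pick_trace_function() == stub ? 0 : 1;	/* single static ops */
}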
@@ -179,24 +209,19 @@ static void update_ftrace_function(void) | |||
179 | #endif | 209 | #endif |
180 | } | 210 | } |
181 | 211 | ||
182 | static int __register_ftrace_function(struct ftrace_ops *ops) | 212 | static void add_ftrace_ops(struct ftrace_ops **list, struct ftrace_ops *ops) |
183 | { | 213 | { |
184 | ops->next = ftrace_list; | 214 | ops->next = *list; |
185 | /* | 215 | /* |
186 | * We are entering ops into the ftrace_list but another | 216 | * We are entering ops into the list but another |
187 | * CPU might be walking that list. We need to make sure | 217 | * CPU might be walking that list. We need to make sure |
188 | * the ops->next pointer is valid before another CPU sees | 218 | * the ops->next pointer is valid before another CPU sees |
189 | * the ops pointer included into the ftrace_list. | 219 | * the ops pointer included into the list. |
190 | */ | 220 | */ |
191 | rcu_assign_pointer(ftrace_list, ops); | 221 | rcu_assign_pointer(*list, ops); |
192 | |||
193 | if (ftrace_enabled) | ||
194 | update_ftrace_function(); | ||
195 | |||
196 | return 0; | ||
197 | } | 222 | } |
198 | 223 | ||
199 | static int __unregister_ftrace_function(struct ftrace_ops *ops) | 224 | static int remove_ftrace_ops(struct ftrace_ops **list, struct ftrace_ops *ops) |
200 | { | 225 | { |
201 | struct ftrace_ops **p; | 226 | struct ftrace_ops **p; |
202 | 227 | ||
@@ -204,13 +229,12 @@ static int __unregister_ftrace_function(struct ftrace_ops *ops) | |||
204 | * If we are removing the last function, then simply point | 229 | * If we are removing the last function, then simply point |
205 | * to the ftrace_stub. | 230 | * to the ftrace_stub. |
206 | */ | 231 | */ |
207 | if (ftrace_list == ops && ops->next == &ftrace_list_end) { | 232 | if (*list == ops && ops->next == &ftrace_list_end) { |
208 | ftrace_trace_function = ftrace_stub; | 233 | *list = &ftrace_list_end; |
209 | ftrace_list = &ftrace_list_end; | ||
210 | return 0; | 234 | return 0; |
211 | } | 235 | } |
212 | 236 | ||
213 | for (p = &ftrace_list; *p != &ftrace_list_end; p = &(*p)->next) | 237 | for (p = list; *p != &ftrace_list_end; p = &(*p)->next) |
214 | if (*p == ops) | 238 | if (*p == ops) |
215 | break; | 239 | break; |
216 | 240 | ||
@@ -218,6 +242,31 @@ static int __unregister_ftrace_function(struct ftrace_ops *ops) | |||
218 | return -1; | 242 | return -1; |
219 | 243 | ||
220 | *p = (*p)->next; | 244 | *p = (*p)->next; |
245 | return 0; | ||
246 | } | ||
247 | |||
248 | static int __register_ftrace_function(struct ftrace_ops *ops) | ||
249 | { | ||
250 | if (ftrace_disabled) | ||
251 | return -ENODEV; | ||
252 | |||
253 | if (FTRACE_WARN_ON(ops == &global_ops)) | ||
254 | return -EINVAL; | ||
255 | |||
256 | if (WARN_ON(ops->flags & FTRACE_OPS_FL_ENABLED)) | ||
257 | return -EBUSY; | ||
258 | |||
259 | if (!core_kernel_data((unsigned long)ops)) | ||
260 | ops->flags |= FTRACE_OPS_FL_DYNAMIC; | ||
261 | |||
262 | if (ops->flags & FTRACE_OPS_FL_GLOBAL) { | ||
263 | int first = ftrace_global_list == &ftrace_list_end; | ||
264 | add_ftrace_ops(&ftrace_global_list, ops); | ||
265 | ops->flags |= FTRACE_OPS_FL_ENABLED; | ||
266 | if (first) | ||
267 | add_ftrace_ops(&ftrace_ops_list, &global_ops); | ||
268 | } else | ||
269 | add_ftrace_ops(&ftrace_ops_list, ops); | ||
221 | 270 | ||
222 | if (ftrace_enabled) | 271 | if (ftrace_enabled) |
223 | update_ftrace_function(); | 272 | update_ftrace_function(); |
@@ -225,6 +274,44 @@ static int __unregister_ftrace_function(struct ftrace_ops *ops) | |||
225 | return 0; | 274 | return 0; |
226 | } | 275 | } |
227 | 276 | ||
277 | static int __unregister_ftrace_function(struct ftrace_ops *ops) | ||
278 | { | ||
279 | int ret; | ||
280 | |||
281 | if (ftrace_disabled) | ||
282 | return -ENODEV; | ||
283 | |||
284 | if (WARN_ON(!(ops->flags & FTRACE_OPS_FL_ENABLED))) | ||
285 | return -EBUSY; | ||
286 | |||
287 | if (FTRACE_WARN_ON(ops == &global_ops)) | ||
288 | return -EINVAL; | ||
289 | |||
290 | if (ops->flags & FTRACE_OPS_FL_GLOBAL) { | ||
291 | ret = remove_ftrace_ops(&ftrace_global_list, ops); | ||
292 | if (!ret && ftrace_global_list == &ftrace_list_end) | ||
293 | ret = remove_ftrace_ops(&ftrace_ops_list, &global_ops); | ||
294 | if (!ret) | ||
295 | ops->flags &= ~FTRACE_OPS_FL_ENABLED; | ||
296 | } else | ||
297 | ret = remove_ftrace_ops(&ftrace_ops_list, ops); | ||
298 | |||
299 | if (ret < 0) | ||
300 | return ret; | ||
301 | |||
302 | if (ftrace_enabled) | ||
303 | update_ftrace_function(); | ||
304 | |||
305 | /* | ||
306 | * Dynamic ops may be freed, we must make sure that all | ||
307 | * callers are done before leaving this function. | ||
308 | */ | ||
309 | if (ops->flags & FTRACE_OPS_FL_DYNAMIC) | ||
310 | synchronize_sched(); | ||
311 | |||
312 | return 0; | ||
313 | } | ||
314 | |||
228 | static void ftrace_update_pid_func(void) | 315 | static void ftrace_update_pid_func(void) |
229 | { | 316 | { |
230 | /* Only do something if we are tracing something */ | 317 | /* Only do something if we are tracing something */ |
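add_ftrace_ops()/remove_ftrace_ops() are now generic over which list they operate on, so the same helpers serve both ftrace_ops_list and ftrace_global_list; registering a GLOBAL ops puts it on the global list and, for the first such ops, also links the shared global_ops onto the main list. A self-contained sketch of the list handling (plain pointer stores here, where the kernel uses rcu_assign_pointer() and leaves removed entries readable until a grace period passes):

#include <stdio.h>

struct ops { const char *name; struct ops *next; };

static struct ops list_end = { .name = "end" };
static struct ops *ops_list = &list_end;

static void add_ops(struct ops **list, struct ops *ops)
{
	ops->next = *list;		/* kernel: rcu_assign_pointer(*list, ops) */
	*list = ops;
}

static int remove_ops(struct ops **list, struct ops *ops)
{
	struct ops **p;

	if (*list == ops && ops->next == &list_end) {	/* last real entry */
		*list = &list_end;
		return 0;
	}
	for (p = list; *p != &list_end; p = &(*p)->next)
		if (*p == ops)
			break;
	if (*p != ops)
		return -1;
	*p = (*p)->next;
	return 0;
}

int main(void)
{
	struct ops a = { .name = "a" }, b = { .name = "b" };

	add_ops(&ops_list, &a);
	add_ops(&ops_list, &b);
	remove_ops(&ops_list, &a);
	for (struct ops *op = ops_list; op != &list_end; op = op->next)
		printf("%s\n", op->name);	/* prints: b */
	return 0;
}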
@@ -865,8 +952,35 @@ enum { | |||
865 | FTRACE_START_FUNC_RET = (1 << 3), | 952 | FTRACE_START_FUNC_RET = (1 << 3), |
866 | FTRACE_STOP_FUNC_RET = (1 << 4), | 953 | FTRACE_STOP_FUNC_RET = (1 << 4), |
867 | }; | 954 | }; |
955 | struct ftrace_func_entry { | ||
956 | struct hlist_node hlist; | ||
957 | unsigned long ip; | ||
958 | }; | ||
868 | 959 | ||
869 | static int ftrace_filtered; | 960 | struct ftrace_hash { |
961 | unsigned long size_bits; | ||
962 | struct hlist_head *buckets; | ||
963 | unsigned long count; | ||
964 | struct rcu_head rcu; | ||
965 | }; | ||
966 | |||
967 | /* | ||
968 | * We make these constant because no one should touch them, | ||
969 | * but they are used as the default "empty hash", to avoid allocating | ||
970 | * it all the time. These are in a read only section such that if | ||
971 | * anyone does try to modify it, it will cause an exception. | ||
972 | */ | ||
973 | static const struct hlist_head empty_buckets[1]; | ||
974 | static const struct ftrace_hash empty_hash = { | ||
975 | .buckets = (struct hlist_head *)empty_buckets, | ||
976 | }; | ||
977 | #define EMPTY_HASH ((struct ftrace_hash *)&empty_hash) | ||
978 | |||
979 | static struct ftrace_ops global_ops = { | ||
980 | .func = ftrace_stub, | ||
981 | .notrace_hash = EMPTY_HASH, | ||
982 | .filter_hash = EMPTY_HASH, | ||
983 | }; | ||
870 | 984 | ||
871 | static struct dyn_ftrace *ftrace_new_addrs; | 985 | static struct dyn_ftrace *ftrace_new_addrs; |
872 | 986 | ||
@@ -889,6 +1003,269 @@ static struct ftrace_page *ftrace_pages; | |||
889 | 1003 | ||
890 | static struct dyn_ftrace *ftrace_free_records; | 1004 | static struct dyn_ftrace *ftrace_free_records; |
891 | 1005 | ||
1006 | static struct ftrace_func_entry * | ||
1007 | ftrace_lookup_ip(struct ftrace_hash *hash, unsigned long ip) | ||
1008 | { | ||
1009 | unsigned long key; | ||
1010 | struct ftrace_func_entry *entry; | ||
1011 | struct hlist_head *hhd; | ||
1012 | struct hlist_node *n; | ||
1013 | |||
1014 | if (!hash->count) | ||
1015 | return NULL; | ||
1016 | |||
1017 | if (hash->size_bits > 0) | ||
1018 | key = hash_long(ip, hash->size_bits); | ||
1019 | else | ||
1020 | key = 0; | ||
1021 | |||
1022 | hhd = &hash->buckets[key]; | ||
1023 | |||
1024 | hlist_for_each_entry_rcu(entry, n, hhd, hlist) { | ||
1025 | if (entry->ip == ip) | ||
1026 | return entry; | ||
1027 | } | ||
1028 | return NULL; | ||
1029 | } | ||
1030 | |||
1031 | static void __add_hash_entry(struct ftrace_hash *hash, | ||
1032 | struct ftrace_func_entry *entry) | ||
1033 | { | ||
1034 | struct hlist_head *hhd; | ||
1035 | unsigned long key; | ||
1036 | |||
1037 | if (hash->size_bits) | ||
1038 | key = hash_long(entry->ip, hash->size_bits); | ||
1039 | else | ||
1040 | key = 0; | ||
1041 | |||
1042 | hhd = &hash->buckets[key]; | ||
1043 | hlist_add_head(&entry->hlist, hhd); | ||
1044 | hash->count++; | ||
1045 | } | ||
1046 | |||
1047 | static int add_hash_entry(struct ftrace_hash *hash, unsigned long ip) | ||
1048 | { | ||
1049 | struct ftrace_func_entry *entry; | ||
1050 | |||
1051 | entry = kmalloc(sizeof(*entry), GFP_KERNEL); | ||
1052 | if (!entry) | ||
1053 | return -ENOMEM; | ||
1054 | |||
1055 | entry->ip = ip; | ||
1056 | __add_hash_entry(hash, entry); | ||
1057 | |||
1058 | return 0; | ||
1059 | } | ||
1060 | |||
1061 | static void | ||
1062 | free_hash_entry(struct ftrace_hash *hash, | ||
1063 | struct ftrace_func_entry *entry) | ||
1064 | { | ||
1065 | hlist_del(&entry->hlist); | ||
1066 | kfree(entry); | ||
1067 | hash->count--; | ||
1068 | } | ||
1069 | |||
1070 | static void | ||
1071 | remove_hash_entry(struct ftrace_hash *hash, | ||
1072 | struct ftrace_func_entry *entry) | ||
1073 | { | ||
1074 | hlist_del(&entry->hlist); | ||
1075 | hash->count--; | ||
1076 | } | ||
1077 | |||
1078 | static void ftrace_hash_clear(struct ftrace_hash *hash) | ||
1079 | { | ||
1080 | struct hlist_head *hhd; | ||
1081 | struct hlist_node *tp, *tn; | ||
1082 | struct ftrace_func_entry *entry; | ||
1083 | int size = 1 << hash->size_bits; | ||
1084 | int i; | ||
1085 | |||
1086 | if (!hash->count) | ||
1087 | return; | ||
1088 | |||
1089 | for (i = 0; i < size; i++) { | ||
1090 | hhd = &hash->buckets[i]; | ||
1091 | hlist_for_each_entry_safe(entry, tp, tn, hhd, hlist) | ||
1092 | free_hash_entry(hash, entry); | ||
1093 | } | ||
1094 | FTRACE_WARN_ON(hash->count); | ||
1095 | } | ||
1096 | |||
1097 | static void free_ftrace_hash(struct ftrace_hash *hash) | ||
1098 | { | ||
1099 | if (!hash || hash == EMPTY_HASH) | ||
1100 | return; | ||
1101 | ftrace_hash_clear(hash); | ||
1102 | kfree(hash->buckets); | ||
1103 | kfree(hash); | ||
1104 | } | ||
1105 | |||
1106 | static void __free_ftrace_hash_rcu(struct rcu_head *rcu) | ||
1107 | { | ||
1108 | struct ftrace_hash *hash; | ||
1109 | |||
1110 | hash = container_of(rcu, struct ftrace_hash, rcu); | ||
1111 | free_ftrace_hash(hash); | ||
1112 | } | ||
1113 | |||
1114 | static void free_ftrace_hash_rcu(struct ftrace_hash *hash) | ||
1115 | { | ||
1116 | if (!hash || hash == EMPTY_HASH) | ||
1117 | return; | ||
1118 | call_rcu_sched(&hash->rcu, __free_ftrace_hash_rcu); | ||
1119 | } | ||
1120 | |||
1121 | static struct ftrace_hash *alloc_ftrace_hash(int size_bits) | ||
1122 | { | ||
1123 | struct ftrace_hash *hash; | ||
1124 | int size; | ||
1125 | |||
1126 | hash = kzalloc(sizeof(*hash), GFP_KERNEL); | ||
1127 | if (!hash) | ||
1128 | return NULL; | ||
1129 | |||
1130 | size = 1 << size_bits; | ||
1131 | hash->buckets = kzalloc(sizeof(*hash->buckets) * size, GFP_KERNEL); | ||
1132 | |||
1133 | if (!hash->buckets) { | ||
1134 | kfree(hash); | ||
1135 | return NULL; | ||
1136 | } | ||
1137 | |||
1138 | hash->size_bits = size_bits; | ||
1139 | |||
1140 | return hash; | ||
1141 | } | ||
1142 | |||
1143 | static struct ftrace_hash * | ||
1144 | alloc_and_copy_ftrace_hash(int size_bits, struct ftrace_hash *hash) | ||
1145 | { | ||
1146 | struct ftrace_func_entry *entry; | ||
1147 | struct ftrace_hash *new_hash; | ||
1148 | struct hlist_node *tp; | ||
1149 | int size; | ||
1150 | int ret; | ||
1151 | int i; | ||
1152 | |||
1153 | new_hash = alloc_ftrace_hash(size_bits); | ||
1154 | if (!new_hash) | ||
1155 | return NULL; | ||
1156 | |||
1157 | /* Empty hash? */ | ||
1158 | if (!hash || !hash->count) | ||
1159 | return new_hash; | ||
1160 | |||
1161 | size = 1 << hash->size_bits; | ||
1162 | for (i = 0; i < size; i++) { | ||
1163 | hlist_for_each_entry(entry, tp, &hash->buckets[i], hlist) { | ||
1164 | ret = add_hash_entry(new_hash, entry->ip); | ||
1165 | if (ret < 0) | ||
1166 | goto free_hash; | ||
1167 | } | ||
1168 | } | ||
1169 | |||
1170 | FTRACE_WARN_ON(new_hash->count != hash->count); | ||
1171 | |||
1172 | return new_hash; | ||
1173 | |||
1174 | free_hash: | ||
1175 | free_ftrace_hash(new_hash); | ||
1176 | return NULL; | ||
1177 | } | ||
1178 | |||
1179 | static int | ||
1180 | ftrace_hash_move(struct ftrace_hash **dst, struct ftrace_hash *src) | ||
1181 | { | ||
1182 | struct ftrace_func_entry *entry; | ||
1183 | struct hlist_node *tp, *tn; | ||
1184 | struct hlist_head *hhd; | ||
1185 | struct ftrace_hash *old_hash; | ||
1186 | struct ftrace_hash *new_hash; | ||
1187 | unsigned long key; | ||
1188 | int size = src->count; | ||
1189 | int bits = 0; | ||
1190 | int i; | ||
1191 | |||
1192 | /* | ||
1193 | * If the new source is empty, just free dst and assign it | ||
1194 | * the empty_hash. | ||
1195 | */ | ||
1196 | if (!src->count) { | ||
1197 | free_ftrace_hash_rcu(*dst); | ||
1198 | rcu_assign_pointer(*dst, EMPTY_HASH); | ||
1199 | return 0; | ||
1200 | } | ||
1201 | |||
1202 | /* | ||
1203 | * Make the hash size about 1/2 the # found | ||
1204 | */ | ||
1205 | for (size /= 2; size; size >>= 1) | ||
1206 | bits++; | ||
1207 | |||
1208 | /* Don't allocate too much */ | ||
1209 | if (bits > FTRACE_HASH_MAX_BITS) | ||
1210 | bits = FTRACE_HASH_MAX_BITS; | ||
1211 | |||
1212 | new_hash = alloc_ftrace_hash(bits); | ||
1213 | if (!new_hash) | ||
1214 | return -ENOMEM; | ||
1215 | |||
1216 | size = 1 << src->size_bits; | ||
1217 | for (i = 0; i < size; i++) { | ||
1218 | hhd = &src->buckets[i]; | ||
1219 | hlist_for_each_entry_safe(entry, tp, tn, hhd, hlist) { | ||
1220 | if (bits > 0) | ||
1221 | key = hash_long(entry->ip, bits); | ||
1222 | else | ||
1223 | key = 0; | ||
1224 | remove_hash_entry(src, entry); | ||
1225 | __add_hash_entry(new_hash, entry); | ||
1226 | } | ||
1227 | } | ||
1228 | |||
1229 | old_hash = *dst; | ||
1230 | rcu_assign_pointer(*dst, new_hash); | ||
1231 | free_ftrace_hash_rcu(old_hash); | ||
1232 | |||
1233 | return 0; | ||
1234 | } | ||
1235 | |||
1236 | /* | ||
1237 | * Test the hashes for this ops to see if we want to call | ||
1238 | * the ops->func or not. | ||
1239 | * | ||
1240 | * It's a match if the ip is in the ops->filter_hash or | ||
1241 | * the filter_hash does not exist or is empty, | ||
1242 | * AND | ||
1243 | * the ip is not in the ops->notrace_hash. | ||
1244 | * | ||
1245 | * This needs to be called with preemption disabled as | ||
1246 | * the hashes are freed with call_rcu_sched(). | ||
1247 | */ | ||
1248 | static int | ||
1249 | ftrace_ops_test(struct ftrace_ops *ops, unsigned long ip) | ||
1250 | { | ||
1251 | struct ftrace_hash *filter_hash; | ||
1252 | struct ftrace_hash *notrace_hash; | ||
1253 | int ret; | ||
1254 | |||
1255 | filter_hash = rcu_dereference_raw(ops->filter_hash); | ||
1256 | notrace_hash = rcu_dereference_raw(ops->notrace_hash); | ||
1257 | |||
1258 | if ((!filter_hash || !filter_hash->count || | ||
1259 | ftrace_lookup_ip(filter_hash, ip)) && | ||
1260 | (!notrace_hash || !notrace_hash->count || | ||
1261 | !ftrace_lookup_ip(notrace_hash, ip))) | ||
1262 | ret = 1; | ||
1263 | else | ||
1264 | ret = 0; | ||
1265 | |||
1266 | return ret; | ||
1267 | } | ||
1268 | |||
892 | /* | 1269 | /* |
893 | * This is a double for. Do not use 'break' to break out of the loop, | 1270 | * This is a double for. Do not use 'break' to break out of the loop, |
894 | * you must use a goto. | 1271 | * you must use a goto. |
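struct ftrace_hash is a plain chained hash of instruction-pointer values, and ftrace_ops_test() is the per-ops predicate evaluated on every hit: trace ip if the filter hash is empty or contains it, and the notrace hash does not contain it. ftrace_hash_move() sizes the published table at roughly half the number of entries, capped at FTRACE_HASH_MAX_BITS. A compact userspace model of the lookup, the predicate and the sizing (fixed buckets and a trivial hash in place of hash_long(); illustrative only):

#include <stdio.h>
#include <stdlib.h>

#define BITS	4
#define SIZE	(1 << BITS)

struct entry { unsigned long ip; struct entry *next; };
struct hash  { struct entry *buckets[SIZE]; unsigned long count; };

static unsigned long hash_ip(unsigned long ip)
{
	return (ip >> 4) & (SIZE - 1);		/* stand-in for hash_long() */
}

static struct entry *lookup_ip(struct hash *hash, unsigned long ip)
{
	struct entry *e;

	if (!hash || !hash->count)
		return NULL;
	for (e = hash->buckets[hash_ip(ip)]; e; e = e->next)
		if (e->ip == ip)
			return e;
	return NULL;
}

static void add_ip(struct hash *hash, unsigned long ip)
{
	struct entry *e = calloc(1, sizeof(*e));

	e->ip = ip;
	e->next = hash->buckets[hash_ip(ip)];
	hash->buckets[hash_ip(ip)] = e;
	hash->count++;
}

/* Mirror of ftrace_ops_test(): an empty filter hash means "trace everything". */
static int ops_test(struct hash *filter, struct hash *notrace, unsigned long ip)
{
	return (!filter || !filter->count || lookup_ip(filter, ip)) &&
	       (!notrace || !notrace->count || !lookup_ip(notrace, ip));
}

/* ftrace_hash_move() picks bits so the table has about count/2 buckets. */
static int hash_bits_for(int count)
{
	int bits = 0;

	for (count /= 2; count; count >>= 1)
		bits++;
	return bits > 12 ? 12 : bits;		/* FTRACE_HASH_MAX_BITS */
}

int main(void)
{
	struct hash filter = { 0 }, notrace = { 0 };

	add_ip(&filter, 0x1000);
	add_ip(&notrace, 0x2000);
	printf("%d %d %d bits=%d\n",
	       ops_test(&filter, &notrace, 0x1000),	/* 1: in the filter */
	       ops_test(&filter, &notrace, 0x2000),	/* 0: not in filter */
	       ops_test(&filter, &notrace, 0x3000),	/* 0: not in filter */
	       hash_bits_for(100));			/* 6 */
	return 0;
}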
@@ -903,6 +1280,105 @@ static struct dyn_ftrace *ftrace_free_records; | |||
903 | } \ | 1280 | } \ |
904 | } | 1281 | } |
905 | 1282 | ||
1283 | static void __ftrace_hash_rec_update(struct ftrace_ops *ops, | ||
1284 | int filter_hash, | ||
1285 | bool inc) | ||
1286 | { | ||
1287 | struct ftrace_hash *hash; | ||
1288 | struct ftrace_hash *other_hash; | ||
1289 | struct ftrace_page *pg; | ||
1290 | struct dyn_ftrace *rec; | ||
1291 | int count = 0; | ||
1292 | int all = 0; | ||
1293 | |||
1294 | /* Only update if the ops has been registered */ | ||
1295 | if (!(ops->flags & FTRACE_OPS_FL_ENABLED)) | ||
1296 | return; | ||
1297 | |||
1298 | /* | ||
1299 | * In the filter_hash case: | ||
1300 | * If the count is zero, we update all records. | ||
1301 | * Otherwise we just update the items in the hash. | ||
1302 | * | ||
1303 | * In the notrace_hash case: | ||
1304 | * We enable the update in the hash. | ||
1305 | * As disabling notrace means enabling the tracing, | ||
1306 | * and enabling notrace means disabling, the inc variable | ||
1307 | * gets inverted. | ||
1308 | */ | ||
1309 | if (filter_hash) { | ||
1310 | hash = ops->filter_hash; | ||
1311 | other_hash = ops->notrace_hash; | ||
1312 | if (!hash || !hash->count) | ||
1313 | all = 1; | ||
1314 | } else { | ||
1315 | inc = !inc; | ||
1316 | hash = ops->notrace_hash; | ||
1317 | other_hash = ops->filter_hash; | ||
1318 | /* | ||
1319 | * If the notrace hash has no items, | ||
1320 | * then there's nothing to do. | ||
1321 | */ | ||
1322 | if (hash && !hash->count) | ||
1323 | return; | ||
1324 | } | ||
1325 | |||
1326 | do_for_each_ftrace_rec(pg, rec) { | ||
1327 | int in_other_hash = 0; | ||
1328 | int in_hash = 0; | ||
1329 | int match = 0; | ||
1330 | |||
1331 | if (all) { | ||
1332 | /* | ||
1333 | * Only the filter_hash affects all records. | ||
1334 | * Update if the record is not in the notrace hash. | ||
1335 | */ | ||
1336 | if (!other_hash || !ftrace_lookup_ip(other_hash, rec->ip)) | ||
1337 | match = 1; | ||
1338 | } else { | ||
1339 | in_hash = hash && !!ftrace_lookup_ip(hash, rec->ip); | ||
1340 | in_other_hash = other_hash && !!ftrace_lookup_ip(other_hash, rec->ip); | ||
1341 | |||
1342 | /* | ||
1343 | * Compare the record against the other hash as well: | ||
1344 | */ | ||
1345 | if (filter_hash && in_hash && !in_other_hash) | ||
1346 | match = 1; | ||
1347 | else if (!filter_hash && in_hash && | ||
1348 | (in_other_hash || !other_hash->count)) | ||
1349 | match = 1; | ||
1350 | } | ||
1351 | if (!match) | ||
1352 | continue; | ||
1353 | |||
1354 | if (inc) { | ||
1355 | rec->flags++; | ||
1356 | if (FTRACE_WARN_ON((rec->flags & ~FTRACE_FL_MASK) == FTRACE_REF_MAX)) | ||
1357 | return; | ||
1358 | } else { | ||
1359 | if (FTRACE_WARN_ON((rec->flags & ~FTRACE_FL_MASK) == 0)) | ||
1360 | return; | ||
1361 | rec->flags--; | ||
1362 | } | ||
1363 | count++; | ||
1364 | /* Shortcut, if we handled all records, we are done. */ | ||
1365 | if (!all && count == hash->count) | ||
1366 | return; | ||
1367 | } while_for_each_ftrace_rec(); | ||
1368 | } | ||
1369 | |||
1370 | static void ftrace_hash_rec_disable(struct ftrace_ops *ops, | ||
1371 | int filter_hash) | ||
1372 | { | ||
1373 | __ftrace_hash_rec_update(ops, filter_hash, 0); | ||
1374 | } | ||
1375 | |||
1376 | static void ftrace_hash_rec_enable(struct ftrace_ops *ops, | ||
1377 | int filter_hash) | ||
1378 | { | ||
1379 | __ftrace_hash_rec_update(ops, filter_hash, 1); | ||
1380 | } | ||
1381 | |||
906 | static void ftrace_free_rec(struct dyn_ftrace *rec) | 1382 | static void ftrace_free_rec(struct dyn_ftrace *rec) |
907 | { | 1383 | { |
908 | rec->freelist = ftrace_free_records; | 1384 | rec->freelist = ftrace_free_records; |
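With per-ops hashes, dyn_ftrace::flags doubles as a reference count: the top two bits hold FTRACE_FL_ENABLED/FTRACE_FL_FREE and the low 30 bits count how many registered ops want the call-site traced. __ftrace_hash_rec_update() raises and lowers that count, and __ftrace_replace_code() (next hunk) patches the site in whenever the count is non-zero. A small model of the packing, with the constants taken from the ftrace.h hunk above (illustrative, the kernel checks overflow slightly differently):

#include <stdio.h>

#define FL_ENABLED	(1UL << 30)
#define FL_FREE		(1UL << 31)
#define FL_MASK		(0x3UL << 30)		/* flag bits */
#define REF_MAX		((1UL << 30) - 1)	/* 30-bit reference count */

static unsigned long flags;

static int ref_inc(void)
{
	if ((flags & ~FL_MASK) == REF_MAX)
		return -1;			/* would overflow */
	flags++;
	return 0;
}

static int ref_dec(void)
{
	if ((flags & ~FL_MASK) == 0)
		return -1;			/* would underflow */
	flags--;
	return 0;
}

int main(void)
{
	flags |= FL_ENABLED;
	ref_inc();
	ref_inc();
	printf("refs=%lu enabled=%d\n",
	       flags & ~FL_MASK, !!(flags & FL_ENABLED));	/* refs=2 enabled=1 */
	return 0;
}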
@@ -1024,18 +1500,18 @@ __ftrace_replace_code(struct dyn_ftrace *rec, int enable) | |||
1024 | ftrace_addr = (unsigned long)FTRACE_ADDR; | 1500 | ftrace_addr = (unsigned long)FTRACE_ADDR; |
1025 | 1501 | ||
1026 | /* | 1502 | /* |
1027 | * If this record is not to be traced or we want to disable it, | 1503 | * If we are enabling tracing: |
1028 | * then disable it. | ||
1029 | * | 1504 | * |
1030 | * If we want to enable it and filtering is off, then enable it. | 1505 | * If the record has a ref count, then we need to enable it |
1506 | * because someone is using it. | ||
1031 | * | 1507 | * |
1032 | * If we want to enable it and filtering is on, enable it only if | 1508 | * Otherwise we make sure its disabled. |
1033 | * it's filtered | 1509 | * |
1510 | * If we are disabling tracing, then disable all records that | ||
1511 | * are enabled. | ||
1034 | */ | 1512 | */ |
1035 | if (enable && !(rec->flags & FTRACE_FL_NOTRACE)) { | 1513 | if (enable && (rec->flags & ~FTRACE_FL_MASK)) |
1036 | if (!ftrace_filtered || (rec->flags & FTRACE_FL_FILTER)) | 1514 | flag = FTRACE_FL_ENABLED; |
1037 | flag = FTRACE_FL_ENABLED; | ||
1038 | } | ||
1039 | 1515 | ||
1040 | /* If the state of this record hasn't changed, then do nothing */ | 1516 | /* If the state of this record hasn't changed, then do nothing */ |
1041 | if ((rec->flags & FTRACE_FL_ENABLED) == flag) | 1517 | if ((rec->flags & FTRACE_FL_ENABLED) == flag) |
@@ -1147,6 +1623,7 @@ static void ftrace_run_update_code(int command) | |||
1147 | 1623 | ||
1148 | static ftrace_func_t saved_ftrace_func; | 1624 | static ftrace_func_t saved_ftrace_func; |
1149 | static int ftrace_start_up; | 1625 | static int ftrace_start_up; |
1626 | static int global_start_up; | ||
1150 | 1627 | ||
1151 | static void ftrace_startup_enable(int command) | 1628 | static void ftrace_startup_enable(int command) |
1152 | { | 1629 | { |
@@ -1161,19 +1638,36 @@ static void ftrace_startup_enable(int command) | |||
1161 | ftrace_run_update_code(command); | 1638 | ftrace_run_update_code(command); |
1162 | } | 1639 | } |
1163 | 1640 | ||
1164 | static void ftrace_startup(int command) | 1641 | static void ftrace_startup(struct ftrace_ops *ops, int command) |
1165 | { | 1642 | { |
1643 | bool hash_enable = true; | ||
1644 | |||
1166 | if (unlikely(ftrace_disabled)) | 1645 | if (unlikely(ftrace_disabled)) |
1167 | return; | 1646 | return; |
1168 | 1647 | ||
1169 | ftrace_start_up++; | 1648 | ftrace_start_up++; |
1170 | command |= FTRACE_ENABLE_CALLS; | 1649 | command |= FTRACE_ENABLE_CALLS; |
1171 | 1650 | ||
1651 | /* ops marked global share the filter hashes */ | ||
1652 | if (ops->flags & FTRACE_OPS_FL_GLOBAL) { | ||
1653 | ops = &global_ops; | ||
1654 | /* Don't update hash if global is already set */ | ||
1655 | if (global_start_up) | ||
1656 | hash_enable = false; | ||
1657 | global_start_up++; | ||
1658 | } | ||
1659 | |||
1660 | ops->flags |= FTRACE_OPS_FL_ENABLED; | ||
1661 | if (hash_enable) | ||
1662 | ftrace_hash_rec_enable(ops, 1); | ||
1663 | |||
1172 | ftrace_startup_enable(command); | 1664 | ftrace_startup_enable(command); |
1173 | } | 1665 | } |
1174 | 1666 | ||
1175 | static void ftrace_shutdown(int command) | 1667 | static void ftrace_shutdown(struct ftrace_ops *ops, int command) |
1176 | { | 1668 | { |
1669 | bool hash_disable = true; | ||
1670 | |||
1177 | if (unlikely(ftrace_disabled)) | 1671 | if (unlikely(ftrace_disabled)) |
1178 | return; | 1672 | return; |
1179 | 1673 | ||
@@ -1185,6 +1679,23 @@ static void ftrace_shutdown(int command) | |||
1185 | */ | 1679 | */ |
1186 | WARN_ON_ONCE(ftrace_start_up < 0); | 1680 | WARN_ON_ONCE(ftrace_start_up < 0); |
1187 | 1681 | ||
1682 | if (ops->flags & FTRACE_OPS_FL_GLOBAL) { | ||
1683 | ops = &global_ops; | ||
1684 | global_start_up--; | ||
1685 | WARN_ON_ONCE(global_start_up < 0); | ||
1686 | /* Don't update hash if global still has users */ | ||
1687 | if (global_start_up) { | ||
1688 | WARN_ON_ONCE(!ftrace_start_up); | ||
1689 | hash_disable = false; | ||
1690 | } | ||
1691 | } | ||
1692 | |||
1693 | if (hash_disable) | ||
1694 | ftrace_hash_rec_disable(ops, 1); | ||
1695 | |||
1696 | if (ops != &global_ops || !global_start_up) | ||
1697 | ops->flags &= ~FTRACE_OPS_FL_ENABLED; | ||
1698 | |||
1188 | if (!ftrace_start_up) | 1699 | if (!ftrace_start_up) |
1189 | command |= FTRACE_DISABLE_CALLS; | 1700 | command |= FTRACE_DISABLE_CALLS; |
1190 | 1701 | ||
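ftrace_startup()/ftrace_shutdown() now take the ops being started or stopped: ops flagged FTRACE_OPS_FL_GLOBAL are folded into the shared global_ops, and global_start_up ensures the shared hash records are bumped only by the first global user and dropped only by the last, while ftrace_start_up keeps gating the actual enable/disable of the call sites. A toy model of the double counting (hash_rec_enable()/hash_rec_disable() stand in for the record updates; illustrative only):

#include <stdio.h>

static int ftrace_start_up;	/* all users */
static int global_start_up;	/* users of the shared global_ops hashes */

static void hash_rec_enable(void)  { printf("bump records\n"); }
static void hash_rec_disable(void) { printf("drop records\n"); }

static void startup(int global)
{
	ftrace_start_up++;
	if (global && global_start_up++ > 0)
		return;		/* shared hashes already accounted for */
	hash_rec_enable();
}

static void shutdown(int global)
{
	ftrace_start_up--;
	if (global && --global_start_up > 0)
		return;		/* shared hashes still have users */
	hash_rec_disable();
}

int main(void)
{
	startup(1);	/* bump records */
	startup(1);	/* nothing: second global user */
	shutdown(1);	/* nothing: one global user left */
	shutdown(1);	/* drop records */
	return 0;
}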
@@ -1329,6 +1840,7 @@ enum { | |||
1329 | FTRACE_ITER_NOTRACE = (1 << 1), | 1840 | FTRACE_ITER_NOTRACE = (1 << 1), |
1330 | FTRACE_ITER_PRINTALL = (1 << 2), | 1841 | FTRACE_ITER_PRINTALL = (1 << 2), |
1331 | FTRACE_ITER_HASH = (1 << 3), | 1842 | FTRACE_ITER_HASH = (1 << 3), |
1843 | FTRACE_ITER_ENABLED = (1 << 4), | ||
1332 | }; | 1844 | }; |
1333 | 1845 | ||
1334 | #define FTRACE_BUFF_MAX (KSYM_SYMBOL_LEN+4) /* room for wildcards */ | 1846 | #define FTRACE_BUFF_MAX (KSYM_SYMBOL_LEN+4) /* room for wildcards */ |
@@ -1340,6 +1852,8 @@ struct ftrace_iterator { | |||
1340 | struct dyn_ftrace *func; | 1852 | struct dyn_ftrace *func; |
1341 | struct ftrace_func_probe *probe; | 1853 | struct ftrace_func_probe *probe; |
1342 | struct trace_parser parser; | 1854 | struct trace_parser parser; |
1855 | struct ftrace_hash *hash; | ||
1856 | struct ftrace_ops *ops; | ||
1343 | int hidx; | 1857 | int hidx; |
1344 | int idx; | 1858 | int idx; |
1345 | unsigned flags; | 1859 | unsigned flags; |
@@ -1436,6 +1950,7 @@ static void * | |||
1436 | t_next(struct seq_file *m, void *v, loff_t *pos) | 1950 | t_next(struct seq_file *m, void *v, loff_t *pos) |
1437 | { | 1951 | { |
1438 | struct ftrace_iterator *iter = m->private; | 1952 | struct ftrace_iterator *iter = m->private; |
1953 | struct ftrace_ops *ops = &global_ops; | ||
1439 | struct dyn_ftrace *rec = NULL; | 1954 | struct dyn_ftrace *rec = NULL; |
1440 | 1955 | ||
1441 | if (unlikely(ftrace_disabled)) | 1956 | if (unlikely(ftrace_disabled)) |
@@ -1462,10 +1977,14 @@ t_next(struct seq_file *m, void *v, loff_t *pos) | |||
1462 | if ((rec->flags & FTRACE_FL_FREE) || | 1977 | if ((rec->flags & FTRACE_FL_FREE) || |
1463 | 1978 | ||
1464 | ((iter->flags & FTRACE_ITER_FILTER) && | 1979 | ((iter->flags & FTRACE_ITER_FILTER) && |
1465 | !(rec->flags & FTRACE_FL_FILTER)) || | 1980 | !(ftrace_lookup_ip(ops->filter_hash, rec->ip))) || |
1466 | 1981 | ||
1467 | ((iter->flags & FTRACE_ITER_NOTRACE) && | 1982 | ((iter->flags & FTRACE_ITER_NOTRACE) && |
1468 | !(rec->flags & FTRACE_FL_NOTRACE))) { | 1983 | !ftrace_lookup_ip(ops->notrace_hash, rec->ip)) || |
1984 | |||
1985 | ((iter->flags & FTRACE_ITER_ENABLED) && | ||
1986 | !(rec->flags & ~FTRACE_FL_MASK))) { | ||
1987 | |||
1469 | rec = NULL; | 1988 | rec = NULL; |
1470 | goto retry; | 1989 | goto retry; |
1471 | } | 1990 | } |
@@ -1489,6 +2008,7 @@ static void reset_iter_read(struct ftrace_iterator *iter) | |||
1489 | static void *t_start(struct seq_file *m, loff_t *pos) | 2008 | static void *t_start(struct seq_file *m, loff_t *pos) |
1490 | { | 2009 | { |
1491 | struct ftrace_iterator *iter = m->private; | 2010 | struct ftrace_iterator *iter = m->private; |
2011 | struct ftrace_ops *ops = &global_ops; | ||
1492 | void *p = NULL; | 2012 | void *p = NULL; |
1493 | loff_t l; | 2013 | loff_t l; |
1494 | 2014 | ||
@@ -1508,7 +2028,7 @@ static void *t_start(struct seq_file *m, loff_t *pos) | |||
1508 | * off, we can short cut and just print out that all | 2028 | * off, we can short cut and just print out that all |
1509 | * functions are enabled. | 2029 | * functions are enabled. |
1510 | */ | 2030 | */ |
1511 | if (iter->flags & FTRACE_ITER_FILTER && !ftrace_filtered) { | 2031 | if (iter->flags & FTRACE_ITER_FILTER && !ops->filter_hash->count) { |
1512 | if (*pos > 0) | 2032 | if (*pos > 0) |
1513 | return t_hash_start(m, pos); | 2033 | return t_hash_start(m, pos); |
1514 | iter->flags |= FTRACE_ITER_PRINTALL; | 2034 | iter->flags |= FTRACE_ITER_PRINTALL; |
@@ -1566,7 +2086,11 @@ static int t_show(struct seq_file *m, void *v) | |||
1566 | if (!rec) | 2086 | if (!rec) |
1567 | return 0; | 2087 | return 0; |
1568 | 2088 | ||
1569 | seq_printf(m, "%ps\n", (void *)rec->ip); | 2089 | seq_printf(m, "%ps", (void *)rec->ip); |
2090 | if (iter->flags & FTRACE_ITER_ENABLED) | ||
2091 | seq_printf(m, " (%ld)", | ||
2092 | rec->flags & ~FTRACE_FL_MASK); | ||
2093 | seq_printf(m, "\n"); | ||
1570 | 2094 | ||
1571 | return 0; | 2095 | return 0; |
1572 | } | 2096 | } |
@@ -1605,25 +2129,47 @@ ftrace_avail_open(struct inode *inode, struct file *file) | |||
1605 | return ret; | 2129 | return ret; |
1606 | } | 2130 | } |
1607 | 2131 | ||
1608 | static void ftrace_filter_reset(int enable) | 2132 | static int |
2133 | ftrace_enabled_open(struct inode *inode, struct file *file) | ||
1609 | { | 2134 | { |
1610 | struct ftrace_page *pg; | 2135 | struct ftrace_iterator *iter; |
1611 | struct dyn_ftrace *rec; | 2136 | int ret; |
1612 | unsigned long type = enable ? FTRACE_FL_FILTER : FTRACE_FL_NOTRACE; | ||
1613 | 2137 | ||
2138 | if (unlikely(ftrace_disabled)) | ||
2139 | return -ENODEV; | ||
2140 | |||
2141 | iter = kzalloc(sizeof(*iter), GFP_KERNEL); | ||
2142 | if (!iter) | ||
2143 | return -ENOMEM; | ||
2144 | |||
2145 | iter->pg = ftrace_pages_start; | ||
2146 | iter->flags = FTRACE_ITER_ENABLED; | ||
2147 | |||
2148 | ret = seq_open(file, &show_ftrace_seq_ops); | ||
2149 | if (!ret) { | ||
2150 | struct seq_file *m = file->private_data; | ||
2151 | |||
2152 | m->private = iter; | ||
2153 | } else { | ||
2154 | kfree(iter); | ||
2155 | } | ||
2156 | |||
2157 | return ret; | ||
2158 | } | ||
2159 | |||
2160 | static void ftrace_filter_reset(struct ftrace_hash *hash) | ||
2161 | { | ||
1614 | mutex_lock(&ftrace_lock); | 2162 | mutex_lock(&ftrace_lock); |
1615 | if (enable) | 2163 | ftrace_hash_clear(hash); |
1616 | ftrace_filtered = 0; | ||
1617 | do_for_each_ftrace_rec(pg, rec) { | ||
1618 | rec->flags &= ~type; | ||
1619 | } while_for_each_ftrace_rec(); | ||
1620 | mutex_unlock(&ftrace_lock); | 2164 | mutex_unlock(&ftrace_lock); |
1621 | } | 2165 | } |
1622 | 2166 | ||
1623 | static int | 2167 | static int |
1624 | ftrace_regex_open(struct inode *inode, struct file *file, int enable) | 2168 | ftrace_regex_open(struct ftrace_ops *ops, int flag, |
2169 | struct inode *inode, struct file *file) | ||
1625 | { | 2170 | { |
1626 | struct ftrace_iterator *iter; | 2171 | struct ftrace_iterator *iter; |
2172 | struct ftrace_hash *hash; | ||
1627 | int ret = 0; | 2173 | int ret = 0; |
1628 | 2174 | ||
1629 | if (unlikely(ftrace_disabled)) | 2175 | if (unlikely(ftrace_disabled)) |
@@ -1638,21 +2184,42 @@ ftrace_regex_open(struct inode *inode, struct file *file, int enable) | |||
1638 | return -ENOMEM; | 2184 | return -ENOMEM; |
1639 | } | 2185 | } |
1640 | 2186 | ||
2187 | if (flag & FTRACE_ITER_NOTRACE) | ||
2188 | hash = ops->notrace_hash; | ||
2189 | else | ||
2190 | hash = ops->filter_hash; | ||
2191 | |||
2192 | iter->ops = ops; | ||
2193 | iter->flags = flag; | ||
2194 | |||
2195 | if (file->f_mode & FMODE_WRITE) { | ||
2196 | mutex_lock(&ftrace_lock); | ||
2197 | iter->hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, hash); | ||
2198 | mutex_unlock(&ftrace_lock); | ||
2199 | |||
2200 | if (!iter->hash) { | ||
2201 | trace_parser_put(&iter->parser); | ||
2202 | kfree(iter); | ||
2203 | return -ENOMEM; | ||
2204 | } | ||
2205 | } | ||
2206 | |||
1641 | mutex_lock(&ftrace_regex_lock); | 2207 | mutex_lock(&ftrace_regex_lock); |
2208 | |||
1642 | if ((file->f_mode & FMODE_WRITE) && | 2209 | if ((file->f_mode & FMODE_WRITE) && |
1643 | (file->f_flags & O_TRUNC)) | 2210 | (file->f_flags & O_TRUNC)) |
1644 | ftrace_filter_reset(enable); | 2211 | ftrace_filter_reset(iter->hash); |
1645 | 2212 | ||
1646 | if (file->f_mode & FMODE_READ) { | 2213 | if (file->f_mode & FMODE_READ) { |
1647 | iter->pg = ftrace_pages_start; | 2214 | iter->pg = ftrace_pages_start; |
1648 | iter->flags = enable ? FTRACE_ITER_FILTER : | ||
1649 | FTRACE_ITER_NOTRACE; | ||
1650 | 2215 | ||
1651 | ret = seq_open(file, &show_ftrace_seq_ops); | 2216 | ret = seq_open(file, &show_ftrace_seq_ops); |
1652 | if (!ret) { | 2217 | if (!ret) { |
1653 | struct seq_file *m = file->private_data; | 2218 | struct seq_file *m = file->private_data; |
1654 | m->private = iter; | 2219 | m->private = iter; |
1655 | } else { | 2220 | } else { |
2221 | /* Failed */ | ||
2222 | free_ftrace_hash(iter->hash); | ||
1656 | trace_parser_put(&iter->parser); | 2223 | trace_parser_put(&iter->parser); |
1657 | kfree(iter); | 2224 | kfree(iter); |
1658 | } | 2225 | } |
@@ -1666,13 +2233,15 @@ ftrace_regex_open(struct inode *inode, struct file *file, int enable) | |||
1666 | static int | 2233 | static int |
1667 | ftrace_filter_open(struct inode *inode, struct file *file) | 2234 | ftrace_filter_open(struct inode *inode, struct file *file) |
1668 | { | 2235 | { |
1669 | return ftrace_regex_open(inode, file, 1); | 2236 | return ftrace_regex_open(&global_ops, FTRACE_ITER_FILTER, |
2237 | inode, file); | ||
1670 | } | 2238 | } |
1671 | 2239 | ||
1672 | static int | 2240 | static int |
1673 | ftrace_notrace_open(struct inode *inode, struct file *file) | 2241 | ftrace_notrace_open(struct inode *inode, struct file *file) |
1674 | { | 2242 | { |
1675 | return ftrace_regex_open(inode, file, 0); | 2243 | return ftrace_regex_open(&global_ops, FTRACE_ITER_NOTRACE, |
2244 | inode, file); | ||
1676 | } | 2245 | } |
1677 | 2246 | ||
1678 | static loff_t | 2247 | static loff_t |
@@ -1716,13 +2285,27 @@ static int ftrace_match(char *str, char *regex, int len, int type) | |||
1716 | return matched; | 2285 | return matched; |
1717 | } | 2286 | } |
1718 | 2287 | ||
1719 | static void | 2288 | static int |
1720 | update_record(struct dyn_ftrace *rec, unsigned long flag, int not) | 2289 | enter_record(struct ftrace_hash *hash, struct dyn_ftrace *rec, int not) |
1721 | { | 2290 | { |
1722 | if (not) | 2291 | struct ftrace_func_entry *entry; |
1723 | rec->flags &= ~flag; | 2292 | int ret = 0; |
1724 | else | 2293 | |
1725 | rec->flags |= flag; | 2294 | entry = ftrace_lookup_ip(hash, rec->ip); |
2295 | if (not) { | ||
2296 | /* Do nothing if it doesn't exist */ | ||
2297 | if (!entry) | ||
2298 | return 0; | ||
2299 | |||
2300 | free_hash_entry(hash, entry); | ||
2301 | } else { | ||
2302 | /* Do nothing if it exists */ | ||
2303 | if (entry) | ||
2304 | return 0; | ||
2305 | |||
2306 | ret = add_hash_entry(hash, rec->ip); | ||
2307 | } | ||
2308 | return ret; | ||
1726 | } | 2309 | } |
1727 | 2310 | ||
1728 | static int | 2311 | static int |
@@ -1747,23 +2330,23 @@ ftrace_match_record(struct dyn_ftrace *rec, char *mod, | |||
1747 | return ftrace_match(str, regex, len, type); | 2330 | return ftrace_match(str, regex, len, type); |
1748 | } | 2331 | } |
1749 | 2332 | ||
1750 | static int match_records(char *buff, int len, char *mod, int enable, int not) | 2333 | static int |
2334 | match_records(struct ftrace_hash *hash, char *buff, | ||
2335 | int len, char *mod, int not) | ||
1751 | { | 2336 | { |
1752 | unsigned search_len = 0; | 2337 | unsigned search_len = 0; |
1753 | struct ftrace_page *pg; | 2338 | struct ftrace_page *pg; |
1754 | struct dyn_ftrace *rec; | 2339 | struct dyn_ftrace *rec; |
1755 | int type = MATCH_FULL; | 2340 | int type = MATCH_FULL; |
1756 | char *search = buff; | 2341 | char *search = buff; |
1757 | unsigned long flag; | ||
1758 | int found = 0; | 2342 | int found = 0; |
2343 | int ret; | ||
1759 | 2344 | ||
1760 | if (len) { | 2345 | if (len) { |
1761 | type = filter_parse_regex(buff, len, &search, ¬); | 2346 | type = filter_parse_regex(buff, len, &search, ¬); |
1762 | search_len = strlen(search); | 2347 | search_len = strlen(search); |
1763 | } | 2348 | } |
1764 | 2349 | ||
1765 | flag = enable ? FTRACE_FL_FILTER : FTRACE_FL_NOTRACE; | ||
1766 | |||
1767 | mutex_lock(&ftrace_lock); | 2350 | mutex_lock(&ftrace_lock); |
1768 | 2351 | ||
1769 | if (unlikely(ftrace_disabled)) | 2352 | if (unlikely(ftrace_disabled)) |
@@ -1772,16 +2355,13 @@ static int match_records(char *buff, int len, char *mod, int enable, int not) | |||
1772 | do_for_each_ftrace_rec(pg, rec) { | 2355 | do_for_each_ftrace_rec(pg, rec) { |
1773 | 2356 | ||
1774 | if (ftrace_match_record(rec, mod, search, search_len, type)) { | 2357 | if (ftrace_match_record(rec, mod, search, search_len, type)) { |
1775 | update_record(rec, flag, not); | 2358 | ret = enter_record(hash, rec, not); |
2359 | if (ret < 0) { | ||
2360 | found = ret; | ||
2361 | goto out_unlock; | ||
2362 | } | ||
1776 | found = 1; | 2363 | found = 1; |
1777 | } | 2364 | } |
1778 | /* | ||
1779 | * Only enable filtering if we have a function that | ||
1780 | * is filtered on. | ||
1781 | */ | ||
1782 | if (enable && (rec->flags & FTRACE_FL_FILTER)) | ||
1783 | ftrace_filtered = 1; | ||
1784 | |||
1785 | } while_for_each_ftrace_rec(); | 2365 | } while_for_each_ftrace_rec(); |
1786 | out_unlock: | 2366 | out_unlock: |
1787 | mutex_unlock(&ftrace_lock); | 2367 | mutex_unlock(&ftrace_lock); |
@@ -1790,12 +2370,13 @@ static int match_records(char *buff, int len, char *mod, int enable, int not) | |||
1790 | } | 2370 | } |
1791 | 2371 | ||
1792 | static int | 2372 | static int |
1793 | ftrace_match_records(char *buff, int len, int enable) | 2373 | ftrace_match_records(struct ftrace_hash *hash, char *buff, int len) |
1794 | { | 2374 | { |
1795 | return match_records(buff, len, NULL, enable, 0); | 2375 | return match_records(hash, buff, len, NULL, 0); |
1796 | } | 2376 | } |
1797 | 2377 | ||
1798 | static int ftrace_match_module_records(char *buff, char *mod, int enable) | 2378 | static int |
2379 | ftrace_match_module_records(struct ftrace_hash *hash, char *buff, char *mod) | ||
1799 | { | 2380 | { |
1800 | int not = 0; | 2381 | int not = 0; |
1801 | 2382 | ||
@@ -1809,7 +2390,7 @@ static int ftrace_match_module_records(char *buff, char *mod, int enable) | |||
1809 | not = 1; | 2390 | not = 1; |
1810 | } | 2391 | } |
1811 | 2392 | ||
1812 | return match_records(buff, strlen(buff), mod, enable, not); | 2393 | return match_records(hash, buff, strlen(buff), mod, not); |
1813 | } | 2394 | } |
1814 | 2395 | ||
1815 | /* | 2396 | /* |
@@ -1820,7 +2401,10 @@ static int ftrace_match_module_records(char *buff, char *mod, int enable) | |||
1820 | static int | 2401 | static int |
1821 | ftrace_mod_callback(char *func, char *cmd, char *param, int enable) | 2402 | ftrace_mod_callback(char *func, char *cmd, char *param, int enable) |
1822 | { | 2403 | { |
2404 | struct ftrace_ops *ops = &global_ops; | ||
2405 | struct ftrace_hash *hash; | ||
1823 | char *mod; | 2406 | char *mod; |
2407 | int ret = -EINVAL; | ||
1824 | 2408 | ||
1825 | /* | 2409 | /* |
1826 | * cmd == 'mod' because we only registered this func | 2410 | * cmd == 'mod' because we only registered this func |
@@ -1832,15 +2416,24 @@ ftrace_mod_callback(char *func, char *cmd, char *param, int enable) | |||
1832 | 2416 | ||
1833 | /* we must have a module name */ | 2417 | /* we must have a module name */ |
1834 | if (!param) | 2418 | if (!param) |
1835 | return -EINVAL; | 2419 | return ret; |
1836 | 2420 | ||
1837 | mod = strsep(¶m, ":"); | 2421 | mod = strsep(¶m, ":"); |
1838 | if (!strlen(mod)) | 2422 | if (!strlen(mod)) |
1839 | return -EINVAL; | 2423 | return ret; |
1840 | 2424 | ||
1841 | if (ftrace_match_module_records(func, mod, enable)) | 2425 | if (enable) |
1842 | return 0; | 2426 | hash = ops->filter_hash; |
1843 | return -EINVAL; | 2427 | else |
2428 | hash = ops->notrace_hash; | ||
2429 | |||
2430 | ret = ftrace_match_module_records(hash, func, mod); | ||
2431 | if (!ret) | ||
2432 | ret = -EINVAL; | ||
2433 | if (ret < 0) | ||
2434 | return ret; | ||
2435 | |||
2436 | return 0; | ||
1844 | } | 2437 | } |
1845 | 2438 | ||
1846 | static struct ftrace_func_command ftrace_mod_cmd = { | 2439 | static struct ftrace_func_command ftrace_mod_cmd = { |
@@ -1891,6 +2484,7 @@ static int ftrace_probe_registered; | |||
1891 | 2484 | ||
1892 | static void __enable_ftrace_function_probe(void) | 2485 | static void __enable_ftrace_function_probe(void) |
1893 | { | 2486 | { |
2487 | int ret; | ||
1894 | int i; | 2488 | int i; |
1895 | 2489 | ||
1896 | if (ftrace_probe_registered) | 2490 | if (ftrace_probe_registered) |
@@ -1905,13 +2499,16 @@ static void __enable_ftrace_function_probe(void) | |||
1905 | if (i == FTRACE_FUNC_HASHSIZE) | 2499 | if (i == FTRACE_FUNC_HASHSIZE) |
1906 | return; | 2500 | return; |
1907 | 2501 | ||
1908 | __register_ftrace_function(&trace_probe_ops); | 2502 | ret = __register_ftrace_function(&trace_probe_ops); |
1909 | ftrace_startup(0); | 2503 | if (!ret) |
2504 | ftrace_startup(&trace_probe_ops, 0); | ||
2505 | |||
1910 | ftrace_probe_registered = 1; | 2506 | ftrace_probe_registered = 1; |
1911 | } | 2507 | } |
1912 | 2508 | ||
1913 | static void __disable_ftrace_function_probe(void) | 2509 | static void __disable_ftrace_function_probe(void) |
1914 | { | 2510 | { |
2511 | int ret; | ||
1915 | int i; | 2512 | int i; |
1916 | 2513 | ||
1917 | if (!ftrace_probe_registered) | 2514 | if (!ftrace_probe_registered) |
@@ -1924,8 +2521,10 @@ static void __disable_ftrace_function_probe(void) | |||
1924 | } | 2521 | } |
1925 | 2522 | ||
1926 | /* no more funcs left */ | 2523 | /* no more funcs left */ |
1927 | __unregister_ftrace_function(&trace_probe_ops); | 2524 | ret = __unregister_ftrace_function(&trace_probe_ops); |
1928 | ftrace_shutdown(0); | 2525 | if (!ret) |
2526 | ftrace_shutdown(&trace_probe_ops, 0); | ||
2527 | |||
1929 | ftrace_probe_registered = 0; | 2528 | ftrace_probe_registered = 0; |
1930 | } | 2529 | } |
1931 | 2530 | ||
@@ -2128,18 +2727,22 @@ int unregister_ftrace_command(struct ftrace_func_command *cmd) | |||
2128 | return ret; | 2727 | return ret; |
2129 | } | 2728 | } |
2130 | 2729 | ||
2131 | static int ftrace_process_regex(char *buff, int len, int enable) | 2730 | static int ftrace_process_regex(struct ftrace_hash *hash, |
2731 | char *buff, int len, int enable) | ||
2132 | { | 2732 | { |
2133 | char *func, *command, *next = buff; | 2733 | char *func, *command, *next = buff; |
2134 | struct ftrace_func_command *p; | 2734 | struct ftrace_func_command *p; |
2135 | int ret = -EINVAL; | 2735 | int ret; |
2136 | 2736 | ||
2137 | func = strsep(&next, ":"); | 2737 | func = strsep(&next, ":"); |
2138 | 2738 | ||
2139 | if (!next) { | 2739 | if (!next) { |
2140 | if (ftrace_match_records(func, len, enable)) | 2740 | ret = ftrace_match_records(hash, func, len); |
2141 | return 0; | 2741 | if (!ret) |
2142 | return ret; | 2742 | ret = -EINVAL; |
2743 | if (ret < 0) | ||
2744 | return ret; | ||
2745 | return 0; | ||
2143 | } | 2746 | } |
2144 | 2747 | ||
2145 | /* command found */ | 2748 | /* command found */ |
@@ -2187,7 +2790,7 @@ ftrace_regex_write(struct file *file, const char __user *ubuf, | |||
2187 | 2790 | ||
2188 | if (read >= 0 && trace_parser_loaded(parser) && | 2791 | if (read >= 0 && trace_parser_loaded(parser) && |
2189 | !trace_parser_cont(parser)) { | 2792 | !trace_parser_cont(parser)) { |
2190 | ret = ftrace_process_regex(parser->buffer, | 2793 | ret = ftrace_process_regex(iter->hash, parser->buffer, |
2191 | parser->idx, enable); | 2794 | parser->idx, enable); |
2192 | trace_parser_clear(parser); | 2795 | trace_parser_clear(parser); |
2193 | if (ret) | 2796 | if (ret) |
@@ -2215,22 +2818,49 @@ ftrace_notrace_write(struct file *file, const char __user *ubuf, | |||
2215 | return ftrace_regex_write(file, ubuf, cnt, ppos, 0); | 2818 | return ftrace_regex_write(file, ubuf, cnt, ppos, 0); |
2216 | } | 2819 | } |
2217 | 2820 | ||
2218 | static void | 2821 | static int |
2219 | ftrace_set_regex(unsigned char *buf, int len, int reset, int enable) | 2822 | ftrace_set_regex(struct ftrace_ops *ops, unsigned char *buf, int len, |
2823 | int reset, int enable) | ||
2220 | { | 2824 | { |
2825 | struct ftrace_hash **orig_hash; | ||
2826 | struct ftrace_hash *hash; | ||
2827 | int ret; | ||
2828 | |||
2829 | /* All global ops uses the global ops filters */ | ||
2830 | if (ops->flags & FTRACE_OPS_FL_GLOBAL) | ||
2831 | ops = &global_ops; | ||
2832 | |||
2221 | if (unlikely(ftrace_disabled)) | 2833 | if (unlikely(ftrace_disabled)) |
2222 | return; | 2834 | return -ENODEV; |
2835 | |||
2836 | if (enable) | ||
2837 | orig_hash = &ops->filter_hash; | ||
2838 | else | ||
2839 | orig_hash = &ops->notrace_hash; | ||
2840 | |||
2841 | hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, *orig_hash); | ||
2842 | if (!hash) | ||
2843 | return -ENOMEM; | ||
2223 | 2844 | ||
2224 | mutex_lock(&ftrace_regex_lock); | 2845 | mutex_lock(&ftrace_regex_lock); |
2225 | if (reset) | 2846 | if (reset) |
2226 | ftrace_filter_reset(enable); | 2847 | ftrace_filter_reset(hash); |
2227 | if (buf) | 2848 | if (buf) |
2228 | ftrace_match_records(buf, len, enable); | 2849 | ftrace_match_records(hash, buf, len); |
2850 | |||
2851 | mutex_lock(&ftrace_lock); | ||
2852 | ret = ftrace_hash_move(orig_hash, hash); | ||
2853 | mutex_unlock(&ftrace_lock); | ||
2854 | |||
2229 | mutex_unlock(&ftrace_regex_lock); | 2855 | mutex_unlock(&ftrace_regex_lock); |
2856 | |||
2857 | free_ftrace_hash(hash); | ||
2858 | return ret; | ||
2230 | } | 2859 | } |
2231 | 2860 | ||
2232 | /** | 2861 | /** |
2233 | * ftrace_set_filter - set a function to filter on in ftrace | 2862 | * ftrace_set_filter - set a function to filter on in ftrace |
2863 | * @ops - the ops to set the filter with | ||
2234 | * @buf - the string that holds the function filter text. | 2864 | * @buf - the string that holds the function filter text. |
2235 | * @len - the length of the string. | 2865 | * @len - the length of the string. |
2236 | * @reset - non zero to reset all filters before applying this filter. | 2866 | * @reset - non zero to reset all filters before applying this filter. |
@@ -2238,13 +2868,16 @@ ftrace_set_regex(unsigned char *buf, int len, int reset, int enable) | |||
2238 | * Filters denote which functions should be enabled when tracing is enabled. | 2868 | * Filters denote which functions should be enabled when tracing is enabled. |
2239 | * If @buf is NULL and reset is set, all functions will be enabled for tracing. | 2869 | * If @buf is NULL and reset is set, all functions will be enabled for tracing. |
2240 | */ | 2870 | */ |
2241 | void ftrace_set_filter(unsigned char *buf, int len, int reset) | 2871 | void ftrace_set_filter(struct ftrace_ops *ops, unsigned char *buf, |
2872 | int len, int reset) | ||
2242 | { | 2873 | { |
2243 | ftrace_set_regex(buf, len, reset, 1); | 2874 | ftrace_set_regex(ops, buf, len, reset, 1); |
2244 | } | 2875 | } |
2876 | EXPORT_SYMBOL_GPL(ftrace_set_filter); | ||
2245 | 2877 | ||
2246 | /** | 2878 | /** |
2247 | * ftrace_set_notrace - set a function to not trace in ftrace | 2879 | * ftrace_set_notrace - set a function to not trace in ftrace |
2880 | * @ops - the ops to set the notrace filter with | ||
2248 | * @buf - the string that holds the function notrace text. | 2881 | * @buf - the string that holds the function notrace text. |
2249 | * @len - the length of the string. | 2882 | * @len - the length of the string. |
2250 | * @reset - non zero to reset all filters before applying this filter. | 2883 | * @reset - non zero to reset all filters before applying this filter. |
@@ -2253,10 +2886,44 @@ void ftrace_set_filter(unsigned char *buf, int len, int reset) | |||
2253 | * is enabled. If @buf is NULL and reset is set, all functions will be enabled | 2886 | * is enabled. If @buf is NULL and reset is set, all functions will be enabled |
2254 | * for tracing. | 2887 | * for tracing. |
2255 | */ | 2888 | */ |
2256 | void ftrace_set_notrace(unsigned char *buf, int len, int reset) | 2889 | void ftrace_set_notrace(struct ftrace_ops *ops, unsigned char *buf, |
2890 | int len, int reset) | ||
2257 | { | 2891 | { |
2258 | ftrace_set_regex(buf, len, reset, 0); | 2892 | ftrace_set_regex(ops, buf, len, reset, 0); |
2259 | } | 2893 | } |
2894 | EXPORT_SYMBOL_GPL(ftrace_set_notrace); | ||
2895 | /** | ||
2896 | * ftrace_set_global_filter - set a function to filter on with global tracers | ||
2897 | * | ||
2898 | * @buf - the string that holds the function filter text. | ||
2899 | * @len - the length of the string. | ||
2900 | * @reset - non zero to reset all filters before applying this filter. | ||
2901 | * | ||
2902 | * Filters denote which functions should be enabled when tracing is enabled. | ||
2903 | * If @buf is NULL and reset is set, all functions will be enabled for tracing. | ||
2904 | */ | ||
2905 | void ftrace_set_global_filter(unsigned char *buf, int len, int reset) | ||
2906 | { | ||
2907 | ftrace_set_regex(&global_ops, buf, len, reset, 1); | ||
2908 | } | ||
2909 | EXPORT_SYMBOL_GPL(ftrace_set_global_filter); | ||
2910 | |||
2911 | /** | ||
2912 | * ftrace_set_global_notrace - set a function to not trace with global tracers | ||
2913 | * | ||
2914 | * @buf - the string that holds the function notrace text. | ||
2915 | * @len - the length of the string. | ||
2916 | * @reset - non zero to reset all filters before applying this filter. | ||
2917 | * | ||
2918 | * Notrace Filters denote which functions should not be enabled when tracing | ||
2919 | * is enabled. If @buf is NULL and reset is set, all functions will be enabled | ||
2920 | * for tracing. | ||
2921 | */ | ||
2922 | void ftrace_set_global_notrace(unsigned char *buf, int len, int reset) | ||
2923 | { | ||
2924 | ftrace_set_regex(&global_ops, buf, len, reset, 0); | ||
2925 | } | ||
2926 | EXPORT_SYMBOL_GPL(ftrace_set_global_notrace); | ||
2260 | 2927 | ||
2261 | /* | 2928 | /* |
2262 | * command line interface to allow users to set filters on boot up. | 2929 | * command line interface to allow users to set filters on boot up. |
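For context, the per-ops interface added above is used roughly as in the sketch below (the updated selftest later in this patch exercises the same calls); my_callback, my_ops and the "kmalloc*" glob are illustrative names, not part of this change:

#include <linux/ftrace.h>
#include <linux/module.h>
#include <linux/string.h>

/* Hedged sketch: one caller with its own, privately filtered callback. */
static void my_callback(unsigned long ip, unsigned long parent_ip)
{
	/* runs only for functions that match my_ops's filter_hash */
}

static struct ftrace_ops my_ops = {
	.func = my_callback,
};

static int __init my_init(void)
{
	char *pat = "kmalloc*";		/* illustrative glob */

	/* reset (last arg = 1) any previous filter, then install the glob */
	ftrace_set_filter(&my_ops, pat, strlen(pat), 1);
	return register_ftrace_function(&my_ops);
}

static void __exit my_exit(void)
{
	unregister_ftrace_function(&my_ops);
}

module_init(my_init);
module_exit(my_exit);
MODULE_LICENSE("GPL");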
@@ -2307,22 +2974,23 @@ static void __init set_ftrace_early_graph(char *buf) | |||
2307 | } | 2974 | } |
2308 | #endif /* CONFIG_FUNCTION_GRAPH_TRACER */ | 2975 | #endif /* CONFIG_FUNCTION_GRAPH_TRACER */ |
2309 | 2976 | ||
2310 | static void __init set_ftrace_early_filter(char *buf, int enable) | 2977 | static void __init |
2978 | set_ftrace_early_filter(struct ftrace_ops *ops, char *buf, int enable) | ||
2311 | { | 2979 | { |
2312 | char *func; | 2980 | char *func; |
2313 | 2981 | ||
2314 | while (buf) { | 2982 | while (buf) { |
2315 | func = strsep(&buf, ","); | 2983 | func = strsep(&buf, ","); |
2316 | ftrace_set_regex(func, strlen(func), 0, enable); | 2984 | ftrace_set_regex(ops, func, strlen(func), 0, enable); |
2317 | } | 2985 | } |
2318 | } | 2986 | } |
2319 | 2987 | ||
2320 | static void __init set_ftrace_early_filters(void) | 2988 | static void __init set_ftrace_early_filters(void) |
2321 | { | 2989 | { |
2322 | if (ftrace_filter_buf[0]) | 2990 | if (ftrace_filter_buf[0]) |
2323 | set_ftrace_early_filter(ftrace_filter_buf, 1); | 2991 | set_ftrace_early_filter(&global_ops, ftrace_filter_buf, 1); |
2324 | if (ftrace_notrace_buf[0]) | 2992 | if (ftrace_notrace_buf[0]) |
2325 | set_ftrace_early_filter(ftrace_notrace_buf, 0); | 2993 | set_ftrace_early_filter(&global_ops, ftrace_notrace_buf, 0); |
2326 | #ifdef CONFIG_FUNCTION_GRAPH_TRACER | 2994 | #ifdef CONFIG_FUNCTION_GRAPH_TRACER |
2327 | if (ftrace_graph_buf[0]) | 2995 | if (ftrace_graph_buf[0]) |
2328 | set_ftrace_early_graph(ftrace_graph_buf); | 2996 | set_ftrace_early_graph(ftrace_graph_buf); |
@@ -2330,11 +2998,14 @@ static void __init set_ftrace_early_filters(void) | |||
2330 | } | 2998 | } |
2331 | 2999 | ||
2332 | static int | 3000 | static int |
2333 | ftrace_regex_release(struct inode *inode, struct file *file, int enable) | 3001 | ftrace_regex_release(struct inode *inode, struct file *file) |
2334 | { | 3002 | { |
2335 | struct seq_file *m = (struct seq_file *)file->private_data; | 3003 | struct seq_file *m = (struct seq_file *)file->private_data; |
2336 | struct ftrace_iterator *iter; | 3004 | struct ftrace_iterator *iter; |
3005 | struct ftrace_hash **orig_hash; | ||
2337 | struct trace_parser *parser; | 3006 | struct trace_parser *parser; |
3007 | int filter_hash; | ||
3008 | int ret; | ||
2338 | 3009 | ||
2339 | mutex_lock(&ftrace_regex_lock); | 3010 | mutex_lock(&ftrace_regex_lock); |
2340 | if (file->f_mode & FMODE_READ) { | 3011 | if (file->f_mode & FMODE_READ) { |
@@ -2347,35 +3018,41 @@ ftrace_regex_release(struct inode *inode, struct file *file, int enable) | |||
2347 | parser = &iter->parser; | 3018 | parser = &iter->parser; |
2348 | if (trace_parser_loaded(parser)) { | 3019 | if (trace_parser_loaded(parser)) { |
2349 | parser->buffer[parser->idx] = 0; | 3020 | parser->buffer[parser->idx] = 0; |
2350 | ftrace_match_records(parser->buffer, parser->idx, enable); | 3021 | ftrace_match_records(iter->hash, parser->buffer, parser->idx); |
2351 | } | 3022 | } |
2352 | 3023 | ||
2353 | trace_parser_put(parser); | 3024 | trace_parser_put(parser); |
2354 | kfree(iter); | ||
2355 | 3025 | ||
2356 | if (file->f_mode & FMODE_WRITE) { | 3026 | if (file->f_mode & FMODE_WRITE) { |
3027 | filter_hash = !!(iter->flags & FTRACE_ITER_FILTER); | ||
3028 | |||
3029 | if (filter_hash) | ||
3030 | orig_hash = &iter->ops->filter_hash; | ||
3031 | else | ||
3032 | orig_hash = &iter->ops->notrace_hash; | ||
3033 | |||
2357 | mutex_lock(&ftrace_lock); | 3034 | mutex_lock(&ftrace_lock); |
2358 | if (ftrace_start_up && ftrace_enabled) | 3035 | /* |
2359 | ftrace_run_update_code(FTRACE_ENABLE_CALLS); | 3036 | * Remove the current set, update the hash and add |
3037 | * them back. | ||
3038 | */ | ||
3039 | ftrace_hash_rec_disable(iter->ops, filter_hash); | ||
3040 | ret = ftrace_hash_move(orig_hash, iter->hash); | ||
3041 | if (!ret) { | ||
3042 | ftrace_hash_rec_enable(iter->ops, filter_hash); | ||
3043 | if (iter->ops->flags & FTRACE_OPS_FL_ENABLED | ||
3044 | && ftrace_enabled) | ||
3045 | ftrace_run_update_code(FTRACE_ENABLE_CALLS); | ||
3046 | } | ||
2360 | mutex_unlock(&ftrace_lock); | 3047 | mutex_unlock(&ftrace_lock); |
2361 | } | 3048 | } |
3049 | free_ftrace_hash(iter->hash); | ||
3050 | kfree(iter); | ||
2362 | 3051 | ||
2363 | mutex_unlock(&ftrace_regex_lock); | 3052 | mutex_unlock(&ftrace_regex_lock); |
2364 | return 0; | 3053 | return 0; |
2365 | } | 3054 | } |
2366 | 3055 | ||
2367 | static int | ||
2368 | ftrace_filter_release(struct inode *inode, struct file *file) | ||
2369 | { | ||
2370 | return ftrace_regex_release(inode, file, 1); | ||
2371 | } | ||
2372 | |||
2373 | static int | ||
2374 | ftrace_notrace_release(struct inode *inode, struct file *file) | ||
2375 | { | ||
2376 | return ftrace_regex_release(inode, file, 0); | ||
2377 | } | ||
2378 | |||
2379 | static const struct file_operations ftrace_avail_fops = { | 3056 | static const struct file_operations ftrace_avail_fops = { |
2380 | .open = ftrace_avail_open, | 3057 | .open = ftrace_avail_open, |
2381 | .read = seq_read, | 3058 | .read = seq_read, |
@@ -2383,12 +3060,19 @@ static const struct file_operations ftrace_avail_fops = { | |||
2383 | .release = seq_release_private, | 3060 | .release = seq_release_private, |
2384 | }; | 3061 | }; |
2385 | 3062 | ||
3063 | static const struct file_operations ftrace_enabled_fops = { | ||
3064 | .open = ftrace_enabled_open, | ||
3065 | .read = seq_read, | ||
3066 | .llseek = seq_lseek, | ||
3067 | .release = seq_release_private, | ||
3068 | }; | ||
3069 | |||
2386 | static const struct file_operations ftrace_filter_fops = { | 3070 | static const struct file_operations ftrace_filter_fops = { |
2387 | .open = ftrace_filter_open, | 3071 | .open = ftrace_filter_open, |
2388 | .read = seq_read, | 3072 | .read = seq_read, |
2389 | .write = ftrace_filter_write, | 3073 | .write = ftrace_filter_write, |
2390 | .llseek = ftrace_regex_lseek, | 3074 | .llseek = ftrace_regex_lseek, |
2391 | .release = ftrace_filter_release, | 3075 | .release = ftrace_regex_release, |
2392 | }; | 3076 | }; |
2393 | 3077 | ||
2394 | static const struct file_operations ftrace_notrace_fops = { | 3078 | static const struct file_operations ftrace_notrace_fops = { |
@@ -2396,7 +3080,7 @@ static const struct file_operations ftrace_notrace_fops = { | |||
2396 | .read = seq_read, | 3080 | .read = seq_read, |
2397 | .write = ftrace_notrace_write, | 3081 | .write = ftrace_notrace_write, |
2398 | .llseek = ftrace_regex_lseek, | 3082 | .llseek = ftrace_regex_lseek, |
2399 | .release = ftrace_notrace_release, | 3083 | .release = ftrace_regex_release, |
2400 | }; | 3084 | }; |
2401 | 3085 | ||
2402 | #ifdef CONFIG_FUNCTION_GRAPH_TRACER | 3086 | #ifdef CONFIG_FUNCTION_GRAPH_TRACER |
@@ -2614,6 +3298,9 @@ static __init int ftrace_init_dyn_debugfs(struct dentry *d_tracer) | |||
2614 | trace_create_file("available_filter_functions", 0444, | 3298 | trace_create_file("available_filter_functions", 0444, |
2615 | d_tracer, NULL, &ftrace_avail_fops); | 3299 | d_tracer, NULL, &ftrace_avail_fops); |
2616 | 3300 | ||
3301 | trace_create_file("enabled_functions", 0444, | ||
3302 | d_tracer, NULL, &ftrace_enabled_fops); | ||
3303 | |||
2617 | trace_create_file("set_ftrace_filter", 0644, d_tracer, | 3304 | trace_create_file("set_ftrace_filter", 0644, d_tracer, |
2618 | NULL, &ftrace_filter_fops); | 3305 | NULL, &ftrace_filter_fops); |
2619 | 3306 | ||
@@ -2765,6 +3452,10 @@ void __init ftrace_init(void) | |||
2765 | 3452 | ||
2766 | #else | 3453 | #else |
2767 | 3454 | ||
3455 | static struct ftrace_ops global_ops = { | ||
3456 | .func = ftrace_stub, | ||
3457 | }; | ||
3458 | |||
2768 | static int __init ftrace_nodyn_init(void) | 3459 | static int __init ftrace_nodyn_init(void) |
2769 | { | 3460 | { |
2770 | ftrace_enabled = 1; | 3461 | ftrace_enabled = 1; |
@@ -2775,12 +3466,38 @@ device_initcall(ftrace_nodyn_init); | |||
2775 | static inline int ftrace_init_dyn_debugfs(struct dentry *d_tracer) { return 0; } | 3466 | static inline int ftrace_init_dyn_debugfs(struct dentry *d_tracer) { return 0; } |
2776 | static inline void ftrace_startup_enable(int command) { } | 3467 | static inline void ftrace_startup_enable(int command) { } |
2777 | /* Keep as macros so we do not need to define the commands */ | 3468 | /* Keep as macros so we do not need to define the commands */ |
2778 | # define ftrace_startup(command) do { } while (0) | 3469 | # define ftrace_startup(ops, command) do { } while (0) |
2779 | # define ftrace_shutdown(command) do { } while (0) | 3470 | # define ftrace_shutdown(ops, command) do { } while (0) |
2780 | # define ftrace_startup_sysctl() do { } while (0) | 3471 | # define ftrace_startup_sysctl() do { } while (0) |
2781 | # define ftrace_shutdown_sysctl() do { } while (0) | 3472 | # define ftrace_shutdown_sysctl() do { } while (0) |
3473 | |||
3474 | static inline int | ||
3475 | ftrace_ops_test(struct ftrace_ops *ops, unsigned long ip) | ||
3476 | { | ||
3477 | return 1; | ||
3478 | } | ||
3479 | |||
2782 | #endif /* CONFIG_DYNAMIC_FTRACE */ | 3480 | #endif /* CONFIG_DYNAMIC_FTRACE */ |
2783 | 3481 | ||
3482 | static void | ||
3483 | ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip) | ||
3484 | { | ||
3485 | struct ftrace_ops *op; | ||
3486 | |||
3487 | /* | ||
3488 | * Some of the ops may be dynamically allocated, | ||
3489 | * they must be freed after a synchronize_sched(). | ||
3490 | */ | ||
3491 | preempt_disable_notrace(); | ||
3492 | op = rcu_dereference_raw(ftrace_ops_list); | ||
3493 | while (op != &ftrace_list_end) { | ||
3494 | if (ftrace_ops_test(op, ip)) | ||
3495 | op->func(ip, parent_ip); | ||
3496 | op = rcu_dereference_raw(op->next); | ||
3497 | } | ||
3498 | preempt_enable_notrace(); | ||
3499 | } | ||
3500 | |||
2784 | static void clear_ftrace_swapper(void) | 3501 | static void clear_ftrace_swapper(void) |
2785 | { | 3502 | { |
2786 | struct task_struct *p; | 3503 | struct task_struct *p; |
@@ -3081,12 +3798,15 @@ int register_ftrace_function(struct ftrace_ops *ops) | |||
3081 | goto out_unlock; | 3798 | goto out_unlock; |
3082 | 3799 | ||
3083 | ret = __register_ftrace_function(ops); | 3800 | ret = __register_ftrace_function(ops); |
3084 | ftrace_startup(0); | 3801 | if (!ret) |
3802 | ftrace_startup(ops, 0); | ||
3803 | |||
3085 | 3804 | ||
3086 | out_unlock: | 3805 | out_unlock: |
3087 | mutex_unlock(&ftrace_lock); | 3806 | mutex_unlock(&ftrace_lock); |
3088 | return ret; | 3807 | return ret; |
3089 | } | 3808 | } |
3809 | EXPORT_SYMBOL_GPL(register_ftrace_function); | ||
3090 | 3810 | ||
3091 | /** | 3811 | /** |
3092 | * unregister_ftrace_function - unregister a function for profiling. | 3812 | * unregister_ftrace_function - unregister a function for profiling. |
@@ -3100,11 +3820,13 @@ int unregister_ftrace_function(struct ftrace_ops *ops) | |||
3100 | 3820 | ||
3101 | mutex_lock(&ftrace_lock); | 3821 | mutex_lock(&ftrace_lock); |
3102 | ret = __unregister_ftrace_function(ops); | 3822 | ret = __unregister_ftrace_function(ops); |
3103 | ftrace_shutdown(0); | 3823 | if (!ret) |
3824 | ftrace_shutdown(ops, 0); | ||
3104 | mutex_unlock(&ftrace_lock); | 3825 | mutex_unlock(&ftrace_lock); |
3105 | 3826 | ||
3106 | return ret; | 3827 | return ret; |
3107 | } | 3828 | } |
3829 | EXPORT_SYMBOL_GPL(unregister_ftrace_function); | ||
3108 | 3830 | ||
3109 | int | 3831 | int |
3110 | ftrace_enable_sysctl(struct ctl_table *table, int write, | 3832 | ftrace_enable_sysctl(struct ctl_table *table, int write, |
@@ -3130,11 +3852,11 @@ ftrace_enable_sysctl(struct ctl_table *table, int write, | |||
3130 | ftrace_startup_sysctl(); | 3852 | ftrace_startup_sysctl(); |
3131 | 3853 | ||
3132 | /* we are starting ftrace again */ | 3854 | /* we are starting ftrace again */ |
3133 | if (ftrace_list != &ftrace_list_end) { | 3855 | if (ftrace_ops_list != &ftrace_list_end) { |
3134 | if (ftrace_list->next == &ftrace_list_end) | 3856 | if (ftrace_ops_list->next == &ftrace_list_end) |
3135 | ftrace_trace_function = ftrace_list->func; | 3857 | ftrace_trace_function = ftrace_ops_list->func; |
3136 | else | 3858 | else |
3137 | ftrace_trace_function = ftrace_list_func; | 3859 | ftrace_trace_function = ftrace_ops_list_func; |
3138 | } | 3860 | } |
3139 | 3861 | ||
3140 | } else { | 3862 | } else { |
@@ -3323,7 +4045,7 @@ int register_ftrace_graph(trace_func_graph_ret_t retfunc, | |||
3323 | ftrace_graph_return = retfunc; | 4045 | ftrace_graph_return = retfunc; |
3324 | ftrace_graph_entry = entryfunc; | 4046 | ftrace_graph_entry = entryfunc; |
3325 | 4047 | ||
3326 | ftrace_startup(FTRACE_START_FUNC_RET); | 4048 | ftrace_startup(&global_ops, FTRACE_START_FUNC_RET); |
3327 | 4049 | ||
3328 | out: | 4050 | out: |
3329 | mutex_unlock(&ftrace_lock); | 4051 | mutex_unlock(&ftrace_lock); |
@@ -3340,7 +4062,7 @@ void unregister_ftrace_graph(void) | |||
3340 | ftrace_graph_active--; | 4062 | ftrace_graph_active--; |
3341 | ftrace_graph_return = (trace_func_graph_ret_t)ftrace_stub; | 4063 | ftrace_graph_return = (trace_func_graph_ret_t)ftrace_stub; |
3342 | ftrace_graph_entry = ftrace_graph_entry_stub; | 4064 | ftrace_graph_entry = ftrace_graph_entry_stub; |
3343 | ftrace_shutdown(FTRACE_STOP_FUNC_RET); | 4065 | ftrace_shutdown(&global_ops, FTRACE_STOP_FUNC_RET); |
3344 | unregister_pm_notifier(&ftrace_suspend_notifier); | 4066 | unregister_pm_notifier(&ftrace_suspend_notifier); |
3345 | unregister_trace_sched_switch(ftrace_graph_probe_sched_switch, NULL); | 4067 | unregister_trace_sched_switch(ftrace_graph_probe_sched_switch, NULL); |
3346 | 4068 | ||
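The two graph hunks above only reroute the internal start/stop through global_ops; the entry points keep the shape shown in the hunk headers, register_ftrace_graph(retfunc, entryfunc) and unregister_ftrace_graph(). A minimal sketch of an in-kernel caller — the handler names are illustrative, and only the ->func field of the graph trace structs is assumed here:

#include <linux/ftrace.h>

static int my_graph_entry(struct ftrace_graph_ent *trace)
{
	return 1;	/* non-zero: also trace this function's return */
}

static void my_graph_return(struct ftrace_graph_ret *trace)
{
	/* e.g. inspect trace->func here */
}

static int my_graph_start(void)
{
	/* return handler first, entry handler second, per the prototype above */
	return register_ftrace_graph(my_graph_return, my_graph_entry);
}

static void my_graph_stop(void)
{
	unregister_ftrace_graph();
}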
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h index 5e9dfc6286dd..6b69c4bd306f 100644 --- a/kernel/trace/trace.h +++ b/kernel/trace/trace.h | |||
@@ -419,6 +419,8 @@ extern void trace_find_cmdline(int pid, char comm[]); | |||
419 | extern unsigned long ftrace_update_tot_cnt; | 419 | extern unsigned long ftrace_update_tot_cnt; |
420 | #define DYN_FTRACE_TEST_NAME trace_selftest_dynamic_test_func | 420 | #define DYN_FTRACE_TEST_NAME trace_selftest_dynamic_test_func |
421 | extern int DYN_FTRACE_TEST_NAME(void); | 421 | extern int DYN_FTRACE_TEST_NAME(void); |
422 | #define DYN_FTRACE_TEST_NAME2 trace_selftest_dynamic_test_func2 | ||
423 | extern int DYN_FTRACE_TEST_NAME2(void); | ||
422 | #endif | 424 | #endif |
423 | 425 | ||
424 | extern int ring_buffer_expanded; | 426 | extern int ring_buffer_expanded; |
diff --git a/kernel/trace/trace_functions.c b/kernel/trace/trace_functions.c index 16aee4d44e8f..8d0e1cc4e974 100644 --- a/kernel/trace/trace_functions.c +++ b/kernel/trace/trace_functions.c | |||
@@ -149,11 +149,13 @@ function_stack_trace_call(unsigned long ip, unsigned long parent_ip) | |||
149 | static struct ftrace_ops trace_ops __read_mostly = | 149 | static struct ftrace_ops trace_ops __read_mostly = |
150 | { | 150 | { |
151 | .func = function_trace_call, | 151 | .func = function_trace_call, |
152 | .flags = FTRACE_OPS_FL_GLOBAL, | ||
152 | }; | 153 | }; |
153 | 154 | ||
154 | static struct ftrace_ops trace_stack_ops __read_mostly = | 155 | static struct ftrace_ops trace_stack_ops __read_mostly = |
155 | { | 156 | { |
156 | .func = function_stack_trace_call, | 157 | .func = function_stack_trace_call, |
158 | .flags = FTRACE_OPS_FL_GLOBAL, | ||
157 | }; | 159 | }; |
158 | 160 | ||
159 | /* Our two options */ | 161 | /* Our two options */ |
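Ops that set FTRACE_OPS_FL_GLOBAL, like the two tracer ops above, share the global filter state rather than carrying their own hashes, so a caller shapes them with the *_global_* helpers (the same state the set_ftrace_filter file operates on). A minimal, illustrative sketch — the names and the glob are made up:

#include <linux/ftrace.h>
#include <linux/module.h>
#include <linux/string.h>

static void my_global_cb(unsigned long ip, unsigned long parent_ip)
{
	/* sees exactly what the shared global filter currently allows */
}

static struct ftrace_ops my_global_ops = {
	.func	= my_global_cb,
	.flags	= FTRACE_OPS_FL_GLOBAL,
};

static int __init my_global_init(void)
{
	char *pat = "schedule*";	/* illustrative glob */

	/* note: this narrows tracing for every GLOBAL ops, not just this one */
	ftrace_set_global_filter(pat, strlen(pat), 1);
	return register_ftrace_function(&my_global_ops);
}

static void __exit my_global_exit(void)
{
	unregister_ftrace_function(&my_global_ops);
}

module_init(my_global_init);
module_exit(my_global_exit);
MODULE_LICENSE("GPL");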
diff --git a/kernel/trace/trace_irqsoff.c b/kernel/trace/trace_irqsoff.c index a4969b47afc1..c77424be284d 100644 --- a/kernel/trace/trace_irqsoff.c +++ b/kernel/trace/trace_irqsoff.c | |||
@@ -153,6 +153,7 @@ irqsoff_tracer_call(unsigned long ip, unsigned long parent_ip) | |||
153 | static struct ftrace_ops trace_ops __read_mostly = | 153 | static struct ftrace_ops trace_ops __read_mostly = |
154 | { | 154 | { |
155 | .func = irqsoff_tracer_call, | 155 | .func = irqsoff_tracer_call, |
156 | .flags = FTRACE_OPS_FL_GLOBAL, | ||
156 | }; | 157 | }; |
157 | #endif /* CONFIG_FUNCTION_TRACER */ | 158 | #endif /* CONFIG_FUNCTION_TRACER */ |
158 | 159 | ||
diff --git a/kernel/trace/trace_sched_wakeup.c b/kernel/trace/trace_sched_wakeup.c index 7319559ed59f..f029dd4fd2ca 100644 --- a/kernel/trace/trace_sched_wakeup.c +++ b/kernel/trace/trace_sched_wakeup.c | |||
@@ -129,6 +129,7 @@ wakeup_tracer_call(unsigned long ip, unsigned long parent_ip) | |||
129 | static struct ftrace_ops trace_ops __read_mostly = | 129 | static struct ftrace_ops trace_ops __read_mostly = |
130 | { | 130 | { |
131 | .func = wakeup_tracer_call, | 131 | .func = wakeup_tracer_call, |
132 | .flags = FTRACE_OPS_FL_GLOBAL, | ||
132 | }; | 133 | }; |
133 | #endif /* CONFIG_FUNCTION_TRACER */ | 134 | #endif /* CONFIG_FUNCTION_TRACER */ |
134 | 135 | ||
diff --git a/kernel/trace/trace_selftest.c b/kernel/trace/trace_selftest.c index 659732eba07c..288541f977fb 100644 --- a/kernel/trace/trace_selftest.c +++ b/kernel/trace/trace_selftest.c | |||
@@ -101,6 +101,206 @@ static inline void warn_failed_init_tracer(struct tracer *trace, int init_ret) | |||
101 | 101 | ||
102 | #ifdef CONFIG_DYNAMIC_FTRACE | 102 | #ifdef CONFIG_DYNAMIC_FTRACE |
103 | 103 | ||
104 | static int trace_selftest_test_probe1_cnt; | ||
105 | static void trace_selftest_test_probe1_func(unsigned long ip, | ||
106 | unsigned long pip) | ||
107 | { | ||
108 | trace_selftest_test_probe1_cnt++; | ||
109 | } | ||
110 | |||
111 | static int trace_selftest_test_probe2_cnt; | ||
112 | static void trace_selftest_test_probe2_func(unsigned long ip, | ||
113 | unsigned long pip) | ||
114 | { | ||
115 | trace_selftest_test_probe2_cnt++; | ||
116 | } | ||
117 | |||
118 | static int trace_selftest_test_probe3_cnt; | ||
119 | static void trace_selftest_test_probe3_func(unsigned long ip, | ||
120 | unsigned long pip) | ||
121 | { | ||
122 | trace_selftest_test_probe3_cnt++; | ||
123 | } | ||
124 | |||
125 | static int trace_selftest_test_global_cnt; | ||
126 | static void trace_selftest_test_global_func(unsigned long ip, | ||
127 | unsigned long pip) | ||
128 | { | ||
129 | trace_selftest_test_global_cnt++; | ||
130 | } | ||
131 | |||
132 | static int trace_selftest_test_dyn_cnt; | ||
133 | static void trace_selftest_test_dyn_func(unsigned long ip, | ||
134 | unsigned long pip) | ||
135 | { | ||
136 | trace_selftest_test_dyn_cnt++; | ||
137 | } | ||
138 | |||
139 | static struct ftrace_ops test_probe1 = { | ||
140 | .func = trace_selftest_test_probe1_func, | ||
141 | }; | ||
142 | |||
143 | static struct ftrace_ops test_probe2 = { | ||
144 | .func = trace_selftest_test_probe2_func, | ||
145 | }; | ||
146 | |||
147 | static struct ftrace_ops test_probe3 = { | ||
148 | .func = trace_selftest_test_probe3_func, | ||
149 | }; | ||
150 | |||
151 | static struct ftrace_ops test_global = { | ||
152 | .func = trace_selftest_test_global_func, | ||
153 | .flags = FTRACE_OPS_FL_GLOBAL, | ||
154 | }; | ||
155 | |||
156 | static void print_counts(void) | ||
157 | { | ||
158 | printk("(%d %d %d %d %d) ", | ||
159 | trace_selftest_test_probe1_cnt, | ||
160 | trace_selftest_test_probe2_cnt, | ||
161 | trace_selftest_test_probe3_cnt, | ||
162 | trace_selftest_test_global_cnt, | ||
163 | trace_selftest_test_dyn_cnt); | ||
164 | } | ||
165 | |||
166 | static void reset_counts(void) | ||
167 | { | ||
168 | trace_selftest_test_probe1_cnt = 0; | ||
169 | trace_selftest_test_probe2_cnt = 0; | ||
170 | trace_selftest_test_probe3_cnt = 0; | ||
171 | trace_selftest_test_global_cnt = 0; | ||
172 | trace_selftest_test_dyn_cnt = 0; | ||
173 | } | ||
174 | |||
175 | static int trace_selftest_ops(int cnt) | ||
176 | { | ||
177 | int save_ftrace_enabled = ftrace_enabled; | ||
178 | struct ftrace_ops *dyn_ops; | ||
179 | char *func1_name; | ||
180 | char *func2_name; | ||
181 | int len1; | ||
182 | int len2; | ||
183 | int ret = -1; | ||
184 | |||
185 | printk(KERN_CONT "PASSED\n"); | ||
186 | pr_info("Testing dynamic ftrace ops #%d: ", cnt); | ||
187 | |||
188 | ftrace_enabled = 1; | ||
189 | reset_counts(); | ||
190 | |||
191 | /* Handle PPC64 '.' name */ | ||
192 | func1_name = "*" __stringify(DYN_FTRACE_TEST_NAME); | ||
193 | func2_name = "*" __stringify(DYN_FTRACE_TEST_NAME2); | ||
194 | len1 = strlen(func1_name); | ||
195 | len2 = strlen(func2_name); | ||
196 | |||
197 | /* | ||
198 | * Probe 1 will trace function 1. | ||
199 | * Probe 2 will trace function 2. | ||
200 | * Probe 3 will trace functions 1 and 2. | ||
201 | */ | ||
202 | ftrace_set_filter(&test_probe1, func1_name, len1, 1); | ||
203 | ftrace_set_filter(&test_probe2, func2_name, len2, 1); | ||
204 | ftrace_set_filter(&test_probe3, func1_name, len1, 1); | ||
205 | ftrace_set_filter(&test_probe3, func2_name, len2, 0); | ||
206 | |||
207 | register_ftrace_function(&test_probe1); | ||
208 | register_ftrace_function(&test_probe2); | ||
209 | register_ftrace_function(&test_probe3); | ||
210 | register_ftrace_function(&test_global); | ||
211 | |||
212 | DYN_FTRACE_TEST_NAME(); | ||
213 | |||
214 | print_counts(); | ||
215 | |||
216 | if (trace_selftest_test_probe1_cnt != 1) | ||
217 | goto out; | ||
218 | if (trace_selftest_test_probe2_cnt != 0) | ||
219 | goto out; | ||
220 | if (trace_selftest_test_probe3_cnt != 1) | ||
221 | goto out; | ||
222 | if (trace_selftest_test_global_cnt == 0) | ||
223 | goto out; | ||
224 | |||
225 | DYN_FTRACE_TEST_NAME2(); | ||
226 | |||
227 | print_counts(); | ||
228 | |||
229 | if (trace_selftest_test_probe1_cnt != 1) | ||
230 | goto out; | ||
231 | if (trace_selftest_test_probe2_cnt != 1) | ||
232 | goto out; | ||
233 | if (trace_selftest_test_probe3_cnt != 2) | ||
234 | goto out; | ||
235 | |||
236 | /* Add a dynamic probe */ | ||
237 | dyn_ops = kzalloc(sizeof(*dyn_ops), GFP_KERNEL); | ||
238 | if (!dyn_ops) { | ||
239 | printk("MEMORY ERROR "); | ||
240 | goto out; | ||
241 | } | ||
242 | |||
243 | dyn_ops->func = trace_selftest_test_dyn_func; | ||
244 | |||
245 | register_ftrace_function(dyn_ops); | ||
246 | |||
247 | trace_selftest_test_global_cnt = 0; | ||
248 | |||
249 | DYN_FTRACE_TEST_NAME(); | ||
250 | |||
251 | print_counts(); | ||
252 | |||
253 | if (trace_selftest_test_probe1_cnt != 2) | ||
254 | goto out_free; | ||
255 | if (trace_selftest_test_probe2_cnt != 1) | ||
256 | goto out_free; | ||
257 | if (trace_selftest_test_probe3_cnt != 3) | ||
258 | goto out_free; | ||
259 | if (trace_selftest_test_global_cnt == 0) | ||
260 | goto out_free; | ||
261 | if (trace_selftest_test_dyn_cnt == 0) | ||
262 | goto out_free; | ||
263 | |||
264 | DYN_FTRACE_TEST_NAME2(); | ||
265 | |||
266 | print_counts(); | ||
267 | |||
268 | if (trace_selftest_test_probe1_cnt != 2) | ||
269 | goto out_free; | ||
270 | if (trace_selftest_test_probe2_cnt != 2) | ||
271 | goto out_free; | ||
272 | if (trace_selftest_test_probe3_cnt != 4) | ||
273 | goto out_free; | ||
274 | |||
275 | ret = 0; | ||
276 | out_free: | ||
277 | unregister_ftrace_function(dyn_ops); | ||
278 | kfree(dyn_ops); | ||
279 | |||
280 | out: | ||
281 | /* Purposely unregister in the same order */ | ||
282 | unregister_ftrace_function(&test_probe1); | ||
283 | unregister_ftrace_function(&test_probe2); | ||
284 | unregister_ftrace_function(&test_probe3); | ||
285 | unregister_ftrace_function(&test_global); | ||
286 | |||
287 | /* Make sure everything is off */ | ||
288 | reset_counts(); | ||
289 | DYN_FTRACE_TEST_NAME(); | ||
290 | DYN_FTRACE_TEST_NAME(); | ||
291 | |||
292 | if (trace_selftest_test_probe1_cnt || | ||
293 | trace_selftest_test_probe2_cnt || | ||
294 | trace_selftest_test_probe3_cnt || | ||
295 | trace_selftest_test_global_cnt || | ||
296 | trace_selftest_test_dyn_cnt) | ||
297 | ret = -1; | ||
298 | |||
299 | ftrace_enabled = save_ftrace_enabled; | ||
300 | |||
301 | return ret; | ||
302 | } | ||
303 | |||
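For reference, with the filters set up at the top of trace_selftest_ops(), the tuples printed by print_counts() — in (probe1 probe2 probe3 global dyn) order — should read:

	after the 1st DYN_FTRACE_TEST_NAME():   (1 0 1 >0 0)
	after the 1st DYN_FTRACE_TEST_NAME2():  (1 1 2 >0 0)
	after the 2nd DYN_FTRACE_TEST_NAME():   (2 1 3 >0 >0)
	after the 2nd DYN_FTRACE_TEST_NAME2():  (2 2 4 >0 >0)

probe3 counts both test functions because its second ftrace_set_filter() call appended (reset=0) rather than replaced; test_global follows whatever the shared global filter currently allows and dyn_ops has no filter at all, so their exact totals vary and the test only checks them for being non-zero.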
104 | /* Test dynamic code modification and ftrace filters */ | 304 | /* Test dynamic code modification and ftrace filters */ |
105 | int trace_selftest_startup_dynamic_tracing(struct tracer *trace, | 305 | int trace_selftest_startup_dynamic_tracing(struct tracer *trace, |
106 | struct trace_array *tr, | 306 | struct trace_array *tr, |
@@ -131,7 +331,7 @@ int trace_selftest_startup_dynamic_tracing(struct tracer *trace, | |||
131 | func_name = "*" __stringify(DYN_FTRACE_TEST_NAME); | 331 | func_name = "*" __stringify(DYN_FTRACE_TEST_NAME); |
132 | 332 | ||
133 | /* filter only on our function */ | 333 | /* filter only on our function */ |
134 | ftrace_set_filter(func_name, strlen(func_name), 1); | 334 | ftrace_set_global_filter(func_name, strlen(func_name), 1); |
135 | 335 | ||
136 | /* enable tracing */ | 336 | /* enable tracing */ |
137 | ret = tracer_init(trace, tr); | 337 | ret = tracer_init(trace, tr); |
@@ -166,22 +366,30 @@ int trace_selftest_startup_dynamic_tracing(struct tracer *trace, | |||
166 | 366 | ||
167 | /* check the trace buffer */ | 367 | /* check the trace buffer */ |
168 | ret = trace_test_buffer(tr, &count); | 368 | ret = trace_test_buffer(tr, &count); |
169 | trace->reset(tr); | ||
170 | tracing_start(); | 369 | tracing_start(); |
171 | 370 | ||
172 | /* we should only have one item */ | 371 | /* we should only have one item */ |
173 | if (!ret && count != 1) { | 372 | if (!ret && count != 1) { |
373 | trace->reset(tr); | ||
174 | printk(KERN_CONT ".. filter failed count=%ld ..", count); | 374 | printk(KERN_CONT ".. filter failed count=%ld ..", count); |
175 | ret = -1; | 375 | ret = -1; |
176 | goto out; | 376 | goto out; |
177 | } | 377 | } |
178 | 378 | ||
379 | /* Test the ops with global tracing running */ | ||
380 | ret = trace_selftest_ops(1); | ||
381 | trace->reset(tr); | ||
382 | |||
179 | out: | 383 | out: |
180 | ftrace_enabled = save_ftrace_enabled; | 384 | ftrace_enabled = save_ftrace_enabled; |
181 | tracer_enabled = save_tracer_enabled; | 385 | tracer_enabled = save_tracer_enabled; |
182 | 386 | ||
183 | /* Enable tracing on all functions again */ | 387 | /* Enable tracing on all functions again */ |
184 | ftrace_set_filter(NULL, 0, 1); | 388 | ftrace_set_global_filter(NULL, 0, 1); |
389 | |||
390 | /* Test the ops with global tracing off */ | ||
391 | if (!ret) | ||
392 | ret = trace_selftest_ops(2); | ||
185 | 393 | ||
186 | return ret; | 394 | return ret; |
187 | } | 395 | } |
diff --git a/kernel/trace/trace_selftest_dynamic.c b/kernel/trace/trace_selftest_dynamic.c index 54dd77cce5bf..b4c475a0a48b 100644 --- a/kernel/trace/trace_selftest_dynamic.c +++ b/kernel/trace/trace_selftest_dynamic.c | |||
@@ -5,3 +5,9 @@ int DYN_FTRACE_TEST_NAME(void) | |||
5 | /* used to call mcount */ | 5 | /* used to call mcount */ |
6 | return 0; | 6 | return 0; |
7 | } | 7 | } |
8 | |||
9 | int DYN_FTRACE_TEST_NAME2(void) | ||
10 | { | ||
11 | /* used to call mcount */ | ||
12 | return 0; | ||
13 | } | ||
diff --git a/kernel/trace/trace_stack.c b/kernel/trace/trace_stack.c index 4c5dead0c239..b0b53b8e4c25 100644 --- a/kernel/trace/trace_stack.c +++ b/kernel/trace/trace_stack.c | |||
@@ -133,6 +133,7 @@ stack_trace_call(unsigned long ip, unsigned long parent_ip) | |||
133 | static struct ftrace_ops trace_ops __read_mostly = | 133 | static struct ftrace_ops trace_ops __read_mostly = |
134 | { | 134 | { |
135 | .func = stack_trace_call, | 135 | .func = stack_trace_call, |
136 | .flags = FTRACE_OPS_FL_GLOBAL, | ||
136 | }; | 137 | }; |
137 | 138 | ||
138 | static ssize_t | 139 | static ssize_t |