author     Ingo Molnar <mingo@elte.hu>  2009-04-07 05:15:40 -0400
committer  Ingo Molnar <mingo@elte.hu>  2009-04-07 05:15:40 -0400
commit     5e34437840d33554f69380584311743b39e8fbeb (patch)
tree       e081135619ee146af5efb9ee883afca950df5757 /kernel/trace/ftrace.c
parent     77d05632baee21b1cef8730d7c06aa69601e4dca (diff)
parent     d508afb437daee7cf07da085b635c44a4ebf9b38 (diff)
Merge branch 'linus' into core/softlockup

Conflicts:
	kernel/sysctl.c
Diffstat (limited to 'kernel/trace/ftrace.c')
-rw-r--r--  kernel/trace/ftrace.c | 1139
1 file changed, 881 insertions(+), 258 deletions(-)
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index 9a236ffe2aa4..f1ed080406c3 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -27,6 +27,9 @@ | |||
27 | #include <linux/sysctl.h> | 27 | #include <linux/sysctl.h> |
28 | #include <linux/ctype.h> | 28 | #include <linux/ctype.h> |
29 | #include <linux/list.h> | 29 | #include <linux/list.h> |
30 | #include <linux/hash.h> | ||
31 | |||
32 | #include <trace/sched.h> | ||
30 | 33 | ||
31 | #include <asm/ftrace.h> | 34 | #include <asm/ftrace.h> |
32 | 35 | ||
@@ -44,14 +47,14 @@ | |||
44 | ftrace_kill(); \ | 47 | ftrace_kill(); \ |
45 | } while (0) | 48 | } while (0) |
46 | 49 | ||
50 | /* hash bits for specific function selection */ | ||
51 | #define FTRACE_HASH_BITS 7 | ||
52 | #define FTRACE_FUNC_HASHSIZE (1 << FTRACE_HASH_BITS) | ||
53 | |||
47 | /* ftrace_enabled is a method to turn ftrace on or off */ | 54 | /* ftrace_enabled is a method to turn ftrace on or off */ |
48 | int ftrace_enabled __read_mostly; | 55 | int ftrace_enabled __read_mostly; |
49 | static int last_ftrace_enabled; | 56 | static int last_ftrace_enabled; |
50 | 57 | ||
51 | /* set when tracing only a pid */ | ||
52 | struct pid *ftrace_pid_trace; | ||
53 | static struct pid * const ftrace_swapper_pid = &init_struct_pid; | ||
54 | |||
55 | /* Quick disabling of function tracer. */ | 58 | /* Quick disabling of function tracer. */ |
56 | int function_trace_stop; | 59 | int function_trace_stop; |
57 | 60 | ||
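The constants added above size the per-function probe hash that later hunks populate. A minimal sketch of how an instruction pointer maps to one of the 128 buckets; hash_long() comes from the <linux/hash.h> include added in this patch, and the helper name is only illustrative:

```c
#include <linux/hash.h>

/* illustrative only -- mirrors how this patch buckets probe entries */
static struct hlist_head example_hash[FTRACE_FUNC_HASHSIZE];

static struct hlist_head *example_bucket(unsigned long ip)
{
	unsigned long key = hash_long(ip, FTRACE_HASH_BITS);	/* 0..127 */

	return &example_hash[key];
}
```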
@@ -61,9 +64,7 @@ int function_trace_stop; | |||
61 | */ | 64 | */ |
62 | static int ftrace_disabled __read_mostly; | 65 | static int ftrace_disabled __read_mostly; |
63 | 66 | ||
64 | static DEFINE_SPINLOCK(ftrace_lock); | 67 | static DEFINE_MUTEX(ftrace_lock); |
65 | static DEFINE_MUTEX(ftrace_sysctl_lock); | ||
66 | static DEFINE_MUTEX(ftrace_start_lock); | ||
67 | 68 | ||
68 | static struct ftrace_ops ftrace_list_end __read_mostly = | 69 | static struct ftrace_ops ftrace_list_end __read_mostly = |
69 | { | 70 | { |
@@ -134,9 +135,6 @@ static void ftrace_test_stop_func(unsigned long ip, unsigned long parent_ip) | |||
134 | 135 | ||
135 | static int __register_ftrace_function(struct ftrace_ops *ops) | 136 | static int __register_ftrace_function(struct ftrace_ops *ops) |
136 | { | 137 | { |
137 | /* should not be called from interrupt context */ | ||
138 | spin_lock(&ftrace_lock); | ||
139 | |||
140 | ops->next = ftrace_list; | 138 | ops->next = ftrace_list; |
141 | /* | 139 | /* |
142 | * We are entering ops into the ftrace_list but another | 140 | * We are entering ops into the ftrace_list but another |
@@ -172,18 +170,12 @@ static int __register_ftrace_function(struct ftrace_ops *ops) | |||
172 | #endif | 170 | #endif |
173 | } | 171 | } |
174 | 172 | ||
175 | spin_unlock(&ftrace_lock); | ||
176 | |||
177 | return 0; | 173 | return 0; |
178 | } | 174 | } |
179 | 175 | ||
180 | static int __unregister_ftrace_function(struct ftrace_ops *ops) | 176 | static int __unregister_ftrace_function(struct ftrace_ops *ops) |
181 | { | 177 | { |
182 | struct ftrace_ops **p; | 178 | struct ftrace_ops **p; |
183 | int ret = 0; | ||
184 | |||
185 | /* should not be called from interrupt context */ | ||
186 | spin_lock(&ftrace_lock); | ||
187 | 179 | ||
188 | /* | 180 | /* |
189 | * If we are removing the last function, then simply point | 181 | * If we are removing the last function, then simply point |
@@ -192,17 +184,15 @@ static int __unregister_ftrace_function(struct ftrace_ops *ops) | |||
192 | if (ftrace_list == ops && ops->next == &ftrace_list_end) { | 184 | if (ftrace_list == ops && ops->next == &ftrace_list_end) { |
193 | ftrace_trace_function = ftrace_stub; | 185 | ftrace_trace_function = ftrace_stub; |
194 | ftrace_list = &ftrace_list_end; | 186 | ftrace_list = &ftrace_list_end; |
195 | goto out; | 187 | return 0; |
196 | } | 188 | } |
197 | 189 | ||
198 | for (p = &ftrace_list; *p != &ftrace_list_end; p = &(*p)->next) | 190 | for (p = &ftrace_list; *p != &ftrace_list_end; p = &(*p)->next) |
199 | if (*p == ops) | 191 | if (*p == ops) |
200 | break; | 192 | break; |
201 | 193 | ||
202 | if (*p != ops) { | 194 | if (*p != ops) |
203 | ret = -1; | 195 | return -1; |
204 | goto out; | ||
205 | } | ||
206 | 196 | ||
207 | *p = (*p)->next; | 197 | *p = (*p)->next; |
208 | 198 | ||
@@ -223,21 +213,15 @@ static int __unregister_ftrace_function(struct ftrace_ops *ops) | |||
223 | } | 213 | } |
224 | } | 214 | } |
225 | 215 | ||
226 | out: | 216 | return 0; |
227 | spin_unlock(&ftrace_lock); | ||
228 | |||
229 | return ret; | ||
230 | } | 217 | } |
231 | 218 | ||
232 | static void ftrace_update_pid_func(void) | 219 | static void ftrace_update_pid_func(void) |
233 | { | 220 | { |
234 | ftrace_func_t func; | 221 | ftrace_func_t func; |
235 | 222 | ||
236 | /* should not be called from interrupt context */ | ||
237 | spin_lock(&ftrace_lock); | ||
238 | |||
239 | if (ftrace_trace_function == ftrace_stub) | 223 | if (ftrace_trace_function == ftrace_stub) |
240 | goto out; | 224 | return; |
241 | 225 | ||
242 | func = ftrace_trace_function; | 226 | func = ftrace_trace_function; |
243 | 227 | ||
@@ -254,23 +238,29 @@ static void ftrace_update_pid_func(void) | |||
254 | #else | 238 | #else |
255 | __ftrace_trace_function = func; | 239 | __ftrace_trace_function = func; |
256 | #endif | 240 | #endif |
257 | |||
258 | out: | ||
259 | spin_unlock(&ftrace_lock); | ||
260 | } | 241 | } |
261 | 242 | ||
243 | /* set when tracing only a pid */ | ||
244 | struct pid *ftrace_pid_trace; | ||
245 | static struct pid * const ftrace_swapper_pid = &init_struct_pid; | ||
246 | |||
262 | #ifdef CONFIG_DYNAMIC_FTRACE | 247 | #ifdef CONFIG_DYNAMIC_FTRACE |
248 | |||
263 | #ifndef CONFIG_FTRACE_MCOUNT_RECORD | 249 | #ifndef CONFIG_FTRACE_MCOUNT_RECORD |
264 | # error Dynamic ftrace depends on MCOUNT_RECORD | 250 | # error Dynamic ftrace depends on MCOUNT_RECORD |
265 | #endif | 251 | #endif |
266 | 252 | ||
267 | /* | 253 | static struct hlist_head ftrace_func_hash[FTRACE_FUNC_HASHSIZE] __read_mostly; |
268 | * Since MCOUNT_ADDR may point to mcount itself, we do not want | 254 | |
269 | * to get it confused by reading a reference in the code as we | 255 | struct ftrace_func_probe { |
270 | * are parsing on objcopy output of text. Use a variable for | 256 | struct hlist_node node; |
271 | * it instead. | 257 | struct ftrace_probe_ops *ops; |
272 | */ | 258 | unsigned long flags; |
273 | static unsigned long mcount_addr = MCOUNT_ADDR; | 259 | unsigned long ip; |
260 | void *data; | ||
261 | struct rcu_head rcu; | ||
262 | }; | ||
263 | |||
274 | 264 | ||
275 | enum { | 265 | enum { |
276 | FTRACE_ENABLE_CALLS = (1 << 0), | 266 | FTRACE_ENABLE_CALLS = (1 << 0), |
@@ -284,13 +274,13 @@ enum { | |||
284 | 274 | ||
285 | static int ftrace_filtered; | 275 | static int ftrace_filtered; |
286 | 276 | ||
287 | static LIST_HEAD(ftrace_new_addrs); | 277 | static struct dyn_ftrace *ftrace_new_addrs; |
288 | 278 | ||
289 | static DEFINE_MUTEX(ftrace_regex_lock); | 279 | static DEFINE_MUTEX(ftrace_regex_lock); |
290 | 280 | ||
291 | struct ftrace_page { | 281 | struct ftrace_page { |
292 | struct ftrace_page *next; | 282 | struct ftrace_page *next; |
293 | unsigned long index; | 283 | int index; |
294 | struct dyn_ftrace records[]; | 284 | struct dyn_ftrace records[]; |
295 | }; | 285 | }; |
296 | 286 | ||
@@ -305,6 +295,19 @@ static struct ftrace_page *ftrace_pages; | |||
305 | 295 | ||
306 | static struct dyn_ftrace *ftrace_free_records; | 296 | static struct dyn_ftrace *ftrace_free_records; |
307 | 297 | ||
298 | /* | ||
299 | * This is a double for. Do not use 'break' to break out of the loop, | ||
300 | * you must use a goto. | ||
301 | */ | ||
302 | #define do_for_each_ftrace_rec(pg, rec) \ | ||
303 | for (pg = ftrace_pages_start; pg; pg = pg->next) { \ | ||
304 | int _____i; \ | ||
305 | for (_____i = 0; _____i < pg->index; _____i++) { \ | ||
306 | rec = &pg->records[_____i]; | ||
307 | |||
308 | #define while_for_each_ftrace_rec() \ | ||
309 | } \ | ||
310 | } | ||
308 | 311 | ||
309 | #ifdef CONFIG_KPROBES | 312 | #ifdef CONFIG_KPROBES |
310 | 313 | ||
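A hedged sketch of how the new iterator pair is meant to be used; the real callers (ftrace_release(), ftrace_replace_code(), the filter code) appear in later hunks. Per the comment above, the body must not `break` out of the loop, and callers hold ftrace_lock. The helper name is hypothetical:

```c
static int count_filtered_records(void)
{
	struct ftrace_page *pg;
	struct dyn_ftrace *rec;
	int count = 0;

	mutex_lock(&ftrace_lock);
	do_for_each_ftrace_rec(pg, rec) {
		/* count records currently selected by set_ftrace_filter */
		if (rec->flags & FTRACE_FL_FILTER)
			count++;
	} while_for_each_ftrace_rec();
	mutex_unlock(&ftrace_lock);

	return count;
}
```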
@@ -338,7 +341,7 @@ static inline int record_frozen(struct dyn_ftrace *rec) | |||
338 | 341 | ||
339 | static void ftrace_free_rec(struct dyn_ftrace *rec) | 342 | static void ftrace_free_rec(struct dyn_ftrace *rec) |
340 | { | 343 | { |
341 | rec->ip = (unsigned long)ftrace_free_records; | 344 | rec->freelist = ftrace_free_records; |
342 | ftrace_free_records = rec; | 345 | ftrace_free_records = rec; |
343 | rec->flags |= FTRACE_FL_FREE; | 346 | rec->flags |= FTRACE_FL_FREE; |
344 | } | 347 | } |
@@ -349,23 +352,22 @@ void ftrace_release(void *start, unsigned long size) | |||
349 | struct ftrace_page *pg; | 352 | struct ftrace_page *pg; |
350 | unsigned long s = (unsigned long)start; | 353 | unsigned long s = (unsigned long)start; |
351 | unsigned long e = s + size; | 354 | unsigned long e = s + size; |
352 | int i; | ||
353 | 355 | ||
354 | if (ftrace_disabled || !start) | 356 | if (ftrace_disabled || !start) |
355 | return; | 357 | return; |
356 | 358 | ||
357 | /* should not be called from interrupt context */ | 359 | mutex_lock(&ftrace_lock); |
358 | spin_lock(&ftrace_lock); | 360 | do_for_each_ftrace_rec(pg, rec) { |
359 | 361 | if ((rec->ip >= s) && (rec->ip < e)) { | |
360 | for (pg = ftrace_pages_start; pg; pg = pg->next) { | 362 | /* |
361 | for (i = 0; i < pg->index; i++) { | 363 | * rec->ip is changed in ftrace_free_rec() |
362 | rec = &pg->records[i]; | 364 | * It should not between s and e if record was freed. |
363 | 365 | */ | |
364 | if ((rec->ip >= s) && (rec->ip < e)) | 366 | FTRACE_WARN_ON(rec->flags & FTRACE_FL_FREE); |
365 | ftrace_free_rec(rec); | 367 | ftrace_free_rec(rec); |
366 | } | 368 | } |
367 | } | 369 | } while_for_each_ftrace_rec(); |
368 | spin_unlock(&ftrace_lock); | 370 | mutex_unlock(&ftrace_lock); |
369 | } | 371 | } |
370 | 372 | ||
371 | static struct dyn_ftrace *ftrace_alloc_dyn_node(unsigned long ip) | 373 | static struct dyn_ftrace *ftrace_alloc_dyn_node(unsigned long ip) |
@@ -382,7 +384,7 @@ static struct dyn_ftrace *ftrace_alloc_dyn_node(unsigned long ip) | |||
382 | return NULL; | 384 | return NULL; |
383 | } | 385 | } |
384 | 386 | ||
385 | ftrace_free_records = (void *)rec->ip; | 387 | ftrace_free_records = rec->freelist; |
386 | memset(rec, 0, sizeof(*rec)); | 388 | memset(rec, 0, sizeof(*rec)); |
387 | return rec; | 389 | return rec; |
388 | } | 390 | } |
@@ -414,8 +416,8 @@ ftrace_record_ip(unsigned long ip) | |||
414 | return NULL; | 416 | return NULL; |
415 | 417 | ||
416 | rec->ip = ip; | 418 | rec->ip = ip; |
417 | 419 | rec->newlist = ftrace_new_addrs; | |
418 | list_add(&rec->list, &ftrace_new_addrs); | 420 | ftrace_new_addrs = rec; |
419 | 421 | ||
420 | return rec; | 422 | return rec; |
421 | } | 423 | } |
@@ -461,10 +463,10 @@ static void ftrace_bug(int failed, unsigned long ip) | |||
461 | static int | 463 | static int |
462 | __ftrace_replace_code(struct dyn_ftrace *rec, int enable) | 464 | __ftrace_replace_code(struct dyn_ftrace *rec, int enable) |
463 | { | 465 | { |
464 | unsigned long ip, fl; | ||
465 | unsigned long ftrace_addr; | 466 | unsigned long ftrace_addr; |
467 | unsigned long ip, fl; | ||
466 | 468 | ||
467 | ftrace_addr = (unsigned long)ftrace_caller; | 469 | ftrace_addr = (unsigned long)FTRACE_ADDR; |
468 | 470 | ||
469 | ip = rec->ip; | 471 | ip = rec->ip; |
470 | 472 | ||
@@ -473,7 +475,7 @@ __ftrace_replace_code(struct dyn_ftrace *rec, int enable) | |||
473 | * it is not enabled then do nothing. | 475 | * it is not enabled then do nothing. |
474 | * | 476 | * |
475 | * If this record is not to be traced and | 477 | * If this record is not to be traced and |
476 | * it is enabled then disabled it. | 478 | * it is enabled then disable it. |
477 | * | 479 | * |
478 | */ | 480 | */ |
479 | if (rec->flags & FTRACE_FL_NOTRACE) { | 481 | if (rec->flags & FTRACE_FL_NOTRACE) { |
@@ -493,7 +495,7 @@ __ftrace_replace_code(struct dyn_ftrace *rec, int enable) | |||
493 | if (fl == (FTRACE_FL_FILTER | FTRACE_FL_ENABLED)) | 495 | if (fl == (FTRACE_FL_FILTER | FTRACE_FL_ENABLED)) |
494 | return 0; | 496 | return 0; |
495 | 497 | ||
496 | /* Record is not filtered and is not enabled do nothing */ | 498 | /* Record is not filtered or enabled, do nothing */ |
497 | if (!fl) | 499 | if (!fl) |
498 | return 0; | 500 | return 0; |
499 | 501 | ||
@@ -515,7 +517,7 @@ __ftrace_replace_code(struct dyn_ftrace *rec, int enable) | |||
515 | 517 | ||
516 | } else { | 518 | } else { |
517 | 519 | ||
518 | /* if record is not enabled do nothing */ | 520 | /* if record is not enabled, do nothing */ |
519 | if (!(rec->flags & FTRACE_FL_ENABLED)) | 521 | if (!(rec->flags & FTRACE_FL_ENABLED)) |
520 | return 0; | 522 | return 0; |
521 | 523 | ||
@@ -531,41 +533,41 @@ __ftrace_replace_code(struct dyn_ftrace *rec, int enable) | |||
531 | 533 | ||
532 | static void ftrace_replace_code(int enable) | 534 | static void ftrace_replace_code(int enable) |
533 | { | 535 | { |
534 | int i, failed; | ||
535 | struct dyn_ftrace *rec; | 536 | struct dyn_ftrace *rec; |
536 | struct ftrace_page *pg; | 537 | struct ftrace_page *pg; |
538 | int failed; | ||
537 | 539 | ||
538 | for (pg = ftrace_pages_start; pg; pg = pg->next) { | 540 | do_for_each_ftrace_rec(pg, rec) { |
539 | for (i = 0; i < pg->index; i++) { | 541 | /* |
540 | rec = &pg->records[i]; | 542 | * Skip over free records, records that have |
541 | 543 | * failed and not converted. | |
542 | /* | 544 | */ |
543 | * Skip over free records and records that have | 545 | if (rec->flags & FTRACE_FL_FREE || |
544 | * failed. | 546 | rec->flags & FTRACE_FL_FAILED || |
545 | */ | 547 | !(rec->flags & FTRACE_FL_CONVERTED)) |
546 | if (rec->flags & FTRACE_FL_FREE || | 548 | continue; |
547 | rec->flags & FTRACE_FL_FAILED) | ||
548 | continue; | ||
549 | 549 | ||
550 | /* ignore updates to this record's mcount site */ | 550 | /* ignore updates to this record's mcount site */ |
551 | if (get_kprobe((void *)rec->ip)) { | 551 | if (get_kprobe((void *)rec->ip)) { |
552 | freeze_record(rec); | 552 | freeze_record(rec); |
553 | continue; | 553 | continue; |
554 | } else { | 554 | } else { |
555 | unfreeze_record(rec); | 555 | unfreeze_record(rec); |
556 | } | 556 | } |
557 | 557 | ||
558 | failed = __ftrace_replace_code(rec, enable); | 558 | failed = __ftrace_replace_code(rec, enable); |
559 | if (failed && (rec->flags & FTRACE_FL_CONVERTED)) { | 559 | if (failed) { |
560 | rec->flags |= FTRACE_FL_FAILED; | 560 | rec->flags |= FTRACE_FL_FAILED; |
561 | if ((system_state == SYSTEM_BOOTING) || | 561 | if ((system_state == SYSTEM_BOOTING) || |
562 | !core_kernel_text(rec->ip)) { | 562 | !core_kernel_text(rec->ip)) { |
563 | ftrace_free_rec(rec); | 563 | ftrace_free_rec(rec); |
564 | } else | 564 | } else { |
565 | ftrace_bug(failed, rec->ip); | 565 | ftrace_bug(failed, rec->ip); |
566 | } | 566 | /* Stop processing */ |
567 | return; | ||
568 | } | ||
567 | } | 569 | } |
568 | } | 570 | } while_for_each_ftrace_rec(); |
569 | } | 571 | } |
570 | 572 | ||
571 | static int | 573 | static int |
@@ -576,7 +578,7 @@ ftrace_code_disable(struct module *mod, struct dyn_ftrace *rec) | |||
576 | 578 | ||
577 | ip = rec->ip; | 579 | ip = rec->ip; |
578 | 580 | ||
579 | ret = ftrace_make_nop(mod, rec, mcount_addr); | 581 | ret = ftrace_make_nop(mod, rec, MCOUNT_ADDR); |
580 | if (ret) { | 582 | if (ret) { |
581 | ftrace_bug(ret, ip); | 583 | ftrace_bug(ret, ip); |
582 | rec->flags |= FTRACE_FL_FAILED; | 584 | rec->flags |= FTRACE_FL_FAILED; |
@@ -585,6 +587,24 @@ ftrace_code_disable(struct module *mod, struct dyn_ftrace *rec) | |||
585 | return 1; | 587 | return 1; |
586 | } | 588 | } |
587 | 589 | ||
590 | /* | ||
591 | * archs can override this function if they must do something | ||
592 | * before the modifying code is performed. | ||
593 | */ | ||
594 | int __weak ftrace_arch_code_modify_prepare(void) | ||
595 | { | ||
596 | return 0; | ||
597 | } | ||
598 | |||
599 | /* | ||
600 | * archs can override this function if they must do something | ||
601 | * after the modifying code is performed. | ||
602 | */ | ||
603 | int __weak ftrace_arch_code_modify_post_process(void) | ||
604 | { | ||
605 | return 0; | ||
606 | } | ||
607 | |||
588 | static int __ftrace_modify_code(void *data) | 608 | static int __ftrace_modify_code(void *data) |
589 | { | 609 | { |
590 | int *command = data; | 610 | int *command = data; |
@@ -607,7 +627,17 @@ static int __ftrace_modify_code(void *data) | |||
607 | 627 | ||
608 | static void ftrace_run_update_code(int command) | 628 | static void ftrace_run_update_code(int command) |
609 | { | 629 | { |
630 | int ret; | ||
631 | |||
632 | ret = ftrace_arch_code_modify_prepare(); | ||
633 | FTRACE_WARN_ON(ret); | ||
634 | if (ret) | ||
635 | return; | ||
636 | |||
610 | stop_machine(__ftrace_modify_code, &command, NULL); | 637 | stop_machine(__ftrace_modify_code, &command, NULL); |
638 | |||
639 | ret = ftrace_arch_code_modify_post_process(); | ||
640 | FTRACE_WARN_ON(ret); | ||
611 | } | 641 | } |
612 | 642 | ||
613 | static ftrace_func_t saved_ftrace_func; | 643 | static ftrace_func_t saved_ftrace_func; |
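The two __weak hooks added above let an architecture bracket the stop_machine() text patching. A sketch of what an override might look like; the set_kernel_text_rw()/set_kernel_text_ro() helpers are an assumption for illustration, not part of this commit:

```c
/* hypothetical arch/<arch>/kernel/ftrace.c override */
int ftrace_arch_code_modify_prepare(void)
{
	set_kernel_text_rw();		/* make kernel text writable before patching */
	return 0;
}

int ftrace_arch_code_modify_post_process(void)
{
	set_kernel_text_ro();		/* restore read-only protection afterwards */
	return 0;
}
```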
@@ -631,13 +661,10 @@ static void ftrace_startup(int command) | |||
631 | if (unlikely(ftrace_disabled)) | 661 | if (unlikely(ftrace_disabled)) |
632 | return; | 662 | return; |
633 | 663 | ||
634 | mutex_lock(&ftrace_start_lock); | ||
635 | ftrace_start_up++; | 664 | ftrace_start_up++; |
636 | command |= FTRACE_ENABLE_CALLS; | 665 | command |= FTRACE_ENABLE_CALLS; |
637 | 666 | ||
638 | ftrace_startup_enable(command); | 667 | ftrace_startup_enable(command); |
639 | |||
640 | mutex_unlock(&ftrace_start_lock); | ||
641 | } | 668 | } |
642 | 669 | ||
643 | static void ftrace_shutdown(int command) | 670 | static void ftrace_shutdown(int command) |
@@ -645,7 +672,6 @@ static void ftrace_shutdown(int command) | |||
645 | if (unlikely(ftrace_disabled)) | 672 | if (unlikely(ftrace_disabled)) |
646 | return; | 673 | return; |
647 | 674 | ||
648 | mutex_lock(&ftrace_start_lock); | ||
649 | ftrace_start_up--; | 675 | ftrace_start_up--; |
650 | if (!ftrace_start_up) | 676 | if (!ftrace_start_up) |
651 | command |= FTRACE_DISABLE_CALLS; | 677 | command |= FTRACE_DISABLE_CALLS; |
@@ -656,11 +682,9 @@ static void ftrace_shutdown(int command) | |||
656 | } | 682 | } |
657 | 683 | ||
658 | if (!command || !ftrace_enabled) | 684 | if (!command || !ftrace_enabled) |
659 | goto out; | 685 | return; |
660 | 686 | ||
661 | ftrace_run_update_code(command); | 687 | ftrace_run_update_code(command); |
662 | out: | ||
663 | mutex_unlock(&ftrace_start_lock); | ||
664 | } | 688 | } |
665 | 689 | ||
666 | static void ftrace_startup_sysctl(void) | 690 | static void ftrace_startup_sysctl(void) |
@@ -670,7 +694,6 @@ static void ftrace_startup_sysctl(void) | |||
670 | if (unlikely(ftrace_disabled)) | 694 | if (unlikely(ftrace_disabled)) |
671 | return; | 695 | return; |
672 | 696 | ||
673 | mutex_lock(&ftrace_start_lock); | ||
674 | /* Force update next time */ | 697 | /* Force update next time */ |
675 | saved_ftrace_func = NULL; | 698 | saved_ftrace_func = NULL; |
676 | /* ftrace_start_up is true if we want ftrace running */ | 699 | /* ftrace_start_up is true if we want ftrace running */ |
@@ -678,7 +701,6 @@ static void ftrace_startup_sysctl(void) | |||
678 | command |= FTRACE_ENABLE_CALLS; | 701 | command |= FTRACE_ENABLE_CALLS; |
679 | 702 | ||
680 | ftrace_run_update_code(command); | 703 | ftrace_run_update_code(command); |
681 | mutex_unlock(&ftrace_start_lock); | ||
682 | } | 704 | } |
683 | 705 | ||
684 | static void ftrace_shutdown_sysctl(void) | 706 | static void ftrace_shutdown_sysctl(void) |
@@ -688,13 +710,11 @@ static void ftrace_shutdown_sysctl(void) | |||
688 | if (unlikely(ftrace_disabled)) | 710 | if (unlikely(ftrace_disabled)) |
689 | return; | 711 | return; |
690 | 712 | ||
691 | mutex_lock(&ftrace_start_lock); | ||
692 | /* ftrace_start_up is true if ftrace is running */ | 713 | /* ftrace_start_up is true if ftrace is running */ |
693 | if (ftrace_start_up) | 714 | if (ftrace_start_up) |
694 | command |= FTRACE_DISABLE_CALLS; | 715 | command |= FTRACE_DISABLE_CALLS; |
695 | 716 | ||
696 | ftrace_run_update_code(command); | 717 | ftrace_run_update_code(command); |
697 | mutex_unlock(&ftrace_start_lock); | ||
698 | } | 718 | } |
699 | 719 | ||
700 | static cycle_t ftrace_update_time; | 720 | static cycle_t ftrace_update_time; |
@@ -703,19 +723,21 @@ unsigned long ftrace_update_tot_cnt; | |||
703 | 723 | ||
704 | static int ftrace_update_code(struct module *mod) | 724 | static int ftrace_update_code(struct module *mod) |
705 | { | 725 | { |
706 | struct dyn_ftrace *p, *t; | 726 | struct dyn_ftrace *p; |
707 | cycle_t start, stop; | 727 | cycle_t start, stop; |
708 | 728 | ||
709 | start = ftrace_now(raw_smp_processor_id()); | 729 | start = ftrace_now(raw_smp_processor_id()); |
710 | ftrace_update_cnt = 0; | 730 | ftrace_update_cnt = 0; |
711 | 731 | ||
712 | list_for_each_entry_safe(p, t, &ftrace_new_addrs, list) { | 732 | while (ftrace_new_addrs) { |
713 | 733 | ||
714 | /* If something went wrong, bail without enabling anything */ | 734 | /* If something went wrong, bail without enabling anything */ |
715 | if (unlikely(ftrace_disabled)) | 735 | if (unlikely(ftrace_disabled)) |
716 | return -1; | 736 | return -1; |
717 | 737 | ||
718 | list_del_init(&p->list); | 738 | p = ftrace_new_addrs; |
739 | ftrace_new_addrs = p->newlist; | ||
740 | p->flags = 0L; | ||
719 | 741 | ||
720 | /* convert record (i.e, patch mcount-call with NOP) */ | 742 | /* convert record (i.e, patch mcount-call with NOP) */ |
721 | if (ftrace_code_disable(mod, p)) { | 743 | if (ftrace_code_disable(mod, p)) { |
@@ -781,13 +803,16 @@ enum { | |||
781 | FTRACE_ITER_CONT = (1 << 1), | 803 | FTRACE_ITER_CONT = (1 << 1), |
782 | FTRACE_ITER_NOTRACE = (1 << 2), | 804 | FTRACE_ITER_NOTRACE = (1 << 2), |
783 | FTRACE_ITER_FAILURES = (1 << 3), | 805 | FTRACE_ITER_FAILURES = (1 << 3), |
806 | FTRACE_ITER_PRINTALL = (1 << 4), | ||
807 | FTRACE_ITER_HASH = (1 << 5), | ||
784 | }; | 808 | }; |
785 | 809 | ||
786 | #define FTRACE_BUFF_MAX (KSYM_SYMBOL_LEN+4) /* room for wildcards */ | 810 | #define FTRACE_BUFF_MAX (KSYM_SYMBOL_LEN+4) /* room for wildcards */ |
787 | 811 | ||
788 | struct ftrace_iterator { | 812 | struct ftrace_iterator { |
789 | struct ftrace_page *pg; | 813 | struct ftrace_page *pg; |
790 | unsigned idx; | 814 | int hidx; |
815 | int idx; | ||
791 | unsigned flags; | 816 | unsigned flags; |
792 | unsigned char buffer[FTRACE_BUFF_MAX+1]; | 817 | unsigned char buffer[FTRACE_BUFF_MAX+1]; |
793 | unsigned buffer_idx; | 818 | unsigned buffer_idx; |
@@ -795,15 +820,89 @@ struct ftrace_iterator { | |||
795 | }; | 820 | }; |
796 | 821 | ||
797 | static void * | 822 | static void * |
823 | t_hash_next(struct seq_file *m, void *v, loff_t *pos) | ||
824 | { | ||
825 | struct ftrace_iterator *iter = m->private; | ||
826 | struct hlist_node *hnd = v; | ||
827 | struct hlist_head *hhd; | ||
828 | |||
829 | WARN_ON(!(iter->flags & FTRACE_ITER_HASH)); | ||
830 | |||
831 | (*pos)++; | ||
832 | |||
833 | retry: | ||
834 | if (iter->hidx >= FTRACE_FUNC_HASHSIZE) | ||
835 | return NULL; | ||
836 | |||
837 | hhd = &ftrace_func_hash[iter->hidx]; | ||
838 | |||
839 | if (hlist_empty(hhd)) { | ||
840 | iter->hidx++; | ||
841 | hnd = NULL; | ||
842 | goto retry; | ||
843 | } | ||
844 | |||
845 | if (!hnd) | ||
846 | hnd = hhd->first; | ||
847 | else { | ||
848 | hnd = hnd->next; | ||
849 | if (!hnd) { | ||
850 | iter->hidx++; | ||
851 | goto retry; | ||
852 | } | ||
853 | } | ||
854 | |||
855 | return hnd; | ||
856 | } | ||
857 | |||
858 | static void *t_hash_start(struct seq_file *m, loff_t *pos) | ||
859 | { | ||
860 | struct ftrace_iterator *iter = m->private; | ||
861 | void *p = NULL; | ||
862 | |||
863 | iter->flags |= FTRACE_ITER_HASH; | ||
864 | |||
865 | return t_hash_next(m, p, pos); | ||
866 | } | ||
867 | |||
868 | static int t_hash_show(struct seq_file *m, void *v) | ||
869 | { | ||
870 | struct ftrace_func_probe *rec; | ||
871 | struct hlist_node *hnd = v; | ||
872 | char str[KSYM_SYMBOL_LEN]; | ||
873 | |||
874 | rec = hlist_entry(hnd, struct ftrace_func_probe, node); | ||
875 | |||
876 | if (rec->ops->print) | ||
877 | return rec->ops->print(m, rec->ip, rec->ops, rec->data); | ||
878 | |||
879 | kallsyms_lookup(rec->ip, NULL, NULL, NULL, str); | ||
880 | seq_printf(m, "%s:", str); | ||
881 | |||
882 | kallsyms_lookup((unsigned long)rec->ops->func, NULL, NULL, NULL, str); | ||
883 | seq_printf(m, "%s", str); | ||
884 | |||
885 | if (rec->data) | ||
886 | seq_printf(m, ":%p", rec->data); | ||
887 | seq_putc(m, '\n'); | ||
888 | |||
889 | return 0; | ||
890 | } | ||
891 | |||
892 | static void * | ||
798 | t_next(struct seq_file *m, void *v, loff_t *pos) | 893 | t_next(struct seq_file *m, void *v, loff_t *pos) |
799 | { | 894 | { |
800 | struct ftrace_iterator *iter = m->private; | 895 | struct ftrace_iterator *iter = m->private; |
801 | struct dyn_ftrace *rec = NULL; | 896 | struct dyn_ftrace *rec = NULL; |
802 | 897 | ||
898 | if (iter->flags & FTRACE_ITER_HASH) | ||
899 | return t_hash_next(m, v, pos); | ||
900 | |||
803 | (*pos)++; | 901 | (*pos)++; |
804 | 902 | ||
805 | /* should not be called from interrupt context */ | 903 | if (iter->flags & FTRACE_ITER_PRINTALL) |
806 | spin_lock(&ftrace_lock); | 904 | return NULL; |
905 | |||
807 | retry: | 906 | retry: |
808 | if (iter->idx >= iter->pg->index) { | 907 | if (iter->idx >= iter->pg->index) { |
809 | if (iter->pg->next) { | 908 | if (iter->pg->next) { |
@@ -832,7 +931,6 @@ t_next(struct seq_file *m, void *v, loff_t *pos) | |||
832 | goto retry; | 931 | goto retry; |
833 | } | 932 | } |
834 | } | 933 | } |
835 | spin_unlock(&ftrace_lock); | ||
836 | 934 | ||
837 | return rec; | 935 | return rec; |
838 | } | 936 | } |
@@ -842,6 +940,23 @@ static void *t_start(struct seq_file *m, loff_t *pos) | |||
842 | struct ftrace_iterator *iter = m->private; | 940 | struct ftrace_iterator *iter = m->private; |
843 | void *p = NULL; | 941 | void *p = NULL; |
844 | 942 | ||
943 | mutex_lock(&ftrace_lock); | ||
944 | /* | ||
945 | * For set_ftrace_filter reading, if we have the filter | ||
946 | * off, we can short cut and just print out that all | ||
947 | * functions are enabled. | ||
948 | */ | ||
949 | if (iter->flags & FTRACE_ITER_FILTER && !ftrace_filtered) { | ||
950 | if (*pos > 0) | ||
951 | return t_hash_start(m, pos); | ||
952 | iter->flags |= FTRACE_ITER_PRINTALL; | ||
953 | (*pos)++; | ||
954 | return iter; | ||
955 | } | ||
956 | |||
957 | if (iter->flags & FTRACE_ITER_HASH) | ||
958 | return t_hash_start(m, pos); | ||
959 | |||
845 | if (*pos > 0) { | 960 | if (*pos > 0) { |
846 | if (iter->idx < 0) | 961 | if (iter->idx < 0) |
847 | return p; | 962 | return p; |
@@ -851,18 +966,31 @@ static void *t_start(struct seq_file *m, loff_t *pos) | |||
851 | 966 | ||
852 | p = t_next(m, p, pos); | 967 | p = t_next(m, p, pos); |
853 | 968 | ||
969 | if (!p) | ||
970 | return t_hash_start(m, pos); | ||
971 | |||
854 | return p; | 972 | return p; |
855 | } | 973 | } |
856 | 974 | ||
857 | static void t_stop(struct seq_file *m, void *p) | 975 | static void t_stop(struct seq_file *m, void *p) |
858 | { | 976 | { |
977 | mutex_unlock(&ftrace_lock); | ||
859 | } | 978 | } |
860 | 979 | ||
861 | static int t_show(struct seq_file *m, void *v) | 980 | static int t_show(struct seq_file *m, void *v) |
862 | { | 981 | { |
982 | struct ftrace_iterator *iter = m->private; | ||
863 | struct dyn_ftrace *rec = v; | 983 | struct dyn_ftrace *rec = v; |
864 | char str[KSYM_SYMBOL_LEN]; | 984 | char str[KSYM_SYMBOL_LEN]; |
865 | 985 | ||
986 | if (iter->flags & FTRACE_ITER_HASH) | ||
987 | return t_hash_show(m, v); | ||
988 | |||
989 | if (iter->flags & FTRACE_ITER_PRINTALL) { | ||
990 | seq_printf(m, "#### all functions enabled ####\n"); | ||
991 | return 0; | ||
992 | } | ||
993 | |||
866 | if (!rec) | 994 | if (!rec) |
867 | return 0; | 995 | return 0; |
868 | 996 | ||
@@ -941,23 +1069,16 @@ static void ftrace_filter_reset(int enable) | |||
941 | struct ftrace_page *pg; | 1069 | struct ftrace_page *pg; |
942 | struct dyn_ftrace *rec; | 1070 | struct dyn_ftrace *rec; |
943 | unsigned long type = enable ? FTRACE_FL_FILTER : FTRACE_FL_NOTRACE; | 1071 | unsigned long type = enable ? FTRACE_FL_FILTER : FTRACE_FL_NOTRACE; |
944 | unsigned i; | ||
945 | 1072 | ||
946 | /* should not be called from interrupt context */ | 1073 | mutex_lock(&ftrace_lock); |
947 | spin_lock(&ftrace_lock); | ||
948 | if (enable) | 1074 | if (enable) |
949 | ftrace_filtered = 0; | 1075 | ftrace_filtered = 0; |
950 | pg = ftrace_pages_start; | 1076 | do_for_each_ftrace_rec(pg, rec) { |
951 | while (pg) { | 1077 | if (rec->flags & FTRACE_FL_FAILED) |
952 | for (i = 0; i < pg->index; i++) { | 1078 | continue; |
953 | rec = &pg->records[i]; | 1079 | rec->flags &= ~type; |
954 | if (rec->flags & FTRACE_FL_FAILED) | 1080 | } while_for_each_ftrace_rec(); |
955 | continue; | 1081 | mutex_unlock(&ftrace_lock); |
956 | rec->flags &= ~type; | ||
957 | } | ||
958 | pg = pg->next; | ||
959 | } | ||
960 | spin_unlock(&ftrace_lock); | ||
961 | } | 1082 | } |
962 | 1083 | ||
963 | static int | 1084 | static int |
@@ -1008,16 +1129,6 @@ ftrace_notrace_open(struct inode *inode, struct file *file) | |||
1008 | return ftrace_regex_open(inode, file, 0); | 1129 | return ftrace_regex_open(inode, file, 0); |
1009 | } | 1130 | } |
1010 | 1131 | ||
1011 | static ssize_t | ||
1012 | ftrace_regex_read(struct file *file, char __user *ubuf, | ||
1013 | size_t cnt, loff_t *ppos) | ||
1014 | { | ||
1015 | if (file->f_mode & FMODE_READ) | ||
1016 | return seq_read(file, ubuf, cnt, ppos); | ||
1017 | else | ||
1018 | return -EPERM; | ||
1019 | } | ||
1020 | |||
1021 | static loff_t | 1132 | static loff_t |
1022 | ftrace_regex_lseek(struct file *file, loff_t offset, int origin) | 1133 | ftrace_regex_lseek(struct file *file, loff_t offset, int origin) |
1023 | { | 1134 | { |
@@ -1038,86 +1149,536 @@ enum { | |||
1038 | MATCH_END_ONLY, | 1149 | MATCH_END_ONLY, |
1039 | }; | 1150 | }; |
1040 | 1151 | ||
1041 | static void | 1152 | /* |
1042 | ftrace_match(unsigned char *buff, int len, int enable) | 1153 | * (static function - no need for kernel doc) |
1154 | * | ||
1155 | * Pass in a buffer containing a glob and this function will | ||
1156 | * set search to point to the search part of the buffer and | ||
1157 | * return the type of search it is (see enum above). | ||
1158 | * This does modify buff. | ||
1159 | * | ||
1160 | * Returns enum type. | ||
1161 | * search returns the pointer to use for comparison. | ||
1162 | * not returns 1 if buff started with a '!' | ||
1163 | * 0 otherwise. | ||
1164 | */ | ||
1165 | static int | ||
1166 | ftrace_setup_glob(char *buff, int len, char **search, int *not) | ||
1043 | { | 1167 | { |
1044 | char str[KSYM_SYMBOL_LEN]; | ||
1045 | char *search = NULL; | ||
1046 | struct ftrace_page *pg; | ||
1047 | struct dyn_ftrace *rec; | ||
1048 | int type = MATCH_FULL; | 1168 | int type = MATCH_FULL; |
1049 | unsigned long flag = enable ? FTRACE_FL_FILTER : FTRACE_FL_NOTRACE; | 1169 | int i; |
1050 | unsigned i, match = 0, search_len = 0; | ||
1051 | int not = 0; | ||
1052 | 1170 | ||
1053 | if (buff[0] == '!') { | 1171 | if (buff[0] == '!') { |
1054 | not = 1; | 1172 | *not = 1; |
1055 | buff++; | 1173 | buff++; |
1056 | len--; | 1174 | len--; |
1057 | } | 1175 | } else |
1176 | *not = 0; | ||
1177 | |||
1178 | *search = buff; | ||
1058 | 1179 | ||
1059 | for (i = 0; i < len; i++) { | 1180 | for (i = 0; i < len; i++) { |
1060 | if (buff[i] == '*') { | 1181 | if (buff[i] == '*') { |
1061 | if (!i) { | 1182 | if (!i) { |
1062 | search = buff + i + 1; | 1183 | *search = buff + 1; |
1063 | type = MATCH_END_ONLY; | 1184 | type = MATCH_END_ONLY; |
1064 | search_len = len - (i + 1); | ||
1065 | } else { | 1185 | } else { |
1066 | if (type == MATCH_END_ONLY) { | 1186 | if (type == MATCH_END_ONLY) |
1067 | type = MATCH_MIDDLE_ONLY; | 1187 | type = MATCH_MIDDLE_ONLY; |
1068 | } else { | 1188 | else |
1069 | match = i; | ||
1070 | type = MATCH_FRONT_ONLY; | 1189 | type = MATCH_FRONT_ONLY; |
1071 | } | ||
1072 | buff[i] = 0; | 1190 | buff[i] = 0; |
1073 | break; | 1191 | break; |
1074 | } | 1192 | } |
1075 | } | 1193 | } |
1076 | } | 1194 | } |
1077 | 1195 | ||
1078 | /* should not be called from interrupt context */ | 1196 | return type; |
1079 | spin_lock(&ftrace_lock); | 1197 | } |
1080 | if (enable) | 1198 | |
1081 | ftrace_filtered = 1; | 1199 | static int ftrace_match(char *str, char *regex, int len, int type) |
1082 | pg = ftrace_pages_start; | 1200 | { |
1083 | while (pg) { | 1201 | int matched = 0; |
1084 | for (i = 0; i < pg->index; i++) { | 1202 | char *ptr; |
1085 | int matched = 0; | 1203 | |
1086 | char *ptr; | 1204 | switch (type) { |
1087 | 1205 | case MATCH_FULL: | |
1088 | rec = &pg->records[i]; | 1206 | if (strcmp(str, regex) == 0) |
1089 | if (rec->flags & FTRACE_FL_FAILED) | 1207 | matched = 1; |
1208 | break; | ||
1209 | case MATCH_FRONT_ONLY: | ||
1210 | if (strncmp(str, regex, len) == 0) | ||
1211 | matched = 1; | ||
1212 | break; | ||
1213 | case MATCH_MIDDLE_ONLY: | ||
1214 | if (strstr(str, regex)) | ||
1215 | matched = 1; | ||
1216 | break; | ||
1217 | case MATCH_END_ONLY: | ||
1218 | ptr = strstr(str, regex); | ||
1219 | if (ptr && (ptr[len] == 0)) | ||
1220 | matched = 1; | ||
1221 | break; | ||
1222 | } | ||
1223 | |||
1224 | return matched; | ||
1225 | } | ||
1226 | |||
1227 | static int | ||
1228 | ftrace_match_record(struct dyn_ftrace *rec, char *regex, int len, int type) | ||
1229 | { | ||
1230 | char str[KSYM_SYMBOL_LEN]; | ||
1231 | |||
1232 | kallsyms_lookup(rec->ip, NULL, NULL, NULL, str); | ||
1233 | return ftrace_match(str, regex, len, type); | ||
1234 | } | ||
1235 | |||
1236 | static void ftrace_match_records(char *buff, int len, int enable) | ||
1237 | { | ||
1238 | unsigned int search_len; | ||
1239 | struct ftrace_page *pg; | ||
1240 | struct dyn_ftrace *rec; | ||
1241 | unsigned long flag; | ||
1242 | char *search; | ||
1243 | int type; | ||
1244 | int not; | ||
1245 | |||
1246 | flag = enable ? FTRACE_FL_FILTER : FTRACE_FL_NOTRACE; | ||
1247 | type = ftrace_setup_glob(buff, len, &search, ¬); | ||
1248 | |||
1249 | search_len = strlen(search); | ||
1250 | |||
1251 | mutex_lock(&ftrace_lock); | ||
1252 | do_for_each_ftrace_rec(pg, rec) { | ||
1253 | |||
1254 | if (rec->flags & FTRACE_FL_FAILED) | ||
1255 | continue; | ||
1256 | |||
1257 | if (ftrace_match_record(rec, search, search_len, type)) { | ||
1258 | if (not) | ||
1259 | rec->flags &= ~flag; | ||
1260 | else | ||
1261 | rec->flags |= flag; | ||
1262 | } | ||
1263 | /* | ||
1264 | * Only enable filtering if we have a function that | ||
1265 | * is filtered on. | ||
1266 | */ | ||
1267 | if (enable && (rec->flags & FTRACE_FL_FILTER)) | ||
1268 | ftrace_filtered = 1; | ||
1269 | } while_for_each_ftrace_rec(); | ||
1270 | mutex_unlock(&ftrace_lock); | ||
1271 | } | ||
1272 | |||
1273 | static int | ||
1274 | ftrace_match_module_record(struct dyn_ftrace *rec, char *mod, | ||
1275 | char *regex, int len, int type) | ||
1276 | { | ||
1277 | char str[KSYM_SYMBOL_LEN]; | ||
1278 | char *modname; | ||
1279 | |||
1280 | kallsyms_lookup(rec->ip, NULL, NULL, &modname, str); | ||
1281 | |||
1282 | if (!modname || strcmp(modname, mod)) | ||
1283 | return 0; | ||
1284 | |||
1285 | /* blank search means to match all funcs in the mod */ | ||
1286 | if (len) | ||
1287 | return ftrace_match(str, regex, len, type); | ||
1288 | else | ||
1289 | return 1; | ||
1290 | } | ||
1291 | |||
1292 | static void ftrace_match_module_records(char *buff, char *mod, int enable) | ||
1293 | { | ||
1294 | unsigned search_len = 0; | ||
1295 | struct ftrace_page *pg; | ||
1296 | struct dyn_ftrace *rec; | ||
1297 | int type = MATCH_FULL; | ||
1298 | char *search = buff; | ||
1299 | unsigned long flag; | ||
1300 | int not = 0; | ||
1301 | |||
1302 | flag = enable ? FTRACE_FL_FILTER : FTRACE_FL_NOTRACE; | ||
1303 | |||
1304 | /* blank or '*' mean the same */ | ||
1305 | if (strcmp(buff, "*") == 0) | ||
1306 | buff[0] = 0; | ||
1307 | |||
1308 | /* handle the case of 'dont filter this module' */ | ||
1309 | if (strcmp(buff, "!") == 0 || strcmp(buff, "!*") == 0) { | ||
1310 | buff[0] = 0; | ||
1311 | not = 1; | ||
1312 | } | ||
1313 | |||
1314 | if (strlen(buff)) { | ||
1315 | type = ftrace_setup_glob(buff, strlen(buff), &search, ¬); | ||
1316 | search_len = strlen(search); | ||
1317 | } | ||
1318 | |||
1319 | mutex_lock(&ftrace_lock); | ||
1320 | do_for_each_ftrace_rec(pg, rec) { | ||
1321 | |||
1322 | if (rec->flags & FTRACE_FL_FAILED) | ||
1323 | continue; | ||
1324 | |||
1325 | if (ftrace_match_module_record(rec, mod, | ||
1326 | search, search_len, type)) { | ||
1327 | if (not) | ||
1328 | rec->flags &= ~flag; | ||
1329 | else | ||
1330 | rec->flags |= flag; | ||
1331 | } | ||
1332 | if (enable && (rec->flags & FTRACE_FL_FILTER)) | ||
1333 | ftrace_filtered = 1; | ||
1334 | |||
1335 | } while_for_each_ftrace_rec(); | ||
1336 | mutex_unlock(&ftrace_lock); | ||
1337 | } | ||
1338 | |||
1339 | /* | ||
1340 | * We register the module command as a template to show others how | ||
1341 | * to register the a command as well. | ||
1342 | */ | ||
1343 | |||
1344 | static int | ||
1345 | ftrace_mod_callback(char *func, char *cmd, char *param, int enable) | ||
1346 | { | ||
1347 | char *mod; | ||
1348 | |||
1349 | /* | ||
1350 | * cmd == 'mod' because we only registered this func | ||
1351 | * for the 'mod' ftrace_func_command. | ||
1352 | * But if you register one func with multiple commands, | ||
1353 | * you can tell which command was used by the cmd | ||
1354 | * parameter. | ||
1355 | */ | ||
1356 | |||
1357 | /* we must have a module name */ | ||
1358 | if (!param) | ||
1359 | return -EINVAL; | ||
1360 | |||
1361 | mod = strsep(¶m, ":"); | ||
1362 | if (!strlen(mod)) | ||
1363 | return -EINVAL; | ||
1364 | |||
1365 | ftrace_match_module_records(func, mod, enable); | ||
1366 | return 0; | ||
1367 | } | ||
1368 | |||
1369 | static struct ftrace_func_command ftrace_mod_cmd = { | ||
1370 | .name = "mod", | ||
1371 | .func = ftrace_mod_callback, | ||
1372 | }; | ||
1373 | |||
1374 | static int __init ftrace_mod_cmd_init(void) | ||
1375 | { | ||
1376 | return register_ftrace_command(&ftrace_mod_cmd); | ||
1377 | } | ||
1378 | device_initcall(ftrace_mod_cmd_init); | ||
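The 'mod' command above is explicitly meant as a template. A hedged sketch of registering another command; the name "dump" and its callback are invented for illustration. It would then be reachable as "<glob>:dump" through set_ftrace_filter:

```c
static int ftrace_dump_callback(char *func, char *cmd, char *param, int enable)
{
	/* 'func' is the glob before the ':', 'param' is whatever follows "dump:" */
	ftrace_match_records(func, strlen(func), enable);
	return 0;
}

static struct ftrace_func_command ftrace_dump_cmd = {
	.name	= "dump",
	.func	= ftrace_dump_callback,
};

static int __init ftrace_dump_cmd_init(void)
{
	return register_ftrace_command(&ftrace_dump_cmd);
}
device_initcall(ftrace_dump_cmd_init);
```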
1379 | |||
1380 | static void | ||
1381 | function_trace_probe_call(unsigned long ip, unsigned long parent_ip) | ||
1382 | { | ||
1383 | struct ftrace_func_probe *entry; | ||
1384 | struct hlist_head *hhd; | ||
1385 | struct hlist_node *n; | ||
1386 | unsigned long key; | ||
1387 | int resched; | ||
1388 | |||
1389 | key = hash_long(ip, FTRACE_HASH_BITS); | ||
1390 | |||
1391 | hhd = &ftrace_func_hash[key]; | ||
1392 | |||
1393 | if (hlist_empty(hhd)) | ||
1394 | return; | ||
1395 | |||
1396 | /* | ||
1397 | * Disable preemption for these calls to prevent a RCU grace | ||
1398 | * period. This syncs the hash iteration and freeing of items | ||
1399 | * on the hash. rcu_read_lock is too dangerous here. | ||
1400 | */ | ||
1401 | resched = ftrace_preempt_disable(); | ||
1402 | hlist_for_each_entry_rcu(entry, n, hhd, node) { | ||
1403 | if (entry->ip == ip) | ||
1404 | entry->ops->func(ip, parent_ip, &entry->data); | ||
1405 | } | ||
1406 | ftrace_preempt_enable(resched); | ||
1407 | } | ||
1408 | |||
1409 | static struct ftrace_ops trace_probe_ops __read_mostly = | ||
1410 | { | ||
1411 | .func = function_trace_probe_call, | ||
1412 | }; | ||
1413 | |||
1414 | static int ftrace_probe_registered; | ||
1415 | |||
1416 | static void __enable_ftrace_function_probe(void) | ||
1417 | { | ||
1418 | int i; | ||
1419 | |||
1420 | if (ftrace_probe_registered) | ||
1421 | return; | ||
1422 | |||
1423 | for (i = 0; i < FTRACE_FUNC_HASHSIZE; i++) { | ||
1424 | struct hlist_head *hhd = &ftrace_func_hash[i]; | ||
1425 | if (hhd->first) | ||
1426 | break; | ||
1427 | } | ||
1428 | /* Nothing registered? */ | ||
1429 | if (i == FTRACE_FUNC_HASHSIZE) | ||
1430 | return; | ||
1431 | |||
1432 | __register_ftrace_function(&trace_probe_ops); | ||
1433 | ftrace_startup(0); | ||
1434 | ftrace_probe_registered = 1; | ||
1435 | } | ||
1436 | |||
1437 | static void __disable_ftrace_function_probe(void) | ||
1438 | { | ||
1439 | int i; | ||
1440 | |||
1441 | if (!ftrace_probe_registered) | ||
1442 | return; | ||
1443 | |||
1444 | for (i = 0; i < FTRACE_FUNC_HASHSIZE; i++) { | ||
1445 | struct hlist_head *hhd = &ftrace_func_hash[i]; | ||
1446 | if (hhd->first) | ||
1447 | return; | ||
1448 | } | ||
1449 | |||
1450 | /* no more funcs left */ | ||
1451 | __unregister_ftrace_function(&trace_probe_ops); | ||
1452 | ftrace_shutdown(0); | ||
1453 | ftrace_probe_registered = 0; | ||
1454 | } | ||
1455 | |||
1456 | |||
1457 | static void ftrace_free_entry_rcu(struct rcu_head *rhp) | ||
1458 | { | ||
1459 | struct ftrace_func_probe *entry = | ||
1460 | container_of(rhp, struct ftrace_func_probe, rcu); | ||
1461 | |||
1462 | if (entry->ops->free) | ||
1463 | entry->ops->free(&entry->data); | ||
1464 | kfree(entry); | ||
1465 | } | ||
1466 | |||
1467 | |||
1468 | int | ||
1469 | register_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops, | ||
1470 | void *data) | ||
1471 | { | ||
1472 | struct ftrace_func_probe *entry; | ||
1473 | struct ftrace_page *pg; | ||
1474 | struct dyn_ftrace *rec; | ||
1475 | int type, len, not; | ||
1476 | unsigned long key; | ||
1477 | int count = 0; | ||
1478 | char *search; | ||
1479 | |||
1480 | type = ftrace_setup_glob(glob, strlen(glob), &search, ¬); | ||
1481 | len = strlen(search); | ||
1482 | |||
1483 | /* we do not support '!' for function probes */ | ||
1484 | if (WARN_ON(not)) | ||
1485 | return -EINVAL; | ||
1486 | |||
1487 | mutex_lock(&ftrace_lock); | ||
1488 | do_for_each_ftrace_rec(pg, rec) { | ||
1489 | |||
1490 | if (rec->flags & FTRACE_FL_FAILED) | ||
1491 | continue; | ||
1492 | |||
1493 | if (!ftrace_match_record(rec, search, len, type)) | ||
1494 | continue; | ||
1495 | |||
1496 | entry = kmalloc(sizeof(*entry), GFP_KERNEL); | ||
1497 | if (!entry) { | ||
1498 | /* If we did not process any, then return error */ | ||
1499 | if (!count) | ||
1500 | count = -ENOMEM; | ||
1501 | goto out_unlock; | ||
1502 | } | ||
1503 | |||
1504 | count++; | ||
1505 | |||
1506 | entry->data = data; | ||
1507 | |||
1508 | /* | ||
1509 | * The caller might want to do something special | ||
1510 | * for each function we find. We call the callback | ||
1511 | * to give the caller an opportunity to do so. | ||
1512 | */ | ||
1513 | if (ops->callback) { | ||
1514 | if (ops->callback(rec->ip, &entry->data) < 0) { | ||
1515 | /* caller does not like this func */ | ||
1516 | kfree(entry); | ||
1090 | continue; | 1517 | continue; |
1091 | kallsyms_lookup(rec->ip, NULL, NULL, NULL, str); | ||
1092 | switch (type) { | ||
1093 | case MATCH_FULL: | ||
1094 | if (strcmp(str, buff) == 0) | ||
1095 | matched = 1; | ||
1096 | break; | ||
1097 | case MATCH_FRONT_ONLY: | ||
1098 | if (memcmp(str, buff, match) == 0) | ||
1099 | matched = 1; | ||
1100 | break; | ||
1101 | case MATCH_MIDDLE_ONLY: | ||
1102 | if (strstr(str, search)) | ||
1103 | matched = 1; | ||
1104 | break; | ||
1105 | case MATCH_END_ONLY: | ||
1106 | ptr = strstr(str, search); | ||
1107 | if (ptr && (ptr[search_len] == 0)) | ||
1108 | matched = 1; | ||
1109 | break; | ||
1110 | } | 1518 | } |
1111 | if (matched) { | 1519 | } |
1112 | if (not) | 1520 | |
1113 | rec->flags &= ~flag; | 1521 | entry->ops = ops; |
1114 | else | 1522 | entry->ip = rec->ip; |
1115 | rec->flags |= flag; | 1523 | |
1524 | key = hash_long(entry->ip, FTRACE_HASH_BITS); | ||
1525 | hlist_add_head_rcu(&entry->node, &ftrace_func_hash[key]); | ||
1526 | |||
1527 | } while_for_each_ftrace_rec(); | ||
1528 | __enable_ftrace_function_probe(); | ||
1529 | |||
1530 | out_unlock: | ||
1531 | mutex_unlock(&ftrace_lock); | ||
1532 | |||
1533 | return count; | ||
1534 | } | ||
1535 | |||
1536 | enum { | ||
1537 | PROBE_TEST_FUNC = 1, | ||
1538 | PROBE_TEST_DATA = 2 | ||
1539 | }; | ||
1540 | |||
1541 | static void | ||
1542 | __unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops, | ||
1543 | void *data, int flags) | ||
1544 | { | ||
1545 | struct ftrace_func_probe *entry; | ||
1546 | struct hlist_node *n, *tmp; | ||
1547 | char str[KSYM_SYMBOL_LEN]; | ||
1548 | int type = MATCH_FULL; | ||
1549 | int i, len = 0; | ||
1550 | char *search; | ||
1551 | |||
1552 | if (glob && (strcmp(glob, "*") || !strlen(glob))) | ||
1553 | glob = NULL; | ||
1554 | else { | ||
1555 | int not; | ||
1556 | |||
1557 | type = ftrace_setup_glob(glob, strlen(glob), &search, ¬); | ||
1558 | len = strlen(search); | ||
1559 | |||
1560 | /* we do not support '!' for function probes */ | ||
1561 | if (WARN_ON(not)) | ||
1562 | return; | ||
1563 | } | ||
1564 | |||
1565 | mutex_lock(&ftrace_lock); | ||
1566 | for (i = 0; i < FTRACE_FUNC_HASHSIZE; i++) { | ||
1567 | struct hlist_head *hhd = &ftrace_func_hash[i]; | ||
1568 | |||
1569 | hlist_for_each_entry_safe(entry, n, tmp, hhd, node) { | ||
1570 | |||
1571 | /* break up if statements for readability */ | ||
1572 | if ((flags & PROBE_TEST_FUNC) && entry->ops != ops) | ||
1573 | continue; | ||
1574 | |||
1575 | if ((flags & PROBE_TEST_DATA) && entry->data != data) | ||
1576 | continue; | ||
1577 | |||
1578 | /* do this last, since it is the most expensive */ | ||
1579 | if (glob) { | ||
1580 | kallsyms_lookup(entry->ip, NULL, NULL, | ||
1581 | NULL, str); | ||
1582 | if (!ftrace_match(str, glob, len, type)) | ||
1583 | continue; | ||
1116 | } | 1584 | } |
1585 | |||
1586 | hlist_del(&entry->node); | ||
1587 | call_rcu(&entry->rcu, ftrace_free_entry_rcu); | ||
1117 | } | 1588 | } |
1118 | pg = pg->next; | ||
1119 | } | 1589 | } |
1120 | spin_unlock(&ftrace_lock); | 1590 | __disable_ftrace_function_probe(); |
1591 | mutex_unlock(&ftrace_lock); | ||
1592 | } | ||
1593 | |||
1594 | void | ||
1595 | unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops, | ||
1596 | void *data) | ||
1597 | { | ||
1598 | __unregister_ftrace_function_probe(glob, ops, data, | ||
1599 | PROBE_TEST_FUNC | PROBE_TEST_DATA); | ||
1600 | } | ||
1601 | |||
1602 | void | ||
1603 | unregister_ftrace_function_probe_func(char *glob, struct ftrace_probe_ops *ops) | ||
1604 | { | ||
1605 | __unregister_ftrace_function_probe(glob, ops, NULL, PROBE_TEST_FUNC); | ||
1606 | } | ||
1607 | |||
1608 | void unregister_ftrace_function_probe_all(char *glob) | ||
1609 | { | ||
1610 | __unregister_ftrace_function_probe(glob, NULL, NULL, 0); | ||
1611 | } | ||
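A minimal usage sketch for the probe API added above. Only register_ftrace_function_probe() and unregister_ftrace_function_probe_func() come from this patch; the ops, handler and glob below are assumptions for illustration:

```c
static void example_probe_func(unsigned long ip, unsigned long parent_ip,
			       void **data)
{
	/* runs from the function tracer for every ip matched by the glob */
}

static struct ftrace_probe_ops example_probe_ops = {
	.func	= example_probe_func,
};

static void example_probe_setup(void)
{
	char glob[] = "schedule*";	/* writable buffer: the glob is parsed in place */

	register_ftrace_function_probe(glob, &example_probe_ops, NULL);
}

static void example_probe_teardown(void)
{
	char glob[] = "schedule*";

	unregister_ftrace_function_probe_func(glob, &example_probe_ops);
}
```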
1612 | |||
1613 | static LIST_HEAD(ftrace_commands); | ||
1614 | static DEFINE_MUTEX(ftrace_cmd_mutex); | ||
1615 | |||
1616 | int register_ftrace_command(struct ftrace_func_command *cmd) | ||
1617 | { | ||
1618 | struct ftrace_func_command *p; | ||
1619 | int ret = 0; | ||
1620 | |||
1621 | mutex_lock(&ftrace_cmd_mutex); | ||
1622 | list_for_each_entry(p, &ftrace_commands, list) { | ||
1623 | if (strcmp(cmd->name, p->name) == 0) { | ||
1624 | ret = -EBUSY; | ||
1625 | goto out_unlock; | ||
1626 | } | ||
1627 | } | ||
1628 | list_add(&cmd->list, &ftrace_commands); | ||
1629 | out_unlock: | ||
1630 | mutex_unlock(&ftrace_cmd_mutex); | ||
1631 | |||
1632 | return ret; | ||
1633 | } | ||
1634 | |||
1635 | int unregister_ftrace_command(struct ftrace_func_command *cmd) | ||
1636 | { | ||
1637 | struct ftrace_func_command *p, *n; | ||
1638 | int ret = -ENODEV; | ||
1639 | |||
1640 | mutex_lock(&ftrace_cmd_mutex); | ||
1641 | list_for_each_entry_safe(p, n, &ftrace_commands, list) { | ||
1642 | if (strcmp(cmd->name, p->name) == 0) { | ||
1643 | ret = 0; | ||
1644 | list_del_init(&p->list); | ||
1645 | goto out_unlock; | ||
1646 | } | ||
1647 | } | ||
1648 | out_unlock: | ||
1649 | mutex_unlock(&ftrace_cmd_mutex); | ||
1650 | |||
1651 | return ret; | ||
1652 | } | ||
1653 | |||
1654 | static int ftrace_process_regex(char *buff, int len, int enable) | ||
1655 | { | ||
1656 | char *func, *command, *next = buff; | ||
1657 | struct ftrace_func_command *p; | ||
1658 | int ret = -EINVAL; | ||
1659 | |||
1660 | func = strsep(&next, ":"); | ||
1661 | |||
1662 | if (!next) { | ||
1663 | ftrace_match_records(func, len, enable); | ||
1664 | return 0; | ||
1665 | } | ||
1666 | |||
1667 | /* command found */ | ||
1668 | |||
1669 | command = strsep(&next, ":"); | ||
1670 | |||
1671 | mutex_lock(&ftrace_cmd_mutex); | ||
1672 | list_for_each_entry(p, &ftrace_commands, list) { | ||
1673 | if (strcmp(p->name, command) == 0) { | ||
1674 | ret = p->func(func, command, next, enable); | ||
1675 | goto out_unlock; | ||
1676 | } | ||
1677 | } | ||
1678 | out_unlock: | ||
1679 | mutex_unlock(&ftrace_cmd_mutex); | ||
1680 | |||
1681 | return ret; | ||
1121 | } | 1682 | } |
1122 | 1683 | ||
1123 | static ssize_t | 1684 | static ssize_t |
@@ -1187,7 +1748,10 @@ ftrace_regex_write(struct file *file, const char __user *ubuf, | |||
1187 | if (isspace(ch)) { | 1748 | if (isspace(ch)) { |
1188 | iter->filtered++; | 1749 | iter->filtered++; |
1189 | iter->buffer[iter->buffer_idx] = 0; | 1750 | iter->buffer[iter->buffer_idx] = 0; |
1190 | ftrace_match(iter->buffer, iter->buffer_idx, enable); | 1751 | ret = ftrace_process_regex(iter->buffer, |
1752 | iter->buffer_idx, enable); | ||
1753 | if (ret) | ||
1754 | goto out; | ||
1191 | iter->buffer_idx = 0; | 1755 | iter->buffer_idx = 0; |
1192 | } else | 1756 | } else |
1193 | iter->flags |= FTRACE_ITER_CONT; | 1757 | iter->flags |= FTRACE_ITER_CONT; |
@@ -1226,7 +1790,7 @@ ftrace_set_regex(unsigned char *buf, int len, int reset, int enable) | |||
1226 | if (reset) | 1790 | if (reset) |
1227 | ftrace_filter_reset(enable); | 1791 | ftrace_filter_reset(enable); |
1228 | if (buf) | 1792 | if (buf) |
1229 | ftrace_match(buf, len, enable); | 1793 | ftrace_match_records(buf, len, enable); |
1230 | mutex_unlock(&ftrace_regex_lock); | 1794 | mutex_unlock(&ftrace_regex_lock); |
1231 | } | 1795 | } |
1232 | 1796 | ||
@@ -1276,15 +1840,13 @@ ftrace_regex_release(struct inode *inode, struct file *file, int enable) | |||
1276 | if (iter->buffer_idx) { | 1840 | if (iter->buffer_idx) { |
1277 | iter->filtered++; | 1841 | iter->filtered++; |
1278 | iter->buffer[iter->buffer_idx] = 0; | 1842 | iter->buffer[iter->buffer_idx] = 0; |
1279 | ftrace_match(iter->buffer, iter->buffer_idx, enable); | 1843 | ftrace_match_records(iter->buffer, iter->buffer_idx, enable); |
1280 | } | 1844 | } |
1281 | 1845 | ||
1282 | mutex_lock(&ftrace_sysctl_lock); | 1846 | mutex_lock(&ftrace_lock); |
1283 | mutex_lock(&ftrace_start_lock); | ||
1284 | if (ftrace_start_up && ftrace_enabled) | 1847 | if (ftrace_start_up && ftrace_enabled) |
1285 | ftrace_run_update_code(FTRACE_ENABLE_CALLS); | 1848 | ftrace_run_update_code(FTRACE_ENABLE_CALLS); |
1286 | mutex_unlock(&ftrace_start_lock); | 1849 | mutex_unlock(&ftrace_lock); |
1287 | mutex_unlock(&ftrace_sysctl_lock); | ||
1288 | 1850 | ||
1289 | kfree(iter); | 1851 | kfree(iter); |
1290 | mutex_unlock(&ftrace_regex_lock); | 1852 | mutex_unlock(&ftrace_regex_lock); |
@@ -1303,31 +1865,31 @@ ftrace_notrace_release(struct inode *inode, struct file *file) | |||
1303 | return ftrace_regex_release(inode, file, 0); | 1865 | return ftrace_regex_release(inode, file, 0); |
1304 | } | 1866 | } |
1305 | 1867 | ||
1306 | static struct file_operations ftrace_avail_fops = { | 1868 | static const struct file_operations ftrace_avail_fops = { |
1307 | .open = ftrace_avail_open, | 1869 | .open = ftrace_avail_open, |
1308 | .read = seq_read, | 1870 | .read = seq_read, |
1309 | .llseek = seq_lseek, | 1871 | .llseek = seq_lseek, |
1310 | .release = ftrace_avail_release, | 1872 | .release = ftrace_avail_release, |
1311 | }; | 1873 | }; |
1312 | 1874 | ||
1313 | static struct file_operations ftrace_failures_fops = { | 1875 | static const struct file_operations ftrace_failures_fops = { |
1314 | .open = ftrace_failures_open, | 1876 | .open = ftrace_failures_open, |
1315 | .read = seq_read, | 1877 | .read = seq_read, |
1316 | .llseek = seq_lseek, | 1878 | .llseek = seq_lseek, |
1317 | .release = ftrace_avail_release, | 1879 | .release = ftrace_avail_release, |
1318 | }; | 1880 | }; |
1319 | 1881 | ||
1320 | static struct file_operations ftrace_filter_fops = { | 1882 | static const struct file_operations ftrace_filter_fops = { |
1321 | .open = ftrace_filter_open, | 1883 | .open = ftrace_filter_open, |
1322 | .read = ftrace_regex_read, | 1884 | .read = seq_read, |
1323 | .write = ftrace_filter_write, | 1885 | .write = ftrace_filter_write, |
1324 | .llseek = ftrace_regex_lseek, | 1886 | .llseek = ftrace_regex_lseek, |
1325 | .release = ftrace_filter_release, | 1887 | .release = ftrace_filter_release, |
1326 | }; | 1888 | }; |
1327 | 1889 | ||
1328 | static struct file_operations ftrace_notrace_fops = { | 1890 | static const struct file_operations ftrace_notrace_fops = { |
1329 | .open = ftrace_notrace_open, | 1891 | .open = ftrace_notrace_open, |
1330 | .read = ftrace_regex_read, | 1892 | .read = seq_read, |
1331 | .write = ftrace_notrace_write, | 1893 | .write = ftrace_notrace_write, |
1332 | .llseek = ftrace_regex_lseek, | 1894 | .llseek = ftrace_regex_lseek, |
1333 | .release = ftrace_notrace_release, | 1895 | .release = ftrace_notrace_release, |
@@ -1360,6 +1922,10 @@ static void *g_start(struct seq_file *m, loff_t *pos) | |||
1360 | 1922 | ||
1361 | mutex_lock(&graph_lock); | 1923 | mutex_lock(&graph_lock); |
1362 | 1924 | ||
1925 | /* Nothing, tell g_show to print all functions are enabled */ | ||
1926 | if (!ftrace_graph_count && !*pos) | ||
1927 | return (void *)1; | ||
1928 | |||
1363 | p = g_next(m, p, pos); | 1929 | p = g_next(m, p, pos); |
1364 | 1930 | ||
1365 | return p; | 1931 | return p; |
@@ -1378,6 +1944,11 @@ static int g_show(struct seq_file *m, void *v) | |||
1378 | if (!ptr) | 1944 | if (!ptr) |
1379 | return 0; | 1945 | return 0; |
1380 | 1946 | ||
1947 | if (ptr == (unsigned long *)1) { | ||
1948 | seq_printf(m, "#### all functions enabled ####\n"); | ||
1949 | return 0; | ||
1950 | } | ||
1951 | |||
1381 | kallsyms_lookup(*ptr, NULL, NULL, NULL, str); | 1952 | kallsyms_lookup(*ptr, NULL, NULL, NULL, str); |
1382 | 1953 | ||
1383 | seq_printf(m, "%s\n", str); | 1954 | seq_printf(m, "%s\n", str); |
@@ -1420,53 +1991,53 @@ ftrace_graph_open(struct inode *inode, struct file *file) | |||
1420 | return ret; | 1991 | return ret; |
1421 | } | 1992 | } |
1422 | 1993 | ||
1423 | static ssize_t | ||
1424 | ftrace_graph_read(struct file *file, char __user *ubuf, | ||
1425 | size_t cnt, loff_t *ppos) | ||
1426 | { | ||
1427 | if (file->f_mode & FMODE_READ) | ||
1428 | return seq_read(file, ubuf, cnt, ppos); | ||
1429 | else | ||
1430 | return -EPERM; | ||
1431 | } | ||
1432 | |||
1433 | static int | 1994 | static int |
1434 | ftrace_set_func(unsigned long *array, int idx, char *buffer) | 1995 | ftrace_set_func(unsigned long *array, int *idx, char *buffer) |
1435 | { | 1996 | { |
1436 | char str[KSYM_SYMBOL_LEN]; | ||
1437 | struct dyn_ftrace *rec; | 1997 | struct dyn_ftrace *rec; |
1438 | struct ftrace_page *pg; | 1998 | struct ftrace_page *pg; |
1999 | int search_len; | ||
1439 | int found = 0; | 2000 | int found = 0; |
1440 | int i, j; | 2001 | int type, not; |
2002 | char *search; | ||
2003 | bool exists; | ||
2004 | int i; | ||
1441 | 2005 | ||
1442 | if (ftrace_disabled) | 2006 | if (ftrace_disabled) |
1443 | return -ENODEV; | 2007 | return -ENODEV; |
1444 | 2008 | ||
1445 | /* should not be called from interrupt context */ | 2009 | /* decode regex */ |
1446 | spin_lock(&ftrace_lock); | 2010 | type = ftrace_setup_glob(buffer, strlen(buffer), &search, ¬); |
2011 | if (not) | ||
2012 | return -EINVAL; | ||
2013 | |||
2014 | search_len = strlen(search); | ||
1447 | 2015 | ||
1448 | for (pg = ftrace_pages_start; pg; pg = pg->next) { | 2016 | mutex_lock(&ftrace_lock); |
1449 | for (i = 0; i < pg->index; i++) { | 2017 | do_for_each_ftrace_rec(pg, rec) { |
1450 | rec = &pg->records[i]; | ||
1451 | 2018 | ||
1452 | if (rec->flags & (FTRACE_FL_FAILED | FTRACE_FL_FREE)) | 2019 | if (*idx >= FTRACE_GRAPH_MAX_FUNCS) |
1453 | continue; | 2020 | break; |
1454 | 2021 | ||
1455 | kallsyms_lookup(rec->ip, NULL, NULL, NULL, str); | 2022 | if (rec->flags & (FTRACE_FL_FAILED | FTRACE_FL_FREE)) |
1456 | if (strcmp(str, buffer) == 0) { | 2023 | continue; |
2024 | |||
2025 | if (ftrace_match_record(rec, search, search_len, type)) { | ||
2026 | /* ensure it is not already in the array */ | ||
2027 | exists = false; | ||
2028 | for (i = 0; i < *idx; i++) | ||
2029 | if (array[i] == rec->ip) { | ||
2030 | exists = true; | ||
2031 | break; | ||
2032 | } | ||
2033 | if (!exists) { | ||
2034 | array[(*idx)++] = rec->ip; | ||
1457 | found = 1; | 2035 | found = 1; |
1458 | for (j = 0; j < idx; j++) | ||
1459 | if (array[j] == rec->ip) { | ||
1460 | found = 0; | ||
1461 | break; | ||
1462 | } | ||
1463 | if (found) | ||
1464 | array[idx] = rec->ip; | ||
1465 | break; | ||
1466 | } | 2036 | } |
1467 | } | 2037 | } |
1468 | } | 2038 | } while_for_each_ftrace_rec(); |
1469 | spin_unlock(&ftrace_lock); | 2039 | |
2040 | mutex_unlock(&ftrace_lock); | ||
1470 | 2041 | ||
1471 | return found ? 0 : -EINVAL; | 2042 | return found ? 0 : -EINVAL; |
1472 | } | 2043 | } |
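[editor's note] ftrace_set_func() above now decodes a glob, rejects negated patterns, walks every record, skips addresses already in the array, and appends until FTRACE_GRAPH_MAX_FUNCS is reached. The standalone userspace model below mirrors that flow, using fnmatch() as a stand-in for ftrace_setup_glob()/ftrace_match_record(); every name in it is illustrative.

    /* Userspace model of "match a glob, skip duplicates, append". */
    #include <fnmatch.h>
    #include <stdio.h>

    #define MAX_FUNCS 4

    static const char *symbols[] = {
    	"schedule", "schedule_timeout", "do_fork", "vfs_read",
    };

    static int set_func(unsigned long *array, int *idx, const char *pattern)
    {
    	int found = 0;

    	/* a negated pattern is not accepted here */
    	if (pattern[0] == '!')
    		return -1;

    	for (unsigned long ip = 0; ip < sizeof(symbols) / sizeof(symbols[0]); ip++) {
    		if (*idx >= MAX_FUNCS)
    			break;
    		if (fnmatch(pattern, symbols[ip], 0) != 0)
    			continue;

    		/* ensure this "address" is not already in the array */
    		int exists = 0;
    		for (int i = 0; i < *idx; i++) {
    			if (array[i] == ip) {
    				exists = 1;
    				break;
    			}
    		}
    		if (!exists) {
    			array[(*idx)++] = ip;
    			found = 1;
    		}
    	}
    	return found ? 0 : -1;
    }

    int main(void)
    {
    	unsigned long funcs[MAX_FUNCS];
    	int count = 0;

    	if (set_func(funcs, &count, "schedule*") == 0)
    		printf("matched %d function(s)\n", count);
    	return 0;
    }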
@@ -1534,13 +2105,11 @@ ftrace_graph_write(struct file *file, const char __user *ubuf, | |||
1534 | } | 2105 | } |
1535 | buffer[index] = 0; | 2106 | buffer[index] = 0; |
1536 | 2107 | ||
1537 | /* we allow only one at a time */ | 2108 | /* we allow only one expression at a time */ |
1538 | ret = ftrace_set_func(array, ftrace_graph_count, buffer); | 2109 | ret = ftrace_set_func(array, &ftrace_graph_count, buffer); |
1539 | if (ret) | 2110 | if (ret) |
1540 | goto out; | 2111 | goto out; |
1541 | 2112 | ||
1542 | ftrace_graph_count++; | ||
1543 | |||
1544 | file->f_pos += read; | 2113 | file->f_pos += read; |
1545 | 2114 | ||
1546 | ret = read; | 2115 | ret = read; |
@@ -1552,7 +2121,7 @@ ftrace_graph_write(struct file *file, const char __user *ubuf, | |||
1552 | 2121 | ||
1553 | static const struct file_operations ftrace_graph_fops = { | 2122 | static const struct file_operations ftrace_graph_fops = { |
1554 | .open = ftrace_graph_open, | 2123 | .open = ftrace_graph_open, |
1555 | .read = ftrace_graph_read, | 2124 | .read = seq_read, |
1556 | .write = ftrace_graph_write, | 2125 | .write = ftrace_graph_write, |
1557 | }; | 2126 | }; |
1558 | #endif /* CONFIG_FUNCTION_GRAPH_TRACER */ | 2127 | #endif /* CONFIG_FUNCTION_GRAPH_TRACER */ |
@@ -1604,7 +2173,7 @@ static int ftrace_convert_nops(struct module *mod, | |||
1604 | unsigned long addr; | 2173 | unsigned long addr; |
1605 | unsigned long flags; | 2174 | unsigned long flags; |
1606 | 2175 | ||
1607 | mutex_lock(&ftrace_start_lock); | 2176 | mutex_lock(&ftrace_lock); |
1608 | p = start; | 2177 | p = start; |
1609 | while (p < end) { | 2178 | while (p < end) { |
1610 | addr = ftrace_call_adjust(*p++); | 2179 | addr = ftrace_call_adjust(*p++); |
@@ -1623,7 +2192,7 @@ static int ftrace_convert_nops(struct module *mod, | |||
1623 | local_irq_save(flags); | 2192 | local_irq_save(flags); |
1624 | ftrace_update_code(mod); | 2193 | ftrace_update_code(mod); |
1625 | local_irq_restore(flags); | 2194 | local_irq_restore(flags); |
1626 | mutex_unlock(&ftrace_start_lock); | 2195 | mutex_unlock(&ftrace_lock); |
1627 | 2196 | ||
1628 | return 0; | 2197 | return 0; |
1629 | } | 2198 | } |
@@ -1700,7 +2269,7 @@ ftrace_pid_read(struct file *file, char __user *ubuf, | |||
1700 | if (ftrace_pid_trace == ftrace_swapper_pid) | 2269 | if (ftrace_pid_trace == ftrace_swapper_pid) |
1701 | r = sprintf(buf, "swapper tasks\n"); | 2270 | r = sprintf(buf, "swapper tasks\n"); |
1702 | else if (ftrace_pid_trace) | 2271 | else if (ftrace_pid_trace) |
1703 | r = sprintf(buf, "%u\n", pid_nr(ftrace_pid_trace)); | 2272 | r = sprintf(buf, "%u\n", pid_vnr(ftrace_pid_trace)); |
1704 | else | 2273 | else |
1705 | r = sprintf(buf, "no pid\n"); | 2274 | r = sprintf(buf, "no pid\n"); |
1706 | 2275 | ||
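[editor's note] The hunk above switches the report from pid_nr() (the global pid number) to pid_vnr() (the pid as seen from the caller's pid namespace). A hedged sketch of a read handler built on the same helpers follows; my_pid_read, my_traced_pid and the buffer size are assumptions, not the ftrace code.

    /* Hedged sketch: report a struct pid to userspace, namespace-aware. */
    #include <linux/fs.h>
    #include <linux/kernel.h>
    #include <linux/pid.h>
    #include <linux/uaccess.h>

    static struct pid *my_traced_pid;	/* hypothetical, set elsewhere */

    static ssize_t my_pid_read(struct file *file, char __user *ubuf,
    			   size_t cnt, loff_t *ppos)
    {
    	char buf[32];
    	int r;

    	if (my_traced_pid)
    		/* pid_vnr(): number in the reader's namespace; pid_nr()
    		 * would give the global (init-namespace) value instead. */
    		r = sprintf(buf, "%u\n", pid_vnr(my_traced_pid));
    	else
    		r = sprintf(buf, "no pid\n");

    	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
    }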
@@ -1796,7 +2365,7 @@ ftrace_pid_write(struct file *filp, const char __user *ubuf, | |||
1796 | if (ret < 0) | 2365 | if (ret < 0) |
1797 | return ret; | 2366 | return ret; |
1798 | 2367 | ||
1799 | mutex_lock(&ftrace_start_lock); | 2368 | mutex_lock(&ftrace_lock); |
1800 | if (val < 0) { | 2369 | if (val < 0) { |
1801 | /* disable pid tracing */ | 2370 | /* disable pid tracing */ |
1802 | if (!ftrace_pid_trace) | 2371 | if (!ftrace_pid_trace) |
@@ -1835,12 +2404,12 @@ ftrace_pid_write(struct file *filp, const char __user *ubuf, | |||
1835 | ftrace_startup_enable(0); | 2404 | ftrace_startup_enable(0); |
1836 | 2405 | ||
1837 | out: | 2406 | out: |
1838 | mutex_unlock(&ftrace_start_lock); | 2407 | mutex_unlock(&ftrace_lock); |
1839 | 2408 | ||
1840 | return cnt; | 2409 | return cnt; |
1841 | } | 2410 | } |
1842 | 2411 | ||
1843 | static struct file_operations ftrace_pid_fops = { | 2412 | static const struct file_operations ftrace_pid_fops = { |
1844 | .read = ftrace_pid_read, | 2413 | .read = ftrace_pid_read, |
1845 | .write = ftrace_pid_write, | 2414 | .write = ftrace_pid_write, |
1846 | }; | 2415 | }; |
@@ -1863,7 +2432,6 @@ static __init int ftrace_init_debugfs(void) | |||
1863 | "'set_ftrace_pid' entry\n"); | 2432 | "'set_ftrace_pid' entry\n"); |
1864 | return 0; | 2433 | return 0; |
1865 | } | 2434 | } |
1866 | |||
1867 | fs_initcall(ftrace_init_debugfs); | 2435 | fs_initcall(ftrace_init_debugfs); |
1868 | 2436 | ||
1869 | /** | 2437 | /** |
@@ -1898,17 +2466,17 @@ int register_ftrace_function(struct ftrace_ops *ops) | |||
1898 | if (unlikely(ftrace_disabled)) | 2466 | if (unlikely(ftrace_disabled)) |
1899 | return -1; | 2467 | return -1; |
1900 | 2468 | ||
1901 | mutex_lock(&ftrace_sysctl_lock); | 2469 | mutex_lock(&ftrace_lock); |
1902 | 2470 | ||
1903 | ret = __register_ftrace_function(ops); | 2471 | ret = __register_ftrace_function(ops); |
1904 | ftrace_startup(0); | 2472 | ftrace_startup(0); |
1905 | 2473 | ||
1906 | mutex_unlock(&ftrace_sysctl_lock); | 2474 | mutex_unlock(&ftrace_lock); |
1907 | return ret; | 2475 | return ret; |
1908 | } | 2476 | } |
1909 | 2477 | ||
1910 | /** | 2478 | /** |
1911 | * unregister_ftrace_function - unresgister a function for profiling. | 2479 | * unregister_ftrace_function - unregister a function for profiling. |
1912 | * @ops - ops structure that holds the function to unregister | 2480 | * @ops - ops structure that holds the function to unregister |
1913 | * | 2481 | * |
1914 | * Unregister a function that was added to be called by ftrace profiling. | 2482 | * Unregister a function that was added to be called by ftrace profiling. |
@@ -1917,10 +2485,10 @@ int unregister_ftrace_function(struct ftrace_ops *ops) | |||
1917 | { | 2485 | { |
1918 | int ret; | 2486 | int ret; |
1919 | 2487 | ||
1920 | mutex_lock(&ftrace_sysctl_lock); | 2488 | mutex_lock(&ftrace_lock); |
1921 | ret = __unregister_ftrace_function(ops); | 2489 | ret = __unregister_ftrace_function(ops); |
1922 | ftrace_shutdown(0); | 2490 | ftrace_shutdown(0); |
1923 | mutex_unlock(&ftrace_sysctl_lock); | 2491 | mutex_unlock(&ftrace_lock); |
1924 | 2492 | ||
1925 | return ret; | 2493 | return ret; |
1926 | } | 2494 | } |
@@ -1935,7 +2503,7 @@ ftrace_enable_sysctl(struct ctl_table *table, int write, | |||
1935 | if (unlikely(ftrace_disabled)) | 2503 | if (unlikely(ftrace_disabled)) |
1936 | return -ENODEV; | 2504 | return -ENODEV; |
1937 | 2505 | ||
1938 | mutex_lock(&ftrace_sysctl_lock); | 2506 | mutex_lock(&ftrace_lock); |
1939 | 2507 | ||
1940 | ret = proc_dointvec(table, write, file, buffer, lenp, ppos); | 2508 | ret = proc_dointvec(table, write, file, buffer, lenp, ppos); |
1941 | 2509 | ||
@@ -1964,7 +2532,7 @@ ftrace_enable_sysctl(struct ctl_table *table, int write, | |||
1964 | } | 2532 | } |
1965 | 2533 | ||
1966 | out: | 2534 | out: |
1967 | mutex_unlock(&ftrace_sysctl_lock); | 2535 | mutex_unlock(&ftrace_lock); |
1968 | return ret; | 2536 | return ret; |
1969 | } | 2537 | } |
1970 | 2538 | ||
@@ -2029,11 +2597,43 @@ free: | |||
2029 | return ret; | 2597 | return ret; |
2030 | } | 2598 | } |
2031 | 2599 | ||
2600 | static void | ||
2601 | ftrace_graph_probe_sched_switch(struct rq *__rq, struct task_struct *prev, | ||
2602 | struct task_struct *next) | ||
2603 | { | ||
2604 | unsigned long long timestamp; | ||
2605 | int index; | ||
2606 | |||
2607 | /* | ||
2608 | * Does the user want to count the time a function was asleep? | ||
2609 | * If so, do not update the time stamps. | ||
2610 | */ | ||
2611 | if (trace_flags & TRACE_ITER_SLEEP_TIME) | ||
2612 | return; | ||
2613 | |||
2614 | timestamp = trace_clock_local(); | ||
2615 | |||
2616 | prev->ftrace_timestamp = timestamp; | ||
2617 | |||
2618 | /* only process tasks that we timestamped */ | ||
2619 | if (!next->ftrace_timestamp) | ||
2620 | return; | ||
2621 | |||
2622 | /* | ||
2623 | * Update all the counters in next to make up for the | ||
2624 | * time next was sleeping. | ||
2625 | */ | ||
2626 | timestamp -= next->ftrace_timestamp; | ||
2627 | |||
2628 | for (index = next->curr_ret_stack; index >= 0; index--) | ||
2629 | next->ret_stack[index].calltime += timestamp; | ||
2630 | } | ||
2631 | |||
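[editor's note] ftrace_graph_probe_sched_switch() above pushes every pending frame's calltime forward by the time the task spent off the CPU, so that (return time - calltime) excludes sleep unless the sleep-time option is set. The standalone arithmetic below models that adjustment; the frame layout and the numbers are made up for illustration.

    /* Userspace model of the calltime adjustment done at sched_switch. */
    #include <stdio.h>

    struct frame {
    	unsigned long long calltime;	/* when the function was entered */
    };

    int main(void)
    {
    	struct frame stack[3] = { {1000}, {1200}, {1300} };
    	unsigned long long slept_at = 1400;	/* task switched out */
    	unsigned long long woke_at  = 2400;	/* task switched back in */
    	unsigned long long delta = woke_at - slept_at;

    	/* push every pending call forward by the sleep time, so that
    	 * rettime - calltime no longer includes the 1000 units of sleep */
    	for (int i = 2; i >= 0; i--)
    		stack[i].calltime += delta;

    	for (int i = 0; i < 3; i++)
    		printf("frame %d calltime now %llu\n", i, stack[i].calltime);
    	return 0;
    }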
2032 | /* Allocate a return stack for each task */ | 2632 | /* Allocate a return stack for each task */ |
2033 | static int start_graph_tracing(void) | 2633 | static int start_graph_tracing(void) |
2034 | { | 2634 | { |
2035 | struct ftrace_ret_stack **ret_stack_list; | 2635 | struct ftrace_ret_stack **ret_stack_list; |
2036 | int ret; | 2636 | int ret, cpu; |
2037 | 2637 | ||
2038 | ret_stack_list = kmalloc(FTRACE_RETSTACK_ALLOC_SIZE * | 2638 | ret_stack_list = kmalloc(FTRACE_RETSTACK_ALLOC_SIZE * |
2039 | sizeof(struct ftrace_ret_stack *), | 2639 | sizeof(struct ftrace_ret_stack *), |
@@ -2042,10 +2642,21 @@ static int start_graph_tracing(void) | |||
2042 | if (!ret_stack_list) | 2642 | if (!ret_stack_list) |
2043 | return -ENOMEM; | 2643 | return -ENOMEM; |
2044 | 2644 | ||
2645 | /* The cpu_boot init_task->ret_stack will never be freed */ | ||
2646 | for_each_online_cpu(cpu) | ||
2647 | ftrace_graph_init_task(idle_task(cpu)); | ||
2648 | |||
2045 | do { | 2649 | do { |
2046 | ret = alloc_retstack_tasklist(ret_stack_list); | 2650 | ret = alloc_retstack_tasklist(ret_stack_list); |
2047 | } while (ret == -EAGAIN); | 2651 | } while (ret == -EAGAIN); |
2048 | 2652 | ||
2653 | if (!ret) { | ||
2654 | ret = register_trace_sched_switch(ftrace_graph_probe_sched_switch); | ||
2655 | if (ret) | ||
2656 | pr_info("ftrace_graph: Couldn't activate tracepoint" | ||
2657 | " probe to kernel_sched_switch\n"); | ||
2658 | } | ||
2659 | |||
2049 | kfree(ret_stack_list); | 2660 | kfree(ret_stack_list); |
2050 | return ret; | 2661 | return ret; |
2051 | } | 2662 | } |
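[editor's note] start_graph_tracing() above gives each online CPU's idle task a return stack, retries the per-task allocation while it races with fork (-EAGAIN), and only then hooks the sched_switch tracepoint. Below is a tiny userspace model of that retry-until-stable loop; the names and the simulated race are invented.

    /* Userspace model of "retry while the task list keeps changing". */
    #include <errno.h>
    #include <stdio.h>

    static int attempts;

    /* Pretend the first two passes raced with newly forked tasks. */
    static int alloc_all_stacks(void)
    {
    	return (++attempts < 3) ? -EAGAIN : 0;
    }

    int main(void)
    {
    	int ret;

    	do {
    		ret = alloc_all_stacks();
    	} while (ret == -EAGAIN);

    	printf("settled after %d pass(es), ret=%d\n", attempts, ret);
    	return 0;
    }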
@@ -2076,7 +2687,13 @@ int register_ftrace_graph(trace_func_graph_ret_t retfunc, | |||
2076 | { | 2687 | { |
2077 | int ret = 0; | 2688 | int ret = 0; |
2078 | 2689 | ||
2079 | mutex_lock(&ftrace_sysctl_lock); | 2690 | mutex_lock(&ftrace_lock); |
2691 | |||
2692 | /* we currently allow only one tracer registered at a time */ | ||
2693 | if (atomic_read(&ftrace_graph_active)) { | ||
2694 | ret = -EBUSY; | ||
2695 | goto out; | ||
2696 | } | ||
2080 | 2697 | ||
2081 | ftrace_suspend_notifier.notifier_call = ftrace_suspend_notifier_call; | 2698 | ftrace_suspend_notifier.notifier_call = ftrace_suspend_notifier_call; |
2082 | register_pm_notifier(&ftrace_suspend_notifier); | 2699 | register_pm_notifier(&ftrace_suspend_notifier); |
@@ -2094,21 +2711,26 @@ int register_ftrace_graph(trace_func_graph_ret_t retfunc, | |||
2094 | ftrace_startup(FTRACE_START_FUNC_RET); | 2711 | ftrace_startup(FTRACE_START_FUNC_RET); |
2095 | 2712 | ||
2096 | out: | 2713 | out: |
2097 | mutex_unlock(&ftrace_sysctl_lock); | 2714 | mutex_unlock(&ftrace_lock); |
2098 | return ret; | 2715 | return ret; |
2099 | } | 2716 | } |
2100 | 2717 | ||
2101 | void unregister_ftrace_graph(void) | 2718 | void unregister_ftrace_graph(void) |
2102 | { | 2719 | { |
2103 | mutex_lock(&ftrace_sysctl_lock); | 2720 | mutex_lock(&ftrace_lock); |
2721 | |||
2722 | if (!unlikely(atomic_read(&ftrace_graph_active))) | ||
2723 | goto out; | ||
2104 | 2724 | ||
2105 | atomic_dec(&ftrace_graph_active); | 2725 | atomic_dec(&ftrace_graph_active); |
2726 | unregister_trace_sched_switch(ftrace_graph_probe_sched_switch); | ||
2106 | ftrace_graph_return = (trace_func_graph_ret_t)ftrace_stub; | 2727 | ftrace_graph_return = (trace_func_graph_ret_t)ftrace_stub; |
2107 | ftrace_graph_entry = ftrace_graph_entry_stub; | 2728 | ftrace_graph_entry = ftrace_graph_entry_stub; |
2108 | ftrace_shutdown(FTRACE_STOP_FUNC_RET); | 2729 | ftrace_shutdown(FTRACE_STOP_FUNC_RET); |
2109 | unregister_pm_notifier(&ftrace_suspend_notifier); | 2730 | unregister_pm_notifier(&ftrace_suspend_notifier); |
2110 | 2731 | ||
2111 | mutex_unlock(&ftrace_sysctl_lock); | 2732 | out: |
2733 | mutex_unlock(&ftrace_lock); | ||
2112 | } | 2734 | } |
2113 | 2735 | ||
2114 | /* Allocate a return stack for newly created task */ | 2736 | /* Allocate a return stack for newly created task */ |
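[editor's note] register_ftrace_graph() above now refuses a second tracer with -EBUSY while one is active, and unregister_ftrace_graph() bails out quietly when nothing is registered, so the pair can be called defensively. A small userspace model of that idempotent register/unregister pattern follows; the names are illustrative only.

    /* Userspace model of "one client at a time, tolerate double unregister". */
    #include <errno.h>
    #include <stdio.h>

    static int active;	/* stands in for ftrace_graph_active */

    static int my_register(void)
    {
    	if (active)
    		return -EBUSY;		/* only one tracer at a time */
    	active++;
    	return 0;
    }

    static void my_unregister(void)
    {
    	if (!active)
    		return;			/* nothing registered: no-op */
    	active--;
    }

    int main(void)
    {
    	printf("first register:  %d\n", my_register());
    	printf("second register: %d\n", my_register());	/* -EBUSY */
    	my_unregister();
    	my_unregister();	/* harmless second call */
    	return 0;
    }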
@@ -2123,6 +2745,7 @@ void ftrace_graph_init_task(struct task_struct *t) | |||
2123 | t->curr_ret_stack = -1; | 2745 | t->curr_ret_stack = -1; |
2124 | atomic_set(&t->tracing_graph_pause, 0); | 2746 | atomic_set(&t->tracing_graph_pause, 0); |
2125 | atomic_set(&t->trace_overrun, 0); | 2747 | atomic_set(&t->trace_overrun, 0); |
2748 | t->ftrace_timestamp = 0; | ||
2126 | } else | 2749 | } else |
2127 | t->ret_stack = NULL; | 2750 | t->ret_stack = NULL; |
2128 | } | 2751 | } |