author     Rusty Russell <rusty@rustcorp.com.au>   2008-12-13 06:25:51 -0500
committer  Rusty Russell <rusty@rustcorp.com.au>   2008-12-13 06:25:51 -0500
commit     968ea6d80e395cf11a51143cfa1b9a14ada676df (patch)
tree       dc2acec8c9bdced33afe1e273ee5e0b0b93d2703 /kernel/trace/ftrace.c
parent     7be7585393d311866653564fbcd10a3232773c0b (diff)
parent     8299608f140ae321e4eb5d1306184265d2b9511e (diff)
Merge ../linux-2.6-x86
Conflicts:
	arch/x86/kernel/io_apic.c
	kernel/sched.c
	kernel/sched_stats.h
Diffstat (limited to 'kernel/trace/ftrace.c')
-rw-r--r--   kernel/trace/ftrace.c   914
1 file changed, 783 insertions, 131 deletions
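Among other things, the merged changes add a set_ftrace_pid debugfs file (created in ftrace_init_debugfs() and handled by ftrace_pid_write() in the diff below) that limits function tracing to a single task. A minimal userspace sketch of driving it follows; it is not part of the commit, and the debugfs mount point is an assumption:

/*
 * Minimal sketch, not part of the commit: write a pid into the
 * set_ftrace_pid file added by this change.  Per ftrace_pid_write(),
 * a positive value limits tracing to that pid, "0" selects the
 * per-cpu idle (swapper) tasks, and a negative value clears the
 * filter.  The debugfs mount point below is an assumption.
 */
#include <stdio.h>
#include <stdlib.h>

int main(int argc, char **argv)
{
	const char *path = "/sys/kernel/debug/tracing/set_ftrace_pid";
	const char *pid = argc > 1 ? argv[1] : "-1";	/* default: clear the filter */
	FILE *f = fopen(path, "w");

	if (!f) {
		perror(path);
		return EXIT_FAILURE;
	}
	fprintf(f, "%s\n", pid);
	return fclose(f) ? EXIT_FAILURE : EXIT_SUCCESS;
}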
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index 78db083390f0..a12f80efceaa 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -47,6 +47,13 @@ | |||
47 | int ftrace_enabled __read_mostly; | 47 | int ftrace_enabled __read_mostly; |
48 | static int last_ftrace_enabled; | 48 | static int last_ftrace_enabled; |
49 | 49 | ||
50 | /* set when tracing only a pid */ | ||
51 | struct pid *ftrace_pid_trace; | ||
52 | static struct pid * const ftrace_swapper_pid = &init_struct_pid; | ||
53 | |||
54 | /* Quick disabling of function tracer. */ | ||
55 | int function_trace_stop; | ||
56 | |||
50 | /* | 57 | /* |
51 | * ftrace_disabled is set when an anomaly is discovered. | 58 | * ftrace_disabled is set when an anomaly is discovered. |
52 | * ftrace_disabled is much stronger than ftrace_enabled. | 59 | * ftrace_disabled is much stronger than ftrace_enabled. |
@@ -55,6 +62,7 @@ static int ftrace_disabled __read_mostly; | |||
55 | 62 | ||
56 | static DEFINE_SPINLOCK(ftrace_lock); | 63 | static DEFINE_SPINLOCK(ftrace_lock); |
57 | static DEFINE_MUTEX(ftrace_sysctl_lock); | 64 | static DEFINE_MUTEX(ftrace_sysctl_lock); |
65 | static DEFINE_MUTEX(ftrace_start_lock); | ||
58 | 66 | ||
59 | static struct ftrace_ops ftrace_list_end __read_mostly = | 67 | static struct ftrace_ops ftrace_list_end __read_mostly = |
60 | { | 68 | { |
@@ -63,6 +71,8 @@ static struct ftrace_ops ftrace_list_end __read_mostly = | |||
63 | 71 | ||
64 | static struct ftrace_ops *ftrace_list __read_mostly = &ftrace_list_end; | 72 | static struct ftrace_ops *ftrace_list __read_mostly = &ftrace_list_end; |
65 | ftrace_func_t ftrace_trace_function __read_mostly = ftrace_stub; | 73 | ftrace_func_t ftrace_trace_function __read_mostly = ftrace_stub; |
74 | ftrace_func_t __ftrace_trace_function __read_mostly = ftrace_stub; | ||
75 | ftrace_func_t ftrace_pid_function __read_mostly = ftrace_stub; | ||
66 | 76 | ||
67 | static void ftrace_list_func(unsigned long ip, unsigned long parent_ip) | 77 | static void ftrace_list_func(unsigned long ip, unsigned long parent_ip) |
68 | { | 78 | { |
@@ -79,6 +89,21 @@ static void ftrace_list_func(unsigned long ip, unsigned long parent_ip) | |||
79 | }; | 89 | }; |
80 | } | 90 | } |
81 | 91 | ||
92 | static void ftrace_pid_func(unsigned long ip, unsigned long parent_ip) | ||
93 | { | ||
94 | if (!test_tsk_trace_trace(current)) | ||
95 | return; | ||
96 | |||
97 | ftrace_pid_function(ip, parent_ip); | ||
98 | } | ||
99 | |||
100 | static void set_ftrace_pid_function(ftrace_func_t func) | ||
101 | { | ||
102 | /* do not set ftrace_pid_function to itself! */ | ||
103 | if (func != ftrace_pid_func) | ||
104 | ftrace_pid_function = func; | ||
105 | } | ||
106 | |||
82 | /** | 107 | /** |
83 | * clear_ftrace_function - reset the ftrace function | 108 | * clear_ftrace_function - reset the ftrace function |
84 | * | 109 | * |
@@ -88,7 +113,23 @@ static void ftrace_list_func(unsigned long ip, unsigned long parent_ip) | |||
88 | void clear_ftrace_function(void) | 113 | void clear_ftrace_function(void) |
89 | { | 114 | { |
90 | ftrace_trace_function = ftrace_stub; | 115 | ftrace_trace_function = ftrace_stub; |
116 | __ftrace_trace_function = ftrace_stub; | ||
117 | ftrace_pid_function = ftrace_stub; | ||
118 | } | ||
119 | |||
120 | #ifndef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST | ||
121 | /* | ||
122 | * For those archs that do not test ftrace_trace_stop in their | ||
123 | * mcount call site, we need to do it from C. | ||
124 | */ | ||
125 | static void ftrace_test_stop_func(unsigned long ip, unsigned long parent_ip) | ||
126 | { | ||
127 | if (function_trace_stop) | ||
128 | return; | ||
129 | |||
130 | __ftrace_trace_function(ip, parent_ip); | ||
91 | } | 131 | } |
132 | #endif | ||
92 | 133 | ||
93 | static int __register_ftrace_function(struct ftrace_ops *ops) | 134 | static int __register_ftrace_function(struct ftrace_ops *ops) |
94 | { | 135 | { |
@@ -106,14 +147,28 @@ static int __register_ftrace_function(struct ftrace_ops *ops) | |||
106 | ftrace_list = ops; | 147 | ftrace_list = ops; |
107 | 148 | ||
108 | if (ftrace_enabled) { | 149 | if (ftrace_enabled) { |
150 | ftrace_func_t func; | ||
151 | |||
152 | if (ops->next == &ftrace_list_end) | ||
153 | func = ops->func; | ||
154 | else | ||
155 | func = ftrace_list_func; | ||
156 | |||
157 | if (ftrace_pid_trace) { | ||
158 | set_ftrace_pid_function(func); | ||
159 | func = ftrace_pid_func; | ||
160 | } | ||
161 | |||
109 | /* | 162 | /* |
110 | * For one func, simply call it directly. | 163 | * For one func, simply call it directly. |
111 | * For more than one func, call the chain. | 164 | * For more than one func, call the chain. |
112 | */ | 165 | */ |
113 | if (ops->next == &ftrace_list_end) | 166 | #ifdef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST |
114 | ftrace_trace_function = ops->func; | 167 | ftrace_trace_function = func; |
115 | else | 168 | #else |
116 | ftrace_trace_function = ftrace_list_func; | 169 | __ftrace_trace_function = func; |
170 | ftrace_trace_function = ftrace_test_stop_func; | ||
171 | #endif | ||
117 | } | 172 | } |
118 | 173 | ||
119 | spin_unlock(&ftrace_lock); | 174 | spin_unlock(&ftrace_lock); |
@@ -152,9 +207,19 @@ static int __unregister_ftrace_function(struct ftrace_ops *ops) | |||
152 | 207 | ||
153 | if (ftrace_enabled) { | 208 | if (ftrace_enabled) { |
154 | /* If we only have one func left, then call that directly */ | 209 | /* If we only have one func left, then call that directly */ |
155 | if (ftrace_list == &ftrace_list_end || | 210 | if (ftrace_list->next == &ftrace_list_end) { |
156 | ftrace_list->next == &ftrace_list_end) | 211 | ftrace_func_t func = ftrace_list->func; |
157 | ftrace_trace_function = ftrace_list->func; | 212 | |
213 | if (ftrace_pid_trace) { | ||
214 | set_ftrace_pid_function(func); | ||
215 | func = ftrace_pid_func; | ||
216 | } | ||
217 | #ifdef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST | ||
218 | ftrace_trace_function = func; | ||
219 | #else | ||
220 | __ftrace_trace_function = func; | ||
221 | #endif | ||
222 | } | ||
158 | } | 223 | } |
159 | 224 | ||
160 | out: | 225 | out: |
@@ -163,6 +228,36 @@ static int __unregister_ftrace_function(struct ftrace_ops *ops) | |||
163 | return ret; | 228 | return ret; |
164 | } | 229 | } |
165 | 230 | ||
231 | static void ftrace_update_pid_func(void) | ||
232 | { | ||
233 | ftrace_func_t func; | ||
234 | |||
235 | /* should not be called from interrupt context */ | ||
236 | spin_lock(&ftrace_lock); | ||
237 | |||
238 | if (ftrace_trace_function == ftrace_stub) | ||
239 | goto out; | ||
240 | |||
241 | func = ftrace_trace_function; | ||
242 | |||
243 | if (ftrace_pid_trace) { | ||
244 | set_ftrace_pid_function(func); | ||
245 | func = ftrace_pid_func; | ||
246 | } else { | ||
247 | if (func == ftrace_pid_func) | ||
248 | func = ftrace_pid_function; | ||
249 | } | ||
250 | |||
251 | #ifdef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST | ||
252 | ftrace_trace_function = func; | ||
253 | #else | ||
254 | __ftrace_trace_function = func; | ||
255 | #endif | ||
256 | |||
257 | out: | ||
258 | spin_unlock(&ftrace_lock); | ||
259 | } | ||
260 | |||
166 | #ifdef CONFIG_DYNAMIC_FTRACE | 261 | #ifdef CONFIG_DYNAMIC_FTRACE |
167 | #ifndef CONFIG_FTRACE_MCOUNT_RECORD | 262 | #ifndef CONFIG_FTRACE_MCOUNT_RECORD |
168 | # error Dynamic ftrace depends on MCOUNT_RECORD | 263 | # error Dynamic ftrace depends on MCOUNT_RECORD |
@@ -182,6 +277,8 @@ enum { | |||
182 | FTRACE_UPDATE_TRACE_FUNC = (1 << 2), | 277 | FTRACE_UPDATE_TRACE_FUNC = (1 << 2), |
183 | FTRACE_ENABLE_MCOUNT = (1 << 3), | 278 | FTRACE_ENABLE_MCOUNT = (1 << 3), |
184 | FTRACE_DISABLE_MCOUNT = (1 << 4), | 279 | FTRACE_DISABLE_MCOUNT = (1 << 4), |
280 | FTRACE_START_FUNC_RET = (1 << 5), | ||
281 | FTRACE_STOP_FUNC_RET = (1 << 6), | ||
185 | }; | 282 | }; |
186 | 283 | ||
187 | static int ftrace_filtered; | 284 | static int ftrace_filtered; |
@@ -308,7 +405,7 @@ ftrace_record_ip(unsigned long ip) | |||
308 | { | 405 | { |
309 | struct dyn_ftrace *rec; | 406 | struct dyn_ftrace *rec; |
310 | 407 | ||
311 | if (!ftrace_enabled || ftrace_disabled) | 408 | if (ftrace_disabled) |
312 | return NULL; | 409 | return NULL; |
313 | 410 | ||
314 | rec = ftrace_alloc_dyn_node(ip); | 411 | rec = ftrace_alloc_dyn_node(ip); |
@@ -322,14 +419,51 @@ ftrace_record_ip(unsigned long ip) | |||
322 | return rec; | 419 | return rec; |
323 | } | 420 | } |
324 | 421 | ||
325 | #define FTRACE_ADDR ((long)(ftrace_caller)) | 422 | static void print_ip_ins(const char *fmt, unsigned char *p) |
423 | { | ||
424 | int i; | ||
425 | |||
426 | printk(KERN_CONT "%s", fmt); | ||
427 | |||
428 | for (i = 0; i < MCOUNT_INSN_SIZE; i++) | ||
429 | printk(KERN_CONT "%s%02x", i ? ":" : "", p[i]); | ||
430 | } | ||
431 | |||
432 | static void ftrace_bug(int failed, unsigned long ip) | ||
433 | { | ||
434 | switch (failed) { | ||
435 | case -EFAULT: | ||
436 | FTRACE_WARN_ON_ONCE(1); | ||
437 | pr_info("ftrace faulted on modifying "); | ||
438 | print_ip_sym(ip); | ||
439 | break; | ||
440 | case -EINVAL: | ||
441 | FTRACE_WARN_ON_ONCE(1); | ||
442 | pr_info("ftrace failed to modify "); | ||
443 | print_ip_sym(ip); | ||
444 | print_ip_ins(" actual: ", (unsigned char *)ip); | ||
445 | printk(KERN_CONT "\n"); | ||
446 | break; | ||
447 | case -EPERM: | ||
448 | FTRACE_WARN_ON_ONCE(1); | ||
449 | pr_info("ftrace faulted on writing "); | ||
450 | print_ip_sym(ip); | ||
451 | break; | ||
452 | default: | ||
453 | FTRACE_WARN_ON_ONCE(1); | ||
454 | pr_info("ftrace faulted on unknown error "); | ||
455 | print_ip_sym(ip); | ||
456 | } | ||
457 | } | ||
458 | |||
326 | 459 | ||
327 | static int | 460 | static int |
328 | __ftrace_replace_code(struct dyn_ftrace *rec, | 461 | __ftrace_replace_code(struct dyn_ftrace *rec, int enable) |
329 | unsigned char *nop, int enable) | ||
330 | { | 462 | { |
331 | unsigned long ip, fl; | 463 | unsigned long ip, fl; |
332 | unsigned char *call, *old, *new; | 464 | unsigned long ftrace_addr; |
465 | |||
466 | ftrace_addr = (unsigned long)ftrace_caller; | ||
333 | 467 | ||
334 | ip = rec->ip; | 468 | ip = rec->ip; |
335 | 469 | ||
@@ -388,34 +522,28 @@ __ftrace_replace_code(struct dyn_ftrace *rec, | |||
388 | } | 522 | } |
389 | } | 523 | } |
390 | 524 | ||
391 | call = ftrace_call_replace(ip, FTRACE_ADDR); | 525 | if (rec->flags & FTRACE_FL_ENABLED) |
392 | 526 | return ftrace_make_call(rec, ftrace_addr); | |
393 | if (rec->flags & FTRACE_FL_ENABLED) { | 527 | else |
394 | old = nop; | 528 | return ftrace_make_nop(NULL, rec, ftrace_addr); |
395 | new = call; | ||
396 | } else { | ||
397 | old = call; | ||
398 | new = nop; | ||
399 | } | ||
400 | |||
401 | return ftrace_modify_code(ip, old, new); | ||
402 | } | 529 | } |
403 | 530 | ||
404 | static void ftrace_replace_code(int enable) | 531 | static void ftrace_replace_code(int enable) |
405 | { | 532 | { |
406 | int i, failed; | 533 | int i, failed; |
407 | unsigned char *nop = NULL; | ||
408 | struct dyn_ftrace *rec; | 534 | struct dyn_ftrace *rec; |
409 | struct ftrace_page *pg; | 535 | struct ftrace_page *pg; |
410 | 536 | ||
411 | nop = ftrace_nop_replace(); | ||
412 | |||
413 | for (pg = ftrace_pages_start; pg; pg = pg->next) { | 537 | for (pg = ftrace_pages_start; pg; pg = pg->next) { |
414 | for (i = 0; i < pg->index; i++) { | 538 | for (i = 0; i < pg->index; i++) { |
415 | rec = &pg->records[i]; | 539 | rec = &pg->records[i]; |
416 | 540 | ||
417 | /* don't modify code that has already faulted */ | 541 | /* |
418 | if (rec->flags & FTRACE_FL_FAILED) | 542 | * Skip over free records and records that have |
543 | * failed. | ||
544 | */ | ||
545 | if (rec->flags & FTRACE_FL_FREE || | ||
546 | rec->flags & FTRACE_FL_FAILED) | ||
419 | continue; | 547 | continue; |
420 | 548 | ||
421 | /* ignore updates to this record's mcount site */ | 549 | /* ignore updates to this record's mcount site */ |
@@ -426,68 +554,30 @@ static void ftrace_replace_code(int enable) | |||
426 | unfreeze_record(rec); | 554 | unfreeze_record(rec); |
427 | } | 555 | } |
428 | 556 | ||
429 | failed = __ftrace_replace_code(rec, nop, enable); | 557 | failed = __ftrace_replace_code(rec, enable); |
430 | if (failed && (rec->flags & FTRACE_FL_CONVERTED)) { | 558 | if (failed && (rec->flags & FTRACE_FL_CONVERTED)) { |
431 | rec->flags |= FTRACE_FL_FAILED; | 559 | rec->flags |= FTRACE_FL_FAILED; |
432 | if ((system_state == SYSTEM_BOOTING) || | 560 | if ((system_state == SYSTEM_BOOTING) || |
433 | !core_kernel_text(rec->ip)) { | 561 | !core_kernel_text(rec->ip)) { |
434 | ftrace_free_rec(rec); | 562 | ftrace_free_rec(rec); |
435 | } | 563 | } else |
564 | ftrace_bug(failed, rec->ip); | ||
436 | } | 565 | } |
437 | } | 566 | } |
438 | } | 567 | } |
439 | } | 568 | } |
440 | 569 | ||
441 | static void print_ip_ins(const char *fmt, unsigned char *p) | ||
442 | { | ||
443 | int i; | ||
444 | |||
445 | printk(KERN_CONT "%s", fmt); | ||
446 | |||
447 | for (i = 0; i < MCOUNT_INSN_SIZE; i++) | ||
448 | printk(KERN_CONT "%s%02x", i ? ":" : "", p[i]); | ||
449 | } | ||
450 | |||
451 | static int | 570 | static int |
452 | ftrace_code_disable(struct dyn_ftrace *rec) | 571 | ftrace_code_disable(struct module *mod, struct dyn_ftrace *rec) |
453 | { | 572 | { |
454 | unsigned long ip; | 573 | unsigned long ip; |
455 | unsigned char *nop, *call; | ||
456 | int ret; | 574 | int ret; |
457 | 575 | ||
458 | ip = rec->ip; | 576 | ip = rec->ip; |
459 | 577 | ||
460 | nop = ftrace_nop_replace(); | 578 | ret = ftrace_make_nop(mod, rec, mcount_addr); |
461 | call = ftrace_call_replace(ip, mcount_addr); | ||
462 | |||
463 | ret = ftrace_modify_code(ip, call, nop); | ||
464 | if (ret) { | 579 | if (ret) { |
465 | switch (ret) { | 580 | ftrace_bug(ret, ip); |
466 | case -EFAULT: | ||
467 | FTRACE_WARN_ON_ONCE(1); | ||
468 | pr_info("ftrace faulted on modifying "); | ||
469 | print_ip_sym(ip); | ||
470 | break; | ||
471 | case -EINVAL: | ||
472 | FTRACE_WARN_ON_ONCE(1); | ||
473 | pr_info("ftrace failed to modify "); | ||
474 | print_ip_sym(ip); | ||
475 | print_ip_ins(" expected: ", call); | ||
476 | print_ip_ins(" actual: ", (unsigned char *)ip); | ||
477 | print_ip_ins(" replace: ", nop); | ||
478 | printk(KERN_CONT "\n"); | ||
479 | break; | ||
480 | case -EPERM: | ||
481 | FTRACE_WARN_ON_ONCE(1); | ||
482 | pr_info("ftrace faulted on writing "); | ||
483 | print_ip_sym(ip); | ||
484 | break; | ||
485 | default: | ||
486 | FTRACE_WARN_ON_ONCE(1); | ||
487 | pr_info("ftrace faulted on unknown error "); | ||
488 | print_ip_sym(ip); | ||
489 | } | ||
490 | |||
491 | rec->flags |= FTRACE_FL_FAILED; | 581 | rec->flags |= FTRACE_FL_FAILED; |
492 | return 0; | 582 | return 0; |
493 | } | 583 | } |
@@ -506,6 +596,11 @@ static int __ftrace_modify_code(void *data) | |||
506 | if (*command & FTRACE_UPDATE_TRACE_FUNC) | 596 | if (*command & FTRACE_UPDATE_TRACE_FUNC) |
507 | ftrace_update_ftrace_func(ftrace_trace_function); | 597 | ftrace_update_ftrace_func(ftrace_trace_function); |
508 | 598 | ||
599 | if (*command & FTRACE_START_FUNC_RET) | ||
600 | ftrace_enable_ftrace_graph_caller(); | ||
601 | else if (*command & FTRACE_STOP_FUNC_RET) | ||
602 | ftrace_disable_ftrace_graph_caller(); | ||
603 | |||
509 | return 0; | 604 | return 0; |
510 | } | 605 | } |
511 | 606 | ||
@@ -515,43 +610,43 @@ static void ftrace_run_update_code(int command) | |||
515 | } | 610 | } |
516 | 611 | ||
517 | static ftrace_func_t saved_ftrace_func; | 612 | static ftrace_func_t saved_ftrace_func; |
518 | static int ftrace_start; | 613 | static int ftrace_start_up; |
519 | static DEFINE_MUTEX(ftrace_start_lock); | ||
520 | 614 | ||
521 | static void ftrace_startup(void) | 615 | static void ftrace_startup_enable(int command) |
522 | { | 616 | { |
523 | int command = 0; | ||
524 | |||
525 | if (unlikely(ftrace_disabled)) | ||
526 | return; | ||
527 | |||
528 | mutex_lock(&ftrace_start_lock); | ||
529 | ftrace_start++; | ||
530 | command |= FTRACE_ENABLE_CALLS; | ||
531 | |||
532 | if (saved_ftrace_func != ftrace_trace_function) { | 617 | if (saved_ftrace_func != ftrace_trace_function) { |
533 | saved_ftrace_func = ftrace_trace_function; | 618 | saved_ftrace_func = ftrace_trace_function; |
534 | command |= FTRACE_UPDATE_TRACE_FUNC; | 619 | command |= FTRACE_UPDATE_TRACE_FUNC; |
535 | } | 620 | } |
536 | 621 | ||
537 | if (!command || !ftrace_enabled) | 622 | if (!command || !ftrace_enabled) |
538 | goto out; | 623 | return; |
539 | 624 | ||
540 | ftrace_run_update_code(command); | 625 | ftrace_run_update_code(command); |
541 | out: | ||
542 | mutex_unlock(&ftrace_start_lock); | ||
543 | } | 626 | } |
544 | 627 | ||
545 | static void ftrace_shutdown(void) | 628 | static void ftrace_startup(int command) |
546 | { | 629 | { |
547 | int command = 0; | 630 | if (unlikely(ftrace_disabled)) |
631 | return; | ||
548 | 632 | ||
633 | mutex_lock(&ftrace_start_lock); | ||
634 | ftrace_start_up++; | ||
635 | command |= FTRACE_ENABLE_CALLS; | ||
636 | |||
637 | ftrace_startup_enable(command); | ||
638 | |||
639 | mutex_unlock(&ftrace_start_lock); | ||
640 | } | ||
641 | |||
642 | static void ftrace_shutdown(int command) | ||
643 | { | ||
549 | if (unlikely(ftrace_disabled)) | 644 | if (unlikely(ftrace_disabled)) |
550 | return; | 645 | return; |
551 | 646 | ||
552 | mutex_lock(&ftrace_start_lock); | 647 | mutex_lock(&ftrace_start_lock); |
553 | ftrace_start--; | 648 | ftrace_start_up--; |
554 | if (!ftrace_start) | 649 | if (!ftrace_start_up) |
555 | command |= FTRACE_DISABLE_CALLS; | 650 | command |= FTRACE_DISABLE_CALLS; |
556 | 651 | ||
557 | if (saved_ftrace_func != ftrace_trace_function) { | 652 | if (saved_ftrace_func != ftrace_trace_function) { |
@@ -577,8 +672,8 @@ static void ftrace_startup_sysctl(void) | |||
577 | mutex_lock(&ftrace_start_lock); | 672 | mutex_lock(&ftrace_start_lock); |
578 | /* Force update next time */ | 673 | /* Force update next time */ |
579 | saved_ftrace_func = NULL; | 674 | saved_ftrace_func = NULL; |
580 | /* ftrace_start is true if we want ftrace running */ | 675 | /* ftrace_start_up is true if we want ftrace running */ |
581 | if (ftrace_start) | 676 | if (ftrace_start_up) |
582 | command |= FTRACE_ENABLE_CALLS; | 677 | command |= FTRACE_ENABLE_CALLS; |
583 | 678 | ||
584 | ftrace_run_update_code(command); | 679 | ftrace_run_update_code(command); |
@@ -593,8 +688,8 @@ static void ftrace_shutdown_sysctl(void) | |||
593 | return; | 688 | return; |
594 | 689 | ||
595 | mutex_lock(&ftrace_start_lock); | 690 | mutex_lock(&ftrace_start_lock); |
596 | /* ftrace_start is true if ftrace is running */ | 691 | /* ftrace_start_up is true if ftrace is running */ |
597 | if (ftrace_start) | 692 | if (ftrace_start_up) |
598 | command |= FTRACE_DISABLE_CALLS; | 693 | command |= FTRACE_DISABLE_CALLS; |
599 | 694 | ||
600 | ftrace_run_update_code(command); | 695 | ftrace_run_update_code(command); |
@@ -605,7 +700,7 @@ static cycle_t ftrace_update_time; | |||
605 | static unsigned long ftrace_update_cnt; | 700 | static unsigned long ftrace_update_cnt; |
606 | unsigned long ftrace_update_tot_cnt; | 701 | unsigned long ftrace_update_tot_cnt; |
607 | 702 | ||
608 | static int ftrace_update_code(void) | 703 | static int ftrace_update_code(struct module *mod) |
609 | { | 704 | { |
610 | struct dyn_ftrace *p, *t; | 705 | struct dyn_ftrace *p, *t; |
611 | cycle_t start, stop; | 706 | cycle_t start, stop; |
@@ -622,7 +717,7 @@ static int ftrace_update_code(void) | |||
622 | list_del_init(&p->list); | 717 | list_del_init(&p->list); |
623 | 718 | ||
624 | /* convert record (i.e, patch mcount-call with NOP) */ | 719 | /* convert record (i.e, patch mcount-call with NOP) */ |
625 | if (ftrace_code_disable(p)) { | 720 | if (ftrace_code_disable(mod, p)) { |
626 | p->flags |= FTRACE_FL_CONVERTED; | 721 | p->flags |= FTRACE_FL_CONVERTED; |
627 | ftrace_update_cnt++; | 722 | ftrace_update_cnt++; |
628 | } else | 723 | } else |
@@ -690,7 +785,6 @@ enum { | |||
690 | #define FTRACE_BUFF_MAX (KSYM_SYMBOL_LEN+4) /* room for wildcards */ | 785 | #define FTRACE_BUFF_MAX (KSYM_SYMBOL_LEN+4) /* room for wildcards */ |
691 | 786 | ||
692 | struct ftrace_iterator { | 787 | struct ftrace_iterator { |
693 | loff_t pos; | ||
694 | struct ftrace_page *pg; | 788 | struct ftrace_page *pg; |
695 | unsigned idx; | 789 | unsigned idx; |
696 | unsigned flags; | 790 | unsigned flags; |
@@ -715,6 +809,8 @@ t_next(struct seq_file *m, void *v, loff_t *pos) | |||
715 | iter->pg = iter->pg->next; | 809 | iter->pg = iter->pg->next; |
716 | iter->idx = 0; | 810 | iter->idx = 0; |
717 | goto retry; | 811 | goto retry; |
812 | } else { | ||
813 | iter->idx = -1; | ||
718 | } | 814 | } |
719 | } else { | 815 | } else { |
720 | rec = &iter->pg->records[iter->idx++]; | 816 | rec = &iter->pg->records[iter->idx++]; |
@@ -737,8 +833,6 @@ t_next(struct seq_file *m, void *v, loff_t *pos) | |||
737 | } | 833 | } |
738 | spin_unlock(&ftrace_lock); | 834 | spin_unlock(&ftrace_lock); |
739 | 835 | ||
740 | iter->pos = *pos; | ||
741 | |||
742 | return rec; | 836 | return rec; |
743 | } | 837 | } |
744 | 838 | ||
@@ -746,13 +840,15 @@ static void *t_start(struct seq_file *m, loff_t *pos) | |||
746 | { | 840 | { |
747 | struct ftrace_iterator *iter = m->private; | 841 | struct ftrace_iterator *iter = m->private; |
748 | void *p = NULL; | 842 | void *p = NULL; |
749 | loff_t l = -1; | ||
750 | 843 | ||
751 | if (*pos > iter->pos) | 844 | if (*pos > 0) { |
752 | *pos = iter->pos; | 845 | if (iter->idx < 0) |
846 | return p; | ||
847 | (*pos)--; | ||
848 | iter->idx--; | ||
849 | } | ||
753 | 850 | ||
754 | l = *pos; | 851 | p = t_next(m, p, pos); |
755 | p = t_next(m, p, &l); | ||
756 | 852 | ||
757 | return p; | 853 | return p; |
758 | } | 854 | } |
@@ -763,21 +859,15 @@ static void t_stop(struct seq_file *m, void *p) | |||
763 | 859 | ||
764 | static int t_show(struct seq_file *m, void *v) | 860 | static int t_show(struct seq_file *m, void *v) |
765 | { | 861 | { |
766 | struct ftrace_iterator *iter = m->private; | ||
767 | struct dyn_ftrace *rec = v; | 862 | struct dyn_ftrace *rec = v; |
768 | char str[KSYM_SYMBOL_LEN]; | 863 | char str[KSYM_SYMBOL_LEN]; |
769 | int ret = 0; | ||
770 | 864 | ||
771 | if (!rec) | 865 | if (!rec) |
772 | return 0; | 866 | return 0; |
773 | 867 | ||
774 | kallsyms_lookup(rec->ip, NULL, NULL, NULL, str); | 868 | kallsyms_lookup(rec->ip, NULL, NULL, NULL, str); |
775 | 869 | ||
776 | ret = seq_printf(m, "%s\n", str); | 870 | seq_printf(m, "%s\n", str); |
777 | if (ret < 0) { | ||
778 | iter->pos--; | ||
779 | iter->idx--; | ||
780 | } | ||
781 | 871 | ||
782 | return 0; | 872 | return 0; |
783 | } | 873 | } |
@@ -803,7 +893,6 @@ ftrace_avail_open(struct inode *inode, struct file *file) | |||
803 | return -ENOMEM; | 893 | return -ENOMEM; |
804 | 894 | ||
805 | iter->pg = ftrace_pages_start; | 895 | iter->pg = ftrace_pages_start; |
806 | iter->pos = 0; | ||
807 | 896 | ||
808 | ret = seq_open(file, &show_ftrace_seq_ops); | 897 | ret = seq_open(file, &show_ftrace_seq_ops); |
809 | if (!ret) { | 898 | if (!ret) { |
@@ -890,7 +979,6 @@ ftrace_regex_open(struct inode *inode, struct file *file, int enable) | |||
890 | 979 | ||
891 | if (file->f_mode & FMODE_READ) { | 980 | if (file->f_mode & FMODE_READ) { |
892 | iter->pg = ftrace_pages_start; | 981 | iter->pg = ftrace_pages_start; |
893 | iter->pos = 0; | ||
894 | iter->flags = enable ? FTRACE_ITER_FILTER : | 982 | iter->flags = enable ? FTRACE_ITER_FILTER : |
895 | FTRACE_ITER_NOTRACE; | 983 | FTRACE_ITER_NOTRACE; |
896 | 984 | ||
@@ -1181,7 +1269,7 @@ ftrace_regex_release(struct inode *inode, struct file *file, int enable) | |||
1181 | 1269 | ||
1182 | mutex_lock(&ftrace_sysctl_lock); | 1270 | mutex_lock(&ftrace_sysctl_lock); |
1183 | mutex_lock(&ftrace_start_lock); | 1271 | mutex_lock(&ftrace_start_lock); |
1184 | if (ftrace_start && ftrace_enabled) | 1272 | if (ftrace_start_up && ftrace_enabled) |
1185 | ftrace_run_update_code(FTRACE_ENABLE_CALLS); | 1273 | ftrace_run_update_code(FTRACE_ENABLE_CALLS); |
1186 | mutex_unlock(&ftrace_start_lock); | 1274 | mutex_unlock(&ftrace_start_lock); |
1187 | mutex_unlock(&ftrace_sysctl_lock); | 1275 | mutex_unlock(&ftrace_sysctl_lock); |
@@ -1233,12 +1321,233 @@ static struct file_operations ftrace_notrace_fops = { | |||
1233 | .release = ftrace_notrace_release, | 1321 | .release = ftrace_notrace_release, |
1234 | }; | 1322 | }; |
1235 | 1323 | ||
1236 | static __init int ftrace_init_debugfs(void) | 1324 | #ifdef CONFIG_FUNCTION_GRAPH_TRACER |
1325 | |||
1326 | static DEFINE_MUTEX(graph_lock); | ||
1327 | |||
1328 | int ftrace_graph_count; | ||
1329 | unsigned long ftrace_graph_funcs[FTRACE_GRAPH_MAX_FUNCS] __read_mostly; | ||
1330 | |||
1331 | static void * | ||
1332 | g_next(struct seq_file *m, void *v, loff_t *pos) | ||
1237 | { | 1333 | { |
1238 | struct dentry *d_tracer; | 1334 | unsigned long *array = m->private; |
1239 | struct dentry *entry; | 1335 | int index = *pos; |
1240 | 1336 | ||
1241 | d_tracer = tracing_init_dentry(); | 1337 | (*pos)++; |
1338 | |||
1339 | if (index >= ftrace_graph_count) | ||
1340 | return NULL; | ||
1341 | |||
1342 | return &array[index]; | ||
1343 | } | ||
1344 | |||
1345 | static void *g_start(struct seq_file *m, loff_t *pos) | ||
1346 | { | ||
1347 | void *p = NULL; | ||
1348 | |||
1349 | mutex_lock(&graph_lock); | ||
1350 | |||
1351 | p = g_next(m, p, pos); | ||
1352 | |||
1353 | return p; | ||
1354 | } | ||
1355 | |||
1356 | static void g_stop(struct seq_file *m, void *p) | ||
1357 | { | ||
1358 | mutex_unlock(&graph_lock); | ||
1359 | } | ||
1360 | |||
1361 | static int g_show(struct seq_file *m, void *v) | ||
1362 | { | ||
1363 | unsigned long *ptr = v; | ||
1364 | char str[KSYM_SYMBOL_LEN]; | ||
1365 | |||
1366 | if (!ptr) | ||
1367 | return 0; | ||
1368 | |||
1369 | kallsyms_lookup(*ptr, NULL, NULL, NULL, str); | ||
1370 | |||
1371 | seq_printf(m, "%s\n", str); | ||
1372 | |||
1373 | return 0; | ||
1374 | } | ||
1375 | |||
1376 | static struct seq_operations ftrace_graph_seq_ops = { | ||
1377 | .start = g_start, | ||
1378 | .next = g_next, | ||
1379 | .stop = g_stop, | ||
1380 | .show = g_show, | ||
1381 | }; | ||
1382 | |||
1383 | static int | ||
1384 | ftrace_graph_open(struct inode *inode, struct file *file) | ||
1385 | { | ||
1386 | int ret = 0; | ||
1387 | |||
1388 | if (unlikely(ftrace_disabled)) | ||
1389 | return -ENODEV; | ||
1390 | |||
1391 | mutex_lock(&graph_lock); | ||
1392 | if ((file->f_mode & FMODE_WRITE) && | ||
1393 | !(file->f_flags & O_APPEND)) { | ||
1394 | ftrace_graph_count = 0; | ||
1395 | memset(ftrace_graph_funcs, 0, sizeof(ftrace_graph_funcs)); | ||
1396 | } | ||
1397 | |||
1398 | if (file->f_mode & FMODE_READ) { | ||
1399 | ret = seq_open(file, &ftrace_graph_seq_ops); | ||
1400 | if (!ret) { | ||
1401 | struct seq_file *m = file->private_data; | ||
1402 | m->private = ftrace_graph_funcs; | ||
1403 | } | ||
1404 | } else | ||
1405 | file->private_data = ftrace_graph_funcs; | ||
1406 | mutex_unlock(&graph_lock); | ||
1407 | |||
1408 | return ret; | ||
1409 | } | ||
1410 | |||
1411 | static ssize_t | ||
1412 | ftrace_graph_read(struct file *file, char __user *ubuf, | ||
1413 | size_t cnt, loff_t *ppos) | ||
1414 | { | ||
1415 | if (file->f_mode & FMODE_READ) | ||
1416 | return seq_read(file, ubuf, cnt, ppos); | ||
1417 | else | ||
1418 | return -EPERM; | ||
1419 | } | ||
1420 | |||
1421 | static int | ||
1422 | ftrace_set_func(unsigned long *array, int idx, char *buffer) | ||
1423 | { | ||
1424 | char str[KSYM_SYMBOL_LEN]; | ||
1425 | struct dyn_ftrace *rec; | ||
1426 | struct ftrace_page *pg; | ||
1427 | int found = 0; | ||
1428 | int i, j; | ||
1429 | |||
1430 | if (ftrace_disabled) | ||
1431 | return -ENODEV; | ||
1432 | |||
1433 | /* should not be called from interrupt context */ | ||
1434 | spin_lock(&ftrace_lock); | ||
1435 | |||
1436 | for (pg = ftrace_pages_start; pg; pg = pg->next) { | ||
1437 | for (i = 0; i < pg->index; i++) { | ||
1438 | rec = &pg->records[i]; | ||
1439 | |||
1440 | if (rec->flags & (FTRACE_FL_FAILED | FTRACE_FL_FREE)) | ||
1441 | continue; | ||
1442 | |||
1443 | kallsyms_lookup(rec->ip, NULL, NULL, NULL, str); | ||
1444 | if (strcmp(str, buffer) == 0) { | ||
1445 | found = 1; | ||
1446 | for (j = 0; j < idx; j++) | ||
1447 | if (array[j] == rec->ip) { | ||
1448 | found = 0; | ||
1449 | break; | ||
1450 | } | ||
1451 | if (found) | ||
1452 | array[idx] = rec->ip; | ||
1453 | break; | ||
1454 | } | ||
1455 | } | ||
1456 | } | ||
1457 | spin_unlock(&ftrace_lock); | ||
1458 | |||
1459 | return found ? 0 : -EINVAL; | ||
1460 | } | ||
1461 | |||
1462 | static ssize_t | ||
1463 | ftrace_graph_write(struct file *file, const char __user *ubuf, | ||
1464 | size_t cnt, loff_t *ppos) | ||
1465 | { | ||
1466 | unsigned char buffer[FTRACE_BUFF_MAX+1]; | ||
1467 | unsigned long *array; | ||
1468 | size_t read = 0; | ||
1469 | ssize_t ret; | ||
1470 | int index = 0; | ||
1471 | char ch; | ||
1472 | |||
1473 | if (!cnt || cnt < 0) | ||
1474 | return 0; | ||
1475 | |||
1476 | mutex_lock(&graph_lock); | ||
1477 | |||
1478 | if (ftrace_graph_count >= FTRACE_GRAPH_MAX_FUNCS) { | ||
1479 | ret = -EBUSY; | ||
1480 | goto out; | ||
1481 | } | ||
1482 | |||
1483 | if (file->f_mode & FMODE_READ) { | ||
1484 | struct seq_file *m = file->private_data; | ||
1485 | array = m->private; | ||
1486 | } else | ||
1487 | array = file->private_data; | ||
1488 | |||
1489 | ret = get_user(ch, ubuf++); | ||
1490 | if (ret) | ||
1491 | goto out; | ||
1492 | read++; | ||
1493 | cnt--; | ||
1494 | |||
1495 | /* skip white space */ | ||
1496 | while (cnt && isspace(ch)) { | ||
1497 | ret = get_user(ch, ubuf++); | ||
1498 | if (ret) | ||
1499 | goto out; | ||
1500 | read++; | ||
1501 | cnt--; | ||
1502 | } | ||
1503 | |||
1504 | if (isspace(ch)) { | ||
1505 | *ppos += read; | ||
1506 | ret = read; | ||
1507 | goto out; | ||
1508 | } | ||
1509 | |||
1510 | while (cnt && !isspace(ch)) { | ||
1511 | if (index < FTRACE_BUFF_MAX) | ||
1512 | buffer[index++] = ch; | ||
1513 | else { | ||
1514 | ret = -EINVAL; | ||
1515 | goto out; | ||
1516 | } | ||
1517 | ret = get_user(ch, ubuf++); | ||
1518 | if (ret) | ||
1519 | goto out; | ||
1520 | read++; | ||
1521 | cnt--; | ||
1522 | } | ||
1523 | buffer[index] = 0; | ||
1524 | |||
1525 | /* we allow only one at a time */ | ||
1526 | ret = ftrace_set_func(array, ftrace_graph_count, buffer); | ||
1527 | if (ret) | ||
1528 | goto out; | ||
1529 | |||
1530 | ftrace_graph_count++; | ||
1531 | |||
1532 | file->f_pos += read; | ||
1533 | |||
1534 | ret = read; | ||
1535 | out: | ||
1536 | mutex_unlock(&graph_lock); | ||
1537 | |||
1538 | return ret; | ||
1539 | } | ||
1540 | |||
1541 | static const struct file_operations ftrace_graph_fops = { | ||
1542 | .open = ftrace_graph_open, | ||
1543 | .read = ftrace_graph_read, | ||
1544 | .write = ftrace_graph_write, | ||
1545 | }; | ||
1546 | #endif /* CONFIG_FUNCTION_GRAPH_TRACER */ | ||
1547 | |||
1548 | static __init int ftrace_init_dyn_debugfs(struct dentry *d_tracer) | ||
1549 | { | ||
1550 | struct dentry *entry; | ||
1242 | 1551 | ||
1243 | entry = debugfs_create_file("available_filter_functions", 0444, | 1552 | entry = debugfs_create_file("available_filter_functions", 0444, |
1244 | d_tracer, NULL, &ftrace_avail_fops); | 1553 | d_tracer, NULL, &ftrace_avail_fops); |
@@ -1263,12 +1572,20 @@ static __init int ftrace_init_debugfs(void) | |||
1263 | pr_warning("Could not create debugfs " | 1572 | pr_warning("Could not create debugfs " |
1264 | "'set_ftrace_notrace' entry\n"); | 1573 | "'set_ftrace_notrace' entry\n"); |
1265 | 1574 | ||
1575 | #ifdef CONFIG_FUNCTION_GRAPH_TRACER | ||
1576 | entry = debugfs_create_file("set_graph_function", 0444, d_tracer, | ||
1577 | NULL, | ||
1578 | &ftrace_graph_fops); | ||
1579 | if (!entry) | ||
1580 | pr_warning("Could not create debugfs " | ||
1581 | "'set_graph_function' entry\n"); | ||
1582 | #endif /* CONFIG_FUNCTION_GRAPH_TRACER */ | ||
1583 | |||
1266 | return 0; | 1584 | return 0; |
1267 | } | 1585 | } |
1268 | 1586 | ||
1269 | fs_initcall(ftrace_init_debugfs); | 1587 | static int ftrace_convert_nops(struct module *mod, |
1270 | 1588 | unsigned long *start, | |
1271 | static int ftrace_convert_nops(unsigned long *start, | ||
1272 | unsigned long *end) | 1589 | unsigned long *end) |
1273 | { | 1590 | { |
1274 | unsigned long *p; | 1591 | unsigned long *p; |
@@ -1279,23 +1596,32 @@ static int ftrace_convert_nops(unsigned long *start, | |||
1279 | p = start; | 1596 | p = start; |
1280 | while (p < end) { | 1597 | while (p < end) { |
1281 | addr = ftrace_call_adjust(*p++); | 1598 | addr = ftrace_call_adjust(*p++); |
1599 | /* | ||
1600 | * Some architecture linkers will pad between | ||
1601 | * the different mcount_loc sections of different | ||
1602 | * object files to satisfy alignments. | ||
1603 | * Skip any NULL pointers. | ||
1604 | */ | ||
1605 | if (!addr) | ||
1606 | continue; | ||
1282 | ftrace_record_ip(addr); | 1607 | ftrace_record_ip(addr); |
1283 | } | 1608 | } |
1284 | 1609 | ||
1285 | /* disable interrupts to prevent kstop machine */ | 1610 | /* disable interrupts to prevent kstop machine */ |
1286 | local_irq_save(flags); | 1611 | local_irq_save(flags); |
1287 | ftrace_update_code(); | 1612 | ftrace_update_code(mod); |
1288 | local_irq_restore(flags); | 1613 | local_irq_restore(flags); |
1289 | mutex_unlock(&ftrace_start_lock); | 1614 | mutex_unlock(&ftrace_start_lock); |
1290 | 1615 | ||
1291 | return 0; | 1616 | return 0; |
1292 | } | 1617 | } |
1293 | 1618 | ||
1294 | void ftrace_init_module(unsigned long *start, unsigned long *end) | 1619 | void ftrace_init_module(struct module *mod, |
1620 | unsigned long *start, unsigned long *end) | ||
1295 | { | 1621 | { |
1296 | if (ftrace_disabled || start == end) | 1622 | if (ftrace_disabled || start == end) |
1297 | return; | 1623 | return; |
1298 | ftrace_convert_nops(start, end); | 1624 | ftrace_convert_nops(mod, start, end); |
1299 | } | 1625 | } |
1300 | 1626 | ||
1301 | extern unsigned long __start_mcount_loc[]; | 1627 | extern unsigned long __start_mcount_loc[]; |
@@ -1325,7 +1651,8 @@ void __init ftrace_init(void) | |||
1325 | 1651 | ||
1326 | last_ftrace_enabled = ftrace_enabled = 1; | 1652 | last_ftrace_enabled = ftrace_enabled = 1; |
1327 | 1653 | ||
1328 | ret = ftrace_convert_nops(__start_mcount_loc, | 1654 | ret = ftrace_convert_nops(NULL, |
1655 | __start_mcount_loc, | ||
1329 | __stop_mcount_loc); | 1656 | __stop_mcount_loc); |
1330 | 1657 | ||
1331 | return; | 1658 | return; |
@@ -1342,12 +1669,186 @@ static int __init ftrace_nodyn_init(void) | |||
1342 | } | 1669 | } |
1343 | device_initcall(ftrace_nodyn_init); | 1670 | device_initcall(ftrace_nodyn_init); |
1344 | 1671 | ||
1345 | # define ftrace_startup() do { } while (0) | 1672 | static inline int ftrace_init_dyn_debugfs(struct dentry *d_tracer) { return 0; } |
1346 | # define ftrace_shutdown() do { } while (0) | 1673 | static inline void ftrace_startup_enable(int command) { } |
1674 | /* Keep as macros so we do not need to define the commands */ | ||
1675 | # define ftrace_startup(command) do { } while (0) | ||
1676 | # define ftrace_shutdown(command) do { } while (0) | ||
1347 | # define ftrace_startup_sysctl() do { } while (0) | 1677 | # define ftrace_startup_sysctl() do { } while (0) |
1348 | # define ftrace_shutdown_sysctl() do { } while (0) | 1678 | # define ftrace_shutdown_sysctl() do { } while (0) |
1349 | #endif /* CONFIG_DYNAMIC_FTRACE */ | 1679 | #endif /* CONFIG_DYNAMIC_FTRACE */ |
1350 | 1680 | ||
1681 | static ssize_t | ||
1682 | ftrace_pid_read(struct file *file, char __user *ubuf, | ||
1683 | size_t cnt, loff_t *ppos) | ||
1684 | { | ||
1685 | char buf[64]; | ||
1686 | int r; | ||
1687 | |||
1688 | if (ftrace_pid_trace == ftrace_swapper_pid) | ||
1689 | r = sprintf(buf, "swapper tasks\n"); | ||
1690 | else if (ftrace_pid_trace) | ||
1691 | r = sprintf(buf, "%u\n", pid_nr(ftrace_pid_trace)); | ||
1692 | else | ||
1693 | r = sprintf(buf, "no pid\n"); | ||
1694 | |||
1695 | return simple_read_from_buffer(ubuf, cnt, ppos, buf, r); | ||
1696 | } | ||
1697 | |||
1698 | static void clear_ftrace_swapper(void) | ||
1699 | { | ||
1700 | struct task_struct *p; | ||
1701 | int cpu; | ||
1702 | |||
1703 | get_online_cpus(); | ||
1704 | for_each_online_cpu(cpu) { | ||
1705 | p = idle_task(cpu); | ||
1706 | clear_tsk_trace_trace(p); | ||
1707 | } | ||
1708 | put_online_cpus(); | ||
1709 | } | ||
1710 | |||
1711 | static void set_ftrace_swapper(void) | ||
1712 | { | ||
1713 | struct task_struct *p; | ||
1714 | int cpu; | ||
1715 | |||
1716 | get_online_cpus(); | ||
1717 | for_each_online_cpu(cpu) { | ||
1718 | p = idle_task(cpu); | ||
1719 | set_tsk_trace_trace(p); | ||
1720 | } | ||
1721 | put_online_cpus(); | ||
1722 | } | ||
1723 | |||
1724 | static void clear_ftrace_pid(struct pid *pid) | ||
1725 | { | ||
1726 | struct task_struct *p; | ||
1727 | |||
1728 | do_each_pid_task(pid, PIDTYPE_PID, p) { | ||
1729 | clear_tsk_trace_trace(p); | ||
1730 | } while_each_pid_task(pid, PIDTYPE_PID, p); | ||
1731 | put_pid(pid); | ||
1732 | } | ||
1733 | |||
1734 | static void set_ftrace_pid(struct pid *pid) | ||
1735 | { | ||
1736 | struct task_struct *p; | ||
1737 | |||
1738 | do_each_pid_task(pid, PIDTYPE_PID, p) { | ||
1739 | set_tsk_trace_trace(p); | ||
1740 | } while_each_pid_task(pid, PIDTYPE_PID, p); | ||
1741 | } | ||
1742 | |||
1743 | static void clear_ftrace_pid_task(struct pid **pid) | ||
1744 | { | ||
1745 | if (*pid == ftrace_swapper_pid) | ||
1746 | clear_ftrace_swapper(); | ||
1747 | else | ||
1748 | clear_ftrace_pid(*pid); | ||
1749 | |||
1750 | *pid = NULL; | ||
1751 | } | ||
1752 | |||
1753 | static void set_ftrace_pid_task(struct pid *pid) | ||
1754 | { | ||
1755 | if (pid == ftrace_swapper_pid) | ||
1756 | set_ftrace_swapper(); | ||
1757 | else | ||
1758 | set_ftrace_pid(pid); | ||
1759 | } | ||
1760 | |||
1761 | static ssize_t | ||
1762 | ftrace_pid_write(struct file *filp, const char __user *ubuf, | ||
1763 | size_t cnt, loff_t *ppos) | ||
1764 | { | ||
1765 | struct pid *pid; | ||
1766 | char buf[64]; | ||
1767 | long val; | ||
1768 | int ret; | ||
1769 | |||
1770 | if (cnt >= sizeof(buf)) | ||
1771 | return -EINVAL; | ||
1772 | |||
1773 | if (copy_from_user(&buf, ubuf, cnt)) | ||
1774 | return -EFAULT; | ||
1775 | |||
1776 | buf[cnt] = 0; | ||
1777 | |||
1778 | ret = strict_strtol(buf, 10, &val); | ||
1779 | if (ret < 0) | ||
1780 | return ret; | ||
1781 | |||
1782 | mutex_lock(&ftrace_start_lock); | ||
1783 | if (val < 0) { | ||
1784 | /* disable pid tracing */ | ||
1785 | if (!ftrace_pid_trace) | ||
1786 | goto out; | ||
1787 | |||
1788 | clear_ftrace_pid_task(&ftrace_pid_trace); | ||
1789 | |||
1790 | } else { | ||
1791 | /* swapper task is special */ | ||
1792 | if (!val) { | ||
1793 | pid = ftrace_swapper_pid; | ||
1794 | if (pid == ftrace_pid_trace) | ||
1795 | goto out; | ||
1796 | } else { | ||
1797 | pid = find_get_pid(val); | ||
1798 | |||
1799 | if (pid == ftrace_pid_trace) { | ||
1800 | put_pid(pid); | ||
1801 | goto out; | ||
1802 | } | ||
1803 | } | ||
1804 | |||
1805 | if (ftrace_pid_trace) | ||
1806 | clear_ftrace_pid_task(&ftrace_pid_trace); | ||
1807 | |||
1808 | if (!pid) | ||
1809 | goto out; | ||
1810 | |||
1811 | ftrace_pid_trace = pid; | ||
1812 | |||
1813 | set_ftrace_pid_task(ftrace_pid_trace); | ||
1814 | } | ||
1815 | |||
1816 | /* update the function call */ | ||
1817 | ftrace_update_pid_func(); | ||
1818 | ftrace_startup_enable(0); | ||
1819 | |||
1820 | out: | ||
1821 | mutex_unlock(&ftrace_start_lock); | ||
1822 | |||
1823 | return cnt; | ||
1824 | } | ||
1825 | |||
1826 | static struct file_operations ftrace_pid_fops = { | ||
1827 | .read = ftrace_pid_read, | ||
1828 | .write = ftrace_pid_write, | ||
1829 | }; | ||
1830 | |||
1831 | static __init int ftrace_init_debugfs(void) | ||
1832 | { | ||
1833 | struct dentry *d_tracer; | ||
1834 | struct dentry *entry; | ||
1835 | |||
1836 | d_tracer = tracing_init_dentry(); | ||
1837 | if (!d_tracer) | ||
1838 | return 0; | ||
1839 | |||
1840 | ftrace_init_dyn_debugfs(d_tracer); | ||
1841 | |||
1842 | entry = debugfs_create_file("set_ftrace_pid", 0644, d_tracer, | ||
1843 | NULL, &ftrace_pid_fops); | ||
1844 | if (!entry) | ||
1845 | pr_warning("Could not create debugfs " | ||
1846 | "'set_ftrace_pid' entry\n"); | ||
1847 | return 0; | ||
1848 | } | ||
1849 | |||
1850 | fs_initcall(ftrace_init_debugfs); | ||
1851 | |||
1351 | /** | 1852 | /** |
1352 | * ftrace_kill - kill ftrace | 1853 | * ftrace_kill - kill ftrace |
1353 | * | 1854 | * |
@@ -1381,10 +1882,11 @@ int register_ftrace_function(struct ftrace_ops *ops) | |||
1381 | return -1; | 1882 | return -1; |
1382 | 1883 | ||
1383 | mutex_lock(&ftrace_sysctl_lock); | 1884 | mutex_lock(&ftrace_sysctl_lock); |
1885 | |||
1384 | ret = __register_ftrace_function(ops); | 1886 | ret = __register_ftrace_function(ops); |
1385 | ftrace_startup(); | 1887 | ftrace_startup(0); |
1386 | mutex_unlock(&ftrace_sysctl_lock); | ||
1387 | 1888 | ||
1889 | mutex_unlock(&ftrace_sysctl_lock); | ||
1388 | return ret; | 1890 | return ret; |
1389 | } | 1891 | } |
1390 | 1892 | ||
@@ -1400,7 +1902,7 @@ int unregister_ftrace_function(struct ftrace_ops *ops) | |||
1400 | 1902 | ||
1401 | mutex_lock(&ftrace_sysctl_lock); | 1903 | mutex_lock(&ftrace_sysctl_lock); |
1402 | ret = __unregister_ftrace_function(ops); | 1904 | ret = __unregister_ftrace_function(ops); |
1403 | ftrace_shutdown(); | 1905 | ftrace_shutdown(0); |
1404 | mutex_unlock(&ftrace_sysctl_lock); | 1906 | mutex_unlock(&ftrace_sysctl_lock); |
1405 | 1907 | ||
1406 | return ret; | 1908 | return ret; |
@@ -1449,3 +1951,153 @@ ftrace_enable_sysctl(struct ctl_table *table, int write, | |||
1449 | return ret; | 1951 | return ret; |
1450 | } | 1952 | } |
1451 | 1953 | ||
1954 | #ifdef CONFIG_FUNCTION_GRAPH_TRACER | ||
1955 | |||
1956 | static atomic_t ftrace_graph_active; | ||
1957 | |||
1958 | int ftrace_graph_entry_stub(struct ftrace_graph_ent *trace) | ||
1959 | { | ||
1960 | return 0; | ||
1961 | } | ||
1962 | |||
1963 | /* The callbacks that hook a function */ | ||
1964 | trace_func_graph_ret_t ftrace_graph_return = | ||
1965 | (trace_func_graph_ret_t)ftrace_stub; | ||
1966 | trace_func_graph_ent_t ftrace_graph_entry = ftrace_graph_entry_stub; | ||
1967 | |||
1968 | /* Try to assign a return stack array on FTRACE_RETSTACK_ALLOC_SIZE tasks. */ | ||
1969 | static int alloc_retstack_tasklist(struct ftrace_ret_stack **ret_stack_list) | ||
1970 | { | ||
1971 | int i; | ||
1972 | int ret = 0; | ||
1973 | unsigned long flags; | ||
1974 | int start = 0, end = FTRACE_RETSTACK_ALLOC_SIZE; | ||
1975 | struct task_struct *g, *t; | ||
1976 | |||
1977 | for (i = 0; i < FTRACE_RETSTACK_ALLOC_SIZE; i++) { | ||
1978 | ret_stack_list[i] = kmalloc(FTRACE_RETFUNC_DEPTH | ||
1979 | * sizeof(struct ftrace_ret_stack), | ||
1980 | GFP_KERNEL); | ||
1981 | if (!ret_stack_list[i]) { | ||
1982 | start = 0; | ||
1983 | end = i; | ||
1984 | ret = -ENOMEM; | ||
1985 | goto free; | ||
1986 | } | ||
1987 | } | ||
1988 | |||
1989 | read_lock_irqsave(&tasklist_lock, flags); | ||
1990 | do_each_thread(g, t) { | ||
1991 | if (start == end) { | ||
1992 | ret = -EAGAIN; | ||
1993 | goto unlock; | ||
1994 | } | ||
1995 | |||
1996 | if (t->ret_stack == NULL) { | ||
1997 | t->curr_ret_stack = -1; | ||
1998 | /* Make sure IRQs see the -1 first: */ | ||
1999 | barrier(); | ||
2000 | t->ret_stack = ret_stack_list[start++]; | ||
2001 | atomic_set(&t->tracing_graph_pause, 0); | ||
2002 | atomic_set(&t->trace_overrun, 0); | ||
2003 | } | ||
2004 | } while_each_thread(g, t); | ||
2005 | |||
2006 | unlock: | ||
2007 | read_unlock_irqrestore(&tasklist_lock, flags); | ||
2008 | free: | ||
2009 | for (i = start; i < end; i++) | ||
2010 | kfree(ret_stack_list[i]); | ||
2011 | return ret; | ||
2012 | } | ||
2013 | |||
2014 | /* Allocate a return stack for each task */ | ||
2015 | static int start_graph_tracing(void) | ||
2016 | { | ||
2017 | struct ftrace_ret_stack **ret_stack_list; | ||
2018 | int ret; | ||
2019 | |||
2020 | ret_stack_list = kmalloc(FTRACE_RETSTACK_ALLOC_SIZE * | ||
2021 | sizeof(struct ftrace_ret_stack *), | ||
2022 | GFP_KERNEL); | ||
2023 | |||
2024 | if (!ret_stack_list) | ||
2025 | return -ENOMEM; | ||
2026 | |||
2027 | do { | ||
2028 | ret = alloc_retstack_tasklist(ret_stack_list); | ||
2029 | } while (ret == -EAGAIN); | ||
2030 | |||
2031 | kfree(ret_stack_list); | ||
2032 | return ret; | ||
2033 | } | ||
2034 | |||
2035 | int register_ftrace_graph(trace_func_graph_ret_t retfunc, | ||
2036 | trace_func_graph_ent_t entryfunc) | ||
2037 | { | ||
2038 | int ret = 0; | ||
2039 | |||
2040 | mutex_lock(&ftrace_sysctl_lock); | ||
2041 | |||
2042 | atomic_inc(&ftrace_graph_active); | ||
2043 | ret = start_graph_tracing(); | ||
2044 | if (ret) { | ||
2045 | atomic_dec(&ftrace_graph_active); | ||
2046 | goto out; | ||
2047 | } | ||
2048 | |||
2049 | ftrace_graph_return = retfunc; | ||
2050 | ftrace_graph_entry = entryfunc; | ||
2051 | |||
2052 | ftrace_startup(FTRACE_START_FUNC_RET); | ||
2053 | |||
2054 | out: | ||
2055 | mutex_unlock(&ftrace_sysctl_lock); | ||
2056 | return ret; | ||
2057 | } | ||
2058 | |||
2059 | void unregister_ftrace_graph(void) | ||
2060 | { | ||
2061 | mutex_lock(&ftrace_sysctl_lock); | ||
2062 | |||
2063 | atomic_dec(&ftrace_graph_active); | ||
2064 | ftrace_graph_return = (trace_func_graph_ret_t)ftrace_stub; | ||
2065 | ftrace_graph_entry = ftrace_graph_entry_stub; | ||
2066 | ftrace_shutdown(FTRACE_STOP_FUNC_RET); | ||
2067 | |||
2068 | mutex_unlock(&ftrace_sysctl_lock); | ||
2069 | } | ||
2070 | |||
2071 | /* Allocate a return stack for newly created task */ | ||
2072 | void ftrace_graph_init_task(struct task_struct *t) | ||
2073 | { | ||
2074 | if (atomic_read(&ftrace_graph_active)) { | ||
2075 | t->ret_stack = kmalloc(FTRACE_RETFUNC_DEPTH | ||
2076 | * sizeof(struct ftrace_ret_stack), | ||
2077 | GFP_KERNEL); | ||
2078 | if (!t->ret_stack) | ||
2079 | return; | ||
2080 | t->curr_ret_stack = -1; | ||
2081 | atomic_set(&t->tracing_graph_pause, 0); | ||
2082 | atomic_set(&t->trace_overrun, 0); | ||
2083 | } else | ||
2084 | t->ret_stack = NULL; | ||
2085 | } | ||
2086 | |||
2087 | void ftrace_graph_exit_task(struct task_struct *t) | ||
2088 | { | ||
2089 | struct ftrace_ret_stack *ret_stack = t->ret_stack; | ||
2090 | |||
2091 | t->ret_stack = NULL; | ||
2092 | /* NULL must become visible to IRQs before we free it: */ | ||
2093 | barrier(); | ||
2094 | |||
2095 | kfree(ret_stack); | ||
2096 | } | ||
2097 | |||
2098 | void ftrace_graph_stop(void) | ||
2099 | { | ||
2100 | ftrace_stop(); | ||
2101 | } | ||
2102 | #endif | ||
2103 | |||