Diffstat (limited to 'kernel/trace/ftrace.c')
 kernel/trace/ftrace.c | 997
 1 file changed, 822 insertions(+), 175 deletions(-)
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index 4a39d24568c8..a12f80efceaa 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -47,6 +47,13 @@
 int ftrace_enabled __read_mostly;
 static int last_ftrace_enabled;
 
+/* set when tracing only a pid */
+struct pid *ftrace_pid_trace;
+static struct pid * const ftrace_swapper_pid = &init_struct_pid;
+
+/* Quick disabling of function tracer. */
+int function_trace_stop;
+
 /*
  * ftrace_disabled is set when an anomaly is discovered.
  * ftrace_disabled is much stronger than ftrace_enabled.
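[Note] The three globals added above drive the rest of this patch. A summary of their semantics, inferred from the code added below rather than from any changelog:

/* ftrace_pid_trace:    when non-NULL, only tasks flagged through it are
 *                      traced; ftrace_pid_func() below does the per-task test.
 * ftrace_swapper_pid:  sentinel value (&init_struct_pid) meaning "trace the
 *                      per-cpu idle (swapper) tasks", which cannot be found
 *                      through a normal struct pid lookup.
 * function_trace_stop: when non-zero, function tracing is suppressed; archs
 *                      either test it in their mcount stub
 *                      (CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST) or rely on
 *                      the C-level ftrace_test_stop_func() added further down.
 */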
@@ -55,6 +62,7 @@ static int ftrace_disabled __read_mostly;
 
 static DEFINE_SPINLOCK(ftrace_lock);
 static DEFINE_MUTEX(ftrace_sysctl_lock);
+static DEFINE_MUTEX(ftrace_start_lock);
 
 static struct ftrace_ops ftrace_list_end __read_mostly =
 {
@@ -63,6 +71,8 @@ static struct ftrace_ops ftrace_list_end __read_mostly =
 
 static struct ftrace_ops *ftrace_list __read_mostly = &ftrace_list_end;
 ftrace_func_t ftrace_trace_function __read_mostly = ftrace_stub;
+ftrace_func_t __ftrace_trace_function __read_mostly = ftrace_stub;
+ftrace_func_t ftrace_pid_function __read_mostly = ftrace_stub;
 
 static void ftrace_list_func(unsigned long ip, unsigned long parent_ip)
 {
@@ -79,6 +89,21 @@ static void ftrace_list_func(unsigned long ip, unsigned long parent_ip)
 	};
 }
 
+static void ftrace_pid_func(unsigned long ip, unsigned long parent_ip)
+{
+	if (!test_tsk_trace_trace(current))
+		return;
+
+	ftrace_pid_function(ip, parent_ip);
+}
+
+static void set_ftrace_pid_function(ftrace_func_t func)
+{
+	/* do not set ftrace_pid_function to itself! */
+	if (func != ftrace_pid_func)
+		ftrace_pid_function = func;
+}
+
 /**
  * clear_ftrace_function - reset the ftrace function
  *
@@ -88,8 +113,24 @@ static void ftrace_list_func(unsigned long ip, unsigned long parent_ip)
 void clear_ftrace_function(void)
 {
 	ftrace_trace_function = ftrace_stub;
+	__ftrace_trace_function = ftrace_stub;
+	ftrace_pid_function = ftrace_stub;
 }
 
+#ifndef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
+/*
+ * For those archs that do not test function_trace_stop in their
+ * mcount call site, we need to do it from C.
+ */
+static void ftrace_test_stop_func(unsigned long ip, unsigned long parent_ip)
+{
+	if (function_trace_stop)
+		return;
+
+	__ftrace_trace_function(ip, parent_ip);
+}
+#endif
+
 static int __register_ftrace_function(struct ftrace_ops *ops)
 {
 	/* should not be called from interrupt context */
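[Note] With the stop-test wrapper in place, the dispatch layering for a registered tracer looks roughly like this (a sketch for the case where CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST is not set and pid filtering is active):

/*
 *   mcount (arch stub)
 *     -> ftrace_trace_function   == ftrace_test_stop_func
 *            returns early if function_trace_stop is set
 *     -> __ftrace_trace_function == ftrace_pid_func
 *            returns early unless test_tsk_trace_trace(current)
 *     -> ftrace_pid_function     == the tracer's real callback
 *            (or ftrace_list_func when several ftrace_ops are registered)
 */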
@@ -106,14 +147,28 @@ static int __register_ftrace_function(struct ftrace_ops *ops)
 	ftrace_list = ops;
 
 	if (ftrace_enabled) {
+		ftrace_func_t func;
+
+		if (ops->next == &ftrace_list_end)
+			func = ops->func;
+		else
+			func = ftrace_list_func;
+
+		if (ftrace_pid_trace) {
+			set_ftrace_pid_function(func);
+			func = ftrace_pid_func;
+		}
+
 		/*
 		 * For one func, simply call it directly.
 		 * For more than one func, call the chain.
 		 */
-		if (ops->next == &ftrace_list_end)
-			ftrace_trace_function = ops->func;
-		else
-			ftrace_trace_function = ftrace_list_func;
+#ifdef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
+		ftrace_trace_function = func;
+#else
+		__ftrace_trace_function = func;
+		ftrace_trace_function = ftrace_test_stop_func;
+#endif
 	}
 
 	spin_unlock(&ftrace_lock);
@@ -152,9 +207,19 @@ static int __unregister_ftrace_function(struct ftrace_ops *ops)
 
 	if (ftrace_enabled) {
 		/* If we only have one func left, then call that directly */
-		if (ftrace_list == &ftrace_list_end ||
-		    ftrace_list->next == &ftrace_list_end)
-			ftrace_trace_function = ftrace_list->func;
+		if (ftrace_list->next == &ftrace_list_end) {
+			ftrace_func_t func = ftrace_list->func;
+
+			if (ftrace_pid_trace) {
+				set_ftrace_pid_function(func);
+				func = ftrace_pid_func;
+			}
+#ifdef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
+			ftrace_trace_function = func;
+#else
+			__ftrace_trace_function = func;
+#endif
+		}
 	}
 
  out:
@@ -163,6 +228,36 @@ static int __unregister_ftrace_function(struct ftrace_ops *ops)
 	return ret;
 }
 
+static void ftrace_update_pid_func(void)
+{
+	ftrace_func_t func;
+
+	/* should not be called from interrupt context */
+	spin_lock(&ftrace_lock);
+
+	if (ftrace_trace_function == ftrace_stub)
+		goto out;
+
+	func = ftrace_trace_function;
+
+	if (ftrace_pid_trace) {
+		set_ftrace_pid_function(func);
+		func = ftrace_pid_func;
+	} else {
+		if (func == ftrace_pid_func)
+			func = ftrace_pid_function;
+	}
+
+#ifdef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
+	ftrace_trace_function = func;
+#else
+	__ftrace_trace_function = func;
+#endif
+
+ out:
+	spin_unlock(&ftrace_lock);
+}
+
 #ifdef CONFIG_DYNAMIC_FTRACE
 #ifndef CONFIG_FTRACE_MCOUNT_RECORD
 # error Dynamic ftrace depends on MCOUNT_RECORD
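[Note] ftrace_update_pid_func() wraps and unwraps the live callback without losing the original pointer. A minimal, self-contained userspace sketch of the same pattern (illustrative only; every name here is hypothetical, none of this is kernel code):

#include <stdio.h>

typedef void (*func_t)(void);

static void real_tracer(void) { puts("traced"); }

static func_t current_func = real_tracer; /* like ftrace_trace_function */
static func_t saved_func;                 /* like ftrace_pid_function   */
static int current_task_traced;           /* like test_tsk_trace_trace() */

static void filter_trampoline(void)      /* like ftrace_pid_func */
{
	if (!current_task_traced)
		return;
	saved_func();
}

static void update_func(int filtering)   /* like ftrace_update_pid_func */
{
	func_t func = current_func;

	if (filtering) {
		if (func != filter_trampoline) /* never wrap the wrapper */
			saved_func = func;
		func = filter_trampoline;
	} else if (func == filter_trampoline) {
		func = saved_func;             /* unwrap the original */
	}
	current_func = func;
}

int main(void)
{
	update_func(1);
	current_task_traced = 0;
	current_func();        /* filtered out: prints nothing */
	current_task_traced = 1;
	current_func();        /* prints "traced" */
	update_func(0);
	current_func();        /* direct call: prints "traced" */
	return 0;
}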
@@ -182,10 +277,11 @@ enum {
 	FTRACE_UPDATE_TRACE_FUNC	= (1 << 2),
 	FTRACE_ENABLE_MCOUNT		= (1 << 3),
 	FTRACE_DISABLE_MCOUNT		= (1 << 4),
+	FTRACE_START_FUNC_RET		= (1 << 5),
+	FTRACE_STOP_FUNC_RET		= (1 << 6),
 };
 
 static int ftrace_filtered;
-static int tracing_on;
 
 static LIST_HEAD(ftrace_new_addrs);
 
@@ -309,7 +405,7 @@ ftrace_record_ip(unsigned long ip)
 {
 	struct dyn_ftrace *rec;
 
-	if (!ftrace_enabled || ftrace_disabled)
+	if (ftrace_disabled)
 		return NULL;
 
 	rec = ftrace_alloc_dyn_node(ip);
@@ -323,107 +419,131 @@ ftrace_record_ip(unsigned long ip)
 	return rec;
 }
 
-#define FTRACE_ADDR ((long)(ftrace_caller))
+static void print_ip_ins(const char *fmt, unsigned char *p)
+{
+	int i;
+
+	printk(KERN_CONT "%s", fmt);
+
+	for (i = 0; i < MCOUNT_INSN_SIZE; i++)
+		printk(KERN_CONT "%s%02x", i ? ":" : "", p[i]);
+}
+
+static void ftrace_bug(int failed, unsigned long ip)
+{
+	switch (failed) {
+	case -EFAULT:
+		FTRACE_WARN_ON_ONCE(1);
+		pr_info("ftrace faulted on modifying ");
+		print_ip_sym(ip);
+		break;
+	case -EINVAL:
+		FTRACE_WARN_ON_ONCE(1);
+		pr_info("ftrace failed to modify ");
+		print_ip_sym(ip);
+		print_ip_ins(" actual: ", (unsigned char *)ip);
+		printk(KERN_CONT "\n");
+		break;
+	case -EPERM:
+		FTRACE_WARN_ON_ONCE(1);
+		pr_info("ftrace faulted on writing ");
+		print_ip_sym(ip);
+		break;
+	default:
+		FTRACE_WARN_ON_ONCE(1);
+		pr_info("ftrace faulted on unknown error ");
+		print_ip_sym(ip);
+	}
+}
+
 
 static int
-__ftrace_replace_code(struct dyn_ftrace *rec,
-		      unsigned char *old, unsigned char *new, int enable)
+__ftrace_replace_code(struct dyn_ftrace *rec, int enable)
 {
 	unsigned long ip, fl;
+	unsigned long ftrace_addr;
+
+	ftrace_addr = (unsigned long)ftrace_caller;
 
 	ip = rec->ip;
 
-	if (ftrace_filtered && enable) {
+	/*
+	 * If this record is not to be traced and
+	 * it is not enabled then do nothing.
+	 *
+	 * If this record is not to be traced and
+	 * it is enabled then disable it.
+	 *
+	 */
+	if (rec->flags & FTRACE_FL_NOTRACE) {
+		if (rec->flags & FTRACE_FL_ENABLED)
+			rec->flags &= ~FTRACE_FL_ENABLED;
+		else
+			return 0;
+
+	} else if (ftrace_filtered && enable) {
 		/*
-		 * If filtering is on:
-		 *
-		 * If this record is set to be filtered and
-		 * is enabled then do nothing.
-		 *
-		 * If this record is set to be filtered and
-		 * it is not enabled, enable it.
-		 *
-		 * If this record is not set to be filtered
-		 * and it is not enabled do nothing.
-		 *
-		 * If this record is set not to trace then
-		 * do nothing.
-		 *
-		 * If this record is set not to trace and
-		 * it is enabled then disable it.
-		 *
-		 * If this record is not set to be filtered and
-		 * it is enabled, disable it.
+		 * Filtering is on:
 		 */
 
-		fl = rec->flags & (FTRACE_FL_FILTER | FTRACE_FL_NOTRACE |
-				   FTRACE_FL_ENABLED);
+		fl = rec->flags & (FTRACE_FL_FILTER | FTRACE_FL_ENABLED);
 
-		if ((fl == (FTRACE_FL_FILTER | FTRACE_FL_ENABLED)) ||
-		    (fl == (FTRACE_FL_FILTER | FTRACE_FL_NOTRACE)) ||
-		    !fl || (fl == FTRACE_FL_NOTRACE))
+		/* Record is filtered and enabled, do nothing */
+		if (fl == (FTRACE_FL_FILTER | FTRACE_FL_ENABLED))
 			return 0;
 
-		/*
-		 * If it is enabled disable it,
-		 * otherwise enable it!
-		 */
-		if (fl & FTRACE_FL_ENABLED) {
-			/* swap new and old */
-			new = old;
-			old = ftrace_call_replace(ip, FTRACE_ADDR);
+		/* Record is not filtered and is not enabled do nothing */
+		if (!fl)
+			return 0;
+
+		/* Record is not filtered but enabled, disable it */
+		if (fl == FTRACE_FL_ENABLED)
 			rec->flags &= ~FTRACE_FL_ENABLED;
-		} else {
-			new = ftrace_call_replace(ip, FTRACE_ADDR);
+		else
+			/* Otherwise record is filtered but not enabled, enable it */
 			rec->flags |= FTRACE_FL_ENABLED;
-		}
 	} else {
+		/* Disable or not filtered */
 
 		if (enable) {
-			/*
-			 * If this record is set not to trace and is
-			 * not enabled, do nothing.
-			 */
-			fl = rec->flags & (FTRACE_FL_NOTRACE | FTRACE_FL_ENABLED);
-			if (fl == FTRACE_FL_NOTRACE)
-				return 0;
-
-			new = ftrace_call_replace(ip, FTRACE_ADDR);
-		} else
-			old = ftrace_call_replace(ip, FTRACE_ADDR);
-
-		if (enable) {
+			/* if record is enabled, do nothing */
 			if (rec->flags & FTRACE_FL_ENABLED)
 				return 0;
+
 			rec->flags |= FTRACE_FL_ENABLED;
+
 		} else {
+
+			/* if record is not enabled do nothing */
 			if (!(rec->flags & FTRACE_FL_ENABLED))
 				return 0;
+
 			rec->flags &= ~FTRACE_FL_ENABLED;
 		}
 	}
 
-	return ftrace_modify_code(ip, old, new);
+	if (rec->flags & FTRACE_FL_ENABLED)
+		return ftrace_make_call(rec, ftrace_addr);
+	else
+		return ftrace_make_nop(NULL, rec, ftrace_addr);
 }
 
 static void ftrace_replace_code(int enable)
 {
 	int i, failed;
-	unsigned char *new = NULL, *old = NULL;
 	struct dyn_ftrace *rec;
 	struct ftrace_page *pg;
 
-	if (enable)
-		old = ftrace_nop_replace();
-	else
-		new = ftrace_nop_replace();
-
 	for (pg = ftrace_pages_start; pg; pg = pg->next) {
 		for (i = 0; i < pg->index; i++) {
 			rec = &pg->records[i];
 
-			/* don't modify code that has already faulted */
-			if (rec->flags & FTRACE_FL_FAILED)
+			/*
+			 * Skip over free records and records that have
+			 * failed.
+			 */
+			if (rec->flags & FTRACE_FL_FREE ||
+			    rec->flags & FTRACE_FL_FAILED)
 				continue;
 
 			/* ignore updates to this record's mcount site */
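[Note] The rewritten flag handling in __ftrace_replace_code() is easier to audit as a table. The summary below is derived from the code above, not taken from any patch description (X = don't care):

/*
 *  NOTRACE  FILTER  ENABLED  enable |  action
 *     1       X        1       X    |  clear ENABLED -> ftrace_make_nop()
 *     1       X        0       X    |  nothing
 *  -- with ftrace_filtered set and enable == 1: --
 *     0       1        1       1    |  nothing
 *     0       1        0       1    |  set ENABLED   -> ftrace_make_call()
 *     0       0        1       1    |  clear ENABLED -> ftrace_make_nop()
 *     0       0        0       1    |  nothing
 *  -- otherwise: --
 *     0       X        0       1    |  set ENABLED   -> ftrace_make_call()
 *     0       X        1       1    |  nothing
 *     0       X        1       0    |  clear ENABLED -> ftrace_make_nop()
 *     0       X        0       0    |  nothing
 */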
@@ -434,68 +554,30 @@ static void ftrace_replace_code(int enable)
 				unfreeze_record(rec);
 			}
 
-			failed = __ftrace_replace_code(rec, old, new, enable);
+			failed = __ftrace_replace_code(rec, enable);
 			if (failed && (rec->flags & FTRACE_FL_CONVERTED)) {
 				rec->flags |= FTRACE_FL_FAILED;
 				if ((system_state == SYSTEM_BOOTING) ||
 				    !core_kernel_text(rec->ip)) {
 					ftrace_free_rec(rec);
-				}
+				} else
+					ftrace_bug(failed, rec->ip);
 			}
 		}
 	}
 }
 
-static void print_ip_ins(const char *fmt, unsigned char *p)
-{
-	int i;
-
-	printk(KERN_CONT "%s", fmt);
-
-	for (i = 0; i < MCOUNT_INSN_SIZE; i++)
-		printk(KERN_CONT "%s%02x", i ? ":" : "", p[i]);
-}
-
 static int
-ftrace_code_disable(struct dyn_ftrace *rec)
+ftrace_code_disable(struct module *mod, struct dyn_ftrace *rec)
 {
 	unsigned long ip;
-	unsigned char *nop, *call;
 	int ret;
 
 	ip = rec->ip;
 
-	nop = ftrace_nop_replace();
-	call = ftrace_call_replace(ip, mcount_addr);
-
-	ret = ftrace_modify_code(ip, call, nop);
+	ret = ftrace_make_nop(mod, rec, mcount_addr);
 	if (ret) {
-		switch (ret) {
-		case -EFAULT:
-			FTRACE_WARN_ON_ONCE(1);
-			pr_info("ftrace faulted on modifying ");
-			print_ip_sym(ip);
-			break;
-		case -EINVAL:
-			FTRACE_WARN_ON_ONCE(1);
-			pr_info("ftrace failed to modify ");
-			print_ip_sym(ip);
-			print_ip_ins(" expected: ", call);
-			print_ip_ins(" actual: ", (unsigned char *)ip);
-			print_ip_ins(" replace: ", nop);
-			printk(KERN_CONT "\n");
-			break;
-		case -EPERM:
-			FTRACE_WARN_ON_ONCE(1);
-			pr_info("ftrace faulted on writing ");
-			print_ip_sym(ip);
-			break;
-		default:
-			FTRACE_WARN_ON_ONCE(1);
-			pr_info("ftrace faulted on unknown error ");
-			print_ip_sym(ip);
-		}
-
+		ftrace_bug(ret, ip);
 		rec->flags |= FTRACE_FL_FAILED;
 		return 0;
 	}
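[Note] ftrace_code_disable() no longer builds old/new instruction buffers itself; that work moves behind two per-architecture hooks. Their approximate contracts as used in this file (a sketch; the authoritative prototypes live in include/linux/ftrace.h and each architecture's ftrace.c):

/* Turn the mcount call site recorded in @rec back into a nop.
 * @mod is the module owning the site (NULL for core kernel text);
 * @addr is the call target being removed (mcount_addr here,
 * ftrace_caller in __ftrace_replace_code()). */
int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec,
		    unsigned long addr);

/* Patch the previously nop'd site in @rec into a call to @addr. */
int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr);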
@@ -506,17 +588,19 @@ static int __ftrace_modify_code(void *data)
 {
 	int *command = data;
 
-	if (*command & FTRACE_ENABLE_CALLS) {
+	if (*command & FTRACE_ENABLE_CALLS)
 		ftrace_replace_code(1);
-		tracing_on = 1;
-	} else if (*command & FTRACE_DISABLE_CALLS) {
+	else if (*command & FTRACE_DISABLE_CALLS)
 		ftrace_replace_code(0);
-		tracing_on = 0;
-	}
 
 	if (*command & FTRACE_UPDATE_TRACE_FUNC)
 		ftrace_update_ftrace_func(ftrace_trace_function);
 
+	if (*command & FTRACE_START_FUNC_RET)
+		ftrace_enable_ftrace_graph_caller();
+	else if (*command & FTRACE_STOP_FUNC_RET)
+		ftrace_disable_ftrace_graph_caller();
+
 	return 0;
 }
 
@@ -526,44 +610,43 @@ static void ftrace_run_update_code(int command)
 }
 
 static ftrace_func_t saved_ftrace_func;
-static int ftrace_start;
-static DEFINE_MUTEX(ftrace_start_lock);
+static int ftrace_start_up;
 
-static void ftrace_startup(void)
+static void ftrace_startup_enable(int command)
 {
-	int command = 0;
-
-	if (unlikely(ftrace_disabled))
-		return;
-
-	mutex_lock(&ftrace_start_lock);
-	ftrace_start++;
-	if (ftrace_start == 1)
-		command |= FTRACE_ENABLE_CALLS;
-
 	if (saved_ftrace_func != ftrace_trace_function) {
 		saved_ftrace_func = ftrace_trace_function;
 		command |= FTRACE_UPDATE_TRACE_FUNC;
 	}
 
 	if (!command || !ftrace_enabled)
-		goto out;
+		return;
 
 	ftrace_run_update_code(command);
- out:
-	mutex_unlock(&ftrace_start_lock);
+}
+
+static void ftrace_startup(int command)
+{
+	if (unlikely(ftrace_disabled))
+		return;
+
+	mutex_lock(&ftrace_start_lock);
+	ftrace_start_up++;
+	command |= FTRACE_ENABLE_CALLS;
+
+	ftrace_startup_enable(command);
+
+	mutex_unlock(&ftrace_start_lock);
 }
 
-static void ftrace_shutdown(void)
+static void ftrace_shutdown(int command)
 {
-	int command = 0;
-
 	if (unlikely(ftrace_disabled))
 		return;
 
 	mutex_lock(&ftrace_start_lock);
-	ftrace_start--;
-	if (!ftrace_start)
+	ftrace_start_up--;
+	if (!ftrace_start_up)
 		command |= FTRACE_DISABLE_CALLS;
 
 	if (saved_ftrace_func != ftrace_trace_function) {
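[Note] Worth spelling out about the renamed counter:

/* ftrace_start_up is a reference count, not a flag: every
 * ftrace_startup(command) must be paired with an ftrace_shutdown(command),
 * and the call sites are only patched back to nops once the count reaches
 * zero.  Splitting out ftrace_startup_enable() lets ftrace_pid_write()
 * below push a function-pointer update without touching the count. */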
@@ -589,8 +672,8 @@ static void ftrace_startup_sysctl(void)
 	mutex_lock(&ftrace_start_lock);
 	/* Force update next time */
 	saved_ftrace_func = NULL;
-	/* ftrace_start is true if we want ftrace running */
-	if (ftrace_start)
+	/* ftrace_start_up is true if we want ftrace running */
+	if (ftrace_start_up)
 		command |= FTRACE_ENABLE_CALLS;
 
 	ftrace_run_update_code(command);
@@ -605,8 +688,8 @@ static void ftrace_shutdown_sysctl(void)
 		return;
 
 	mutex_lock(&ftrace_start_lock);
-	/* ftrace_start is true if ftrace is running */
-	if (ftrace_start)
+	/* ftrace_start_up is true if ftrace is running */
+	if (ftrace_start_up)
 		command |= FTRACE_DISABLE_CALLS;
 
 	ftrace_run_update_code(command);
@@ -617,7 +700,7 @@ static cycle_t ftrace_update_time;
 static unsigned long ftrace_update_cnt;
 unsigned long ftrace_update_tot_cnt;
 
-static int ftrace_update_code(void)
+static int ftrace_update_code(struct module *mod)
 {
 	struct dyn_ftrace *p, *t;
 	cycle_t start, stop;
@@ -634,7 +717,7 @@ static int ftrace_update_code(void)
 		list_del_init(&p->list);
 
 		/* convert record (i.e, patch mcount-call with NOP) */
-		if (ftrace_code_disable(p)) {
+		if (ftrace_code_disable(mod, p)) {
 			p->flags |= FTRACE_FL_CONVERTED;
 			ftrace_update_cnt++;
 		} else
@@ -677,7 +760,7 @@ static int __init ftrace_dyn_table_alloc(unsigned long num_to_init)
 
 	cnt = num_to_init / ENTRIES_PER_PAGE;
 	pr_info("ftrace: allocating %ld entries in %d pages\n",
-		num_to_init, cnt);
+		num_to_init, cnt + 1);
 
 	for (i = 0; i < cnt; i++) {
 		pg->next = (void *)get_zeroed_page(GFP_KERNEL);
@@ -702,7 +785,6 @@ enum {
 #define FTRACE_BUFF_MAX (KSYM_SYMBOL_LEN+4) /* room for wildcards */
 
 struct ftrace_iterator {
-	loff_t			pos;
 	struct ftrace_page	*pg;
 	unsigned		idx;
 	unsigned		flags;
@@ -727,6 +809,8 @@ t_next(struct seq_file *m, void *v, loff_t *pos)
 			iter->pg = iter->pg->next;
 			iter->idx = 0;
 			goto retry;
+		} else {
+			iter->idx = -1;
 		}
 	} else {
 		rec = &iter->pg->records[iter->idx++];
@@ -738,6 +822,9 @@ t_next(struct seq_file *m, void *v, loff_t *pos)
 		    ((iter->flags & FTRACE_ITER_FAILURES) &&
 		     !(rec->flags & FTRACE_FL_FAILED)) ||
 
+		    ((iter->flags & FTRACE_ITER_FILTER) &&
+		     !(rec->flags & FTRACE_FL_FILTER)) ||
+
 		    ((iter->flags & FTRACE_ITER_NOTRACE) &&
 		     !(rec->flags & FTRACE_FL_NOTRACE))) {
 			rec = NULL;
@@ -746,8 +833,6 @@ t_next(struct seq_file *m, void *v, loff_t *pos)
 	}
 	spin_unlock(&ftrace_lock);
 
-	iter->pos = *pos;
-
 	return rec;
 }
 
@@ -755,16 +840,16 @@ static void *t_start(struct seq_file *m, loff_t *pos)
 {
 	struct ftrace_iterator *iter = m->private;
 	void *p = NULL;
-	loff_t l = -1;
 
-	if (*pos != iter->pos) {
-		for (p = t_next(m, p, &l); p && l < *pos; p = t_next(m, p, &l))
-			;
-	} else {
-		l = *pos;
-		p = t_next(m, p, &l);
-	}
+	if (*pos > 0) {
+		if (iter->idx < 0)
+			return p;
+		(*pos)--;
+		iter->idx--;
+	}
 
+	p = t_next(m, p, pos);
+
 	return p;
 }
 
@@ -808,7 +893,6 @@ ftrace_avail_open(struct inode *inode, struct file *file)
 		return -ENOMEM;
 
 	iter->pg = ftrace_pages_start;
-	iter->pos = -1;
 
 	ret = seq_open(file, &show_ftrace_seq_ops);
 	if (!ret) {
@@ -895,7 +979,6 @@ ftrace_regex_open(struct inode *inode, struct file *file, int enable)
 
 	if (file->f_mode & FMODE_READ) {
 		iter->pg = ftrace_pages_start;
-		iter->pos = -1;
 		iter->flags = enable ? FTRACE_ITER_FILTER :
 			FTRACE_ITER_NOTRACE;
 
@@ -1186,7 +1269,7 @@ ftrace_regex_release(struct inode *inode, struct file *file, int enable)
 
 	mutex_lock(&ftrace_sysctl_lock);
 	mutex_lock(&ftrace_start_lock);
-	if (iter->filtered && ftrace_start && ftrace_enabled)
+	if (ftrace_start_up && ftrace_enabled)
 		ftrace_run_update_code(FTRACE_ENABLE_CALLS);
 	mutex_unlock(&ftrace_start_lock);
 	mutex_unlock(&ftrace_sysctl_lock);
@@ -1238,12 +1321,233 @@ static struct file_operations ftrace_notrace_fops = {
 	.release = ftrace_notrace_release,
 };
 
-static __init int ftrace_init_debugfs(void)
-{
-	struct dentry *d_tracer;
-	struct dentry *entry;
-
-	d_tracer = tracing_init_dentry();
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+
+static DEFINE_MUTEX(graph_lock);
+
+int ftrace_graph_count;
+unsigned long ftrace_graph_funcs[FTRACE_GRAPH_MAX_FUNCS] __read_mostly;
+
+static void *
+g_next(struct seq_file *m, void *v, loff_t *pos)
+{
+	unsigned long *array = m->private;
+	int index = *pos;
+
+	(*pos)++;
+
+	if (index >= ftrace_graph_count)
+		return NULL;
+
+	return &array[index];
+}
+
+static void *g_start(struct seq_file *m, loff_t *pos)
+{
+	void *p = NULL;
+
+	mutex_lock(&graph_lock);
+
+	p = g_next(m, p, pos);
+
+	return p;
+}
+
+static void g_stop(struct seq_file *m, void *p)
+{
+	mutex_unlock(&graph_lock);
+}
+
+static int g_show(struct seq_file *m, void *v)
+{
+	unsigned long *ptr = v;
+	char str[KSYM_SYMBOL_LEN];
+
+	if (!ptr)
+		return 0;
+
+	kallsyms_lookup(*ptr, NULL, NULL, NULL, str);
+
+	seq_printf(m, "%s\n", str);
+
+	return 0;
+}
+
+static struct seq_operations ftrace_graph_seq_ops = {
+	.start = g_start,
+	.next = g_next,
+	.stop = g_stop,
+	.show = g_show,
+};
+
+static int
+ftrace_graph_open(struct inode *inode, struct file *file)
+{
+	int ret = 0;
+
+	if (unlikely(ftrace_disabled))
+		return -ENODEV;
+
+	mutex_lock(&graph_lock);
+	if ((file->f_mode & FMODE_WRITE) &&
+	    !(file->f_flags & O_APPEND)) {
+		ftrace_graph_count = 0;
+		memset(ftrace_graph_funcs, 0, sizeof(ftrace_graph_funcs));
+	}
+
+	if (file->f_mode & FMODE_READ) {
+		ret = seq_open(file, &ftrace_graph_seq_ops);
+		if (!ret) {
+			struct seq_file *m = file->private_data;
+			m->private = ftrace_graph_funcs;
+		}
+	} else
+		file->private_data = ftrace_graph_funcs;
+	mutex_unlock(&graph_lock);
+
+	return ret;
+}
+
+static ssize_t
+ftrace_graph_read(struct file *file, char __user *ubuf,
+		  size_t cnt, loff_t *ppos)
+{
+	if (file->f_mode & FMODE_READ)
+		return seq_read(file, ubuf, cnt, ppos);
+	else
+		return -EPERM;
+}
+
+static int
+ftrace_set_func(unsigned long *array, int idx, char *buffer)
+{
+	char str[KSYM_SYMBOL_LEN];
+	struct dyn_ftrace *rec;
+	struct ftrace_page *pg;
+	int found = 0;
+	int i, j;
+
+	if (ftrace_disabled)
+		return -ENODEV;
+
+	/* should not be called from interrupt context */
+	spin_lock(&ftrace_lock);
+
+	for (pg = ftrace_pages_start; pg; pg = pg->next) {
+		for (i = 0; i < pg->index; i++) {
+			rec = &pg->records[i];
+
+			if (rec->flags & (FTRACE_FL_FAILED | FTRACE_FL_FREE))
+				continue;
+
+			kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);
+			if (strcmp(str, buffer) == 0) {
+				found = 1;
+				for (j = 0; j < idx; j++)
+					if (array[j] == rec->ip) {
+						found = 0;
+						break;
+					}
+				if (found)
+					array[idx] = rec->ip;
+				break;
+			}
+		}
+	}
+	spin_unlock(&ftrace_lock);
+
+	return found ? 0 : -EINVAL;
+}
+
+static ssize_t
+ftrace_graph_write(struct file *file, const char __user *ubuf,
+		   size_t cnt, loff_t *ppos)
+{
+	unsigned char buffer[FTRACE_BUFF_MAX+1];
+	unsigned long *array;
+	size_t read = 0;
+	ssize_t ret;
+	int index = 0;
+	char ch;
+
+	if (!cnt || cnt < 0)
+		return 0;
+
+	mutex_lock(&graph_lock);
+
+	if (ftrace_graph_count >= FTRACE_GRAPH_MAX_FUNCS) {
+		ret = -EBUSY;
+		goto out;
+	}
+
+	if (file->f_mode & FMODE_READ) {
+		struct seq_file *m = file->private_data;
+		array = m->private;
+	} else
+		array = file->private_data;
+
+	ret = get_user(ch, ubuf++);
+	if (ret)
+		goto out;
+	read++;
+	cnt--;
+
+	/* skip white space */
+	while (cnt && isspace(ch)) {
+		ret = get_user(ch, ubuf++);
+		if (ret)
+			goto out;
+		read++;
+		cnt--;
+	}
+
+	if (isspace(ch)) {
+		*ppos += read;
+		ret = read;
+		goto out;
+	}
+
+	while (cnt && !isspace(ch)) {
+		if (index < FTRACE_BUFF_MAX)
+			buffer[index++] = ch;
+		else {
+			ret = -EINVAL;
+			goto out;
+		}
+		ret = get_user(ch, ubuf++);
+		if (ret)
+			goto out;
+		read++;
+		cnt--;
+	}
+	buffer[index] = 0;
+
+	/* we allow only one at a time */
+	ret = ftrace_set_func(array, ftrace_graph_count, buffer);
+	if (ret)
+		goto out;
+
+	ftrace_graph_count++;
+
+	file->f_pos += read;
+
+	ret = read;
+ out:
+	mutex_unlock(&graph_lock);
+
+	return ret;
+}
+
+static const struct file_operations ftrace_graph_fops = {
+	.open = ftrace_graph_open,
+	.read = ftrace_graph_read,
+	.write = ftrace_graph_write,
+};
+#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
+
+static __init int ftrace_init_dyn_debugfs(struct dentry *d_tracer)
+{
+	struct dentry *entry;
 
 	entry = debugfs_create_file("available_filter_functions", 0444,
 				    d_tracer, NULL, &ftrace_avail_fops);
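[Note] The new set_graph_function file limits the function graph tracer to the listed functions. Typical usage, as a sketch (assumes debugfs is mounted at /sys/kernel/debug):

/*
 *   echo sys_open > /sys/kernel/debug/tracing/set_graph_function
 *   cat /sys/kernel/debug/tracing/set_graph_function
 *
 * Opening the file for write without O_APPEND clears the existing list
 * (see ftrace_graph_open() above); each write may add only one function.
 */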
@@ -1268,12 +1572,20 @@ static __init int ftrace_init_debugfs(void)
 		pr_warning("Could not create debugfs "
 			   "'set_ftrace_notrace' entry\n");
 
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+	entry = debugfs_create_file("set_graph_function", 0444, d_tracer,
+				    NULL,
+				    &ftrace_graph_fops);
+	if (!entry)
+		pr_warning("Could not create debugfs "
+			   "'set_graph_function' entry\n");
+#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
+
 	return 0;
 }
 
-fs_initcall(ftrace_init_debugfs);
-
-static int ftrace_convert_nops(unsigned long *start,
+static int ftrace_convert_nops(struct module *mod,
+			       unsigned long *start,
 			       unsigned long *end)
 {
 	unsigned long *p;
@@ -1284,23 +1596,32 @@ static int ftrace_convert_nops(unsigned long *start,
 	p = start;
 	while (p < end) {
 		addr = ftrace_call_adjust(*p++);
+		/*
+		 * Some architecture linkers will pad between
+		 * the different mcount_loc sections of different
+		 * object files to satisfy alignments.
+		 * Skip any NULL pointers.
+		 */
+		if (!addr)
+			continue;
 		ftrace_record_ip(addr);
 	}
 
 	/* disable interrupts to prevent kstop machine */
 	local_irq_save(flags);
-	ftrace_update_code();
+	ftrace_update_code(mod);
 	local_irq_restore(flags);
 	mutex_unlock(&ftrace_start_lock);
 
 	return 0;
 }
 
-void ftrace_init_module(unsigned long *start, unsigned long *end)
+void ftrace_init_module(struct module *mod,
+			unsigned long *start, unsigned long *end)
 {
 	if (ftrace_disabled || start == end)
 		return;
-	ftrace_convert_nops(start, end);
+	ftrace_convert_nops(mod, start, end);
 }
 
 extern unsigned long __start_mcount_loc[];
@@ -1330,7 +1651,8 @@ void __init ftrace_init(void)
 
 	last_ftrace_enabled = ftrace_enabled = 1;
 
-	ret = ftrace_convert_nops(__start_mcount_loc,
+	ret = ftrace_convert_nops(NULL,
+				  __start_mcount_loc,
 				  __stop_mcount_loc);
 
 	return;
@@ -1347,12 +1669,186 @@ static int __init ftrace_nodyn_init(void)
 }
 device_initcall(ftrace_nodyn_init);
 
-# define ftrace_startup()		do { } while (0)
-# define ftrace_shutdown()		do { } while (0)
+static inline int ftrace_init_dyn_debugfs(struct dentry *d_tracer) { return 0; }
+static inline void ftrace_startup_enable(int command) { }
+/* Keep as macros so we do not need to define the commands */
+# define ftrace_startup(command)	do { } while (0)
+# define ftrace_shutdown(command)	do { } while (0)
 # define ftrace_startup_sysctl()	do { } while (0)
 # define ftrace_shutdown_sysctl()	do { } while (0)
 #endif /* CONFIG_DYNAMIC_FTRACE */
 
+static ssize_t
+ftrace_pid_read(struct file *file, char __user *ubuf,
+		size_t cnt, loff_t *ppos)
+{
+	char buf[64];
+	int r;
+
+	if (ftrace_pid_trace == ftrace_swapper_pid)
+		r = sprintf(buf, "swapper tasks\n");
+	else if (ftrace_pid_trace)
+		r = sprintf(buf, "%u\n", pid_nr(ftrace_pid_trace));
+	else
+		r = sprintf(buf, "no pid\n");
+
+	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
+}
+
+static void clear_ftrace_swapper(void)
+{
+	struct task_struct *p;
+	int cpu;
+
+	get_online_cpus();
+	for_each_online_cpu(cpu) {
+		p = idle_task(cpu);
+		clear_tsk_trace_trace(p);
+	}
+	put_online_cpus();
+}
+
+static void set_ftrace_swapper(void)
+{
+	struct task_struct *p;
+	int cpu;
+
+	get_online_cpus();
+	for_each_online_cpu(cpu) {
+		p = idle_task(cpu);
+		set_tsk_trace_trace(p);
+	}
+	put_online_cpus();
+}
+
+static void clear_ftrace_pid(struct pid *pid)
+{
+	struct task_struct *p;
+
+	do_each_pid_task(pid, PIDTYPE_PID, p) {
+		clear_tsk_trace_trace(p);
+	} while_each_pid_task(pid, PIDTYPE_PID, p);
+	put_pid(pid);
+}
+
+static void set_ftrace_pid(struct pid *pid)
+{
+	struct task_struct *p;
+
+	do_each_pid_task(pid, PIDTYPE_PID, p) {
+		set_tsk_trace_trace(p);
+	} while_each_pid_task(pid, PIDTYPE_PID, p);
+}
+
+static void clear_ftrace_pid_task(struct pid **pid)
+{
+	if (*pid == ftrace_swapper_pid)
+		clear_ftrace_swapper();
+	else
+		clear_ftrace_pid(*pid);
+
+	*pid = NULL;
+}
+
+static void set_ftrace_pid_task(struct pid *pid)
+{
+	if (pid == ftrace_swapper_pid)
+		set_ftrace_swapper();
+	else
+		set_ftrace_pid(pid);
+}
+
+static ssize_t
+ftrace_pid_write(struct file *filp, const char __user *ubuf,
+		 size_t cnt, loff_t *ppos)
+{
+	struct pid *pid;
+	char buf[64];
+	long val;
+	int ret;
+
+	if (cnt >= sizeof(buf))
+		return -EINVAL;
+
+	if (copy_from_user(&buf, ubuf, cnt))
+		return -EFAULT;
+
+	buf[cnt] = 0;
+
+	ret = strict_strtol(buf, 10, &val);
+	if (ret < 0)
+		return ret;
+
+	mutex_lock(&ftrace_start_lock);
+	if (val < 0) {
+		/* disable pid tracing */
+		if (!ftrace_pid_trace)
+			goto out;
+
+		clear_ftrace_pid_task(&ftrace_pid_trace);
+
+	} else {
+		/* swapper task is special */
+		if (!val) {
+			pid = ftrace_swapper_pid;
+			if (pid == ftrace_pid_trace)
+				goto out;
+		} else {
+			pid = find_get_pid(val);
+
+			if (pid == ftrace_pid_trace) {
+				put_pid(pid);
+				goto out;
+			}
+		}
+
+		if (ftrace_pid_trace)
+			clear_ftrace_pid_task(&ftrace_pid_trace);
+
+		if (!pid)
+			goto out;
+
+		ftrace_pid_trace = pid;
+
+		set_ftrace_pid_task(ftrace_pid_trace);
+	}
+
+	/* update the function call */
+	ftrace_update_pid_func();
+	ftrace_startup_enable(0);
+
+ out:
+	mutex_unlock(&ftrace_start_lock);
+
+	return cnt;
+}
+
+static struct file_operations ftrace_pid_fops = {
+	.read = ftrace_pid_read,
+	.write = ftrace_pid_write,
+};
+
+static __init int ftrace_init_debugfs(void)
+{
+	struct dentry *d_tracer;
+	struct dentry *entry;
+
+	d_tracer = tracing_init_dentry();
+	if (!d_tracer)
+		return 0;
+
+	ftrace_init_dyn_debugfs(d_tracer);
+
+	entry = debugfs_create_file("set_ftrace_pid", 0644, d_tracer,
+				    NULL, &ftrace_pid_fops);
+	if (!entry)
+		pr_warning("Could not create debugfs "
+			   "'set_ftrace_pid' entry\n");
+	return 0;
+}
+
+fs_initcall(ftrace_init_debugfs);
+
 /**
  * ftrace_kill - kill ftrace
  *
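[Note] ftrace_pid_write() gives set_ftrace_pid three input cases. A usage sketch (assumes debugfs is mounted at /sys/kernel/debug):

/*
 *   echo 1234 > /sys/kernel/debug/tracing/set_ftrace_pid   # trace pid 1234 only
 *   echo 0    > /sys/kernel/debug/tracing/set_ftrace_pid   # trace the idle tasks
 *   echo -1   > /sys/kernel/debug/tracing/set_ftrace_pid   # turn pid tracing off
 *   cat /sys/kernel/debug/tracing/set_ftrace_pid
 */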
@@ -1386,10 +1882,11 @@ int register_ftrace_function(struct ftrace_ops *ops)
 		return -1;
 
 	mutex_lock(&ftrace_sysctl_lock);
+
 	ret = __register_ftrace_function(ops);
-	ftrace_startup();
-	mutex_unlock(&ftrace_sysctl_lock);
+	ftrace_startup(0);
 
+	mutex_unlock(&ftrace_sysctl_lock);
 	return ret;
 }
 
@@ -1405,7 +1902,7 @@ int unregister_ftrace_function(struct ftrace_ops *ops)
 
 	mutex_lock(&ftrace_sysctl_lock);
 	ret = __unregister_ftrace_function(ops);
-	ftrace_shutdown();
+	ftrace_shutdown(0);
 	mutex_unlock(&ftrace_sysctl_lock);
 
 	return ret;
@@ -1454,3 +1951,153 @@ ftrace_enable_sysctl(struct ctl_table *table, int write,
 	return ret;
 }
 
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+
+static atomic_t ftrace_graph_active;
+
+int ftrace_graph_entry_stub(struct ftrace_graph_ent *trace)
+{
+	return 0;
+}
+
+/* The callbacks that hook a function */
+trace_func_graph_ret_t ftrace_graph_return =
+			(trace_func_graph_ret_t)ftrace_stub;
+trace_func_graph_ent_t ftrace_graph_entry = ftrace_graph_entry_stub;
+
+/* Try to assign a return stack array on FTRACE_RETSTACK_ALLOC_SIZE tasks. */
+static int alloc_retstack_tasklist(struct ftrace_ret_stack **ret_stack_list)
+{
+	int i;
+	int ret = 0;
+	unsigned long flags;
+	int start = 0, end = FTRACE_RETSTACK_ALLOC_SIZE;
+	struct task_struct *g, *t;
+
+	for (i = 0; i < FTRACE_RETSTACK_ALLOC_SIZE; i++) {
+		ret_stack_list[i] = kmalloc(FTRACE_RETFUNC_DEPTH
+					* sizeof(struct ftrace_ret_stack),
+					GFP_KERNEL);
+		if (!ret_stack_list[i]) {
+			start = 0;
+			end = i;
+			ret = -ENOMEM;
+			goto free;
+		}
+	}
+
+	read_lock_irqsave(&tasklist_lock, flags);
+	do_each_thread(g, t) {
+		if (start == end) {
+			ret = -EAGAIN;
+			goto unlock;
+		}
+
+		if (t->ret_stack == NULL) {
+			t->curr_ret_stack = -1;
+			/* Make sure IRQs see the -1 first: */
+			barrier();
+			t->ret_stack = ret_stack_list[start++];
+			atomic_set(&t->tracing_graph_pause, 0);
+			atomic_set(&t->trace_overrun, 0);
+		}
+	} while_each_thread(g, t);
+
+unlock:
+	read_unlock_irqrestore(&tasklist_lock, flags);
+free:
+	for (i = start; i < end; i++)
+		kfree(ret_stack_list[i]);
+	return ret;
+}
+
+/* Allocate a return stack for each task */
+static int start_graph_tracing(void)
+{
+	struct ftrace_ret_stack **ret_stack_list;
+	int ret;
+
+	ret_stack_list = kmalloc(FTRACE_RETSTACK_ALLOC_SIZE *
+				sizeof(struct ftrace_ret_stack *),
+				GFP_KERNEL);
+
+	if (!ret_stack_list)
+		return -ENOMEM;
+
+	do {
+		ret = alloc_retstack_tasklist(ret_stack_list);
+	} while (ret == -EAGAIN);
+
+	kfree(ret_stack_list);
+	return ret;
+}
+
+int register_ftrace_graph(trace_func_graph_ret_t retfunc,
+			trace_func_graph_ent_t entryfunc)
+{
+	int ret = 0;
+
+	mutex_lock(&ftrace_sysctl_lock);
+
+	atomic_inc(&ftrace_graph_active);
+	ret = start_graph_tracing();
+	if (ret) {
+		atomic_dec(&ftrace_graph_active);
+		goto out;
+	}
+
+	ftrace_graph_return = retfunc;
+	ftrace_graph_entry = entryfunc;
+
+	ftrace_startup(FTRACE_START_FUNC_RET);
+
+out:
+	mutex_unlock(&ftrace_sysctl_lock);
+	return ret;
+}
+
+void unregister_ftrace_graph(void)
+{
+	mutex_lock(&ftrace_sysctl_lock);
+
+	atomic_dec(&ftrace_graph_active);
+	ftrace_graph_return = (trace_func_graph_ret_t)ftrace_stub;
+	ftrace_graph_entry = ftrace_graph_entry_stub;
+	ftrace_shutdown(FTRACE_STOP_FUNC_RET);
+
+	mutex_unlock(&ftrace_sysctl_lock);
+}
+
+/* Allocate a return stack for newly created task */
+void ftrace_graph_init_task(struct task_struct *t)
+{
+	if (atomic_read(&ftrace_graph_active)) {
+		t->ret_stack = kmalloc(FTRACE_RETFUNC_DEPTH
+				* sizeof(struct ftrace_ret_stack),
+				GFP_KERNEL);
+		if (!t->ret_stack)
+			return;
+		t->curr_ret_stack = -1;
+		atomic_set(&t->tracing_graph_pause, 0);
+		atomic_set(&t->trace_overrun, 0);
+	} else
+		t->ret_stack = NULL;
+}
+
+void ftrace_graph_exit_task(struct task_struct *t)
+{
+	struct ftrace_ret_stack	*ret_stack = t->ret_stack;
+
+	t->ret_stack = NULL;
+	/* NULL must become visible to IRQs before we free it: */
+	barrier();
+
+	kfree(ret_stack);
+}
+
+void ftrace_graph_stop(void)
+{
+	ftrace_stop();
+}
+#endif
+
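[Note] For context, a minimal sketch of a client of the new graph hooks. This is a hypothetical module, not part of this patch; it additionally assumes register_ftrace_graph()/unregister_ftrace_graph() were exported to modules, and that a nonzero return from the entry handler means "trace this call", matching how ftrace_graph_entry_stub opts out by returning 0:

#include <linux/module.h>
#include <linux/ftrace.h>

static int my_graph_entry(struct ftrace_graph_ent *trace)
{
	return 1;	/* hook this function's return as well */
}

static void my_graph_return(struct ftrace_graph_ret *trace)
{
	/* runs when the hooked function returns */
}

static int __init my_graph_init(void)
{
	return register_ftrace_graph(my_graph_return, my_graph_entry);
}

static void __exit my_graph_exit(void)
{
	unregister_ftrace_graph();
}

module_init(my_graph_init);
module_exit(my_graph_exit);
MODULE_LICENSE("GPL");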