author	Steven Rostedt <srostedt@redhat.com>	2009-03-25 20:06:34 -0400
committer	Steven Rostedt <srostedt@redhat.com>	2009-03-25 21:03:17 -0400
commit	318e0a73c9e41b9a17241829bcd0605a39b87cb9 (patch)
tree	230bd849953f60d12003fd5b9fecfd151ebb468b /kernel/trace/ftrace.c
parent	fb9fb015e92123fa3a8e0c2e2fff491d4a56b470 (diff)
tracing: remove on the fly allocator from function profiler
Impact: safer code

The on-the-fly allocator for the function profiler was added to save
memory, but at the expense of stability. Although it survived several
tests, allocating from within the function tracer is simply too risky,
just to save space.

This patch removes the allocator and instead allocates enough entries
at start up.

Each function gets a profiling structure of 40 bytes. With an average
of 20K functions, and this being per CPU, that comes to roughly 800K
per online CPU. This is not too bad, at least for non-embedded systems.

Signed-off-by: Steven Rostedt <srostedt@redhat.com>
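As a quick sanity check on those figures, the footprint works out as in
the minimal sketch below. The 40-byte record size and the 20K function
count are the averages quoted above, and the per-page math deliberately
ignores the small page-header overhead that the real PROFILES_PER_PAGE
macro accounts for:

#include <stdio.h>

#define RECORD_SIZE	40	/* approx. size of one profiling structure */
#define NR_FUNCTIONS	20000	/* typical count of traceable functions */
#define PAGE_SIZE	4096

int main(void)
{
	int per_page = PAGE_SIZE / RECORD_SIZE;			/* 102 records */
	int pages = (NR_FUNCTIONS + per_page - 1) / per_page;	/* 197 pages */
	long bytes = (long)pages * PAGE_SIZE;	/* ~800K, as the message says */

	printf("%d pages, %ld bytes per online CPU\n", pages, bytes);
	return 0;
}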
Diffstat (limited to 'kernel/trace/ftrace.c')
-rw-r--r--	kernel/trace/ftrace.c	76
1 file changed, 43 insertions(+), 33 deletions(-)
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index a141d8499ab0..4d90c916b2bb 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -401,6 +401,8 @@ static void ftrace_profile_reset(struct ftrace_profile_stat *stat)
 int ftrace_profile_pages_init(struct ftrace_profile_stat *stat)
 {
 	struct ftrace_profile_page *pg;
+	int functions;
+	int pages;
 	int i;
 
 	/* If we already allocated, do nothing */
@@ -411,22 +413,46 @@ int ftrace_profile_pages_init(struct ftrace_profile_stat *stat)
 	if (!stat->pages)
 		return -ENOMEM;
 
+#ifdef CONFIG_DYNAMIC_FTRACE
+	functions = ftrace_update_tot_cnt;
+#else
+	/*
+	 * We do not know the number of functions that exist because
+	 * dynamic tracing is what counts them. With past experience
+	 * we have around 20K functions. That should be more than enough.
+	 * It is highly unlikely we will execute every function in
+	 * the kernel.
+	 */
+	functions = 20000;
+#endif
+
 	pg = stat->start = stat->pages;
 
-	/* allocate 10 more pages to start */
-	for (i = 0; i < 10; i++) {
+	pages = DIV_ROUND_UP(functions, PROFILES_PER_PAGE);
+
+	for (i = 0; i < pages; i++) {
 		pg->next = (void *)get_zeroed_page(GFP_KERNEL);
-		/*
-		 * We only care about allocating profile_pages, if
-		 * we failed to allocate here, hopefully we will allocate
-		 * later.
-		 */
 		if (!pg->next)
-			break;
+			goto out_free;
 		pg = pg->next;
 	}
 
 	return 0;
+
+ out_free:
+	pg = stat->start;
+	while (pg) {
+		unsigned long tmp = (unsigned long)pg;
+
+		pg = pg->next;
+		free_page(tmp);
+	}
+
+	free_page((unsigned long)stat->pages);
+	stat->pages = NULL;
+	stat->start = NULL;
+
+	return -ENOMEM;
 }
 
 static int ftrace_profile_init_cpu(int cpu)
@@ -460,7 +486,7 @@ static int ftrace_profile_init_cpu(int cpu)
 		ftrace_profile_bits++;
 	}
 
-	/* Preallocate a few pages */
+	/* Preallocate the function profiling pages */
 	if (ftrace_profile_pages_init(stat) < 0) {
 		kfree(stat->hash);
 		stat->hash = NULL;
@@ -516,24 +542,21 @@ static void ftrace_add_profile(struct ftrace_profile_stat *stat,
 	hlist_add_head_rcu(&rec->node, &stat->hash[key]);
 }
 
-/* Interrupts must be disabled calling this */
+/*
+ * The memory is already allocated, this simply finds a new record to use.
+ */
 static struct ftrace_profile *
-ftrace_profile_alloc(struct ftrace_profile_stat *stat,
-		     unsigned long ip, bool alloc_safe)
+ftrace_profile_alloc(struct ftrace_profile_stat *stat, unsigned long ip)
 {
 	struct ftrace_profile *rec = NULL;
 
-	/* prevent recursion */
+	/* prevent recursion (from NMIs) */
 	if (atomic_inc_return(&stat->disabled) != 1)
 		goto out;
 
-	/* Try to always keep another page available */
-	if (!stat->pages->next && alloc_safe)
-		stat->pages->next = (void *)get_zeroed_page(GFP_ATOMIC);
-
 	/*
-	 * Try to find the function again since another
-	 * task on another CPU could have added it
+	 * Try to find the function again since an NMI
+	 * could have added it
 	 */
 	rec = ftrace_find_profiled_func(stat, ip);
 	if (rec)
@@ -555,29 +578,16 @@ ftrace_profile_alloc(struct ftrace_profile_stat *stat,
 	return rec;
 }
 
-/*
- * If we are not in an interrupt, or softirq and
- * and interrupts are disabled and preemption is not enabled
- * (not in a spinlock) then it should be safe to allocate memory.
- */
-static bool ftrace_safe_to_allocate(void)
-{
-	return !in_interrupt() && irqs_disabled() && !preempt_count();
-}
-
 static void
 function_profile_call(unsigned long ip, unsigned long parent_ip)
 {
 	struct ftrace_profile_stat *stat;
 	struct ftrace_profile *rec;
 	unsigned long flags;
-	bool alloc_safe;
 
 	if (!ftrace_profile_enabled)
 		return;
 
-	alloc_safe = ftrace_safe_to_allocate();
-
 	local_irq_save(flags);
 
 	stat = &__get_cpu_var(ftrace_profile_stats);
@@ -586,7 +596,7 @@ function_profile_call(unsigned long ip, unsigned long parent_ip)
 
 	rec = ftrace_find_profiled_func(stat, ip);
 	if (!rec) {
-		rec = ftrace_profile_alloc(stat, ip, alloc_safe);
+		rec = ftrace_profile_alloc(stat, ip);
 		if (!rec)
 			goto out;
 	}
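The new out_free error path in ftrace_profile_pages_init() follows the
standard kernel allocate-everything-or-unwind pattern: on the first
failed page allocation, walk the partially built list and release every
page already obtained, rather than limping along with a partial pool. A
stripped-down user-space sketch of the same idiom (the list layout and
names here are illustrative, not the actual ftrace structures):

#include <stdlib.h>

struct page_node {
	struct page_node *next;
	/* profiling records would live in the rest of the page */
};

/* Build a list of 'pages' nodes; on failure, free the partial list. */
static struct page_node *alloc_all_or_nothing(int pages)
{
	struct page_node *head, *pg;
	int i;

	head = pg = calloc(1, sizeof(*pg));
	if (!head)
		return NULL;

	for (i = 1; i < pages; i++) {
		pg->next = calloc(1, sizeof(*pg));
		if (!pg->next)
			goto out_free;
		pg = pg->next;
	}
	return head;

 out_free:
	/* Unwind: free every node allocated so far, just as the patch
	 * walks the page list with free_page() before returning -ENOMEM. */
	pg = head;
	while (pg) {
		struct page_node *tmp = pg;

		pg = pg->next;
		free(tmp);
	}
	return NULL;
}

int main(void)
{
	struct page_node *list = alloc_all_or_nothing(197);

	return list ? 0 : 1;
}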