author     Steven Rostedt <srostedt@redhat.com>   2009-04-24 12:20:52 -0400
committer  Steven Rostedt <rostedt@goodmis.org>   2009-04-24 23:06:00 -0400
commit     060fa5c83e67901ba47ab484cfcdb32737d630ba
tree       c83ec27ae1064cfb290904f95cb85b3b0889b2a1 /kernel/trace/trace_output.c
parent     b8e65554d80b4c560d201362d0e8fa02109d89fd
tracing/events: reuse trace event ids after overflow
With modules being able to add trace events, and with the trace event counter being only 16 bits (65536 values), the counter can easily be overflowed by a simple while loop that adds and removes modules containing trace events.

This patch links the registered trace events together and, on overflow, searches that list for available trace event ids. Registration will still fail if more than 65536 events are in use at once, but considering that a typical kernel only has about 22000 functions, 65000 events should be sufficient.

Reported-by: Li Zefan <lizf@cn.fujitsu.com>
Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
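For readers skimming the diff below, here is a rough, standalone C sketch of the id-reuse idea only; it is not the kernel code. The event list, the struct, and names such as ev_node, find_free_type and register_type are made up for illustration, and a plain pointer list stands in for the kernel's list_head. The point it demonstrates: keep registered events sorted by type and, once the simple counter is exhausted, walk the list until a gap left by an unregistered event is found.

/*
 * Illustrative sketch only -- mimics the gap-search strategy of the
 * patch's trace_search_list(), with simplified names and data structures.
 */
#include <stdio.h>
#include <stdlib.h>

#define LAST_RESERVED_TYPE 32      /* stand-in for __TRACE_LAST_TYPE */
#define MAX_EVENT_TYPE     65535   /* stand-in for FTRACE_MAX_EVENT  */

struct ev_node {
        int type;
        struct ev_node *next;
};

static struct ev_node *ev_list;    /* kept sorted by ->type */

/* Return the first unused type above the reserved range, or 0 if none. */
static int find_free_type(void)
{
        struct ev_node *e;
        int last = LAST_RESERVED_TYPE;

        /* Walk the sorted list; stop at the first gap in the numbering. */
        for (e = ev_list; e; e = e->next) {
                if (e->type != last + 1)
                        break;
                last++;
        }

        if (last + 1 > MAX_EVENT_TYPE)
                return 0;          /* truly out of ids */
        return last + 1;
}

/* Insert a node so the list stays sorted by type. */
static void register_type(int type)
{
        struct ev_node **p = &ev_list;
        struct ev_node *n = malloc(sizeof(*n));

        n->type = type;
        while (*p && (*p)->type < type)
                p = &(*p)->next;
        n->next = *p;
        *p = n;
}

int main(void)
{
        struct ev_node **p;

        /* Register three events, then unregister the middle one. */
        register_type(find_free_type());        /* gets 33 */
        register_type(find_free_type());        /* gets 34 */
        register_type(find_free_type());        /* gets 35 */

        for (p = &ev_list; *p && (*p)->type != 34; p = &(*p)->next)
                ;
        if (*p) {
                struct ev_node *dead = *p;
                *p = dead->next;
                free(dead);
        }

        /* The freed id is found again instead of bumping a counter. */
        printf("next free type: %d\n", find_free_type());   /* prints 34 */
        return 0;
}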
Diffstat (limited to 'kernel/trace/trace_output.c')
-rw-r--r--  kernel/trace/trace_output.c | 71
1 file changed, 60 insertions(+), 11 deletions(-)
diff --git a/kernel/trace/trace_output.c b/kernel/trace/trace_output.c
index 06997e75114b..5fc51f0f75fc 100644
--- a/kernel/trace/trace_output.c
+++ b/kernel/trace/trace_output.c
@@ -483,6 +483,36 @@ struct trace_event *ftrace_find_event(int type)
 	return NULL;
 }
 
+static LIST_HEAD(ftrace_event_list);
+
+static int trace_search_list(struct list_head **list)
+{
+	struct trace_event *e;
+	int last = __TRACE_LAST_TYPE;
+
+	if (list_empty(&ftrace_event_list)) {
+		*list = &ftrace_event_list;
+		return last + 1;
+	}
+
+	/*
+	 * We used up all possible max events,
+	 * lets see if somebody freed one.
+	 */
+	list_for_each_entry(e, &ftrace_event_list, list) {
+		if (e->type != last + 1)
+			break;
+		last++;
+	}
+
+	/* Did we used up all 65 thousand events??? */
+	if ((last + 1) > FTRACE_MAX_EVENT)
+		return 0;
+
+	*list = &e->list;
+	return last + 1;
+}
+
 /**
  * register_ftrace_event - register output for an event type
  * @event: the event type to register
@@ -505,20 +535,40 @@ int register_ftrace_event(struct trace_event *event)
 
 	mutex_lock(&trace_event_mutex);
 
-	if (!event) {
-		ret = next_event_type++;
+	if (WARN_ON(!event))
 		goto out;
-	}
 
-	if (!event->type)
-		event->type = next_event_type++;
-	else if (event->type > __TRACE_LAST_TYPE) {
+	INIT_LIST_HEAD(&event->list);
+
+	if (!event->type) {
+		struct list_head *list;
+
+		if (next_event_type > FTRACE_MAX_EVENT) {
+
+			event->type = trace_search_list(&list);
+			if (!event->type)
+				goto out;
+
+		} else {
+
+			event->type = next_event_type++;
+			list = &ftrace_event_list;
+		}
+
+		if (WARN_ON(ftrace_find_event(event->type)))
+			goto out;
+
+		list_add_tail(&event->list, list);
+
+	} else if (event->type > __TRACE_LAST_TYPE) {
 		printk(KERN_WARNING "Need to add type to trace.h\n");
 		WARN_ON(1);
-	}
-
-	if (ftrace_find_event(event->type))
 		goto out;
+	} else {
+		/* Is this event already used */
+		if (ftrace_find_event(event->type))
+			goto out;
+	}
 
 	if (event->trace == NULL)
 		event->trace = trace_nop_print;
@@ -537,8 +587,6 @@ int register_ftrace_event(struct trace_event *event)
  out:
 	mutex_unlock(&trace_event_mutex);
 
-	WARN_ON_ONCE(next_event_type > FTRACE_MAX_EVENT);
-
 	return ret;
 }
 EXPORT_SYMBOL_GPL(register_ftrace_event);
@@ -551,6 +599,7 @@ int unregister_ftrace_event(struct trace_event *event)
 {
 	mutex_lock(&trace_event_mutex);
 	hlist_del(&event->node);
+	list_del(&event->list);
 	mutex_unlock(&trace_event_mutex);
 
 	return 0;