diff options
author | Steven Rostedt (Red Hat) <rostedt@goodmis.org> | 2014-01-10 17:01:58 -0500 |
---|---|---|
committer | Steven Rostedt <rostedt@goodmis.org> | 2014-04-21 13:59:25 -0400 |
commit | 4104d326b670c2b66f575d2004daa28b2d1b4c8d (patch) | |
tree | 9eb7a3084d9bc9d7d5b6eccb0d17d37481020c61 /kernel/trace/trace_irqsoff.c | |
parent | a798c10faf62a505d24e5f6213fbaf904a39623f (diff) |
ftrace: Remove global function list and call function directly
Instead of having a list of global functions that are called,
as only one global function is allowed to be enabled at a time, there's
no reason to have a list.
Instead, simply have all the users of the global ops, use the global ops
directly, instead of registering their own ftrace_ops. Just switch what
function is used before enabling the function tracer.
This removes a lot of code as well as the complexity involved with it.
Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
Diffstat (limited to 'kernel/trace/trace_irqsoff.c')
-rw-r--r-- | kernel/trace/trace_irqsoff.c | 33 |
1 file changed, 16 insertions, 17 deletions
diff --git a/kernel/trace/trace_irqsoff.c b/kernel/trace/trace_irqsoff.c index 8ff02cbb892f..b5cb047df3e9 100644 --- a/kernel/trace/trace_irqsoff.c +++ b/kernel/trace/trace_irqsoff.c | |||
@@ -151,12 +151,6 @@ irqsoff_tracer_call(unsigned long ip, unsigned long parent_ip, | |||
151 | 151 | ||
152 | atomic_dec(&data->disabled); | 152 | atomic_dec(&data->disabled); |
153 | } | 153 | } |
154 | |||
155 | static struct ftrace_ops trace_ops __read_mostly = | ||
156 | { | ||
157 | .func = irqsoff_tracer_call, | ||
158 | .flags = FTRACE_OPS_FL_GLOBAL | FTRACE_OPS_FL_RECURSION_SAFE, | ||
159 | }; | ||
160 | #endif /* CONFIG_FUNCTION_TRACER */ | 154 | #endif /* CONFIG_FUNCTION_TRACER */ |
161 | 155 | ||
162 | #ifdef CONFIG_FUNCTION_GRAPH_TRACER | 156 | #ifdef CONFIG_FUNCTION_GRAPH_TRACER |
@@ -531,7 +525,7 @@ void trace_preempt_off(unsigned long a0, unsigned long a1) | |||
531 | } | 525 | } |
532 | #endif /* CONFIG_PREEMPT_TRACER */ | 526 | #endif /* CONFIG_PREEMPT_TRACER */ |
533 | 527 | ||
534 | static int register_irqsoff_function(int graph, int set) | 528 | static int register_irqsoff_function(struct trace_array *tr, int graph, int set) |
535 | { | 529 | { |
536 | int ret; | 530 | int ret; |
537 | 531 | ||
@@ -543,7 +537,7 @@ static int register_irqsoff_function(int graph, int set) | |||
543 | ret = register_ftrace_graph(&irqsoff_graph_return, | 537 | ret = register_ftrace_graph(&irqsoff_graph_return, |
544 | &irqsoff_graph_entry); | 538 | &irqsoff_graph_entry); |
545 | else | 539 | else |
546 | ret = register_ftrace_function(&trace_ops); | 540 | ret = register_ftrace_function(tr->ops); |
547 | 541 | ||
548 | if (!ret) | 542 | if (!ret) |
549 | function_enabled = true; | 543 | function_enabled = true; |
@@ -551,7 +545,7 @@ static int register_irqsoff_function(int graph, int set) | |||
551 | return ret; | 545 | return ret; |
552 | } | 546 | } |
553 | 547 | ||
554 | static void unregister_irqsoff_function(int graph) | 548 | static void unregister_irqsoff_function(struct trace_array *tr, int graph) |
555 | { | 549 | { |
556 | if (!function_enabled) | 550 | if (!function_enabled) |
557 | return; | 551 | return; |
@@ -559,17 +553,17 @@ static void unregister_irqsoff_function(int graph) | |||
559 | if (graph) | 553 | if (graph) |
560 | unregister_ftrace_graph(); | 554 | unregister_ftrace_graph(); |
561 | else | 555 | else |
562 | unregister_ftrace_function(&trace_ops); | 556 | unregister_ftrace_function(tr->ops); |
563 | 557 | ||
564 | function_enabled = false; | 558 | function_enabled = false; |
565 | } | 559 | } |
566 | 560 | ||
567 | static void irqsoff_function_set(int set) | 561 | static void irqsoff_function_set(struct trace_array *tr, int set) |
568 | { | 562 | { |
569 | if (set) | 563 | if (set) |
570 | register_irqsoff_function(is_graph(), 1); | 564 | register_irqsoff_function(tr, is_graph(), 1); |
571 | else | 565 | else |
572 | unregister_irqsoff_function(is_graph()); | 566 | unregister_irqsoff_function(tr, is_graph()); |
573 | } | 567 | } |
574 | 568 | ||
575 | static int irqsoff_flag_changed(struct trace_array *tr, u32 mask, int set) | 569 | static int irqsoff_flag_changed(struct trace_array *tr, u32 mask, int set) |
@@ -577,7 +571,7 @@ static int irqsoff_flag_changed(struct trace_array *tr, u32 mask, int set) | |||
577 | struct tracer *tracer = tr->current_trace; | 571 | struct tracer *tracer = tr->current_trace; |
578 | 572 | ||
579 | if (mask & TRACE_ITER_FUNCTION) | 573 | if (mask & TRACE_ITER_FUNCTION) |
580 | irqsoff_function_set(set); | 574 | irqsoff_function_set(tr, set); |
581 | 575 | ||
582 | return trace_keep_overwrite(tracer, mask, set); | 576 | return trace_keep_overwrite(tracer, mask, set); |
583 | } | 577 | } |
@@ -586,7 +580,7 @@ static int start_irqsoff_tracer(struct trace_array *tr, int graph) | |||
586 | { | 580 | { |
587 | int ret; | 581 | int ret; |
588 | 582 | ||
589 | ret = register_irqsoff_function(graph, 0); | 583 | ret = register_irqsoff_function(tr, graph, 0); |
590 | 584 | ||
591 | if (!ret && tracing_is_enabled()) | 585 | if (!ret && tracing_is_enabled()) |
592 | tracer_enabled = 1; | 586 | tracer_enabled = 1; |
@@ -600,7 +594,7 @@ static void stop_irqsoff_tracer(struct trace_array *tr, int graph) | |||
600 | { | 594 | { |
601 | tracer_enabled = 0; | 595 | tracer_enabled = 0; |
602 | 596 | ||
603 | unregister_irqsoff_function(graph); | 597 | unregister_irqsoff_function(tr, graph); |
604 | } | 598 | } |
605 | 599 | ||
606 | static void __irqsoff_tracer_init(struct trace_array *tr) | 600 | static void __irqsoff_tracer_init(struct trace_array *tr) |
@@ -617,7 +611,11 @@ static void __irqsoff_tracer_init(struct trace_array *tr) | |||
617 | smp_wmb(); | 611 | smp_wmb(); |
618 | tracing_reset_online_cpus(&tr->trace_buffer); | 612 | tracing_reset_online_cpus(&tr->trace_buffer); |
619 | 613 | ||
620 | if (start_irqsoff_tracer(tr, is_graph())) | 614 | ftrace_init_array_ops(tr, irqsoff_tracer_call); |
615 | |||
616 | /* Only toplevel instance supports graph tracing */ | ||
617 | if (start_irqsoff_tracer(tr, (tr->flags & TRACE_ARRAY_FL_GLOBAL && | ||
618 | is_graph()))) | ||
621 | printk(KERN_ERR "failed to start irqsoff tracer\n"); | 619 | printk(KERN_ERR "failed to start irqsoff tracer\n"); |
622 | } | 620 | } |
623 | 621 | ||
@@ -630,6 +628,7 @@ static void irqsoff_tracer_reset(struct trace_array *tr) | |||
630 | 628 | ||
631 | set_tracer_flag(tr, TRACE_ITER_LATENCY_FMT, lat_flag); | 629 | set_tracer_flag(tr, TRACE_ITER_LATENCY_FMT, lat_flag); |
632 | set_tracer_flag(tr, TRACE_ITER_OVERWRITE, overwrite_flag); | 630 | set_tracer_flag(tr, TRACE_ITER_OVERWRITE, overwrite_flag); |
631 | ftrace_reset_array_ops(tr); | ||
633 | } | 632 | } |
634 | 633 | ||
635 | static void irqsoff_tracer_start(struct trace_array *tr) | 634 | static void irqsoff_tracer_start(struct trace_array *tr) |