author		Steven Rostedt <srostedt@redhat.com>	2008-11-05 16:05:44 -0500
committer	Ingo Molnar <mingo@elte.hu>	2008-11-06 01:50:57 -0500
commit		0f04870148ecb825133bc2733f473b1c5773ac0b
tree		81399b7b9535d11ac6120366fc01fd051118226a	/kernel/trace/trace.c
parent		60a7ecf42661f2b22168751298592da6ee210c9e
ftrace: soft tracing stop and start
Impact: add a way to quickly start/stop tracing from the kernel
This patch adds a soft stop and start to tracing. This simply
disables function tracing via the ftrace_disabled flag and
disables the trace buffers to prevent recording. The tracing
code may still be executed, but the trace will not be recorded.
Signed-off-by: Steven Rostedt <srostedt@redhat.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
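Not part of the patch, but as a usage sketch: because trace_stop_count makes the stop/start pair nest, kernel code can bracket a section it does not want recorded without clobbering an outer tracing_stop(). A minimal example, assuming the tracing_start()/tracing_stop() prototypes are visible to the caller and with my_noisy_section() standing in as a hypothetical placeholder for the caller's own code:

/* Sketch only: bracket a region so it is executed but not recorded. */
static void example_skip_tracing(void)
{
	tracing_stop();		/* disable recording; nests via trace_stop_count */

	my_noisy_section();	/* hypothetical helper; runs as usual, not traced */

	tracing_start();	/* recording resumes once the stop count drops back to 0 */
}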
Diffstat (limited to 'kernel/trace/trace.c')
-rw-r--r--	kernel/trace/trace.c	81
1 file changed, 79 insertions(+), 2 deletions(-)
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 29ab40a764c8..113aea9447ec 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -43,6 +43,15 @@
 unsigned long __read_mostly	tracing_max_latency = (cycle_t)ULONG_MAX;
 unsigned long __read_mostly	tracing_thresh;
 
+
+/*
+ * Kill all tracing for good (never come back).
+ * It is initialized to 1 but will turn to zero if the initialization
+ * of the tracer is successful. But that is the only place that sets
+ * this back to zero.
+ */
+int tracing_disabled = 1;
+
 static DEFINE_PER_CPU(local_t, ftrace_cpu_disabled);
 
 static inline void ftrace_disable_cpu(void)
@@ -62,8 +71,6 @@ static cpumask_t __read_mostly tracing_buffer_mask;
 #define for_each_tracing_cpu(cpu)	\
 	for_each_cpu_mask(cpu, tracing_buffer_mask)
 
-static int tracing_disabled = 1;
-
 /*
  * ftrace_dump_on_oops - variable to dump ftrace buffer on oops
  *
@@ -613,6 +620,76 @@ static void trace_init_cmdlines(void)
 	cmdline_idx = 0;
 }
 
+static int trace_stop_count;
+static DEFINE_SPINLOCK(tracing_start_lock);
+
+/**
+ * tracing_start - quick start of the tracer
+ *
+ * If tracing is enabled but was stopped by tracing_stop,
+ * this will start the tracer back up.
+ */
+void tracing_start(void)
+{
+	struct ring_buffer *buffer;
+	unsigned long flags;
+
+	if (tracing_disabled)
+		return;
+
+	spin_lock_irqsave(&tracing_start_lock, flags);
+	if (--trace_stop_count)
+		goto out;
+
+	if (trace_stop_count < 0) {
+		/* Someone screwed up their debugging */
+		WARN_ON_ONCE(1);
+		trace_stop_count = 0;
+		goto out;
+	}
+
+
+	buffer = global_trace.buffer;
+	if (buffer)
+		ring_buffer_record_enable(buffer);
+
+	buffer = max_tr.buffer;
+	if (buffer)
+		ring_buffer_record_enable(buffer);
+
+	ftrace_start();
+ out:
+	spin_unlock_irqrestore(&tracing_start_lock, flags);
+}
+
+/**
+ * tracing_stop - quick stop of the tracer
+ *
+ * Light weight way to stop tracing. Use in conjunction with
+ * tracing_start.
+ */
+void tracing_stop(void)
+{
+	struct ring_buffer *buffer;
+	unsigned long flags;
+
+	ftrace_stop();
+	spin_lock_irqsave(&tracing_start_lock, flags);
+	if (trace_stop_count++)
+		goto out;
+
+	buffer = global_trace.buffer;
+	if (buffer)
+		ring_buffer_record_disable(buffer);
+
+	buffer = max_tr.buffer;
+	if (buffer)
+		ring_buffer_record_disable(buffer);
+
+ out:
+	spin_unlock_irqrestore(&tracing_start_lock, flags);
+}
+
 void trace_stop_cmdline_recording(void);
 
 static void trace_save_cmdline(struct task_struct *tsk)