 include/linux/ftrace.h |  5 +++++
 kernel/trace/trace.c   | 81 +++++++++++++++++++++++++++++++++++++++++++++---
 2 files changed, 84 insertions(+), 2 deletions(-)
diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h
index 794ab907dbfe..7a75fc6d41f4 100644
--- a/include/linux/ftrace.h
+++ b/include/linux/ftrace.h
@@ -216,6 +216,9 @@ static inline void __ftrace_enabled_restore(int enabled)
 #ifdef CONFIG_TRACING
 extern int ftrace_dump_on_oops;
 
+extern void tracing_start(void);
+extern void tracing_stop(void);
+
 extern void
 ftrace_special(unsigned long arg1, unsigned long arg2, unsigned long arg3);
 
@@ -246,6 +249,8 @@ ftrace_special(unsigned long arg1, unsigned long arg2, unsigned long arg3) { }
 static inline int
 ftrace_printk(const char *fmt, ...) __attribute__ ((format (printf, 1, 0)));
 
+static inline void tracing_start(void) { }
+static inline void tracing_stop(void) { }
 static inline int
 ftrace_printk(const char *fmt, ...)
 {
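
The two prototypes are meant to be used as a bracketing pair: a caller stops recording around a region it does not want in the ring buffer and restarts it afterwards. A minimal sketch of that usage, assuming a CONFIG_TRACING kernel; skip_noisy_region() and do_noisy_work() are hypothetical stand-ins for the caller's own code:

	#include <linux/ftrace.h>

	/* Hypothetical caller: keep a noisy region out of the trace buffer. */
	static void skip_noisy_region(void)
	{
		tracing_stop();		/* pause ring-buffer recording */
		do_noisy_work();	/* hypothetical work we don't want traced */
		tracing_start();	/* resume recording (nesting-aware) */
	}
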
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 29ab40a764c8..113aea9447ec 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -43,6 +43,15 @@
 unsigned long __read_mostly tracing_max_latency = (cycle_t)ULONG_MAX;
 unsigned long __read_mostly tracing_thresh;
 
+
+/*
+ * Kill all tracing for good (never come back).
+ * It is initialized to 1 but will turn to zero if the initialization
+ * of the tracer is successful. But that is the only place that sets
+ * this back to zero.
+ */
+int tracing_disabled = 1;
+
 static DEFINE_PER_CPU(local_t, ftrace_cpu_disabled);
 
 static inline void ftrace_disable_cpu(void)
@@ -62,8 +71,6 @@ static cpumask_t __read_mostly tracing_buffer_mask;
 #define for_each_tracing_cpu(cpu)		\
 	for_each_cpu_mask(cpu, tracing_buffer_mask)
 
-static int tracing_disabled = 1;
-
 /*
  * ftrace_dump_on_oops - variable to dump ftrace buffer on oops
  *
@@ -613,6 +620,76 @@ static void trace_init_cmdlines(void)
 	cmdline_idx = 0;
 }
 
+static int trace_stop_count;
+static DEFINE_SPINLOCK(tracing_start_lock);
+
+/**
+ * tracing_start - quick start of the tracer
+ *
+ * If tracing is enabled but was stopped by tracing_stop,
+ * this will start the tracer back up.
+ */
+void tracing_start(void)
+{
+	struct ring_buffer *buffer;
+	unsigned long flags;
+
+	if (tracing_disabled)
+		return;
+
+	spin_lock_irqsave(&tracing_start_lock, flags);
+	if (--trace_stop_count)
+		goto out;
+
+	if (trace_stop_count < 0) {
+		/* Someone screwed up their debugging */
+		WARN_ON_ONCE(1);
+		trace_stop_count = 0;
+		goto out;
+	}
+
+
+	buffer = global_trace.buffer;
+	if (buffer)
+		ring_buffer_record_enable(buffer);
+
+	buffer = max_tr.buffer;
+	if (buffer)
+		ring_buffer_record_enable(buffer);
+
+	ftrace_start();
+ out:
+	spin_unlock_irqrestore(&tracing_start_lock, flags);
+}
+
+/**
+ * tracing_stop - quick stop of the tracer
+ *
+ * Light weight way to stop tracing. Use in conjunction with
+ * tracing_start.
+ */
+void tracing_stop(void)
+{
+	struct ring_buffer *buffer;
+	unsigned long flags;
+
+	ftrace_stop();
+	spin_lock_irqsave(&tracing_start_lock, flags);
+	if (trace_stop_count++)
+		goto out;
+
+	buffer = global_trace.buffer;
+	if (buffer)
+		ring_buffer_record_disable(buffer);
+
+	buffer = max_tr.buffer;
+	if (buffer)
+		ring_buffer_record_disable(buffer);
+
+ out:
+	spin_unlock_irqrestore(&tracing_start_lock, flags);
+}
+
 void trace_stop_cmdline_recording(void);
 
 static void trace_save_cmdline(struct task_struct *tsk)
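
trace_stop_count makes the pair nesting-safe: stops and starts may be interleaved from different debugging paths, and recording only resumes once every tracing_stop() has been balanced by a matching tracing_start(). An illustrative walk-through of the counter (not part of the patch; assumes the count starts at 0):

	tracing_stop();		/* trace_stop_count 0 -> 1: recording disabled   */
	tracing_stop();		/* trace_stop_count 1 -> 2: already disabled     */
	tracing_start();	/* trace_stop_count 2 -> 1: still disabled       */
	tracing_start();	/* trace_stop_count 1 -> 0: recording re-enabled */

Note also that tracing_start() returns immediately once tracing_disabled is set, so a tracer that was killed for good can never be restarted through this path.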