Diffstat (limited to 'kernel/trace/trace.c')

 kernel/trace/trace.c | 25 +++++++++++++++++++++----
 1 file changed, 21 insertions(+), 4 deletions(-)
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 4af5b218f953..9b66ee14b39f 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -752,10 +752,10 @@ out:
 	mutex_unlock(&trace_types_lock);
 }
 
-static void __tracing_reset(struct trace_array *tr, int cpu)
+static void __tracing_reset(struct ring_buffer *buffer, int cpu)
 {
 	ftrace_disable_cpu();
-	ring_buffer_reset_cpu(tr->buffer, cpu);
+	ring_buffer_reset_cpu(buffer, cpu);
 	ftrace_enable_cpu();
 }
 
@@ -767,7 +767,7 @@ void tracing_reset(struct trace_array *tr, int cpu)
 
 	/* Make sure all commits have finished */
 	synchronize_sched();
-	__tracing_reset(tr, cpu);
+	__tracing_reset(buffer, cpu);
 
 	ring_buffer_record_enable(buffer);
 }
@@ -785,7 +785,7 @@ void tracing_reset_online_cpus(struct trace_array *tr)
 	tr->time_start = ftrace_now(tr->cpu);
 
 	for_each_online_cpu(cpu)
-		__tracing_reset(tr, cpu);
+		__tracing_reset(buffer, cpu);
 
 	ring_buffer_record_enable(buffer);
 }
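Taken together, the three hunks above make the reset path operate on a ring buffer that is resolved exactly once. tracing_reset() disables recording through a local buffer pointer before waiting for in-flight commits; if the helper then re-read tr->buffer, a concurrent max-latency buffer swap could hand it the other buffer, resetting one that was never disabled. Passing the local pointer through closes that window. Reconstructed from the hunk context (the signature and the enable are visible above; the matching disable at the top is an inference), the resulting function plausibly reads:

	void tracing_reset(struct trace_array *tr, int cpu)
	{
		struct ring_buffer *buffer = tr->buffer;	/* resolved once */

		ring_buffer_record_disable(buffer);

		/* Make sure all commits have finished */
		synchronize_sched();
		__tracing_reset(buffer, cpu);

		ring_buffer_record_enable(buffer);
	}

The synchronize_sched() works because writers commit to the ring buffer under preemption-disabled sections: once a scheduling grace period has elapsed, no commit that began before the disable can still be in flight when the buffer is reset.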
@@ -862,6 +862,8 @@ void tracing_start(void)
 		goto out;
 	}
 
+	/* Prevent the buffers from switching */
+	arch_spin_lock(&ftrace_max_lock);
 
 	buffer = global_trace.buffer;
 	if (buffer)
@@ -871,6 +873,8 @@ void tracing_start(void)
 	if (buffer)
 		ring_buffer_record_enable(buffer);
 
+	arch_spin_unlock(&ftrace_max_lock);
+
 	ftrace_start();
  out:
 	raw_spin_unlock_irqrestore(&tracing_start_lock, flags);
@@ -892,6 +896,9 @@ void tracing_stop(void)
 	if (trace_stop_count++)
 		goto out;
 
+	/* Prevent the buffers from switching */
+	arch_spin_lock(&ftrace_max_lock);
+
 	buffer = global_trace.buffer;
 	if (buffer)
 		ring_buffer_record_disable(buffer);
@@ -900,6 +907,8 @@ void tracing_stop(void)
 	if (buffer)
 		ring_buffer_record_disable(buffer);
 
+	arch_spin_unlock(&ftrace_max_lock);
+
  out:
 	raw_spin_unlock_irqrestore(&tracing_start_lock, flags);
 }
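The four tracing_start()/tracing_stop() hunks bracket the enable (or disable) of the two buffers with ftrace_max_lock, the lock the latency tracers hold while exchanging global_trace.buffer with max_tr.buffer. Without the bracket, a swap could slip in between the two calls, so the same underlying buffer would be enabled (or disabled) twice while the other was missed entirely. A condensed sketch of the swap being excluded, modeled on update_max_tr() in this file (shown for illustration; not part of this diff):

	void update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
	{
		struct ring_buffer *buf = tr->buffer;

		arch_spin_lock(&ftrace_max_lock);

		/* The live buffer becomes the max snapshot and vice versa.
		 * tracing_start()/tracing_stop() must never observe this
		 * swap half-done, hence the shared lock. */
		tr->buffer = max_tr.buffer;
		max_tr.buffer = buf;

		__update_max_tr(tr, tsk, cpu);

		arch_spin_unlock(&ftrace_max_lock);
	}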
@@ -1187,6 +1196,13 @@ ftrace_trace_userstack(struct ring_buffer *buffer, unsigned long flags, int pc)
 	if (!(trace_flags & TRACE_ITER_USERSTACKTRACE))
 		return;
 
+	/*
+	 * NMIs can not handle page faults, even with fix ups.
+	 * The save user stack can (and often does) fault.
+	 */
+	if (unlikely(in_nmi()))
+		return;
+
 	event = trace_buffer_lock_reserve(buffer, TRACE_USER_STACK,
 					  sizeof(*entry), flags, pc);
 	if (!event)
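The in_nmi() check returns before any ring-buffer reservation because capturing a user stack means reading user memory, and those reads can fault: the pages holding the user's stack may simply not be resident. An exception fixup makes such a fault survivable in ordinary contexts, but a fault taken inside an NMI handler on the x86 of this era can corrupt the NMI's return path, which is what the added comment alludes to. A condensed sketch of the fault-prone read at the bottom of the user-stack walker, modeled on the x86 copy_stack_frame() helper of this period (details reconstructed for illustration):

	struct stack_frame {
		const void __user	*next_fp;	/* caller's frame pointer */
		unsigned long		ret_addr;	/* return address */
	};

	static int copy_stack_frame(const void __user *fp, struct stack_frame *frame)
	{
		int ret;

		if (!access_ok(VERIFY_READ, fp, sizeof(*frame)))
			return 0;

		ret = 1;
		pagefault_disable();
		/* The user page may be swapped out: this copy can raise a
		 * page fault even though a fixup then catches it. */
		if (__copy_from_user_inatomic(frame, fp, sizeof(*frame)))
			ret = 0;
		pagefault_enable();

		return ret;
	}

pagefault_disable() only routes a fault to the fixup instead of the full handler; the exception itself still fires, so the safe option in NMI context is to skip the capture altogether.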
@@ -1633,6 +1649,7 @@ static void *s_start(struct seq_file *m, loff_t *pos)
 
 	ftrace_enable_cpu();
 
+	iter->leftover = 0;
 	for (p = iter; p && l < *pos; p = s_next(m, p, &l))
 		;
 
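iter->leftover is how this file's seq_file glue remembers that the last printed line overflowed the output buffer and must be re-emitted on the next read. The hunk clears it in the rewind branch of s_start(): when *pos no longer matches the iterator's position, the iterator is reset to the head of the buffer and walked forward again, so a leftover carried over from a previous read session would make s_show() replay a stale half-line that no longer matches where the iterator now stands. A condensed sketch of how s_show() consumes the flag, modeled on the surrounding file (illustrative, not part of this diff):

	static int s_show(struct seq_file *m, void *v)
	{
		struct trace_iterator *iter = v;

		if (iter->leftover) {
			/* The previous line overflowed the seq_file buffer:
			 * re-emit what still sits in iter->seq. */
			iter->leftover = trace_print_seq(m, &iter->seq);
		} else {
			print_trace_line(iter);
			iter->leftover = trace_print_seq(m, &iter->seq);
		}

		return 0;
	}

The non-rewind branch of s_start() still honors the flag, so a line that overflows within a single read session is replayed as before; only a leftover crossing a rewind is discarded.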