Diffstat (limited to 'kernel/trace/trace.c')
 kernel/trace/trace.c | 55 ++++++++++++++++++++++++++++++++++++++++++++++-----------
 1 file changed, 44 insertions(+), 11 deletions(-)
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 032c57ca6502..3ec2ee6f6560 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -92,12 +92,12 @@ DEFINE_PER_CPU(int, ftrace_cpu_disabled);
 static inline void ftrace_disable_cpu(void)
 {
 	preempt_disable();
-	__this_cpu_inc(per_cpu_var(ftrace_cpu_disabled));
+	__this_cpu_inc(ftrace_cpu_disabled);
 }
 
 static inline void ftrace_enable_cpu(void)
 {
-	__this_cpu_dec(per_cpu_var(ftrace_cpu_disabled));
+	__this_cpu_dec(ftrace_cpu_disabled);
 	preempt_enable();
 }
 
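The per_cpu_var() wrapper is dropped here because the __this_cpu_*() accessors now take the per-CPU variable name directly. A minimal sketch of the pattern, assuming a kernel with the post-2.6.33 percpu API (my_counter, my_enter, and my_exit are hypothetical names, not from this patch):

#include <linux/percpu.h>
#include <linux/preempt.h>

DEFINE_PER_CPU(int, my_counter);

static inline void my_enter(void)
{
	preempt_disable();		/* pin the task to this CPU */
	__this_cpu_inc(my_counter);	/* no per_cpu_var() wrapper needed */
}

static inline void my_exit(void)
{
	__this_cpu_dec(my_counter);
	preempt_enable();
}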
@@ -374,6 +374,21 @@ static int __init set_buf_size(char *str)
 }
 __setup("trace_buf_size=", set_buf_size);
 
+static int __init set_tracing_thresh(char *str)
+{
+	unsigned long threshhold;
+	int ret;
+
+	if (!str)
+		return 0;
+	ret = strict_strtoul(str, 0, &threshhold);
+	if (ret < 0)
+		return 0;
+	tracing_thresh = threshhold * 1000;
+	return 1;
+}
+__setup("tracing_thresh=", set_tracing_thresh);
+
 unsigned long nsecs_to_usecs(unsigned long nsecs)
 {
 	return nsecs / 1000;
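The new boot parameter (e.g. tracing_thresh=100 on the kernel command line) takes the threshold in microseconds and stores it in nanoseconds, hence the multiply by 1000. The same value is exposed at run time through the tracing_thresh debugfs file, also in microseconds. A sketch of setting it from user space, assuming debugfs is mounted at the usual /sys/kernel/debug:

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	/* hypothetical example: only record latencies above 100 usecs */
	int fd = open("/sys/kernel/debug/tracing/tracing_thresh", O_WRONLY);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	if (write(fd, "100", 3) < 0)	/* value is in microseconds */
		perror("write");
	close(fd);
	return 0;
}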
@@ -579,9 +594,10 @@ static ssize_t trace_seq_to_buffer(struct trace_seq *s, void *buf, size_t cnt)
 static arch_spinlock_t ftrace_max_lock =
 	(arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
 
+unsigned long __read_mostly tracing_thresh;
+
 #ifdef CONFIG_TRACER_MAX_TRACE
 unsigned long __read_mostly tracing_max_latency;
-unsigned long __read_mostly tracing_thresh;
 
 /*
  * Copy the new maximum trace into the separate maximum-trace
@@ -592,7 +608,7 @@ static void
 __update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
 {
 	struct trace_array_cpu *data = tr->data[cpu];
-	struct trace_array_cpu *max_data = tr->data[cpu];
+	struct trace_array_cpu *max_data;
 
 	max_tr.cpu = cpu;
 	max_tr.time_start = data->preempt_timestamp;
@@ -602,7 +618,7 @@ __update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
 	max_data->critical_start = data->critical_start;
 	max_data->critical_end = data->critical_end;
 
-	memcpy(data->comm, tsk->comm, TASK_COMM_LEN);
+	memcpy(max_data->comm, tsk->comm, TASK_COMM_LEN);
 	max_data->pid = tsk->pid;
 	max_data->uid = task_uid(tsk);
 	max_data->nice = tsk->static_prio - 20 - MAX_RT_PRIO;
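Two related copy-direction bugs are fixed in __update_max_tr(): max_data previously aliased the live tr->data[cpu] instead of the max_tr snapshot, and the task comm was memcpy'd back into the live data rather than into max_data. A stripped-down sketch of the intended direction, with a hypothetical struct standing in for the trace structures:

#include <string.h>

#define TASK_COMM_LEN 16

struct cpu_data {
	char comm[TASK_COMM_LEN];
	int pid;
};

/*
 * Record the task identity into the snapshot (max_data), leaving the
 * live per-CPU data untouched -- the buggy version wrote into 'data'.
 */
static void record_into_snapshot(struct cpu_data *max_data,
				 const char *comm, int pid)
{
	memcpy(max_data->comm, comm, TASK_COMM_LEN);
	max_data->pid = pid;
}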
@@ -824,10 +840,10 @@ out:
 	mutex_unlock(&trace_types_lock);
 }
 
-static void __tracing_reset(struct trace_array *tr, int cpu)
+static void __tracing_reset(struct ring_buffer *buffer, int cpu)
 {
 	ftrace_disable_cpu();
-	ring_buffer_reset_cpu(tr->buffer, cpu);
+	ring_buffer_reset_cpu(buffer, cpu);
 	ftrace_enable_cpu();
 }
 
@@ -839,7 +855,7 @@ void tracing_reset(struct trace_array *tr, int cpu)
 
 	/* Make sure all commits have finished */
 	synchronize_sched();
-	__tracing_reset(tr, cpu);
+	__tracing_reset(buffer, cpu);
 
 	ring_buffer_record_enable(buffer);
 }
@@ -857,7 +873,7 @@ void tracing_reset_online_cpus(struct trace_array *tr)
 	tr->time_start = ftrace_now(tr->cpu);
 
 	for_each_online_cpu(cpu)
-		__tracing_reset(tr, cpu);
+		__tracing_reset(buffer, cpu);
 
 	ring_buffer_record_enable(buffer);
 }
@@ -934,6 +950,8 @@ void tracing_start(void)
 		goto out;
 	}
 
+	/* Prevent the buffers from switching */
+	arch_spin_lock(&ftrace_max_lock);
 
 	buffer = global_trace.buffer;
 	if (buffer)
@@ -943,6 +961,8 @@ void tracing_start(void)
 	if (buffer)
 		ring_buffer_record_enable(buffer);
 
+	arch_spin_unlock(&ftrace_max_lock);
+
 	ftrace_start();
  out:
 	spin_unlock_irqrestore(&tracing_start_lock, flags);
@@ -964,6 +984,9 @@ void tracing_stop(void)
 	if (trace_stop_count++)
 		goto out;
 
+	/* Prevent the buffers from switching */
+	arch_spin_lock(&ftrace_max_lock);
+
 	buffer = global_trace.buffer;
 	if (buffer)
 		ring_buffer_record_disable(buffer);
@@ -972,6 +995,8 @@ void tracing_stop(void)
 	if (buffer)
 		ring_buffer_record_disable(buffer);
 
+	arch_spin_unlock(&ftrace_max_lock);
+
  out:
 	spin_unlock_irqrestore(&tracing_start_lock, flags);
 }
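ftrace_max_lock is an arch_spinlock_t, the raw lock flavor that does no preemption or IRQ bookkeeping of its own, so it may only be taken with interrupts already off; in tracing_start()/tracing_stop() the surrounding spin_lock_irqsave(&tracing_start_lock, ...) provides that. Holding it across the record enable/disable keeps update_max_tr() from swapping buffers underneath. A minimal sketch of the general pattern, with hypothetical names (snap_lock, swap_snapshot):

#include <linux/irqflags.h>
#include <linux/spinlock.h>

static arch_spinlock_t snap_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;

static void swap_snapshot(void)
{
	unsigned long flags;

	local_irq_save(flags);		/* arch locks do not disable IRQs */
	arch_spin_lock(&snap_lock);

	/* ... swap buffers here; nothing that can sleep ... */

	arch_spin_unlock(&snap_lock);
	local_irq_restore(flags);
}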
@@ -1166,7 +1191,7 @@ trace_function(struct trace_array *tr,
 	struct ftrace_entry *entry;
 
 	/* If we are reading the ring buffer, don't trace */
-	if (unlikely(__this_cpu_read(per_cpu_var(ftrace_cpu_disabled))))
+	if (unlikely(__this_cpu_read(ftrace_cpu_disabled)))
 		return;
 
 	event = trace_buffer_lock_reserve(buffer, TRACE_FN, sizeof(*entry),
@@ -1259,6 +1284,13 @@ ftrace_trace_userstack(struct ring_buffer *buffer, unsigned long flags, int pc)
 	if (!(trace_flags & TRACE_ITER_USERSTACKTRACE))
 		return;
 
+	/*
+	 * NMIs can not handle page faults, even with fix ups.
+	 * The save user stack can (and often does) fault.
+	 */
+	if (unlikely(in_nmi()))
+		return;
+
 	event = trace_buffer_lock_reserve(buffer, TRACE_USER_STACK,
 					  sizeof(*entry), flags, pc);
 	if (!event)
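Saving a user stack means dereferencing user memory, which can fault; a fault taken inside an NMI handler cannot be serviced, so the user-stack trace is simply skipped there. Outside NMI context the usual idiom is to copy without sleeping and tolerate failure. A sketch of that idiom (read_user_word is a hypothetical helper, not part of this patch), assuming an atomic caller:

#include <linux/errno.h>
#include <linux/uaccess.h>

/* hypothetical helper: best-effort copy from user space in atomic context */
static int read_user_word(unsigned long __user *uaddr, unsigned long *val)
{
	int ret;

	pagefault_disable();	/* a fault now fails fast instead of sleeping */
	ret = __copy_from_user_inatomic(val, uaddr, sizeof(*val));
	pagefault_enable();

	return ret ? -EFAULT : 0;
}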
@@ -1703,6 +1735,7 @@ static void *s_start(struct seq_file *m, loff_t *pos)
 
 	ftrace_enable_cpu();
 
+	iter->leftover = 0;
 	for (p = iter; p && l < *pos; p = s_next(m, p, &l))
 		;
 
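s_start() is the seq_file start callback, which runs at the beginning of every read() chunk, so per-read iterator state must be reset there; iter->leftover tracks a partially printed entry, and clearing it keeps a fresh reader from resuming mid-line. For reference, the shape of the seq_file iterator contract, as a self-contained sketch with hypothetical my_* callbacks:

#include <linux/seq_file.h>

static int my_items[4] = { 1, 2, 3, 4 };	/* hypothetical data */

static void *my_start(struct seq_file *m, loff_t *pos)
{
	/* called at the start of every read() chunk: reset any per-read
	 * state here, as s_start() now does with iter->leftover */
	return *pos < 4 ? &my_items[*pos] : NULL;
}

static void *my_next(struct seq_file *m, void *v, loff_t *pos)
{
	++*pos;
	return *pos < 4 ? &my_items[*pos] : NULL;
}

static void my_stop(struct seq_file *m, void *v)
{
}

static int my_show(struct seq_file *m, void *v)
{
	seq_printf(m, "%d\n", *(int *)v);
	return 0;
}

static const struct seq_operations my_seq_ops = {
	.start = my_start,
	.next  = my_next,
	.stop  = my_stop,
	.show  = my_show,
};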
@@ -4248,10 +4281,10 @@ static __init int tracer_init_debugfs(void)
 #ifdef CONFIG_TRACER_MAX_TRACE
 	trace_create_file("tracing_max_latency", 0644, d_tracer,
 			&tracing_max_latency, &tracing_max_lat_fops);
+#endif
 
 	trace_create_file("tracing_thresh", 0644, d_tracer,
 			&tracing_thresh, &tracing_max_lat_fops);
-#endif
 
 	trace_create_file("README", 0444, d_tracer,
 			NULL, &tracing_readme_fops);