Diffstat (limited to 'kernel/trace/trace.c')

-rw-r--r--  kernel/trace/trace.c | 49 ++++++++++++++++++++++++++++++++++++++++++--------
 1 file changed, 41 insertions(+), 8 deletions(-)
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index ed01fdba4a55..3ec2ee6f6560 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -374,6 +374,21 @@ static int __init set_buf_size(char *str)
 }
 __setup("trace_buf_size=", set_buf_size);
 
+static int __init set_tracing_thresh(char *str)
+{
+	unsigned long threshhold;
+	int ret;
+
+	if (!str)
+		return 0;
+	ret = strict_strtoul(str, 0, &threshhold);
+	if (ret < 0)
+		return 0;
+	tracing_thresh = threshhold * 1000;
+	return 1;
+}
+__setup("tracing_thresh=", set_tracing_thresh);
+
 unsigned long nsecs_to_usecs(unsigned long nsecs)
 {
 	return nsecs / 1000;
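The new set_tracing_thresh() follows the standard __setup() boot-parameter pattern: return 1 to consume the option, 0 to leave it alone. The value is taken in microseconds and scaled to nanoseconds for internal storage, matching nsecs_to_usecs() just below, so booting with tracing_thresh=100 arms a 100 µs threshold. A minimal sketch of the same pattern with illustrative names (my_thresh and set_my_thresh are not part of this patch):

	/* Sketch only: parse "my_thresh=<usecs>" from the kernel command line. */
	static unsigned long my_thresh;

	static int __init set_my_thresh(char *str)
	{
		unsigned long usecs;

		if (!str)
			return 0;
		if (strict_strtoul(str, 0, &usecs) < 0)
			return 0;		/* malformed value: not consumed */
		my_thresh = usecs * 1000;	/* keep nanoseconds internally */
		return 1;			/* option handled */
	}
	__setup("my_thresh=", set_my_thresh);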
@@ -579,9 +594,10 @@ static ssize_t trace_seq_to_buffer(struct trace_seq *s, void *buf, size_t cnt)
 static arch_spinlock_t ftrace_max_lock =
 	(arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
 
+unsigned long __read_mostly	tracing_thresh;
+
 #ifdef CONFIG_TRACER_MAX_TRACE
 unsigned long __read_mostly	tracing_max_latency;
-unsigned long __read_mostly	tracing_thresh;
 
 /*
  * Copy the new maximum trace into the separate maximum-trace
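Hoisting the tracing_thresh definition above the #ifdef makes the symbol exist even when CONFIG_TRACER_MAX_TRACE is disabled; the debugfs hunk at the end of this patch depends on that, since the tracing_thresh file is now created unconditionally.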
@@ -592,7 +608,7 @@ static void
 __update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
 {
 	struct trace_array_cpu *data = tr->data[cpu];
-	struct trace_array_cpu *max_data = tr->data[cpu];
+	struct trace_array_cpu *max_data;
 
 	max_tr.cpu = cpu;
 	max_tr.time_start = data->preempt_timestamp;
@@ -602,7 +618,7 @@ __update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
 	max_data->critical_start = data->critical_start;
 	max_data->critical_end = data->critical_end;
 
-	memcpy(data->comm, tsk->comm, TASK_COMM_LEN);
+	memcpy(max_data->comm, tsk->comm, TASK_COMM_LEN);
 	max_data->pid = tsk->pid;
 	max_data->uid = task_uid(tsk);
 	max_data->nice = tsk->static_prio - 20 - MAX_RT_PRIO;
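These two hunks fix related bugs in __update_max_tr(): max_data was initialized from the live array (tr->data[cpu]) instead of the max-trace snapshot, and the task comm was copied into the live entry rather than the snapshot. A sketch of the corrected flow, assuming the context lines elided by the hunk assign max_data from max_tr.data[cpu]:

	struct trace_array_cpu *data = tr->data[cpu];
	struct trace_array_cpu *max_data;

	max_tr.cpu = cpu;
	max_tr.time_start = data->preempt_timestamp;

	max_data = max_tr.data[cpu];	/* assumed: the snapshot side */
	max_data->critical_start = data->critical_start;
	max_data->critical_end = data->critical_end;

	memcpy(max_data->comm, tsk->comm, TASK_COMM_LEN);	/* into the snapshot */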
@@ -824,10 +840,10 @@ out:
 	mutex_unlock(&trace_types_lock);
 }
 
-static void __tracing_reset(struct trace_array *tr, int cpu)
+static void __tracing_reset(struct ring_buffer *buffer, int cpu)
 {
 	ftrace_disable_cpu();
-	ring_buffer_reset_cpu(tr->buffer, cpu);
+	ring_buffer_reset_cpu(buffer, cpu);
 	ftrace_enable_cpu();
 }
 
@@ -839,7 +855,7 @@ void tracing_reset(struct trace_array *tr, int cpu)
 
 	/* Make sure all commits have finished */
 	synchronize_sched();
-	__tracing_reset(tr, cpu);
+	__tracing_reset(buffer, cpu);
 
 	ring_buffer_record_enable(buffer);
 }
@@ -857,7 +873,7 @@ void tracing_reset_online_cpus(struct trace_array *tr)
 	tr->time_start = ftrace_now(tr->cpu);
 
 	for_each_online_cpu(cpu)
-		__tracing_reset(tr, cpu);
+		__tracing_reset(buffer, cpu);
 
 	ring_buffer_record_enable(buffer);
 }
@@ -934,6 +950,8 @@ void tracing_start(void)
 		goto out;
 	}
 
+	/* Prevent the buffers from switching */
+	arch_spin_lock(&ftrace_max_lock);
 
 	buffer = global_trace.buffer;
 	if (buffer)
@@ -943,6 +961,8 @@ void tracing_start(void)
 	if (buffer)
 		ring_buffer_record_enable(buffer);
 
+	arch_spin_unlock(&ftrace_max_lock);
+
 	ftrace_start();
  out:
 	spin_unlock_irqrestore(&tracing_start_lock, flags);
@@ -964,6 +984,9 @@ void tracing_stop(void)
 	if (trace_stop_count++)
 		goto out;
 
+	/* Prevent the buffers from switching */
+	arch_spin_lock(&ftrace_max_lock);
+
 	buffer = global_trace.buffer;
 	if (buffer)
 		ring_buffer_record_disable(buffer);
@@ -972,6 +995,8 @@ void tracing_stop(void)
 	if (buffer)
 		ring_buffer_record_disable(buffer);
 
+	arch_spin_unlock(&ftrace_max_lock);
+
  out:
 	spin_unlock_irqrestore(&tracing_start_lock, flags);
 }
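The new ftrace_max_lock bracketing in tracing_start()/tracing_stop() exists because the max-latency code swaps the live buffer with the snapshot buffer under that same lock; sampling global_trace.buffer and max_tr.buffer mid-swap could enable or disable one buffer twice and the other not at all. A sketch of the swap being guarded against, assuming update_max_tr() follows the usual pattern (not part of this hunk):

	struct ring_buffer *buf;

	arch_spin_lock(&ftrace_max_lock);

	buf = tr->buffer;
	tr->buffer = max_tr.buffer;	/* the two buffers trade places */
	max_tr.buffer = buf;

	arch_spin_unlock(&ftrace_max_lock);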
@@ -1259,6 +1284,13 @@ ftrace_trace_userstack(struct ring_buffer *buffer, unsigned long flags, int pc)
 	if (!(trace_flags & TRACE_ITER_USERSTACKTRACE))
 		return;
 
+	/*
+	 * NMIs can not handle page faults, even with fix ups.
+	 * The save user stack can (and often does) fault.
+	 */
+	if (unlikely(in_nmi()))
+		return;
+
 	event = trace_buffer_lock_reserve(buffer, TRACE_USER_STACK,
 					  sizeof(*entry), flags, pc);
 	if (!event)
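The bail-out is needed because saving a user stack dereferences user memory, which can and often does fault, and page-fault fixups cannot run from NMI context. in_nmi() (from <linux/hardirq.h>) tests the NMI bits of the preempt count, so this is a cheap early return taken before anything is reserved in the ring buffer.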
@@ -1703,6 +1735,7 @@ static void *s_start(struct seq_file *m, loff_t *pos)
 
 	ftrace_enable_cpu();
 
+	iter->leftover = 0;
 	for (p = iter; p && l < *pos; p = s_next(m, p, &l))
 		;
 
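s_start() is the seq_file ->start callback for the trace file. iter->leftover records a partially consumed entry carried between reads; clearing it before the repositioning loop prevents a stale partial line from a previous read being replayed at the new offset. For orientation, the callback sits in the usual seq_operations table (a sketch consistent with how trace.c wires these up):

	static const struct seq_operations tracer_seq_ops = {
		.start	= s_start,	/* now also clears iter->leftover */
		.next	= s_next,
		.stop	= s_stop,
		.show	= s_show,
	};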
@@ -4248,10 +4281,10 @@ static __init int tracer_init_debugfs(void)
 #ifdef CONFIG_TRACER_MAX_TRACE
 	trace_create_file("tracing_max_latency", 0644, d_tracer,
 			&tracing_max_latency, &tracing_max_lat_fops);
+#endif
 
 	trace_create_file("tracing_thresh", 0644, d_tracer,
 			&tracing_thresh, &tracing_max_lat_fops);
-#endif
 
 	trace_create_file("README", 0444, d_tracer,
 			NULL, &tracing_readme_fops);
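With the #endif moved up, the tracing_thresh file is created whether or not CONFIG_TRACER_MAX_TRACE is set, matching the now-unconditional variable definition. Since it reuses tracing_max_lat_fops, the file reads and writes in microseconds. Assuming debugfs is mounted at /sys/kernel/debug, runtime usage would look like:

	# echo 100 > /sys/kernel/debug/tracing/tracing_thresh
	# cat /sys/kernel/debug/tracing/tracing_thresh
	100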