Diffstat (limited to 'kernel/trace/trace.c')
-rw-r--r-- | kernel/trace/trace.c | 514 |
1 file changed, 307 insertions, 207 deletions
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index b20d3ec75de9..44f916a04065 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -12,7 +12,7 @@ | |||
12 | * Copyright (C) 2004 William Lee Irwin III | 12 | * Copyright (C) 2004 William Lee Irwin III |
13 | */ | 13 | */ |
14 | #include <linux/ring_buffer.h> | 14 | #include <linux/ring_buffer.h> |
15 | #include <linux/utsrelease.h> | 15 | #include <generated/utsrelease.h> |
16 | #include <linux/stacktrace.h> | 16 | #include <linux/stacktrace.h> |
17 | #include <linux/writeback.h> | 17 | #include <linux/writeback.h> |
18 | #include <linux/kallsyms.h> | 18 | #include <linux/kallsyms.h> |
@@ -32,10 +32,11 @@ | |||
32 | #include <linux/splice.h> | 32 | #include <linux/splice.h> |
33 | #include <linux/kdebug.h> | 33 | #include <linux/kdebug.h> |
34 | #include <linux/string.h> | 34 | #include <linux/string.h> |
35 | #include <linux/rwsem.h> | ||
36 | #include <linux/slab.h> | ||
35 | #include <linux/ctype.h> | 37 | #include <linux/ctype.h> |
36 | #include <linux/init.h> | 38 | #include <linux/init.h> |
37 | #include <linux/poll.h> | 39 | #include <linux/poll.h> |
38 | #include <linux/gfp.h> | ||
39 | #include <linux/fs.h> | 40 | #include <linux/fs.h> |
40 | 41 | ||
41 | #include "trace.h" | 42 | #include "trace.h" |
@@ -86,25 +87,22 @@ static int dummy_set_flag(u32 old_flags, u32 bit, int set) | |||
86 | */ | 87 | */ |
87 | static int tracing_disabled = 1; | 88 | static int tracing_disabled = 1; |
88 | 89 | ||
89 | DEFINE_PER_CPU(local_t, ftrace_cpu_disabled); | 90 | DEFINE_PER_CPU(int, ftrace_cpu_disabled); |
90 | 91 | ||
91 | static inline void ftrace_disable_cpu(void) | 92 | static inline void ftrace_disable_cpu(void) |
92 | { | 93 | { |
93 | preempt_disable(); | 94 | preempt_disable(); |
94 | local_inc(&__get_cpu_var(ftrace_cpu_disabled)); | 95 | __this_cpu_inc(ftrace_cpu_disabled); |
95 | } | 96 | } |
96 | 97 | ||
97 | static inline void ftrace_enable_cpu(void) | 98 | static inline void ftrace_enable_cpu(void) |
98 | { | 99 | { |
99 | local_dec(&__get_cpu_var(ftrace_cpu_disabled)); | 100 | __this_cpu_dec(ftrace_cpu_disabled); |
100 | preempt_enable(); | 101 | preempt_enable(); |
101 | } | 102 | } |
102 | 103 | ||
103 | static cpumask_var_t __read_mostly tracing_buffer_mask; | 104 | static cpumask_var_t __read_mostly tracing_buffer_mask; |
104 | 105 | ||
105 | /* Define which cpu buffers are currently read in trace_pipe */ | ||
106 | static cpumask_var_t tracing_reader_cpumask; | ||
107 | |||
108 | #define for_each_tracing_cpu(cpu) \ | 106 | #define for_each_tracing_cpu(cpu) \ |
109 | for_each_cpu(cpu, tracing_buffer_mask) | 107 | for_each_cpu(cpu, tracing_buffer_mask) |
110 | 108 | ||
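
ftrace_cpu_disabled changes from a local_t to a plain per-cpu int updated with __this_cpu_inc()/__this_cpu_dec(). Because preempt_disable() already pins the caller to one CPU, the non-atomic per-cpu operations are sufficient and cheaper than local_t arithmetic. A minimal sketch of the same pattern, with made-up names (illustrative only, not part of the patch):

/* Per-cpu "disable" counter, safe to update non-atomically with preemption off. */
static DEFINE_PER_CPU(int, demo_cpu_disabled);

static void demo_disable_cpu(void)
{
	preempt_disable();
	__this_cpu_inc(demo_cpu_disabled);	/* this CPU only, no atomics needed */
}

static void demo_enable_cpu(void)
{
	__this_cpu_dec(demo_cpu_disabled);
	preempt_enable();
}

static bool demo_cpu_is_disabled(void)
{
	return __this_cpu_read(demo_cpu_disabled) != 0;
}
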
@@ -129,7 +127,7 @@ static int tracing_set_tracer(const char *buf); | |||
129 | static char bootup_tracer_buf[MAX_TRACER_SIZE] __initdata; | 127 | static char bootup_tracer_buf[MAX_TRACER_SIZE] __initdata; |
130 | static char *default_bootup_tracer; | 128 | static char *default_bootup_tracer; |
131 | 129 | ||
132 | static int __init set_ftrace(char *str) | 130 | static int __init set_cmdline_ftrace(char *str) |
133 | { | 131 | { |
134 | strncpy(bootup_tracer_buf, str, MAX_TRACER_SIZE); | 132 | strncpy(bootup_tracer_buf, str, MAX_TRACER_SIZE); |
135 | default_bootup_tracer = bootup_tracer_buf; | 133 | default_bootup_tracer = bootup_tracer_buf; |
@@ -137,7 +135,7 @@ static int __init set_ftrace(char *str) | |||
137 | ring_buffer_expanded = 1; | 135 | ring_buffer_expanded = 1; |
138 | return 1; | 136 | return 1; |
139 | } | 137 | } |
140 | __setup("ftrace=", set_ftrace); | 138 | __setup("ftrace=", set_cmdline_ftrace); |
141 | 139 | ||
142 | static int __init set_ftrace_dump_on_oops(char *str) | 140 | static int __init set_ftrace_dump_on_oops(char *str) |
143 | { | 141 | { |
@@ -203,7 +201,7 @@ cycle_t ftrace_now(int cpu) | |||
203 | */ | 201 | */ |
204 | static struct trace_array max_tr; | 202 | static struct trace_array max_tr; |
205 | 203 | ||
206 | static DEFINE_PER_CPU(struct trace_array_cpu, max_data); | 204 | static DEFINE_PER_CPU(struct trace_array_cpu, max_tr_data); |
207 | 205 | ||
208 | /* tracer_enabled is used to toggle activation of a tracer */ | 206 | /* tracer_enabled is used to toggle activation of a tracer */ |
209 | static int tracer_enabled = 1; | 207 | static int tracer_enabled = 1; |
@@ -243,12 +241,91 @@ static struct tracer *current_trace __read_mostly; | |||
243 | 241 | ||
244 | /* | 242 | /* |
245 | * trace_types_lock is used to protect the trace_types list. | 243 | * trace_types_lock is used to protect the trace_types list. |
246 | * This lock is also used to keep user access serialized. | ||
247 | * Accesses from userspace will grab this lock while userspace | ||
248 | * activities happen inside the kernel. | ||
249 | */ | 244 | */ |
250 | static DEFINE_MUTEX(trace_types_lock); | 245 | static DEFINE_MUTEX(trace_types_lock); |
251 | 246 | ||
247 | /* | ||
248 | * Serialize access to the ring buffer. | ||
249 | * | ||
250 | * The ring buffer serializes readers, but that is only low-level protection. | ||
251 | * The validity of the events (as returned by ring_buffer_peek() etc.) | ||
252 | * is not protected by the ring buffer. | ||
253 | * | ||
254 | * The content of events may become garbage if we allow another process to | ||
255 | * consume these events concurrently: | ||
256 | * A) the page holding the consumed events may become a normal page | ||
257 | * (not a reader page) in the ring buffer, and this page will be rewritten | ||
258 | * by the events producer. | ||
259 | * B) the page holding the consumed events may become a page used for | ||
260 | * splice_read, and that page will be returned to the system. | ||
261 | * | ||
262 | * These primitives allow multiple processes to access different per-cpu | ||
263 | * ring buffers concurrently. | ||
264 | * | ||
265 | * These primitives don't distinguish read-only from read-consume access. | ||
266 | * Multiple read-only accesses are also serialized. | ||
267 | */ | ||
268 | |||
269 | #ifdef CONFIG_SMP | ||
270 | static DECLARE_RWSEM(all_cpu_access_lock); | ||
271 | static DEFINE_PER_CPU(struct mutex, cpu_access_lock); | ||
272 | |||
273 | static inline void trace_access_lock(int cpu) | ||
274 | { | ||
275 | if (cpu == TRACE_PIPE_ALL_CPU) { | ||
276 | /* gain it for accessing the whole ring buffer. */ | ||
277 | down_write(&all_cpu_access_lock); | ||
278 | } else { | ||
279 | /* gain it for accessing a cpu ring buffer. */ | ||
280 | |||
281 | /* Firstly block other trace_access_lock(TRACE_PIPE_ALL_CPU). */ | ||
282 | down_read(&all_cpu_access_lock); | ||
283 | |||
284 | /* Secondly block other access to this @cpu ring buffer. */ | ||
285 | mutex_lock(&per_cpu(cpu_access_lock, cpu)); | ||
286 | } | ||
287 | } | ||
288 | |||
289 | static inline void trace_access_unlock(int cpu) | ||
290 | { | ||
291 | if (cpu == TRACE_PIPE_ALL_CPU) { | ||
292 | up_write(&all_cpu_access_lock); | ||
293 | } else { | ||
294 | mutex_unlock(&per_cpu(cpu_access_lock, cpu)); | ||
295 | up_read(&all_cpu_access_lock); | ||
296 | } | ||
297 | } | ||
298 | |||
299 | static inline void trace_access_lock_init(void) | ||
300 | { | ||
301 | int cpu; | ||
302 | |||
303 | for_each_possible_cpu(cpu) | ||
304 | mutex_init(&per_cpu(cpu_access_lock, cpu)); | ||
305 | } | ||
306 | |||
307 | #else | ||
308 | |||
309 | static DEFINE_MUTEX(access_lock); | ||
310 | |||
311 | static inline void trace_access_lock(int cpu) | ||
312 | { | ||
313 | (void)cpu; | ||
314 | mutex_lock(&access_lock); | ||
315 | } | ||
316 | |||
317 | static inline void trace_access_unlock(int cpu) | ||
318 | { | ||
319 | (void)cpu; | ||
320 | mutex_unlock(&access_lock); | ||
321 | } | ||
322 | |||
323 | static inline void trace_access_lock_init(void) | ||
324 | { | ||
325 | } | ||
326 | |||
327 | #endif | ||
328 | |||
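
In short: a per-cpu reader takes all_cpu_access_lock shared plus that cpu's mutex, while a TRACE_PIPE_ALL_CPU reader takes the rwsem exclusively, so whole-buffer access excludes every per-cpu reader without per-cpu readers blocking one another. A sketch of how callers are expected to bracket buffer consumption (illustrative only, not part of the patch):

static void read_one_cpu(int cpu)
{
	trace_access_lock(cpu);		/* excludes ALL_CPU readers, serializes this cpu */
	/* ... consume events from @cpu's buffer, e.g. via ring_buffer_consume() ... */
	trace_access_unlock(cpu);
}

static void read_all_cpus(void)
{
	trace_access_lock(TRACE_PIPE_ALL_CPU);	/* excludes every per-cpu reader */
	/* ... walk all cpu buffers while nobody else consumes events ... */
	trace_access_unlock(TRACE_PIPE_ALL_CPU);
}
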
252 | /* trace_wait is a waitqueue for tasks blocked on trace_poll */ | 329 | /* trace_wait is a waitqueue for tasks blocked on trace_poll */ |
253 | static DECLARE_WAIT_QUEUE_HEAD(trace_wait); | 330 | static DECLARE_WAIT_QUEUE_HEAD(trace_wait); |
254 | 331 | ||
@@ -297,6 +374,21 @@ static int __init set_buf_size(char *str) | |||
297 | } | 374 | } |
298 | __setup("trace_buf_size=", set_buf_size); | 375 | __setup("trace_buf_size=", set_buf_size); |
299 | 376 | ||
377 | static int __init set_tracing_thresh(char *str) | ||
378 | { | ||
379 | unsigned long threshold; | ||
380 | int ret; | ||
381 | |||
382 | if (!str) | ||
383 | return 0; | ||
384 | ret = strict_strtoul(str, 0, &threshold); | ||
385 | if (ret < 0) | ||
386 | return 0; | ||
387 | tracing_thresh = threshold * 1000; | ||
388 | return 1; | ||
389 | } | ||
390 | __setup("tracing_thresh=", set_tracing_thresh); | ||
391 | |||
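
tracing_thresh can now be set at boot. The value is given in microseconds and stored in nanoseconds (hence the multiply by 1000), and later in this patch it also moves out from under CONFIG_TRACER_MAX_TRACE. A sketch of how a latency tracer would typically consult it (illustrative only, not part of the patch):

static void check_latency(struct trace_array *tr, u64 start, u64 stop, int cpu)
{
	u64 delta = stop - start;

	if (tracing_thresh && delta < tracing_thresh)
		return;				/* below the configured threshold */

	update_max_tr(tr, current, cpu);	/* record this as the new worst case */
}
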
300 | unsigned long nsecs_to_usecs(unsigned long nsecs) | 392 | unsigned long nsecs_to_usecs(unsigned long nsecs) |
301 | { | 393 | { |
302 | return nsecs / 1000; | 394 | return nsecs / 1000; |
@@ -313,7 +405,6 @@ static const char *trace_options[] = { | |||
313 | "bin", | 405 | "bin", |
314 | "block", | 406 | "block", |
315 | "stacktrace", | 407 | "stacktrace", |
316 | "sched-tree", | ||
317 | "trace_printk", | 408 | "trace_printk", |
318 | "ftrace_preempt", | 409 | "ftrace_preempt", |
319 | "branch", | 410 | "branch", |
@@ -493,19 +584,20 @@ static ssize_t trace_seq_to_buffer(struct trace_seq *s, void *buf, size_t cnt) | |||
493 | * protected by per_cpu spinlocks. But the action of the swap | 584 | * protected by per_cpu spinlocks. But the action of the swap |
494 | * needs its own lock. | 585 | * needs its own lock. |
495 | * | 586 | * |
496 | * This is defined as a raw_spinlock_t in order to help | 587 | * This is defined as an arch_spinlock_t in order to help |
497 | * with performance when lockdep debugging is enabled. | 588 | * with performance when lockdep debugging is enabled. |
498 | * | 589 | * |
499 | * It is also used in other places outside the update_max_tr | 590 | * It is also used in other places outside the update_max_tr |
500 | * so it needs to be defined outside of the | 591 | * so it needs to be defined outside of the |
501 | * CONFIG_TRACER_MAX_TRACE. | 592 | * CONFIG_TRACER_MAX_TRACE. |
502 | */ | 593 | */ |
503 | static raw_spinlock_t ftrace_max_lock = | 594 | static arch_spinlock_t ftrace_max_lock = |
504 | (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED; | 595 | (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED; |
596 | |||
597 | unsigned long __read_mostly tracing_thresh; | ||
505 | 598 | ||
506 | #ifdef CONFIG_TRACER_MAX_TRACE | 599 | #ifdef CONFIG_TRACER_MAX_TRACE |
507 | unsigned long __read_mostly tracing_max_latency; | 600 | unsigned long __read_mostly tracing_max_latency; |
508 | unsigned long __read_mostly tracing_thresh; | ||
509 | 601 | ||
510 | /* | 602 | /* |
511 | * Copy the new maximum trace into the separate maximum-trace | 603 | * Copy the new maximum trace into the separate maximum-trace |
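
This hunk is part of the kernel-wide rename of the low-level lock type: raw_spinlock_t/__raw_spin_lock() become arch_spinlock_t/arch_spin_lock(). The arch_spin_*() primitives do no preemption or irq handling of their own, which is why update_max_tr() below insists that interrupts are already disabled. A minimal sketch of the expected calling convention (illustrative only, not part of the patch):

static arch_spinlock_t demo_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;

static void demo_critical_section(void)
{
	unsigned long flags;

	local_irq_save(flags);		/* arch_spin_lock() will not do this for us */
	arch_spin_lock(&demo_lock);
	/* ... touch data shared with other CPUs ... */
	arch_spin_unlock(&demo_lock);
	local_irq_restore(flags);
}
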
@@ -516,7 +608,7 @@ static void | |||
516 | __update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu) | 608 | __update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu) |
517 | { | 609 | { |
518 | struct trace_array_cpu *data = tr->data[cpu]; | 610 | struct trace_array_cpu *data = tr->data[cpu]; |
519 | struct trace_array_cpu *max_data = tr->data[cpu]; | 611 | struct trace_array_cpu *max_data; |
520 | 612 | ||
521 | max_tr.cpu = cpu; | 613 | max_tr.cpu = cpu; |
522 | max_tr.time_start = data->preempt_timestamp; | 614 | max_tr.time_start = data->preempt_timestamp; |
@@ -526,7 +618,7 @@ __update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu) | |||
526 | max_data->critical_start = data->critical_start; | 618 | max_data->critical_start = data->critical_start; |
527 | max_data->critical_end = data->critical_end; | 619 | max_data->critical_end = data->critical_end; |
528 | 620 | ||
529 | memcpy(data->comm, tsk->comm, TASK_COMM_LEN); | 621 | memcpy(max_data->comm, tsk->comm, TASK_COMM_LEN); |
530 | max_data->pid = tsk->pid; | 622 | max_data->pid = tsk->pid; |
531 | max_data->uid = task_uid(tsk); | 623 | max_data->uid = task_uid(tsk); |
532 | max_data->nice = tsk->static_prio - 20 - MAX_RT_PRIO; | 624 | max_data->nice = tsk->static_prio - 20 - MAX_RT_PRIO; |
@@ -555,13 +647,13 @@ update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu) | |||
555 | return; | 647 | return; |
556 | 648 | ||
557 | WARN_ON_ONCE(!irqs_disabled()); | 649 | WARN_ON_ONCE(!irqs_disabled()); |
558 | __raw_spin_lock(&ftrace_max_lock); | 650 | arch_spin_lock(&ftrace_max_lock); |
559 | 651 | ||
560 | tr->buffer = max_tr.buffer; | 652 | tr->buffer = max_tr.buffer; |
561 | max_tr.buffer = buf; | 653 | max_tr.buffer = buf; |
562 | 654 | ||
563 | __update_max_tr(tr, tsk, cpu); | 655 | __update_max_tr(tr, tsk, cpu); |
564 | __raw_spin_unlock(&ftrace_max_lock); | 656 | arch_spin_unlock(&ftrace_max_lock); |
565 | } | 657 | } |
566 | 658 | ||
567 | /** | 659 | /** |
@@ -581,7 +673,7 @@ update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu) | |||
581 | return; | 673 | return; |
582 | 674 | ||
583 | WARN_ON_ONCE(!irqs_disabled()); | 675 | WARN_ON_ONCE(!irqs_disabled()); |
584 | __raw_spin_lock(&ftrace_max_lock); | 676 | arch_spin_lock(&ftrace_max_lock); |
585 | 677 | ||
586 | ftrace_disable_cpu(); | 678 | ftrace_disable_cpu(); |
587 | 679 | ||
@@ -603,7 +695,7 @@ update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu) | |||
603 | WARN_ON_ONCE(ret && ret != -EAGAIN && ret != -EBUSY); | 695 | WARN_ON_ONCE(ret && ret != -EAGAIN && ret != -EBUSY); |
604 | 696 | ||
605 | __update_max_tr(tr, tsk, cpu); | 697 | __update_max_tr(tr, tsk, cpu); |
606 | __raw_spin_unlock(&ftrace_max_lock); | 698 | arch_spin_unlock(&ftrace_max_lock); |
607 | } | 699 | } |
608 | #endif /* CONFIG_TRACER_MAX_TRACE */ | 700 | #endif /* CONFIG_TRACER_MAX_TRACE */ |
609 | 701 | ||
@@ -748,10 +840,10 @@ out: | |||
748 | mutex_unlock(&trace_types_lock); | 840 | mutex_unlock(&trace_types_lock); |
749 | } | 841 | } |
750 | 842 | ||
751 | static void __tracing_reset(struct trace_array *tr, int cpu) | 843 | static void __tracing_reset(struct ring_buffer *buffer, int cpu) |
752 | { | 844 | { |
753 | ftrace_disable_cpu(); | 845 | ftrace_disable_cpu(); |
754 | ring_buffer_reset_cpu(tr->buffer, cpu); | 846 | ring_buffer_reset_cpu(buffer, cpu); |
755 | ftrace_enable_cpu(); | 847 | ftrace_enable_cpu(); |
756 | } | 848 | } |
757 | 849 | ||
@@ -763,7 +855,7 @@ void tracing_reset(struct trace_array *tr, int cpu) | |||
763 | 855 | ||
764 | /* Make sure all commits have finished */ | 856 | /* Make sure all commits have finished */ |
765 | synchronize_sched(); | 857 | synchronize_sched(); |
766 | __tracing_reset(tr, cpu); | 858 | __tracing_reset(buffer, cpu); |
767 | 859 | ||
768 | ring_buffer_record_enable(buffer); | 860 | ring_buffer_record_enable(buffer); |
769 | } | 861 | } |
@@ -781,7 +873,7 @@ void tracing_reset_online_cpus(struct trace_array *tr) | |||
781 | tr->time_start = ftrace_now(tr->cpu); | 873 | tr->time_start = ftrace_now(tr->cpu); |
782 | 874 | ||
783 | for_each_online_cpu(cpu) | 875 | for_each_online_cpu(cpu) |
784 | __tracing_reset(tr, cpu); | 876 | __tracing_reset(buffer, cpu); |
785 | 877 | ||
786 | ring_buffer_record_enable(buffer); | 878 | ring_buffer_record_enable(buffer); |
787 | } | 879 | } |
@@ -802,7 +894,7 @@ static unsigned map_pid_to_cmdline[PID_MAX_DEFAULT+1]; | |||
802 | static unsigned map_cmdline_to_pid[SAVED_CMDLINES]; | 894 | static unsigned map_cmdline_to_pid[SAVED_CMDLINES]; |
803 | static char saved_cmdlines[SAVED_CMDLINES][TASK_COMM_LEN]; | 895 | static char saved_cmdlines[SAVED_CMDLINES][TASK_COMM_LEN]; |
804 | static int cmdline_idx; | 896 | static int cmdline_idx; |
805 | static raw_spinlock_t trace_cmdline_lock = __RAW_SPIN_LOCK_UNLOCKED; | 897 | static arch_spinlock_t trace_cmdline_lock = __ARCH_SPIN_LOCK_UNLOCKED; |
806 | 898 | ||
807 | /* temporary disable recording */ | 899 | /* temporary disable recording */ |
808 | static atomic_t trace_record_cmdline_disabled __read_mostly; | 900 | static atomic_t trace_record_cmdline_disabled __read_mostly; |
@@ -858,6 +950,8 @@ void tracing_start(void) | |||
858 | goto out; | 950 | goto out; |
859 | } | 951 | } |
860 | 952 | ||
953 | /* Prevent the buffers from switching */ | ||
954 | arch_spin_lock(&ftrace_max_lock); | ||
861 | 955 | ||
862 | buffer = global_trace.buffer; | 956 | buffer = global_trace.buffer; |
863 | if (buffer) | 957 | if (buffer) |
@@ -867,6 +961,8 @@ void tracing_start(void) | |||
867 | if (buffer) | 961 | if (buffer) |
868 | ring_buffer_record_enable(buffer); | 962 | ring_buffer_record_enable(buffer); |
869 | 963 | ||
964 | arch_spin_unlock(&ftrace_max_lock); | ||
965 | |||
870 | ftrace_start(); | 966 | ftrace_start(); |
871 | out: | 967 | out: |
872 | spin_unlock_irqrestore(&tracing_start_lock, flags); | 968 | spin_unlock_irqrestore(&tracing_start_lock, flags); |
@@ -888,6 +984,9 @@ void tracing_stop(void) | |||
888 | if (trace_stop_count++) | 984 | if (trace_stop_count++) |
889 | goto out; | 985 | goto out; |
890 | 986 | ||
987 | /* Prevent the buffers from switching */ | ||
988 | arch_spin_lock(&ftrace_max_lock); | ||
989 | |||
891 | buffer = global_trace.buffer; | 990 | buffer = global_trace.buffer; |
892 | if (buffer) | 991 | if (buffer) |
893 | ring_buffer_record_disable(buffer); | 992 | ring_buffer_record_disable(buffer); |
@@ -896,6 +995,8 @@ void tracing_stop(void) | |||
896 | if (buffer) | 995 | if (buffer) |
897 | ring_buffer_record_disable(buffer); | 996 | ring_buffer_record_disable(buffer); |
898 | 997 | ||
998 | arch_spin_unlock(&ftrace_max_lock); | ||
999 | |||
899 | out: | 1000 | out: |
900 | spin_unlock_irqrestore(&tracing_start_lock, flags); | 1001 | spin_unlock_irqrestore(&tracing_start_lock, flags); |
901 | } | 1002 | } |
@@ -915,7 +1016,7 @@ static void trace_save_cmdline(struct task_struct *tsk) | |||
915 | * nor do we want to disable interrupts, | 1016 | * nor do we want to disable interrupts, |
916 | * so if we miss here, then better luck next time. | 1017 | * so if we miss here, then better luck next time. |
917 | */ | 1018 | */ |
918 | if (!__raw_spin_trylock(&trace_cmdline_lock)) | 1019 | if (!arch_spin_trylock(&trace_cmdline_lock)) |
919 | return; | 1020 | return; |
920 | 1021 | ||
921 | idx = map_pid_to_cmdline[tsk->pid]; | 1022 | idx = map_pid_to_cmdline[tsk->pid]; |
@@ -940,7 +1041,7 @@ static void trace_save_cmdline(struct task_struct *tsk) | |||
940 | 1041 | ||
941 | memcpy(&saved_cmdlines[idx], tsk->comm, TASK_COMM_LEN); | 1042 | memcpy(&saved_cmdlines[idx], tsk->comm, TASK_COMM_LEN); |
942 | 1043 | ||
943 | __raw_spin_unlock(&trace_cmdline_lock); | 1044 | arch_spin_unlock(&trace_cmdline_lock); |
944 | } | 1045 | } |
945 | 1046 | ||
946 | void trace_find_cmdline(int pid, char comm[]) | 1047 | void trace_find_cmdline(int pid, char comm[]) |
@@ -952,20 +1053,25 @@ void trace_find_cmdline(int pid, char comm[]) | |||
952 | return; | 1053 | return; |
953 | } | 1054 | } |
954 | 1055 | ||
1056 | if (WARN_ON_ONCE(pid < 0)) { | ||
1057 | strcpy(comm, "<XXX>"); | ||
1058 | return; | ||
1059 | } | ||
1060 | |||
955 | if (pid > PID_MAX_DEFAULT) { | 1061 | if (pid > PID_MAX_DEFAULT) { |
956 | strcpy(comm, "<...>"); | 1062 | strcpy(comm, "<...>"); |
957 | return; | 1063 | return; |
958 | } | 1064 | } |
959 | 1065 | ||
960 | preempt_disable(); | 1066 | preempt_disable(); |
961 | __raw_spin_lock(&trace_cmdline_lock); | 1067 | arch_spin_lock(&trace_cmdline_lock); |
962 | map = map_pid_to_cmdline[pid]; | 1068 | map = map_pid_to_cmdline[pid]; |
963 | if (map != NO_CMDLINE_MAP) | 1069 | if (map != NO_CMDLINE_MAP) |
964 | strcpy(comm, saved_cmdlines[map]); | 1070 | strcpy(comm, saved_cmdlines[map]); |
965 | else | 1071 | else |
966 | strcpy(comm, "<...>"); | 1072 | strcpy(comm, "<...>"); |
967 | 1073 | ||
968 | __raw_spin_unlock(&trace_cmdline_lock); | 1074 | arch_spin_unlock(&trace_cmdline_lock); |
969 | preempt_enable(); | 1075 | preempt_enable(); |
970 | } | 1076 | } |
971 | 1077 | ||
@@ -1085,7 +1191,7 @@ trace_function(struct trace_array *tr, | |||
1085 | struct ftrace_entry *entry; | 1191 | struct ftrace_entry *entry; |
1086 | 1192 | ||
1087 | /* If we are reading the ring buffer, don't trace */ | 1193 | /* If we are reading the ring buffer, don't trace */ |
1088 | if (unlikely(local_read(&__get_cpu_var(ftrace_cpu_disabled)))) | 1194 | if (unlikely(__this_cpu_read(ftrace_cpu_disabled))) |
1089 | return; | 1195 | return; |
1090 | 1196 | ||
1091 | event = trace_buffer_lock_reserve(buffer, TRACE_FN, sizeof(*entry), | 1197 | event = trace_buffer_lock_reserve(buffer, TRACE_FN, sizeof(*entry), |
@@ -1151,6 +1257,22 @@ void __trace_stack(struct trace_array *tr, unsigned long flags, int skip, | |||
1151 | __ftrace_trace_stack(tr->buffer, flags, skip, pc); | 1257 | __ftrace_trace_stack(tr->buffer, flags, skip, pc); |
1152 | } | 1258 | } |
1153 | 1259 | ||
1260 | /** | ||
1261 | * trace_dump_stack - record a stack back trace in the trace buffer | ||
1262 | */ | ||
1263 | void trace_dump_stack(void) | ||
1264 | { | ||
1265 | unsigned long flags; | ||
1266 | |||
1267 | if (tracing_disabled || tracing_selftest_running) | ||
1268 | return; | ||
1269 | |||
1270 | local_save_flags(flags); | ||
1271 | |||
1272 | /* skipping 3 frames seems to get us to the caller of this function */ | ||
1273 | __ftrace_trace_stack(global_trace.buffer, flags, 3, preempt_count()); | ||
1274 | } | ||
1275 | |||
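
trace_dump_stack() gives kernel code a way to drop a stack back trace into the trace buffer on demand, so it shows up interleaved with the surrounding events instead of in the console log. A usage sketch (the call site and condition are hypothetical):

static void suspect_path(int error)
{
	/* hypothetical: record who got us here whenever this rare case trips */
	if (unlikely(error))
		trace_dump_stack();
}
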
1154 | void | 1276 | void |
1155 | ftrace_trace_userstack(struct ring_buffer *buffer, unsigned long flags, int pc) | 1277 | ftrace_trace_userstack(struct ring_buffer *buffer, unsigned long flags, int pc) |
1156 | { | 1278 | { |
@@ -1162,6 +1284,13 @@ ftrace_trace_userstack(struct ring_buffer *buffer, unsigned long flags, int pc) | |||
1162 | if (!(trace_flags & TRACE_ITER_USERSTACKTRACE)) | 1284 | if (!(trace_flags & TRACE_ITER_USERSTACKTRACE)) |
1163 | return; | 1285 | return; |
1164 | 1286 | ||
1287 | /* | ||
1288 | * NMIs cannot handle page faults, even with fixups. | ||
1289 | * Saving the user stack can (and often does) fault. | ||
1290 | */ | ||
1291 | if (unlikely(in_nmi())) | ||
1292 | return; | ||
1293 | |||
1165 | event = trace_buffer_lock_reserve(buffer, TRACE_USER_STACK, | 1294 | event = trace_buffer_lock_reserve(buffer, TRACE_USER_STACK, |
1166 | sizeof(*entry), flags, pc); | 1295 | sizeof(*entry), flags, pc); |
1167 | if (!event) | 1296 | if (!event) |
@@ -1251,8 +1380,8 @@ ftrace_special(unsigned long arg1, unsigned long arg2, unsigned long arg3) | |||
1251 | */ | 1380 | */ |
1252 | int trace_vbprintk(unsigned long ip, const char *fmt, va_list args) | 1381 | int trace_vbprintk(unsigned long ip, const char *fmt, va_list args) |
1253 | { | 1382 | { |
1254 | static raw_spinlock_t trace_buf_lock = | 1383 | static arch_spinlock_t trace_buf_lock = |
1255 | (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED; | 1384 | (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED; |
1256 | static u32 trace_buf[TRACE_BUF_SIZE]; | 1385 | static u32 trace_buf[TRACE_BUF_SIZE]; |
1257 | 1386 | ||
1258 | struct ftrace_event_call *call = &event_bprint; | 1387 | struct ftrace_event_call *call = &event_bprint; |
@@ -1283,7 +1412,7 @@ int trace_vbprintk(unsigned long ip, const char *fmt, va_list args) | |||
1283 | 1412 | ||
1284 | /* Lockdep uses trace_printk for lock tracing */ | 1413 | /* Lockdep uses trace_printk for lock tracing */ |
1285 | local_irq_save(flags); | 1414 | local_irq_save(flags); |
1286 | __raw_spin_lock(&trace_buf_lock); | 1415 | arch_spin_lock(&trace_buf_lock); |
1287 | len = vbin_printf(trace_buf, TRACE_BUF_SIZE, fmt, args); | 1416 | len = vbin_printf(trace_buf, TRACE_BUF_SIZE, fmt, args); |
1288 | 1417 | ||
1289 | if (len > TRACE_BUF_SIZE || len < 0) | 1418 | if (len > TRACE_BUF_SIZE || len < 0) |
@@ -1300,11 +1429,13 @@ int trace_vbprintk(unsigned long ip, const char *fmt, va_list args) | |||
1300 | entry->fmt = fmt; | 1429 | entry->fmt = fmt; |
1301 | 1430 | ||
1302 | memcpy(entry->buf, trace_buf, sizeof(u32) * len); | 1431 | memcpy(entry->buf, trace_buf, sizeof(u32) * len); |
1303 | if (!filter_check_discard(call, entry, buffer, event)) | 1432 | if (!filter_check_discard(call, entry, buffer, event)) { |
1304 | ring_buffer_unlock_commit(buffer, event); | 1433 | ring_buffer_unlock_commit(buffer, event); |
1434 | ftrace_trace_stack(buffer, flags, 6, pc); | ||
1435 | } | ||
1305 | 1436 | ||
1306 | out_unlock: | 1437 | out_unlock: |
1307 | __raw_spin_unlock(&trace_buf_lock); | 1438 | arch_spin_unlock(&trace_buf_lock); |
1308 | local_irq_restore(flags); | 1439 | local_irq_restore(flags); |
1309 | 1440 | ||
1310 | out: | 1441 | out: |
@@ -1334,7 +1465,7 @@ int trace_array_printk(struct trace_array *tr, | |||
1334 | int trace_array_vprintk(struct trace_array *tr, | 1465 | int trace_array_vprintk(struct trace_array *tr, |
1335 | unsigned long ip, const char *fmt, va_list args) | 1466 | unsigned long ip, const char *fmt, va_list args) |
1336 | { | 1467 | { |
1337 | static raw_spinlock_t trace_buf_lock = __RAW_SPIN_LOCK_UNLOCKED; | 1468 | static arch_spinlock_t trace_buf_lock = __ARCH_SPIN_LOCK_UNLOCKED; |
1338 | static char trace_buf[TRACE_BUF_SIZE]; | 1469 | static char trace_buf[TRACE_BUF_SIZE]; |
1339 | 1470 | ||
1340 | struct ftrace_event_call *call = &event_print; | 1471 | struct ftrace_event_call *call = &event_print; |
@@ -1360,12 +1491,9 @@ int trace_array_vprintk(struct trace_array *tr, | |||
1360 | 1491 | ||
1361 | pause_graph_tracing(); | 1492 | pause_graph_tracing(); |
1362 | raw_local_irq_save(irq_flags); | 1493 | raw_local_irq_save(irq_flags); |
1363 | __raw_spin_lock(&trace_buf_lock); | 1494 | arch_spin_lock(&trace_buf_lock); |
1364 | len = vsnprintf(trace_buf, TRACE_BUF_SIZE, fmt, args); | 1495 | len = vsnprintf(trace_buf, TRACE_BUF_SIZE, fmt, args); |
1365 | 1496 | ||
1366 | len = min(len, TRACE_BUF_SIZE-1); | ||
1367 | trace_buf[len] = 0; | ||
1368 | |||
1369 | size = sizeof(*entry) + len + 1; | 1497 | size = sizeof(*entry) + len + 1; |
1370 | buffer = tr->buffer; | 1498 | buffer = tr->buffer; |
1371 | event = trace_buffer_lock_reserve(buffer, TRACE_PRINT, size, | 1499 | event = trace_buffer_lock_reserve(buffer, TRACE_PRINT, size, |
@@ -1373,15 +1501,17 @@ int trace_array_vprintk(struct trace_array *tr, | |||
1373 | if (!event) | 1501 | if (!event) |
1374 | goto out_unlock; | 1502 | goto out_unlock; |
1375 | entry = ring_buffer_event_data(event); | 1503 | entry = ring_buffer_event_data(event); |
1376 | entry->ip = ip; | 1504 | entry->ip = ip; |
1377 | 1505 | ||
1378 | memcpy(&entry->buf, trace_buf, len); | 1506 | memcpy(&entry->buf, trace_buf, len); |
1379 | entry->buf[len] = 0; | 1507 | entry->buf[len] = '\0'; |
1380 | if (!filter_check_discard(call, entry, buffer, event)) | 1508 | if (!filter_check_discard(call, entry, buffer, event)) { |
1381 | ring_buffer_unlock_commit(buffer, event); | 1509 | ring_buffer_unlock_commit(buffer, event); |
1510 | ftrace_trace_stack(buffer, irq_flags, 6, pc); | ||
1511 | } | ||
1382 | 1512 | ||
1383 | out_unlock: | 1513 | out_unlock: |
1384 | __raw_spin_unlock(&trace_buf_lock); | 1514 | arch_spin_unlock(&trace_buf_lock); |
1385 | raw_local_irq_restore(irq_flags); | 1515 | raw_local_irq_restore(irq_flags); |
1386 | unpause_graph_tracing(); | 1516 | unpause_graph_tracing(); |
1387 | out: | 1517 | out: |
@@ -1515,6 +1645,8 @@ static void *s_next(struct seq_file *m, void *v, loff_t *pos) | |||
1515 | int i = (int)*pos; | 1645 | int i = (int)*pos; |
1516 | void *ent; | 1646 | void *ent; |
1517 | 1647 | ||
1648 | WARN_ON_ONCE(iter->leftover); | ||
1649 | |||
1518 | (*pos)++; | 1650 | (*pos)++; |
1519 | 1651 | ||
1520 | /* can't go backwards */ | 1652 | /* can't go backwards */ |
@@ -1566,12 +1698,6 @@ static void tracing_iter_reset(struct trace_iterator *iter, int cpu) | |||
1566 | } | 1698 | } |
1567 | 1699 | ||
1568 | /* | 1700 | /* |
1569 | * No necessary locking here. The worst thing which can | ||
1570 | * happen is loosing events consumed at the same time | ||
1571 | * by a trace_pipe reader. | ||
1572 | * Other than that, we don't risk to crash the ring buffer | ||
1573 | * because it serializes the readers. | ||
1574 | * | ||
1575 | * The current tracer is copied to avoid a global locking | 1701 | * The current tracer is copied to avoid a global locking |
1576 | * all around. | 1702 | * all around. |
1577 | */ | 1703 | */ |
@@ -1609,21 +1735,34 @@ static void *s_start(struct seq_file *m, loff_t *pos) | |||
1609 | 1735 | ||
1610 | ftrace_enable_cpu(); | 1736 | ftrace_enable_cpu(); |
1611 | 1737 | ||
1738 | iter->leftover = 0; | ||
1612 | for (p = iter; p && l < *pos; p = s_next(m, p, &l)) | 1739 | for (p = iter; p && l < *pos; p = s_next(m, p, &l)) |
1613 | ; | 1740 | ; |
1614 | 1741 | ||
1615 | } else { | 1742 | } else { |
1616 | l = *pos - 1; | 1743 | /* |
1617 | p = s_next(m, p, &l); | 1744 | * If we overflowed the seq_file before, then we want |
1745 | * to just reuse the trace_seq buffer again. | ||
1746 | */ | ||
1747 | if (iter->leftover) | ||
1748 | p = iter; | ||
1749 | else { | ||
1750 | l = *pos - 1; | ||
1751 | p = s_next(m, p, &l); | ||
1752 | } | ||
1618 | } | 1753 | } |
1619 | 1754 | ||
1620 | trace_event_read_lock(); | 1755 | trace_event_read_lock(); |
1756 | trace_access_lock(cpu_file); | ||
1621 | return p; | 1757 | return p; |
1622 | } | 1758 | } |
1623 | 1759 | ||
1624 | static void s_stop(struct seq_file *m, void *p) | 1760 | static void s_stop(struct seq_file *m, void *p) |
1625 | { | 1761 | { |
1762 | struct trace_iterator *iter = m->private; | ||
1763 | |||
1626 | atomic_dec(&trace_record_cmdline_disabled); | 1764 | atomic_dec(&trace_record_cmdline_disabled); |
1765 | trace_access_unlock(iter->cpu_file); | ||
1627 | trace_event_read_unlock(); | 1766 | trace_event_read_unlock(); |
1628 | } | 1767 | } |
1629 | 1768 | ||
@@ -1922,6 +2061,7 @@ static enum print_line_t print_trace_line(struct trace_iterator *iter) | |||
1922 | static int s_show(struct seq_file *m, void *v) | 2061 | static int s_show(struct seq_file *m, void *v) |
1923 | { | 2062 | { |
1924 | struct trace_iterator *iter = v; | 2063 | struct trace_iterator *iter = v; |
2064 | int ret; | ||
1925 | 2065 | ||
1926 | if (iter->ent == NULL) { | 2066 | if (iter->ent == NULL) { |
1927 | if (iter->tr) { | 2067 | if (iter->tr) { |
@@ -1941,9 +2081,27 @@ static int s_show(struct seq_file *m, void *v) | |||
1941 | if (!(trace_flags & TRACE_ITER_VERBOSE)) | 2081 | if (!(trace_flags & TRACE_ITER_VERBOSE)) |
1942 | print_func_help_header(m); | 2082 | print_func_help_header(m); |
1943 | } | 2083 | } |
2084 | } else if (iter->leftover) { | ||
2085 | /* | ||
2086 | * If we filled the seq_file buffer earlier, we | ||
2087 | * want to just show it now. | ||
2088 | */ | ||
2089 | ret = trace_print_seq(m, &iter->seq); | ||
2090 | |||
2091 | /* ret should this time be zero, but you never know */ | ||
2092 | iter->leftover = ret; | ||
2093 | |||
1944 | } else { | 2094 | } else { |
1945 | print_trace_line(iter); | 2095 | print_trace_line(iter); |
1946 | trace_print_seq(m, &iter->seq); | 2096 | ret = trace_print_seq(m, &iter->seq); |
2097 | /* | ||
2098 | * If we overflow the seq_file buffer, then it will | ||
2099 | * ask us for this data again at start up. | ||
2100 | * Use that instead. | ||
2101 | * ret is 0 if seq_file write succeeded. | ||
2102 | * -1 otherwise. | ||
2103 | */ | ||
2104 | iter->leftover = ret; | ||
1947 | } | 2105 | } |
1948 | 2106 | ||
1949 | return 0; | 2107 | return 0; |
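
The leftover handling works because trace_print_seq() reports whether the already-formatted text fit into the seq_file buffer (0 on success, -1 otherwise, per the comment above). Since a consumed trace entry cannot be re-read, the text is parked in iter->seq and replayed on the next ->show() call, which s_start() arranges by returning the iterator itself when iter->leftover is set. A sketch of the general pattern with a hypothetical iterator type (illustrative only, not part of the patch):

struct replay_iter {
	struct trace_seq	seq;		/* pending formatted text */
	int			leftover;	/* non-zero: seq still holds text */
};

static int replay_show(struct seq_file *m, void *v)
{
	struct replay_iter *iter = v;

	if (iter->leftover) {
		/* flush the stashed text before formatting anything new */
		iter->leftover = trace_print_seq(m, &iter->seq);
		return 0;
	}

	/* format_one_entry(&iter->seq);  -- hypothetical formatter */
	iter->leftover = trace_print_seq(m, &iter->seq);
	return 0;
}
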
@@ -2253,7 +2411,7 @@ tracing_cpumask_write(struct file *filp, const char __user *ubuf, | |||
2253 | mutex_lock(&tracing_cpumask_update_lock); | 2411 | mutex_lock(&tracing_cpumask_update_lock); |
2254 | 2412 | ||
2255 | local_irq_disable(); | 2413 | local_irq_disable(); |
2256 | __raw_spin_lock(&ftrace_max_lock); | 2414 | arch_spin_lock(&ftrace_max_lock); |
2257 | for_each_tracing_cpu(cpu) { | 2415 | for_each_tracing_cpu(cpu) { |
2258 | /* | 2416 | /* |
2259 | * Increase/decrease the disabled counter if we are | 2417 | * Increase/decrease the disabled counter if we are |
@@ -2268,7 +2426,7 @@ tracing_cpumask_write(struct file *filp, const char __user *ubuf, | |||
2268 | atomic_dec(&global_trace.data[cpu]->disabled); | 2426 | atomic_dec(&global_trace.data[cpu]->disabled); |
2269 | } | 2427 | } |
2270 | } | 2428 | } |
2271 | __raw_spin_unlock(&ftrace_max_lock); | 2429 | arch_spin_unlock(&ftrace_max_lock); |
2272 | local_irq_enable(); | 2430 | local_irq_enable(); |
2273 | 2431 | ||
2274 | cpumask_copy(tracing_cpumask, tracing_cpumask_new); | 2432 | cpumask_copy(tracing_cpumask, tracing_cpumask_new); |
@@ -2290,67 +2448,49 @@ static const struct file_operations tracing_cpumask_fops = { | |||
2290 | .write = tracing_cpumask_write, | 2448 | .write = tracing_cpumask_write, |
2291 | }; | 2449 | }; |
2292 | 2450 | ||
2293 | static ssize_t | 2451 | static int tracing_trace_options_show(struct seq_file *m, void *v) |
2294 | tracing_trace_options_read(struct file *filp, char __user *ubuf, | ||
2295 | size_t cnt, loff_t *ppos) | ||
2296 | { | 2452 | { |
2297 | struct tracer_opt *trace_opts; | 2453 | struct tracer_opt *trace_opts; |
2298 | u32 tracer_flags; | 2454 | u32 tracer_flags; |
2299 | int len = 0; | ||
2300 | char *buf; | ||
2301 | int r = 0; | ||
2302 | int i; | 2455 | int i; |
2303 | 2456 | ||
2304 | |||
2305 | /* calculate max size */ | ||
2306 | for (i = 0; trace_options[i]; i++) { | ||
2307 | len += strlen(trace_options[i]); | ||
2308 | len += 3; /* "no" and newline */ | ||
2309 | } | ||
2310 | |||
2311 | mutex_lock(&trace_types_lock); | 2457 | mutex_lock(&trace_types_lock); |
2312 | tracer_flags = current_trace->flags->val; | 2458 | tracer_flags = current_trace->flags->val; |
2313 | trace_opts = current_trace->flags->opts; | 2459 | trace_opts = current_trace->flags->opts; |
2314 | 2460 | ||
2315 | /* | ||
2316 | * Increase the size with names of options specific | ||
2317 | * of the current tracer. | ||
2318 | */ | ||
2319 | for (i = 0; trace_opts[i].name; i++) { | ||
2320 | len += strlen(trace_opts[i].name); | ||
2321 | len += 3; /* "no" and newline */ | ||
2322 | } | ||
2323 | |||
2324 | /* +1 for \0 */ | ||
2325 | buf = kmalloc(len + 1, GFP_KERNEL); | ||
2326 | if (!buf) { | ||
2327 | mutex_unlock(&trace_types_lock); | ||
2328 | return -ENOMEM; | ||
2329 | } | ||
2330 | |||
2331 | for (i = 0; trace_options[i]; i++) { | 2461 | for (i = 0; trace_options[i]; i++) { |
2332 | if (trace_flags & (1 << i)) | 2462 | if (trace_flags & (1 << i)) |
2333 | r += sprintf(buf + r, "%s\n", trace_options[i]); | 2463 | seq_printf(m, "%s\n", trace_options[i]); |
2334 | else | 2464 | else |
2335 | r += sprintf(buf + r, "no%s\n", trace_options[i]); | 2465 | seq_printf(m, "no%s\n", trace_options[i]); |
2336 | } | 2466 | } |
2337 | 2467 | ||
2338 | for (i = 0; trace_opts[i].name; i++) { | 2468 | for (i = 0; trace_opts[i].name; i++) { |
2339 | if (tracer_flags & trace_opts[i].bit) | 2469 | if (tracer_flags & trace_opts[i].bit) |
2340 | r += sprintf(buf + r, "%s\n", | 2470 | seq_printf(m, "%s\n", trace_opts[i].name); |
2341 | trace_opts[i].name); | ||
2342 | else | 2471 | else |
2343 | r += sprintf(buf + r, "no%s\n", | 2472 | seq_printf(m, "no%s\n", trace_opts[i].name); |
2344 | trace_opts[i].name); | ||
2345 | } | 2473 | } |
2346 | mutex_unlock(&trace_types_lock); | 2474 | mutex_unlock(&trace_types_lock); |
2347 | 2475 | ||
2348 | WARN_ON(r >= len + 1); | 2476 | return 0; |
2477 | } | ||
2349 | 2478 | ||
2350 | r = simple_read_from_buffer(ubuf, cnt, ppos, buf, r); | 2479 | static int __set_tracer_option(struct tracer *trace, |
2480 | struct tracer_flags *tracer_flags, | ||
2481 | struct tracer_opt *opts, int neg) | ||
2482 | { | ||
2483 | int ret; | ||
2351 | 2484 | ||
2352 | kfree(buf); | 2485 | ret = trace->set_flag(tracer_flags->val, opts->bit, !neg); |
2353 | return r; | 2486 | if (ret) |
2487 | return ret; | ||
2488 | |||
2489 | if (neg) | ||
2490 | tracer_flags->val &= ~opts->bit; | ||
2491 | else | ||
2492 | tracer_flags->val |= opts->bit; | ||
2493 | return 0; | ||
2354 | } | 2494 | } |
2355 | 2495 | ||
2356 | /* Try to assign a tracer specific option */ | 2496 | /* Try to assign a tracer specific option */ |
@@ -2358,33 +2498,17 @@ static int set_tracer_option(struct tracer *trace, char *cmp, int neg) | |||
2358 | { | 2498 | { |
2359 | struct tracer_flags *tracer_flags = trace->flags; | 2499 | struct tracer_flags *tracer_flags = trace->flags; |
2360 | struct tracer_opt *opts = NULL; | 2500 | struct tracer_opt *opts = NULL; |
2361 | int ret = 0, i = 0; | 2501 | int i; |
2362 | int len; | ||
2363 | 2502 | ||
2364 | for (i = 0; tracer_flags->opts[i].name; i++) { | 2503 | for (i = 0; tracer_flags->opts[i].name; i++) { |
2365 | opts = &tracer_flags->opts[i]; | 2504 | opts = &tracer_flags->opts[i]; |
2366 | len = strlen(opts->name); | ||
2367 | 2505 | ||
2368 | if (strncmp(cmp, opts->name, len) == 0) { | 2506 | if (strcmp(cmp, opts->name) == 0) |
2369 | ret = trace->set_flag(tracer_flags->val, | 2507 | return __set_tracer_option(trace, trace->flags, |
2370 | opts->bit, !neg); | 2508 | opts, neg); |
2371 | break; | ||
2372 | } | ||
2373 | } | 2509 | } |
2374 | /* Not found */ | ||
2375 | if (!tracer_flags->opts[i].name) | ||
2376 | return -EINVAL; | ||
2377 | |||
2378 | /* Refused to handle */ | ||
2379 | if (ret) | ||
2380 | return ret; | ||
2381 | |||
2382 | if (neg) | ||
2383 | tracer_flags->val &= ~opts->bit; | ||
2384 | else | ||
2385 | tracer_flags->val |= opts->bit; | ||
2386 | 2510 | ||
2387 | return 0; | 2511 | return -EINVAL; |
2388 | } | 2512 | } |
2389 | 2513 | ||
2390 | static void set_tracer_flags(unsigned int mask, int enabled) | 2514 | static void set_tracer_flags(unsigned int mask, int enabled) |
@@ -2404,7 +2528,7 @@ tracing_trace_options_write(struct file *filp, const char __user *ubuf, | |||
2404 | size_t cnt, loff_t *ppos) | 2528 | size_t cnt, loff_t *ppos) |
2405 | { | 2529 | { |
2406 | char buf[64]; | 2530 | char buf[64]; |
2407 | char *cmp = buf; | 2531 | char *cmp; |
2408 | int neg = 0; | 2532 | int neg = 0; |
2409 | int ret; | 2533 | int ret; |
2410 | int i; | 2534 | int i; |
@@ -2416,16 +2540,15 @@ tracing_trace_options_write(struct file *filp, const char __user *ubuf, | |||
2416 | return -EFAULT; | 2540 | return -EFAULT; |
2417 | 2541 | ||
2418 | buf[cnt] = 0; | 2542 | buf[cnt] = 0; |
2543 | cmp = strstrip(buf); | ||
2419 | 2544 | ||
2420 | if (strncmp(buf, "no", 2) == 0) { | 2545 | if (strncmp(cmp, "no", 2) == 0) { |
2421 | neg = 1; | 2546 | neg = 1; |
2422 | cmp += 2; | 2547 | cmp += 2; |
2423 | } | 2548 | } |
2424 | 2549 | ||
2425 | for (i = 0; trace_options[i]; i++) { | 2550 | for (i = 0; trace_options[i]; i++) { |
2426 | int len = strlen(trace_options[i]); | 2551 | if (strcmp(cmp, trace_options[i]) == 0) { |
2427 | |||
2428 | if (strncmp(cmp, trace_options[i], len) == 0) { | ||
2429 | set_tracer_flags(1 << i, !neg); | 2552 | set_tracer_flags(1 << i, !neg); |
2430 | break; | 2553 | break; |
2431 | } | 2554 | } |
@@ -2445,9 +2568,18 @@ tracing_trace_options_write(struct file *filp, const char __user *ubuf, | |||
2445 | return cnt; | 2568 | return cnt; |
2446 | } | 2569 | } |
2447 | 2570 | ||
2571 | static int tracing_trace_options_open(struct inode *inode, struct file *file) | ||
2572 | { | ||
2573 | if (tracing_disabled) | ||
2574 | return -ENODEV; | ||
2575 | return single_open(file, tracing_trace_options_show, NULL); | ||
2576 | } | ||
2577 | |||
2448 | static const struct file_operations tracing_iter_fops = { | 2578 | static const struct file_operations tracing_iter_fops = { |
2449 | .open = tracing_open_generic, | 2579 | .open = tracing_trace_options_open, |
2450 | .read = tracing_trace_options_read, | 2580 | .read = seq_read, |
2581 | .llseek = seq_lseek, | ||
2582 | .release = single_release, | ||
2451 | .write = tracing_trace_options_write, | 2583 | .write = tracing_trace_options_write, |
2452 | }; | 2584 | }; |
2453 | 2585 | ||
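
The read side of the options file moves from a hand-rolled kmalloc/sprintf/simple_read_from_buffer implementation to seq_file, so the size pre-computation and overflow bookkeeping disappear and the open handler only has to refuse access while tracing is disabled. The same conversion is applied to the trace clock file further down. A minimal sketch of the single_open() pattern being adopted (names are illustrative):

static int opts_demo_show(struct seq_file *m, void *v)
{
	seq_printf(m, "demo_option\n");	/* seq_file handles buffer sizing itself */
	return 0;
}

static int opts_demo_open(struct inode *inode, struct file *file)
{
	return single_open(file, opts_demo_show, NULL);
}

static const struct file_operations opts_demo_fops = {
	.open		= opts_demo_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
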
@@ -2821,22 +2953,6 @@ static int tracing_open_pipe(struct inode *inode, struct file *filp) | |||
2821 | 2953 | ||
2822 | mutex_lock(&trace_types_lock); | 2954 | mutex_lock(&trace_types_lock); |
2823 | 2955 | ||
2824 | /* We only allow one reader per cpu */ | ||
2825 | if (cpu_file == TRACE_PIPE_ALL_CPU) { | ||
2826 | if (!cpumask_empty(tracing_reader_cpumask)) { | ||
2827 | ret = -EBUSY; | ||
2828 | goto out; | ||
2829 | } | ||
2830 | cpumask_setall(tracing_reader_cpumask); | ||
2831 | } else { | ||
2832 | if (!cpumask_test_cpu(cpu_file, tracing_reader_cpumask)) | ||
2833 | cpumask_set_cpu(cpu_file, tracing_reader_cpumask); | ||
2834 | else { | ||
2835 | ret = -EBUSY; | ||
2836 | goto out; | ||
2837 | } | ||
2838 | } | ||
2839 | |||
2840 | /* create a buffer to store the information to pass to userspace */ | 2956 | /* create a buffer to store the information to pass to userspace */ |
2841 | iter = kzalloc(sizeof(*iter), GFP_KERNEL); | 2957 | iter = kzalloc(sizeof(*iter), GFP_KERNEL); |
2842 | if (!iter) { | 2958 | if (!iter) { |
@@ -2892,10 +3008,8 @@ static int tracing_release_pipe(struct inode *inode, struct file *file) | |||
2892 | 3008 | ||
2893 | mutex_lock(&trace_types_lock); | 3009 | mutex_lock(&trace_types_lock); |
2894 | 3010 | ||
2895 | if (iter->cpu_file == TRACE_PIPE_ALL_CPU) | 3011 | if (iter->trace->pipe_close) |
2896 | cpumask_clear(tracing_reader_cpumask); | 3012 | iter->trace->pipe_close(iter); |
2897 | else | ||
2898 | cpumask_clear_cpu(iter->cpu_file, tracing_reader_cpumask); | ||
2899 | 3013 | ||
2900 | mutex_unlock(&trace_types_lock); | 3014 | mutex_unlock(&trace_types_lock); |
2901 | 3015 | ||
@@ -3055,6 +3169,7 @@ waitagain: | |||
3055 | iter->pos = -1; | 3169 | iter->pos = -1; |
3056 | 3170 | ||
3057 | trace_event_read_lock(); | 3171 | trace_event_read_lock(); |
3172 | trace_access_lock(iter->cpu_file); | ||
3058 | while (find_next_entry_inc(iter) != NULL) { | 3173 | while (find_next_entry_inc(iter) != NULL) { |
3059 | enum print_line_t ret; | 3174 | enum print_line_t ret; |
3060 | int len = iter->seq.len; | 3175 | int len = iter->seq.len; |
@@ -3071,6 +3186,7 @@ waitagain: | |||
3071 | if (iter->seq.len >= cnt) | 3186 | if (iter->seq.len >= cnt) |
3072 | break; | 3187 | break; |
3073 | } | 3188 | } |
3189 | trace_access_unlock(iter->cpu_file); | ||
3074 | trace_event_read_unlock(); | 3190 | trace_event_read_unlock(); |
3075 | 3191 | ||
3076 | /* Now copy what we have to the user */ | 3192 | /* Now copy what we have to the user */ |
@@ -3103,7 +3219,7 @@ static void tracing_spd_release_pipe(struct splice_pipe_desc *spd, | |||
3103 | __free_page(spd->pages[idx]); | 3219 | __free_page(spd->pages[idx]); |
3104 | } | 3220 | } |
3105 | 3221 | ||
3106 | static struct pipe_buf_operations tracing_pipe_buf_ops = { | 3222 | static const struct pipe_buf_operations tracing_pipe_buf_ops = { |
3107 | .can_merge = 0, | 3223 | .can_merge = 0, |
3108 | .map = generic_pipe_buf_map, | 3224 | .map = generic_pipe_buf_map, |
3109 | .unmap = generic_pipe_buf_unmap, | 3225 | .unmap = generic_pipe_buf_unmap, |
@@ -3196,6 +3312,7 @@ static ssize_t tracing_splice_read_pipe(struct file *filp, | |||
3196 | } | 3312 | } |
3197 | 3313 | ||
3198 | trace_event_read_lock(); | 3314 | trace_event_read_lock(); |
3315 | trace_access_lock(iter->cpu_file); | ||
3199 | 3316 | ||
3200 | /* Fill as many pages as possible. */ | 3317 | /* Fill as many pages as possible. */ |
3201 | for (i = 0, rem = len; i < PIPE_BUFFERS && rem; i++) { | 3318 | for (i = 0, rem = len; i < PIPE_BUFFERS && rem; i++) { |
@@ -3219,6 +3336,7 @@ static ssize_t tracing_splice_read_pipe(struct file *filp, | |||
3219 | trace_seq_init(&iter->seq); | 3336 | trace_seq_init(&iter->seq); |
3220 | } | 3337 | } |
3221 | 3338 | ||
3339 | trace_access_unlock(iter->cpu_file); | ||
3222 | trace_event_read_unlock(); | 3340 | trace_event_read_unlock(); |
3223 | mutex_unlock(&iter->mutex); | 3341 | mutex_unlock(&iter->mutex); |
3224 | 3342 | ||
@@ -3334,7 +3452,6 @@ tracing_mark_write(struct file *filp, const char __user *ubuf, | |||
3334 | size_t cnt, loff_t *fpos) | 3452 | size_t cnt, loff_t *fpos) |
3335 | { | 3453 | { |
3336 | char *buf; | 3454 | char *buf; |
3337 | char *end; | ||
3338 | 3455 | ||
3339 | if (tracing_disabled) | 3456 | if (tracing_disabled) |
3340 | return -EINVAL; | 3457 | return -EINVAL; |
@@ -3342,7 +3459,7 @@ tracing_mark_write(struct file *filp, const char __user *ubuf, | |||
3342 | if (cnt > TRACE_BUF_SIZE) | 3459 | if (cnt > TRACE_BUF_SIZE) |
3343 | cnt = TRACE_BUF_SIZE; | 3460 | cnt = TRACE_BUF_SIZE; |
3344 | 3461 | ||
3345 | buf = kmalloc(cnt + 1, GFP_KERNEL); | 3462 | buf = kmalloc(cnt + 2, GFP_KERNEL); |
3346 | if (buf == NULL) | 3463 | if (buf == NULL) |
3347 | return -ENOMEM; | 3464 | return -ENOMEM; |
3348 | 3465 | ||
@@ -3350,35 +3467,31 @@ tracing_mark_write(struct file *filp, const char __user *ubuf, | |||
3350 | kfree(buf); | 3467 | kfree(buf); |
3351 | return -EFAULT; | 3468 | return -EFAULT; |
3352 | } | 3469 | } |
3470 | if (buf[cnt-1] != '\n') { | ||
3471 | buf[cnt] = '\n'; | ||
3472 | buf[cnt+1] = '\0'; | ||
3473 | } else | ||
3474 | buf[cnt] = '\0'; | ||
3353 | 3475 | ||
3354 | /* Cut from the first nil or newline. */ | 3476 | cnt = mark_printk("%s", buf); |
3355 | buf[cnt] = '\0'; | ||
3356 | end = strchr(buf, '\n'); | ||
3357 | if (end) | ||
3358 | *end = '\0'; | ||
3359 | |||
3360 | cnt = mark_printk("%s\n", buf); | ||
3361 | kfree(buf); | 3477 | kfree(buf); |
3362 | *fpos += cnt; | 3478 | *fpos += cnt; |
3363 | 3479 | ||
3364 | return cnt; | 3480 | return cnt; |
3365 | } | 3481 | } |
3366 | 3482 | ||
3367 | static ssize_t tracing_clock_read(struct file *filp, char __user *ubuf, | 3483 | static int tracing_clock_show(struct seq_file *m, void *v) |
3368 | size_t cnt, loff_t *ppos) | ||
3369 | { | 3484 | { |
3370 | char buf[64]; | ||
3371 | int bufiter = 0; | ||
3372 | int i; | 3485 | int i; |
3373 | 3486 | ||
3374 | for (i = 0; i < ARRAY_SIZE(trace_clocks); i++) | 3487 | for (i = 0; i < ARRAY_SIZE(trace_clocks); i++) |
3375 | bufiter += snprintf(buf + bufiter, sizeof(buf) - bufiter, | 3488 | seq_printf(m, |
3376 | "%s%s%s%s", i ? " " : "", | 3489 | "%s%s%s%s", i ? " " : "", |
3377 | i == trace_clock_id ? "[" : "", trace_clocks[i].name, | 3490 | i == trace_clock_id ? "[" : "", trace_clocks[i].name, |
3378 | i == trace_clock_id ? "]" : ""); | 3491 | i == trace_clock_id ? "]" : ""); |
3379 | bufiter += snprintf(buf + bufiter, sizeof(buf) - bufiter, "\n"); | 3492 | seq_putc(m, '\n'); |
3380 | 3493 | ||
3381 | return simple_read_from_buffer(ubuf, cnt, ppos, buf, bufiter); | 3494 | return 0; |
3382 | } | 3495 | } |
3383 | 3496 | ||
3384 | static ssize_t tracing_clock_write(struct file *filp, const char __user *ubuf, | 3497 | static ssize_t tracing_clock_write(struct file *filp, const char __user *ubuf, |
@@ -3420,6 +3533,13 @@ static ssize_t tracing_clock_write(struct file *filp, const char __user *ubuf, | |||
3420 | return cnt; | 3533 | return cnt; |
3421 | } | 3534 | } |
3422 | 3535 | ||
3536 | static int tracing_clock_open(struct inode *inode, struct file *file) | ||
3537 | { | ||
3538 | if (tracing_disabled) | ||
3539 | return -ENODEV; | ||
3540 | return single_open(file, tracing_clock_show, NULL); | ||
3541 | } | ||
3542 | |||
3423 | static const struct file_operations tracing_max_lat_fops = { | 3543 | static const struct file_operations tracing_max_lat_fops = { |
3424 | .open = tracing_open_generic, | 3544 | .open = tracing_open_generic, |
3425 | .read = tracing_max_lat_read, | 3545 | .read = tracing_max_lat_read, |
@@ -3458,8 +3578,10 @@ static const struct file_operations tracing_mark_fops = { | |||
3458 | }; | 3578 | }; |
3459 | 3579 | ||
3460 | static const struct file_operations trace_clock_fops = { | 3580 | static const struct file_operations trace_clock_fops = { |
3461 | .open = tracing_open_generic, | 3581 | .open = tracing_clock_open, |
3462 | .read = tracing_clock_read, | 3582 | .read = seq_read, |
3583 | .llseek = seq_lseek, | ||
3584 | .release = single_release, | ||
3463 | .write = tracing_clock_write, | 3585 | .write = tracing_clock_write, |
3464 | }; | 3586 | }; |
3465 | 3587 | ||
@@ -3516,10 +3638,12 @@ tracing_buffers_read(struct file *filp, char __user *ubuf, | |||
3516 | 3638 | ||
3517 | info->read = 0; | 3639 | info->read = 0; |
3518 | 3640 | ||
3641 | trace_access_lock(info->cpu); | ||
3519 | ret = ring_buffer_read_page(info->tr->buffer, | 3642 | ret = ring_buffer_read_page(info->tr->buffer, |
3520 | &info->spare, | 3643 | &info->spare, |
3521 | count, | 3644 | count, |
3522 | info->cpu, 0); | 3645 | info->cpu, 0); |
3646 | trace_access_unlock(info->cpu); | ||
3523 | if (ret < 0) | 3647 | if (ret < 0) |
3524 | return 0; | 3648 | return 0; |
3525 | 3649 | ||
@@ -3589,7 +3713,7 @@ static void buffer_pipe_buf_get(struct pipe_inode_info *pipe, | |||
3589 | } | 3713 | } |
3590 | 3714 | ||
3591 | /* Pipe buffer operations for a buffer. */ | 3715 | /* Pipe buffer operations for a buffer. */ |
3592 | static struct pipe_buf_operations buffer_pipe_buf_ops = { | 3716 | static const struct pipe_buf_operations buffer_pipe_buf_ops = { |
3593 | .can_merge = 0, | 3717 | .can_merge = 0, |
3594 | .map = generic_pipe_buf_map, | 3718 | .map = generic_pipe_buf_map, |
3595 | .unmap = generic_pipe_buf_unmap, | 3719 | .unmap = generic_pipe_buf_unmap, |
@@ -3647,6 +3771,7 @@ tracing_buffers_splice_read(struct file *file, loff_t *ppos, | |||
3647 | len &= PAGE_MASK; | 3771 | len &= PAGE_MASK; |
3648 | } | 3772 | } |
3649 | 3773 | ||
3774 | trace_access_lock(info->cpu); | ||
3650 | entries = ring_buffer_entries_cpu(info->tr->buffer, info->cpu); | 3775 | entries = ring_buffer_entries_cpu(info->tr->buffer, info->cpu); |
3651 | 3776 | ||
3652 | for (i = 0; i < PIPE_BUFFERS && len && entries; i++, len -= PAGE_SIZE) { | 3777 | for (i = 0; i < PIPE_BUFFERS && len && entries; i++, len -= PAGE_SIZE) { |
@@ -3694,6 +3819,7 @@ tracing_buffers_splice_read(struct file *file, loff_t *ppos, | |||
3694 | entries = ring_buffer_entries_cpu(info->tr->buffer, info->cpu); | 3819 | entries = ring_buffer_entries_cpu(info->tr->buffer, info->cpu); |
3695 | } | 3820 | } |
3696 | 3821 | ||
3822 | trace_access_unlock(info->cpu); | ||
3697 | spd.nr_pages = i; | 3823 | spd.nr_pages = i; |
3698 | 3824 | ||
3699 | /* did we read anything? */ | 3825 | /* did we read anything? */ |
@@ -3730,7 +3856,7 @@ tracing_stats_read(struct file *filp, char __user *ubuf, | |||
3730 | 3856 | ||
3731 | s = kmalloc(sizeof(*s), GFP_KERNEL); | 3857 | s = kmalloc(sizeof(*s), GFP_KERNEL); |
3732 | if (!s) | 3858 | if (!s) |
3733 | return ENOMEM; | 3859 | return -ENOMEM; |
3734 | 3860 | ||
3735 | trace_seq_init(s); | 3861 | trace_seq_init(s); |
3736 | 3862 | ||
@@ -3920,39 +4046,16 @@ trace_options_write(struct file *filp, const char __user *ubuf, size_t cnt, | |||
3920 | if (ret < 0) | 4046 | if (ret < 0) |
3921 | return ret; | 4047 | return ret; |
3922 | 4048 | ||
3923 | ret = 0; | 4049 | if (val != 0 && val != 1) |
3924 | switch (val) { | 4050 | return -EINVAL; |
3925 | case 0: | ||
3926 | /* do nothing if already cleared */ | ||
3927 | if (!(topt->flags->val & topt->opt->bit)) | ||
3928 | break; | ||
3929 | |||
3930 | mutex_lock(&trace_types_lock); | ||
3931 | if (current_trace->set_flag) | ||
3932 | ret = current_trace->set_flag(topt->flags->val, | ||
3933 | topt->opt->bit, 0); | ||
3934 | mutex_unlock(&trace_types_lock); | ||
3935 | if (ret) | ||
3936 | return ret; | ||
3937 | topt->flags->val &= ~topt->opt->bit; | ||
3938 | break; | ||
3939 | case 1: | ||
3940 | /* do nothing if already set */ | ||
3941 | if (topt->flags->val & topt->opt->bit) | ||
3942 | break; | ||
3943 | 4051 | ||
4052 | if (!!(topt->flags->val & topt->opt->bit) != val) { | ||
3944 | mutex_lock(&trace_types_lock); | 4053 | mutex_lock(&trace_types_lock); |
3945 | if (current_trace->set_flag) | 4054 | ret = __set_tracer_option(current_trace, topt->flags, |
3946 | ret = current_trace->set_flag(topt->flags->val, | 4055 | topt->opt, !val); |
3947 | topt->opt->bit, 1); | ||
3948 | mutex_unlock(&trace_types_lock); | 4056 | mutex_unlock(&trace_types_lock); |
3949 | if (ret) | 4057 | if (ret) |
3950 | return ret; | 4058 | return ret; |
3951 | topt->flags->val |= topt->opt->bit; | ||
3952 | break; | ||
3953 | |||
3954 | default: | ||
3955 | return -EINVAL; | ||
3956 | } | 4059 | } |
3957 | 4060 | ||
3958 | *ppos += cnt; | 4061 | *ppos += cnt; |
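
trace_options_write() collapses the old two-armed switch into a range check plus one call to the new __set_tracer_option() helper; the double negation normalizes the masked bit to 0 or 1 so the "already in the requested state" test becomes a single comparison. A small sketch of that idiom (illustrative only):

/* true if the bit's current state differs from the requested 0/1 value */
static bool needs_update(unsigned int flags, unsigned int bit, unsigned long val)
{
	return !!(flags & bit) != val;
}
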
@@ -4153,6 +4256,8 @@ static __init int tracer_init_debugfs(void) | |||
4153 | struct dentry *d_tracer; | 4256 | struct dentry *d_tracer; |
4154 | int cpu; | 4257 | int cpu; |
4155 | 4258 | ||
4259 | trace_access_lock_init(); | ||
4260 | |||
4156 | d_tracer = tracing_init_dentry(); | 4261 | d_tracer = tracing_init_dentry(); |
4157 | 4262 | ||
4158 | trace_create_file("tracing_enabled", 0644, d_tracer, | 4263 | trace_create_file("tracing_enabled", 0644, d_tracer, |
@@ -4176,10 +4281,10 @@ static __init int tracer_init_debugfs(void) | |||
4176 | #ifdef CONFIG_TRACER_MAX_TRACE | 4281 | #ifdef CONFIG_TRACER_MAX_TRACE |
4177 | trace_create_file("tracing_max_latency", 0644, d_tracer, | 4282 | trace_create_file("tracing_max_latency", 0644, d_tracer, |
4178 | &tracing_max_latency, &tracing_max_lat_fops); | 4283 | &tracing_max_latency, &tracing_max_lat_fops); |
4284 | #endif | ||
4179 | 4285 | ||
4180 | trace_create_file("tracing_thresh", 0644, d_tracer, | 4286 | trace_create_file("tracing_thresh", 0644, d_tracer, |
4181 | &tracing_thresh, &tracing_max_lat_fops); | 4287 | &tracing_thresh, &tracing_max_lat_fops); |
4182 | #endif | ||
4183 | 4288 | ||
4184 | trace_create_file("README", 0444, d_tracer, | 4289 | trace_create_file("README", 0444, d_tracer, |
4185 | NULL, &tracing_readme_fops); | 4290 | NULL, &tracing_readme_fops); |
@@ -4279,8 +4384,8 @@ trace_printk_seq(struct trace_seq *s) | |||
4279 | 4384 | ||
4280 | static void __ftrace_dump(bool disable_tracing) | 4385 | static void __ftrace_dump(bool disable_tracing) |
4281 | { | 4386 | { |
4282 | static raw_spinlock_t ftrace_dump_lock = | 4387 | static arch_spinlock_t ftrace_dump_lock = |
4283 | (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED; | 4388 | (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED; |
4284 | /* use static because iter can be a bit big for the stack */ | 4389 | /* use static because iter can be a bit big for the stack */ |
4285 | static struct trace_iterator iter; | 4390 | static struct trace_iterator iter; |
4286 | unsigned int old_userobj; | 4391 | unsigned int old_userobj; |
@@ -4290,7 +4395,7 @@ static void __ftrace_dump(bool disable_tracing) | |||
4290 | 4395 | ||
4291 | /* only one dump */ | 4396 | /* only one dump */ |
4292 | local_irq_save(flags); | 4397 | local_irq_save(flags); |
4293 | __raw_spin_lock(&ftrace_dump_lock); | 4398 | arch_spin_lock(&ftrace_dump_lock); |
4294 | if (dump_ran) | 4399 | if (dump_ran) |
4295 | goto out; | 4400 | goto out; |
4296 | 4401 | ||
@@ -4365,7 +4470,7 @@ static void __ftrace_dump(bool disable_tracing) | |||
4365 | } | 4470 | } |
4366 | 4471 | ||
4367 | out: | 4472 | out: |
4368 | __raw_spin_unlock(&ftrace_dump_lock); | 4473 | arch_spin_unlock(&ftrace_dump_lock); |
4369 | local_irq_restore(flags); | 4474 | local_irq_restore(flags); |
4370 | } | 4475 | } |
4371 | 4476 | ||
@@ -4387,9 +4492,6 @@ __init static int tracer_alloc_buffers(void) | |||
4387 | if (!alloc_cpumask_var(&tracing_cpumask, GFP_KERNEL)) | 4492 | if (!alloc_cpumask_var(&tracing_cpumask, GFP_KERNEL)) |
4388 | goto out_free_buffer_mask; | 4493 | goto out_free_buffer_mask; |
4389 | 4494 | ||
4390 | if (!zalloc_cpumask_var(&tracing_reader_cpumask, GFP_KERNEL)) | ||
4391 | goto out_free_tracing_cpumask; | ||
4392 | |||
4393 | /* To save memory, keep the ring buffer size to its minimum */ | 4495 | /* To save memory, keep the ring buffer size to its minimum */ |
4394 | if (ring_buffer_expanded) | 4496 | if (ring_buffer_expanded) |
4395 | ring_buf_size = trace_buf_size; | 4497 | ring_buf_size = trace_buf_size; |
@@ -4426,7 +4528,7 @@ __init static int tracer_alloc_buffers(void) | |||
4426 | /* Allocate the first page for all buffers */ | 4528 | /* Allocate the first page for all buffers */ |
4427 | for_each_tracing_cpu(i) { | 4529 | for_each_tracing_cpu(i) { |
4428 | global_trace.data[i] = &per_cpu(global_trace_cpu, i); | 4530 | global_trace.data[i] = &per_cpu(global_trace_cpu, i); |
4429 | max_tr.data[i] = &per_cpu(max_data, i); | 4531 | max_tr.data[i] = &per_cpu(max_tr_data, i); |
4430 | } | 4532 | } |
4431 | 4533 | ||
4432 | trace_init_cmdlines(); | 4534 | trace_init_cmdlines(); |
@@ -4447,8 +4549,6 @@ __init static int tracer_alloc_buffers(void) | |||
4447 | return 0; | 4549 | return 0; |
4448 | 4550 | ||
4449 | out_free_cpumask: | 4551 | out_free_cpumask: |
4450 | free_cpumask_var(tracing_reader_cpumask); | ||
4451 | out_free_tracing_cpumask: | ||
4452 | free_cpumask_var(tracing_cpumask); | 4552 | free_cpumask_var(tracing_cpumask); |
4453 | out_free_buffer_mask: | 4553 | out_free_buffer_mask: |
4454 | free_cpumask_var(tracing_buffer_mask); | 4554 | free_cpumask_var(tracing_buffer_mask); |