Diffstat (limited to 'kernel/trace/trace.c')
-rw-r--r-- | kernel/trace/trace.c | 637 |
1 file changed, 400 insertions(+), 237 deletions(-)
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 0df1b0f2cb9e..dc53ecb80589 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -17,7 +17,6 @@ | |||
17 | #include <linux/writeback.h> | 17 | #include <linux/writeback.h> |
18 | #include <linux/kallsyms.h> | 18 | #include <linux/kallsyms.h> |
19 | #include <linux/seq_file.h> | 19 | #include <linux/seq_file.h> |
20 | #include <linux/smp_lock.h> | ||
21 | #include <linux/notifier.h> | 20 | #include <linux/notifier.h> |
22 | #include <linux/irqflags.h> | 21 | #include <linux/irqflags.h> |
23 | #include <linux/debugfs.h> | 22 | #include <linux/debugfs.h> |
@@ -32,10 +31,11 @@ | |||
32 | #include <linux/splice.h> | 31 | #include <linux/splice.h> |
33 | #include <linux/kdebug.h> | 32 | #include <linux/kdebug.h> |
34 | #include <linux/string.h> | 33 | #include <linux/string.h> |
34 | #include <linux/rwsem.h> | ||
35 | #include <linux/slab.h> | ||
35 | #include <linux/ctype.h> | 36 | #include <linux/ctype.h> |
36 | #include <linux/init.h> | 37 | #include <linux/init.h> |
37 | #include <linux/poll.h> | 38 | #include <linux/poll.h> |
38 | #include <linux/gfp.h> | ||
39 | #include <linux/fs.h> | 39 | #include <linux/fs.h> |
40 | 40 | ||
41 | #include "trace.h" | 41 | #include "trace.h" |
@@ -91,22 +91,16 @@ DEFINE_PER_CPU(int, ftrace_cpu_disabled); | |||
91 | static inline void ftrace_disable_cpu(void) | 91 | static inline void ftrace_disable_cpu(void) |
92 | { | 92 | { |
93 | preempt_disable(); | 93 | preempt_disable(); |
94 | __this_cpu_inc(per_cpu_var(ftrace_cpu_disabled)); | 94 | __this_cpu_inc(ftrace_cpu_disabled); |
95 | } | 95 | } |
96 | 96 | ||
97 | static inline void ftrace_enable_cpu(void) | 97 | static inline void ftrace_enable_cpu(void) |
98 | { | 98 | { |
99 | __this_cpu_dec(per_cpu_var(ftrace_cpu_disabled)); | 99 | __this_cpu_dec(ftrace_cpu_disabled); |
100 | preempt_enable(); | 100 | preempt_enable(); |
101 | } | 101 | } |
102 | 102 | ||
103 | static cpumask_var_t __read_mostly tracing_buffer_mask; | 103 | cpumask_var_t __read_mostly tracing_buffer_mask; |
104 | |||
105 | /* Define which cpu buffers are currently read in trace_pipe */ | ||
106 | static cpumask_var_t tracing_reader_cpumask; | ||
107 | |||
108 | #define for_each_tracing_cpu(cpu) \ | ||
109 | for_each_cpu(cpu, tracing_buffer_mask) | ||
110 | 104 | ||
111 | /* | 105 | /* |
112 | * ftrace_dump_on_oops - variable to dump ftrace buffer on oops | 106 | * ftrace_dump_on_oops - variable to dump ftrace buffer on oops |
@@ -119,9 +113,12 @@ static cpumask_var_t tracing_reader_cpumask; | |||
119 | * | 113 | * |
120 | * It is default off, but you can enable it with either specifying | 114 | * It is default off, but you can enable it with either specifying |
121 | * "ftrace_dump_on_oops" in the kernel command line, or setting | 115 | * "ftrace_dump_on_oops" in the kernel command line, or setting |
122 | * /proc/sys/kernel/ftrace_dump_on_oops to true. | 116 | * /proc/sys/kernel/ftrace_dump_on_oops |
117 | * Set 1 if you want to dump buffers of all CPUs | ||
118 | * Set 2 if you want to dump the buffer of the CPU that triggered oops | ||
123 | */ | 119 | */ |
124 | int ftrace_dump_on_oops; | 120 | |
121 | enum ftrace_dump_mode ftrace_dump_on_oops; | ||
125 | 122 | ||
126 | static int tracing_set_tracer(const char *buf); | 123 | static int tracing_set_tracer(const char *buf); |
127 | 124 | ||
@@ -141,8 +138,17 @@ __setup("ftrace=", set_cmdline_ftrace); | |||
141 | 138 | ||
142 | static int __init set_ftrace_dump_on_oops(char *str) | 139 | static int __init set_ftrace_dump_on_oops(char *str) |
143 | { | 140 | { |
144 | ftrace_dump_on_oops = 1; | 141 | if (*str++ != '=' || !*str) { |
145 | return 1; | 142 | ftrace_dump_on_oops = DUMP_ALL; |
143 | return 1; | ||
144 | } | ||
145 | |||
146 | if (!strcmp("orig_cpu", str)) { | ||
147 | ftrace_dump_on_oops = DUMP_ORIG; | ||
148 | return 1; | ||
149 | } | ||
150 | |||
151 | return 0; | ||
146 | } | 152 | } |
147 | __setup("ftrace_dump_on_oops", set_ftrace_dump_on_oops); | 153 | __setup("ftrace_dump_on_oops", set_ftrace_dump_on_oops); |
148 | 154 | ||
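The hunk above extends the boot parameter so that a bare "ftrace_dump_on_oops" selects dumping every CPU's buffer while "ftrace_dump_on_oops=orig_cpu" dumps only the CPU that triggered the oops. A minimal userspace model of that parsing, with an assumed enum layout standing in for ftrace_dump_mode:

#include <stdio.h>
#include <string.h>

enum dump_mode { DUMP_NONE, DUMP_ALL, DUMP_ORIG };      /* assumed layout */

/* str is what follows the matched "ftrace_dump_on_oops" prefix:
 * "" for the bare flag, "=orig_cpu" for the per-cpu variant. */
static int parse_dump_on_oops(const char *str, enum dump_mode *mode)
{
        if (*str++ != '=' || !*str) {
                *mode = DUMP_ALL;
                return 1;
        }
        if (!strcmp(str, "orig_cpu")) {
                *mode = DUMP_ORIG;
                return 1;
        }
        return 0;                       /* unknown suffix: reject */
}

int main(void)
{
        enum dump_mode m;

        if (parse_dump_on_oops("=orig_cpu", &m))
                printf("mode %d\n", m); /* prints "mode 2" (DUMP_ORIG) */
        return 0;
}
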
@@ -243,19 +249,98 @@ static struct tracer *current_trace __read_mostly; | |||
243 | 249 | ||
244 | /* | 250 | /* |
245 | * trace_types_lock is used to protect the trace_types list. | 251 | * trace_types_lock is used to protect the trace_types list. |
246 | * This lock is also used to keep user access serialized. | ||
247 | * Accesses from userspace will grab this lock while userspace | ||
248 | * activities happen inside the kernel. | ||
249 | */ | 252 | */ |
250 | static DEFINE_MUTEX(trace_types_lock); | 253 | static DEFINE_MUTEX(trace_types_lock); |
251 | 254 | ||
255 | /* | ||
256 | * serialize the access of the ring buffer | ||
257 | * | ||
258 | * ring buffer serializes readers, but it is low level protection. | ||
259 | * The validity of the events (which returns by ring_buffer_peek() ..etc) | ||
260 | * are not protected by ring buffer. | ||
261 | * | ||
262 | * The content of events may become garbage if we allow other process consumes | ||
263 | * these events concurrently: | ||
264 | * A) the page of the consumed events may become a normal page | ||
265 | * (not reader page) in ring buffer, and this page will be rewrited | ||
266 | * by events producer. | ||
267 | * B) The page of the consumed events may become a page for splice_read, | ||
268 | * and this page will be returned to system. | ||
269 | * | ||
270 | * These primitives allow multi process access to different cpu ring buffer | ||
271 | * concurrently. | ||
272 | * | ||
273 | * These primitives don't distinguish read-only and read-consume access. | ||
274 | * Multi read-only access are also serialized. | ||
275 | */ | ||
276 | |||
277 | #ifdef CONFIG_SMP | ||
278 | static DECLARE_RWSEM(all_cpu_access_lock); | ||
279 | static DEFINE_PER_CPU(struct mutex, cpu_access_lock); | ||
280 | |||
281 | static inline void trace_access_lock(int cpu) | ||
282 | { | ||
283 | if (cpu == TRACE_PIPE_ALL_CPU) { | ||
284 | /* gain it for accessing the whole ring buffer. */ | ||
285 | down_write(&all_cpu_access_lock); | ||
286 | } else { | ||
287 | /* gain it for accessing a cpu ring buffer. */ | ||
288 | |||
289 | /* Firstly block other trace_access_lock(TRACE_PIPE_ALL_CPU). */ | ||
290 | down_read(&all_cpu_access_lock); | ||
291 | |||
292 | /* Secondly block other access to this @cpu ring buffer. */ | ||
293 | mutex_lock(&per_cpu(cpu_access_lock, cpu)); | ||
294 | } | ||
295 | } | ||
296 | |||
297 | static inline void trace_access_unlock(int cpu) | ||
298 | { | ||
299 | if (cpu == TRACE_PIPE_ALL_CPU) { | ||
300 | up_write(&all_cpu_access_lock); | ||
301 | } else { | ||
302 | mutex_unlock(&per_cpu(cpu_access_lock, cpu)); | ||
303 | up_read(&all_cpu_access_lock); | ||
304 | } | ||
305 | } | ||
306 | |||
307 | static inline void trace_access_lock_init(void) | ||
308 | { | ||
309 | int cpu; | ||
310 | |||
311 | for_each_possible_cpu(cpu) | ||
312 | mutex_init(&per_cpu(cpu_access_lock, cpu)); | ||
313 | } | ||
314 | |||
315 | #else | ||
316 | |||
317 | static DEFINE_MUTEX(access_lock); | ||
318 | |||
319 | static inline void trace_access_lock(int cpu) | ||
320 | { | ||
321 | (void)cpu; | ||
322 | mutex_lock(&access_lock); | ||
323 | } | ||
324 | |||
325 | static inline void trace_access_unlock(int cpu) | ||
326 | { | ||
327 | (void)cpu; | ||
328 | mutex_unlock(&access_lock); | ||
329 | } | ||
330 | |||
331 | static inline void trace_access_lock_init(void) | ||
332 | { | ||
333 | } | ||
334 | |||
335 | #endif | ||
336 | |||
252 | /* trace_wait is a waitqueue for tasks blocked on trace_poll */ | 337 | /* trace_wait is a waitqueue for tasks blocked on trace_poll */ |
253 | static DECLARE_WAIT_QUEUE_HEAD(trace_wait); | 338 | static DECLARE_WAIT_QUEUE_HEAD(trace_wait); |
254 | 339 | ||
255 | /* trace_flags holds trace_options default values */ | 340 | /* trace_flags holds trace_options default values */ |
256 | unsigned long trace_flags = TRACE_ITER_PRINT_PARENT | TRACE_ITER_PRINTK | | 341 | unsigned long trace_flags = TRACE_ITER_PRINT_PARENT | TRACE_ITER_PRINTK | |
257 | TRACE_ITER_ANNOTATE | TRACE_ITER_CONTEXT_INFO | TRACE_ITER_SLEEP_TIME | | 342 | TRACE_ITER_ANNOTATE | TRACE_ITER_CONTEXT_INFO | TRACE_ITER_SLEEP_TIME | |
258 | TRACE_ITER_GRAPH_TIME; | 343 | TRACE_ITER_GRAPH_TIME | TRACE_ITER_RECORD_CMD; |
259 | 344 | ||
260 | static int trace_stop_count; | 345 | static int trace_stop_count; |
261 | static DEFINE_SPINLOCK(tracing_start_lock); | 346 | static DEFINE_SPINLOCK(tracing_start_lock); |
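The new trace_access_lock()/trace_access_unlock() pair above combines a global rwsem with one mutex per CPU: a whole-buffer reader (TRACE_PIPE_ALL_CPU) takes the rwsem for write and excludes everyone, while a single-CPU reader takes it for read and then that CPU's mutex, so readers of different CPUs can run in parallel. A compact userspace sketch of the same pattern, using pthreads and hypothetical names rather than the kernel primitives:

#include <pthread.h>

#define NR_CPUS  4
#define ALL_CPUS -1     /* stands in for TRACE_PIPE_ALL_CPU */

static pthread_rwlock_t all_cpu_access_lock = PTHREAD_RWLOCK_INITIALIZER;
static pthread_mutex_t  cpu_access_lock[NR_CPUS] = {
        PTHREAD_MUTEX_INITIALIZER, PTHREAD_MUTEX_INITIALIZER,
        PTHREAD_MUTEX_INITIALIZER, PTHREAD_MUTEX_INITIALIZER,
};

static void access_lock(int cpu)
{
        if (cpu == ALL_CPUS) {
                pthread_rwlock_wrlock(&all_cpu_access_lock);   /* exclude everyone */
        } else {
                pthread_rwlock_rdlock(&all_cpu_access_lock);    /* block ALL_CPUS access */
                pthread_mutex_lock(&cpu_access_lock[cpu]);      /* serialize this cpu */
        }
}

static void access_unlock(int cpu)
{
        if (cpu == ALL_CPUS) {
                pthread_rwlock_unlock(&all_cpu_access_lock);
        } else {
                pthread_mutex_unlock(&cpu_access_lock[cpu]);
                pthread_rwlock_unlock(&all_cpu_access_lock);
        }
}

int main(void)
{
        access_lock(2);         /* a per-cpu reader */
        access_unlock(2);
        access_lock(ALL_CPUS);  /* a whole-buffer reader */
        access_unlock(ALL_CPUS);
        return 0;
}
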
@@ -297,6 +382,21 @@ static int __init set_buf_size(char *str) | |||
297 | } | 382 | } |
298 | __setup("trace_buf_size=", set_buf_size); | 383 | __setup("trace_buf_size=", set_buf_size); |
299 | 384 | ||
385 | static int __init set_tracing_thresh(char *str) | ||
386 | { | ||
387 | unsigned long threshhold; | ||
388 | int ret; | ||
389 | |||
390 | if (!str) | ||
391 | return 0; | ||
392 | ret = strict_strtoul(str, 0, &threshhold); | ||
393 | if (ret < 0) | ||
394 | return 0; | ||
395 | tracing_thresh = threshhold * 1000; | ||
396 | return 1; | ||
397 | } | ||
398 | __setup("tracing_thresh=", set_tracing_thresh); | ||
399 | |||
300 | unsigned long nsecs_to_usecs(unsigned long nsecs) | 400 | unsigned long nsecs_to_usecs(unsigned long nsecs) |
301 | { | 401 | { |
302 | return nsecs / 1000; | 402 | return nsecs / 1000; |
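set_tracing_thresh() above takes the boot-time value in microseconds and stores it in nanoseconds, so booting with tracing_thresh=100 arms a 100 usec threshold. A trivial userspace model of the conversion (strtoul standing in for strict_strtoul):

#include <stdio.h>
#include <stdlib.h>

int main(void)
{
        const char *arg = "100";                        /* tracing_thresh=100 */
        unsigned long thresh = strtoul(arg, NULL, 0) * 1000;

        printf("tracing_thresh = %lu ns\n", thresh);    /* 100000 ns */
        return 0;
}
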
@@ -324,6 +424,7 @@ static const char *trace_options[] = { | |||
324 | "latency-format", | 424 | "latency-format", |
325 | "sleep-time", | 425 | "sleep-time", |
326 | "graph-time", | 426 | "graph-time", |
427 | "record-cmd", | ||
327 | NULL | 428 | NULL |
328 | }; | 429 | }; |
329 | 430 | ||
@@ -502,9 +603,10 @@ static ssize_t trace_seq_to_buffer(struct trace_seq *s, void *buf, size_t cnt) | |||
502 | static arch_spinlock_t ftrace_max_lock = | 603 | static arch_spinlock_t ftrace_max_lock = |
503 | (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED; | 604 | (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED; |
504 | 605 | ||
606 | unsigned long __read_mostly tracing_thresh; | ||
607 | |||
505 | #ifdef CONFIG_TRACER_MAX_TRACE | 608 | #ifdef CONFIG_TRACER_MAX_TRACE |
506 | unsigned long __read_mostly tracing_max_latency; | 609 | unsigned long __read_mostly tracing_max_latency; |
507 | unsigned long __read_mostly tracing_thresh; | ||
508 | 610 | ||
509 | /* | 611 | /* |
510 | * Copy the new maximum trace into the separate maximum-trace | 612 | * Copy the new maximum trace into the separate maximum-trace |
@@ -515,7 +617,7 @@ static void | |||
515 | __update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu) | 617 | __update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu) |
516 | { | 618 | { |
517 | struct trace_array_cpu *data = tr->data[cpu]; | 619 | struct trace_array_cpu *data = tr->data[cpu]; |
518 | struct trace_array_cpu *max_data = tr->data[cpu]; | 620 | struct trace_array_cpu *max_data; |
519 | 621 | ||
520 | max_tr.cpu = cpu; | 622 | max_tr.cpu = cpu; |
521 | max_tr.time_start = data->preempt_timestamp; | 623 | max_tr.time_start = data->preempt_timestamp; |
@@ -525,7 +627,7 @@ __update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu) | |||
525 | max_data->critical_start = data->critical_start; | 627 | max_data->critical_start = data->critical_start; |
526 | max_data->critical_end = data->critical_end; | 628 | max_data->critical_end = data->critical_end; |
527 | 629 | ||
528 | memcpy(data->comm, tsk->comm, TASK_COMM_LEN); | 630 | memcpy(max_data->comm, tsk->comm, TASK_COMM_LEN); |
529 | max_data->pid = tsk->pid; | 631 | max_data->pid = tsk->pid; |
530 | max_data->uid = task_uid(tsk); | 632 | max_data->uid = task_uid(tsk); |
531 | max_data->nice = tsk->static_prio - 20 - MAX_RT_PRIO; | 633 | max_data->nice = tsk->static_prio - 20 - MAX_RT_PRIO; |
@@ -554,6 +656,10 @@ update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu) | |||
554 | return; | 656 | return; |
555 | 657 | ||
556 | WARN_ON_ONCE(!irqs_disabled()); | 658 | WARN_ON_ONCE(!irqs_disabled()); |
659 | if (!current_trace->use_max_tr) { | ||
660 | WARN_ON_ONCE(1); | ||
661 | return; | ||
662 | } | ||
557 | arch_spin_lock(&ftrace_max_lock); | 663 | arch_spin_lock(&ftrace_max_lock); |
558 | 664 | ||
559 | tr->buffer = max_tr.buffer; | 665 | tr->buffer = max_tr.buffer; |
@@ -580,6 +686,11 @@ update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu) | |||
580 | return; | 686 | return; |
581 | 687 | ||
582 | WARN_ON_ONCE(!irqs_disabled()); | 688 | WARN_ON_ONCE(!irqs_disabled()); |
689 | if (!current_trace->use_max_tr) { | ||
690 | WARN_ON_ONCE(1); | ||
691 | return; | ||
692 | } | ||
693 | |||
583 | arch_spin_lock(&ftrace_max_lock); | 694 | arch_spin_lock(&ftrace_max_lock); |
584 | 695 | ||
585 | ftrace_disable_cpu(); | 696 | ftrace_disable_cpu(); |
@@ -624,18 +735,11 @@ __acquires(kernel_lock) | |||
624 | return -1; | 735 | return -1; |
625 | } | 736 | } |
626 | 737 | ||
627 | if (strlen(type->name) > MAX_TRACER_SIZE) { | 738 | if (strlen(type->name) >= MAX_TRACER_SIZE) { |
628 | pr_info("Tracer has a name longer than %d\n", MAX_TRACER_SIZE); | 739 | pr_info("Tracer has a name longer than %d\n", MAX_TRACER_SIZE); |
629 | return -1; | 740 | return -1; |
630 | } | 741 | } |
631 | 742 | ||
632 | /* | ||
633 | * When this gets called we hold the BKL which means that | ||
634 | * preemption is disabled. Various trace selftests however | ||
635 | * need to disable and enable preemption for successful tests. | ||
636 | * So we drop the BKL here and grab it after the tests again. | ||
637 | */ | ||
638 | unlock_kernel(); | ||
639 | mutex_lock(&trace_types_lock); | 743 | mutex_lock(&trace_types_lock); |
640 | 744 | ||
641 | tracing_selftest_running = true; | 745 | tracing_selftest_running = true; |
@@ -717,7 +821,6 @@ __acquires(kernel_lock) | |||
717 | #endif | 821 | #endif |
718 | 822 | ||
719 | out_unlock: | 823 | out_unlock: |
720 | lock_kernel(); | ||
721 | return ret; | 824 | return ret; |
722 | } | 825 | } |
723 | 826 | ||
@@ -747,10 +850,10 @@ out: | |||
747 | mutex_unlock(&trace_types_lock); | 850 | mutex_unlock(&trace_types_lock); |
748 | } | 851 | } |
749 | 852 | ||
750 | static void __tracing_reset(struct trace_array *tr, int cpu) | 853 | static void __tracing_reset(struct ring_buffer *buffer, int cpu) |
751 | { | 854 | { |
752 | ftrace_disable_cpu(); | 855 | ftrace_disable_cpu(); |
753 | ring_buffer_reset_cpu(tr->buffer, cpu); | 856 | ring_buffer_reset_cpu(buffer, cpu); |
754 | ftrace_enable_cpu(); | 857 | ftrace_enable_cpu(); |
755 | } | 858 | } |
756 | 859 | ||
@@ -762,7 +865,7 @@ void tracing_reset(struct trace_array *tr, int cpu) | |||
762 | 865 | ||
763 | /* Make sure all commits have finished */ | 866 | /* Make sure all commits have finished */ |
764 | synchronize_sched(); | 867 | synchronize_sched(); |
765 | __tracing_reset(tr, cpu); | 868 | __tracing_reset(buffer, cpu); |
766 | 869 | ||
767 | ring_buffer_record_enable(buffer); | 870 | ring_buffer_record_enable(buffer); |
768 | } | 871 | } |
@@ -780,7 +883,7 @@ void tracing_reset_online_cpus(struct trace_array *tr) | |||
780 | tr->time_start = ftrace_now(tr->cpu); | 883 | tr->time_start = ftrace_now(tr->cpu); |
781 | 884 | ||
782 | for_each_online_cpu(cpu) | 885 | for_each_online_cpu(cpu) |
783 | __tracing_reset(tr, cpu); | 886 | __tracing_reset(buffer, cpu); |
784 | 887 | ||
785 | ring_buffer_record_enable(buffer); | 888 | ring_buffer_record_enable(buffer); |
786 | } | 889 | } |
@@ -857,6 +960,8 @@ void tracing_start(void) | |||
857 | goto out; | 960 | goto out; |
858 | } | 961 | } |
859 | 962 | ||
963 | /* Prevent the buffers from switching */ | ||
964 | arch_spin_lock(&ftrace_max_lock); | ||
860 | 965 | ||
861 | buffer = global_trace.buffer; | 966 | buffer = global_trace.buffer; |
862 | if (buffer) | 967 | if (buffer) |
@@ -866,6 +971,8 @@ void tracing_start(void) | |||
866 | if (buffer) | 971 | if (buffer) |
867 | ring_buffer_record_enable(buffer); | 972 | ring_buffer_record_enable(buffer); |
868 | 973 | ||
974 | arch_spin_unlock(&ftrace_max_lock); | ||
975 | |||
869 | ftrace_start(); | 976 | ftrace_start(); |
870 | out: | 977 | out: |
871 | spin_unlock_irqrestore(&tracing_start_lock, flags); | 978 | spin_unlock_irqrestore(&tracing_start_lock, flags); |
@@ -887,6 +994,9 @@ void tracing_stop(void) | |||
887 | if (trace_stop_count++) | 994 | if (trace_stop_count++) |
888 | goto out; | 995 | goto out; |
889 | 996 | ||
997 | /* Prevent the buffers from switching */ | ||
998 | arch_spin_lock(&ftrace_max_lock); | ||
999 | |||
890 | buffer = global_trace.buffer; | 1000 | buffer = global_trace.buffer; |
891 | if (buffer) | 1001 | if (buffer) |
892 | ring_buffer_record_disable(buffer); | 1002 | ring_buffer_record_disable(buffer); |
@@ -895,6 +1005,8 @@ void tracing_stop(void) | |||
895 | if (buffer) | 1005 | if (buffer) |
896 | ring_buffer_record_disable(buffer); | 1006 | ring_buffer_record_disable(buffer); |
897 | 1007 | ||
1008 | arch_spin_unlock(&ftrace_max_lock); | ||
1009 | |||
898 | out: | 1010 | out: |
899 | spin_unlock_irqrestore(&tracing_start_lock, flags); | 1011 | spin_unlock_irqrestore(&tracing_start_lock, flags); |
900 | } | 1012 | } |
@@ -951,6 +1063,11 @@ void trace_find_cmdline(int pid, char comm[]) | |||
951 | return; | 1063 | return; |
952 | } | 1064 | } |
953 | 1065 | ||
1066 | if (WARN_ON_ONCE(pid < 0)) { | ||
1067 | strcpy(comm, "<XXX>"); | ||
1068 | return; | ||
1069 | } | ||
1070 | |||
954 | if (pid > PID_MAX_DEFAULT) { | 1071 | if (pid > PID_MAX_DEFAULT) { |
955 | strcpy(comm, "<...>"); | 1072 | strcpy(comm, "<...>"); |
956 | return; | 1073 | return; |
@@ -1084,7 +1201,7 @@ trace_function(struct trace_array *tr, | |||
1084 | struct ftrace_entry *entry; | 1201 | struct ftrace_entry *entry; |
1085 | 1202 | ||
1086 | /* If we are reading the ring buffer, don't trace */ | 1203 | /* If we are reading the ring buffer, don't trace */ |
1087 | if (unlikely(__this_cpu_read(per_cpu_var(ftrace_cpu_disabled)))) | 1204 | if (unlikely(__this_cpu_read(ftrace_cpu_disabled))) |
1088 | return; | 1205 | return; |
1089 | 1206 | ||
1090 | event = trace_buffer_lock_reserve(buffer, TRACE_FN, sizeof(*entry), | 1207 | event = trace_buffer_lock_reserve(buffer, TRACE_FN, sizeof(*entry), |
@@ -1166,6 +1283,8 @@ void trace_dump_stack(void) | |||
1166 | __ftrace_trace_stack(global_trace.buffer, flags, 3, preempt_count()); | 1283 | __ftrace_trace_stack(global_trace.buffer, flags, 3, preempt_count()); |
1167 | } | 1284 | } |
1168 | 1285 | ||
1286 | static DEFINE_PER_CPU(int, user_stack_count); | ||
1287 | |||
1169 | void | 1288 | void |
1170 | ftrace_trace_userstack(struct ring_buffer *buffer, unsigned long flags, int pc) | 1289 | ftrace_trace_userstack(struct ring_buffer *buffer, unsigned long flags, int pc) |
1171 | { | 1290 | { |
@@ -1177,10 +1296,27 @@ ftrace_trace_userstack(struct ring_buffer *buffer, unsigned long flags, int pc) | |||
1177 | if (!(trace_flags & TRACE_ITER_USERSTACKTRACE)) | 1296 | if (!(trace_flags & TRACE_ITER_USERSTACKTRACE)) |
1178 | return; | 1297 | return; |
1179 | 1298 | ||
1299 | /* | ||
1300 | * NMIs can not handle page faults, even with fix ups. | ||
1301 | * The save user stack can (and often does) fault. | ||
1302 | */ | ||
1303 | if (unlikely(in_nmi())) | ||
1304 | return; | ||
1305 | |||
1306 | /* | ||
1307 | * prevent recursion, since the user stack tracing may | ||
1308 | * trigger other kernel events. | ||
1309 | */ | ||
1310 | preempt_disable(); | ||
1311 | if (__this_cpu_read(user_stack_count)) | ||
1312 | goto out; | ||
1313 | |||
1314 | __this_cpu_inc(user_stack_count); | ||
1315 | |||
1180 | event = trace_buffer_lock_reserve(buffer, TRACE_USER_STACK, | 1316 | event = trace_buffer_lock_reserve(buffer, TRACE_USER_STACK, |
1181 | sizeof(*entry), flags, pc); | 1317 | sizeof(*entry), flags, pc); |
1182 | if (!event) | 1318 | if (!event) |
1183 | return; | 1319 | goto out_drop_count; |
1184 | entry = ring_buffer_event_data(event); | 1320 | entry = ring_buffer_event_data(event); |
1185 | 1321 | ||
1186 | entry->tgid = current->tgid; | 1322 | entry->tgid = current->tgid; |
@@ -1194,6 +1330,11 @@ ftrace_trace_userstack(struct ring_buffer *buffer, unsigned long flags, int pc) | |||
1194 | save_stack_trace_user(&trace); | 1330 | save_stack_trace_user(&trace); |
1195 | if (!filter_check_discard(call, entry, buffer, event)) | 1331 | if (!filter_check_discard(call, entry, buffer, event)) |
1196 | ring_buffer_unlock_commit(buffer, event); | 1332 | ring_buffer_unlock_commit(buffer, event); |
1333 | |||
1334 | out_drop_count: | ||
1335 | __this_cpu_dec(user_stack_count); | ||
1336 | out: | ||
1337 | preempt_enable(); | ||
1197 | } | 1338 | } |
1198 | 1339 | ||
1199 | #ifdef UNUSED | 1340 | #ifdef UNUSED |
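The additions above guard ftrace_trace_userstack() against two hazards: it bails out in NMI context because saving a user stack can fault, and it uses a per-CPU counter (with preemption disabled) so that any event triggered while saving the stack cannot recurse back in. A userspace analogue of the re-entrancy guard, with a thread-local counter in place of the per-CPU user_stack_count:

#include <stdio.h>

static __thread int in_user_stack;      /* plays the role of user_stack_count */

static void record_user_stack(void)
{
        if (in_user_stack)
                return;                 /* already saving: don't recurse */
        in_user_stack++;

        /* ... anything triggered here sees in_user_stack != 0 ... */
        puts("saving user stack");

        in_user_stack--;
}

int main(void)
{
        record_user_stack();
        return 0;
}
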
@@ -1205,61 +1346,6 @@ static void __trace_userstack(struct trace_array *tr, unsigned long flags) | |||
1205 | 1346 | ||
1206 | #endif /* CONFIG_STACKTRACE */ | 1347 | #endif /* CONFIG_STACKTRACE */ |
1207 | 1348 | ||
1208 | static void | ||
1209 | ftrace_trace_special(void *__tr, | ||
1210 | unsigned long arg1, unsigned long arg2, unsigned long arg3, | ||
1211 | int pc) | ||
1212 | { | ||
1213 | struct ftrace_event_call *call = &event_special; | ||
1214 | struct ring_buffer_event *event; | ||
1215 | struct trace_array *tr = __tr; | ||
1216 | struct ring_buffer *buffer = tr->buffer; | ||
1217 | struct special_entry *entry; | ||
1218 | |||
1219 | event = trace_buffer_lock_reserve(buffer, TRACE_SPECIAL, | ||
1220 | sizeof(*entry), 0, pc); | ||
1221 | if (!event) | ||
1222 | return; | ||
1223 | entry = ring_buffer_event_data(event); | ||
1224 | entry->arg1 = arg1; | ||
1225 | entry->arg2 = arg2; | ||
1226 | entry->arg3 = arg3; | ||
1227 | |||
1228 | if (!filter_check_discard(call, entry, buffer, event)) | ||
1229 | trace_buffer_unlock_commit(buffer, event, 0, pc); | ||
1230 | } | ||
1231 | |||
1232 | void | ||
1233 | __trace_special(void *__tr, void *__data, | ||
1234 | unsigned long arg1, unsigned long arg2, unsigned long arg3) | ||
1235 | { | ||
1236 | ftrace_trace_special(__tr, arg1, arg2, arg3, preempt_count()); | ||
1237 | } | ||
1238 | |||
1239 | void | ||
1240 | ftrace_special(unsigned long arg1, unsigned long arg2, unsigned long arg3) | ||
1241 | { | ||
1242 | struct trace_array *tr = &global_trace; | ||
1243 | struct trace_array_cpu *data; | ||
1244 | unsigned long flags; | ||
1245 | int cpu; | ||
1246 | int pc; | ||
1247 | |||
1248 | if (tracing_disabled) | ||
1249 | return; | ||
1250 | |||
1251 | pc = preempt_count(); | ||
1252 | local_irq_save(flags); | ||
1253 | cpu = raw_smp_processor_id(); | ||
1254 | data = tr->data[cpu]; | ||
1255 | |||
1256 | if (likely(atomic_inc_return(&data->disabled) == 1)) | ||
1257 | ftrace_trace_special(tr, arg1, arg2, arg3, pc); | ||
1258 | |||
1259 | atomic_dec(&data->disabled); | ||
1260 | local_irq_restore(flags); | ||
1261 | } | ||
1262 | |||
1263 | /** | 1349 | /** |
1264 | * trace_vbprintk - write binary msg to tracing buffer | 1350 | * trace_vbprintk - write binary msg to tracing buffer |
1265 | * | 1351 | * |
@@ -1278,7 +1364,6 @@ int trace_vbprintk(unsigned long ip, const char *fmt, va_list args) | |||
1278 | struct bprint_entry *entry; | 1364 | struct bprint_entry *entry; |
1279 | unsigned long flags; | 1365 | unsigned long flags; |
1280 | int disable; | 1366 | int disable; |
1281 | int resched; | ||
1282 | int cpu, len = 0, size, pc; | 1367 | int cpu, len = 0, size, pc; |
1283 | 1368 | ||
1284 | if (unlikely(tracing_selftest_running || tracing_disabled)) | 1369 | if (unlikely(tracing_selftest_running || tracing_disabled)) |
@@ -1288,7 +1373,7 @@ int trace_vbprintk(unsigned long ip, const char *fmt, va_list args) | |||
1288 | pause_graph_tracing(); | 1373 | pause_graph_tracing(); |
1289 | 1374 | ||
1290 | pc = preempt_count(); | 1375 | pc = preempt_count(); |
1291 | resched = ftrace_preempt_disable(); | 1376 | preempt_disable_notrace(); |
1292 | cpu = raw_smp_processor_id(); | 1377 | cpu = raw_smp_processor_id(); |
1293 | data = tr->data[cpu]; | 1378 | data = tr->data[cpu]; |
1294 | 1379 | ||
@@ -1315,8 +1400,10 @@ int trace_vbprintk(unsigned long ip, const char *fmt, va_list args) | |||
1315 | entry->fmt = fmt; | 1400 | entry->fmt = fmt; |
1316 | 1401 | ||
1317 | memcpy(entry->buf, trace_buf, sizeof(u32) * len); | 1402 | memcpy(entry->buf, trace_buf, sizeof(u32) * len); |
1318 | if (!filter_check_discard(call, entry, buffer, event)) | 1403 | if (!filter_check_discard(call, entry, buffer, event)) { |
1319 | ring_buffer_unlock_commit(buffer, event); | 1404 | ring_buffer_unlock_commit(buffer, event); |
1405 | ftrace_trace_stack(buffer, flags, 6, pc); | ||
1406 | } | ||
1320 | 1407 | ||
1321 | out_unlock: | 1408 | out_unlock: |
1322 | arch_spin_unlock(&trace_buf_lock); | 1409 | arch_spin_unlock(&trace_buf_lock); |
@@ -1324,7 +1411,7 @@ out_unlock: | |||
1324 | 1411 | ||
1325 | out: | 1412 | out: |
1326 | atomic_dec_return(&data->disabled); | 1413 | atomic_dec_return(&data->disabled); |
1327 | ftrace_preempt_enable(resched); | 1414 | preempt_enable_notrace(); |
1328 | unpause_graph_tracing(); | 1415 | unpause_graph_tracing(); |
1329 | 1416 | ||
1330 | return len; | 1417 | return len; |
@@ -1389,8 +1476,10 @@ int trace_array_vprintk(struct trace_array *tr, | |||
1389 | 1476 | ||
1390 | memcpy(&entry->buf, trace_buf, len); | 1477 | memcpy(&entry->buf, trace_buf, len); |
1391 | entry->buf[len] = '\0'; | 1478 | entry->buf[len] = '\0'; |
1392 | if (!filter_check_discard(call, entry, buffer, event)) | 1479 | if (!filter_check_discard(call, entry, buffer, event)) { |
1393 | ring_buffer_unlock_commit(buffer, event); | 1480 | ring_buffer_unlock_commit(buffer, event); |
1481 | ftrace_trace_stack(buffer, irq_flags, 6, pc); | ||
1482 | } | ||
1394 | 1483 | ||
1395 | out_unlock: | 1484 | out_unlock: |
1396 | arch_spin_unlock(&trace_buf_lock); | 1485 | arch_spin_unlock(&trace_buf_lock); |
@@ -1409,11 +1498,6 @@ int trace_vprintk(unsigned long ip, const char *fmt, va_list args) | |||
1409 | } | 1498 | } |
1410 | EXPORT_SYMBOL_GPL(trace_vprintk); | 1499 | EXPORT_SYMBOL_GPL(trace_vprintk); |
1411 | 1500 | ||
1412 | enum trace_file_type { | ||
1413 | TRACE_FILE_LAT_FMT = 1, | ||
1414 | TRACE_FILE_ANNOTATE = 2, | ||
1415 | }; | ||
1416 | |||
1417 | static void trace_iterator_increment(struct trace_iterator *iter) | 1501 | static void trace_iterator_increment(struct trace_iterator *iter) |
1418 | { | 1502 | { |
1419 | /* Don't allow ftrace to trace into the ring buffers */ | 1503 | /* Don't allow ftrace to trace into the ring buffers */ |
@@ -1427,7 +1511,8 @@ static void trace_iterator_increment(struct trace_iterator *iter) | |||
1427 | } | 1511 | } |
1428 | 1512 | ||
1429 | static struct trace_entry * | 1513 | static struct trace_entry * |
1430 | peek_next_entry(struct trace_iterator *iter, int cpu, u64 *ts) | 1514 | peek_next_entry(struct trace_iterator *iter, int cpu, u64 *ts, |
1515 | unsigned long *lost_events) | ||
1431 | { | 1516 | { |
1432 | struct ring_buffer_event *event; | 1517 | struct ring_buffer_event *event; |
1433 | struct ring_buffer_iter *buf_iter = iter->buffer_iter[cpu]; | 1518 | struct ring_buffer_iter *buf_iter = iter->buffer_iter[cpu]; |
@@ -1438,7 +1523,8 @@ peek_next_entry(struct trace_iterator *iter, int cpu, u64 *ts) | |||
1438 | if (buf_iter) | 1523 | if (buf_iter) |
1439 | event = ring_buffer_iter_peek(buf_iter, ts); | 1524 | event = ring_buffer_iter_peek(buf_iter, ts); |
1440 | else | 1525 | else |
1441 | event = ring_buffer_peek(iter->tr->buffer, cpu, ts); | 1526 | event = ring_buffer_peek(iter->tr->buffer, cpu, ts, |
1527 | lost_events); | ||
1442 | 1528 | ||
1443 | ftrace_enable_cpu(); | 1529 | ftrace_enable_cpu(); |
1444 | 1530 | ||
@@ -1446,10 +1532,12 @@ peek_next_entry(struct trace_iterator *iter, int cpu, u64 *ts) | |||
1446 | } | 1532 | } |
1447 | 1533 | ||
1448 | static struct trace_entry * | 1534 | static struct trace_entry * |
1449 | __find_next_entry(struct trace_iterator *iter, int *ent_cpu, u64 *ent_ts) | 1535 | __find_next_entry(struct trace_iterator *iter, int *ent_cpu, |
1536 | unsigned long *missing_events, u64 *ent_ts) | ||
1450 | { | 1537 | { |
1451 | struct ring_buffer *buffer = iter->tr->buffer; | 1538 | struct ring_buffer *buffer = iter->tr->buffer; |
1452 | struct trace_entry *ent, *next = NULL; | 1539 | struct trace_entry *ent, *next = NULL; |
1540 | unsigned long lost_events = 0, next_lost = 0; | ||
1453 | int cpu_file = iter->cpu_file; | 1541 | int cpu_file = iter->cpu_file; |
1454 | u64 next_ts = 0, ts; | 1542 | u64 next_ts = 0, ts; |
1455 | int next_cpu = -1; | 1543 | int next_cpu = -1; |
@@ -1462,7 +1550,7 @@ __find_next_entry(struct trace_iterator *iter, int *ent_cpu, u64 *ent_ts) | |||
1462 | if (cpu_file > TRACE_PIPE_ALL_CPU) { | 1550 | if (cpu_file > TRACE_PIPE_ALL_CPU) { |
1463 | if (ring_buffer_empty_cpu(buffer, cpu_file)) | 1551 | if (ring_buffer_empty_cpu(buffer, cpu_file)) |
1464 | return NULL; | 1552 | return NULL; |
1465 | ent = peek_next_entry(iter, cpu_file, ent_ts); | 1553 | ent = peek_next_entry(iter, cpu_file, ent_ts, missing_events); |
1466 | if (ent_cpu) | 1554 | if (ent_cpu) |
1467 | *ent_cpu = cpu_file; | 1555 | *ent_cpu = cpu_file; |
1468 | 1556 | ||
@@ -1474,7 +1562,7 @@ __find_next_entry(struct trace_iterator *iter, int *ent_cpu, u64 *ent_ts) | |||
1474 | if (ring_buffer_empty_cpu(buffer, cpu)) | 1562 | if (ring_buffer_empty_cpu(buffer, cpu)) |
1475 | continue; | 1563 | continue; |
1476 | 1564 | ||
1477 | ent = peek_next_entry(iter, cpu, &ts); | 1565 | ent = peek_next_entry(iter, cpu, &ts, &lost_events); |
1478 | 1566 | ||
1479 | /* | 1567 | /* |
1480 | * Pick the entry with the smallest timestamp: | 1568 | * Pick the entry with the smallest timestamp: |
@@ -1483,6 +1571,7 @@ __find_next_entry(struct trace_iterator *iter, int *ent_cpu, u64 *ent_ts) | |||
1483 | next = ent; | 1571 | next = ent; |
1484 | next_cpu = cpu; | 1572 | next_cpu = cpu; |
1485 | next_ts = ts; | 1573 | next_ts = ts; |
1574 | next_lost = lost_events; | ||
1486 | } | 1575 | } |
1487 | } | 1576 | } |
1488 | 1577 | ||
@@ -1492,6 +1581,9 @@ __find_next_entry(struct trace_iterator *iter, int *ent_cpu, u64 *ent_ts) | |||
1492 | if (ent_ts) | 1581 | if (ent_ts) |
1493 | *ent_ts = next_ts; | 1582 | *ent_ts = next_ts; |
1494 | 1583 | ||
1584 | if (missing_events) | ||
1585 | *missing_events = next_lost; | ||
1586 | |||
1495 | return next; | 1587 | return next; |
1496 | } | 1588 | } |
1497 | 1589 | ||
@@ -1499,13 +1591,14 @@ __find_next_entry(struct trace_iterator *iter, int *ent_cpu, u64 *ent_ts) | |||
1499 | struct trace_entry *trace_find_next_entry(struct trace_iterator *iter, | 1591 | struct trace_entry *trace_find_next_entry(struct trace_iterator *iter, |
1500 | int *ent_cpu, u64 *ent_ts) | 1592 | int *ent_cpu, u64 *ent_ts) |
1501 | { | 1593 | { |
1502 | return __find_next_entry(iter, ent_cpu, ent_ts); | 1594 | return __find_next_entry(iter, ent_cpu, NULL, ent_ts); |
1503 | } | 1595 | } |
1504 | 1596 | ||
1505 | /* Find the next real entry, and increment the iterator to the next entry */ | 1597 | /* Find the next real entry, and increment the iterator to the next entry */ |
1506 | static void *find_next_entry_inc(struct trace_iterator *iter) | 1598 | void *trace_find_next_entry_inc(struct trace_iterator *iter) |
1507 | { | 1599 | { |
1508 | iter->ent = __find_next_entry(iter, &iter->cpu, &iter->ts); | 1600 | iter->ent = __find_next_entry(iter, &iter->cpu, |
1601 | &iter->lost_events, &iter->ts); | ||
1509 | 1602 | ||
1510 | if (iter->ent) | 1603 | if (iter->ent) |
1511 | trace_iterator_increment(iter); | 1604 | trace_iterator_increment(iter); |
@@ -1517,7 +1610,8 @@ static void trace_consume(struct trace_iterator *iter) | |||
1517 | { | 1610 | { |
1518 | /* Don't allow ftrace to trace into the ring buffers */ | 1611 | /* Don't allow ftrace to trace into the ring buffers */ |
1519 | ftrace_disable_cpu(); | 1612 | ftrace_disable_cpu(); |
1520 | ring_buffer_consume(iter->tr->buffer, iter->cpu, &iter->ts); | 1613 | ring_buffer_consume(iter->tr->buffer, iter->cpu, &iter->ts, |
1614 | &iter->lost_events); | ||
1521 | ftrace_enable_cpu(); | 1615 | ftrace_enable_cpu(); |
1522 | } | 1616 | } |
1523 | 1617 | ||
@@ -1536,19 +1630,19 @@ static void *s_next(struct seq_file *m, void *v, loff_t *pos) | |||
1536 | return NULL; | 1630 | return NULL; |
1537 | 1631 | ||
1538 | if (iter->idx < 0) | 1632 | if (iter->idx < 0) |
1539 | ent = find_next_entry_inc(iter); | 1633 | ent = trace_find_next_entry_inc(iter); |
1540 | else | 1634 | else |
1541 | ent = iter; | 1635 | ent = iter; |
1542 | 1636 | ||
1543 | while (ent && iter->idx < i) | 1637 | while (ent && iter->idx < i) |
1544 | ent = find_next_entry_inc(iter); | 1638 | ent = trace_find_next_entry_inc(iter); |
1545 | 1639 | ||
1546 | iter->pos = *pos; | 1640 | iter->pos = *pos; |
1547 | 1641 | ||
1548 | return ent; | 1642 | return ent; |
1549 | } | 1643 | } |
1550 | 1644 | ||
1551 | static void tracing_iter_reset(struct trace_iterator *iter, int cpu) | 1645 | void tracing_iter_reset(struct trace_iterator *iter, int cpu) |
1552 | { | 1646 | { |
1553 | struct trace_array *tr = iter->tr; | 1647 | struct trace_array *tr = iter->tr; |
1554 | struct ring_buffer_event *event; | 1648 | struct ring_buffer_event *event; |
@@ -1580,12 +1674,6 @@ static void tracing_iter_reset(struct trace_iterator *iter, int cpu) | |||
1580 | } | 1674 | } |
1581 | 1675 | ||
1582 | /* | 1676 | /* |
1583 | * No necessary locking here. The worst thing which can | ||
1584 | * happen is loosing events consumed at the same time | ||
1585 | * by a trace_pipe reader. | ||
1586 | * Other than that, we don't risk to crash the ring buffer | ||
1587 | * because it serializes the readers. | ||
1588 | * | ||
1589 | * The current tracer is copied to avoid a global locking | 1677 | * The current tracer is copied to avoid a global locking |
1590 | * all around. | 1678 | * all around. |
1591 | */ | 1679 | */ |
@@ -1623,6 +1711,7 @@ static void *s_start(struct seq_file *m, loff_t *pos) | |||
1623 | 1711 | ||
1624 | ftrace_enable_cpu(); | 1712 | ftrace_enable_cpu(); |
1625 | 1713 | ||
1714 | iter->leftover = 0; | ||
1626 | for (p = iter; p && l < *pos; p = s_next(m, p, &l)) | 1715 | for (p = iter; p && l < *pos; p = s_next(m, p, &l)) |
1627 | ; | 1716 | ; |
1628 | 1717 | ||
@@ -1640,12 +1729,16 @@ static void *s_start(struct seq_file *m, loff_t *pos) | |||
1640 | } | 1729 | } |
1641 | 1730 | ||
1642 | trace_event_read_lock(); | 1731 | trace_event_read_lock(); |
1732 | trace_access_lock(cpu_file); | ||
1643 | return p; | 1733 | return p; |
1644 | } | 1734 | } |
1645 | 1735 | ||
1646 | static void s_stop(struct seq_file *m, void *p) | 1736 | static void s_stop(struct seq_file *m, void *p) |
1647 | { | 1737 | { |
1738 | struct trace_iterator *iter = m->private; | ||
1739 | |||
1648 | atomic_dec(&trace_record_cmdline_disabled); | 1740 | atomic_dec(&trace_record_cmdline_disabled); |
1741 | trace_access_unlock(iter->cpu_file); | ||
1649 | trace_event_read_unlock(); | 1742 | trace_event_read_unlock(); |
1650 | } | 1743 | } |
1651 | 1744 | ||
@@ -1669,7 +1762,7 @@ static void print_func_help_header(struct seq_file *m) | |||
1669 | } | 1762 | } |
1670 | 1763 | ||
1671 | 1764 | ||
1672 | static void | 1765 | void |
1673 | print_trace_header(struct seq_file *m, struct trace_iterator *iter) | 1766 | print_trace_header(struct seq_file *m, struct trace_iterator *iter) |
1674 | { | 1767 | { |
1675 | unsigned long sym_flags = (trace_flags & TRACE_ITER_SYM_MASK); | 1768 | unsigned long sym_flags = (trace_flags & TRACE_ITER_SYM_MASK); |
@@ -1797,7 +1890,7 @@ static enum print_line_t print_trace_fmt(struct trace_iterator *iter) | |||
1797 | } | 1890 | } |
1798 | 1891 | ||
1799 | if (event) | 1892 | if (event) |
1800 | return event->trace(iter, sym_flags); | 1893 | return event->funcs->trace(iter, sym_flags, event); |
1801 | 1894 | ||
1802 | if (!trace_seq_printf(s, "Unknown type %d\n", entry->type)) | 1895 | if (!trace_seq_printf(s, "Unknown type %d\n", entry->type)) |
1803 | goto partial; | 1896 | goto partial; |
@@ -1823,7 +1916,7 @@ static enum print_line_t print_raw_fmt(struct trace_iterator *iter) | |||
1823 | 1916 | ||
1824 | event = ftrace_find_event(entry->type); | 1917 | event = ftrace_find_event(entry->type); |
1825 | if (event) | 1918 | if (event) |
1826 | return event->raw(iter, 0); | 1919 | return event->funcs->raw(iter, 0, event); |
1827 | 1920 | ||
1828 | if (!trace_seq_printf(s, "%d ?\n", entry->type)) | 1921 | if (!trace_seq_printf(s, "%d ?\n", entry->type)) |
1829 | goto partial; | 1922 | goto partial; |
@@ -1850,7 +1943,7 @@ static enum print_line_t print_hex_fmt(struct trace_iterator *iter) | |||
1850 | 1943 | ||
1851 | event = ftrace_find_event(entry->type); | 1944 | event = ftrace_find_event(entry->type); |
1852 | if (event) { | 1945 | if (event) { |
1853 | enum print_line_t ret = event->hex(iter, 0); | 1946 | enum print_line_t ret = event->funcs->hex(iter, 0, event); |
1854 | if (ret != TRACE_TYPE_HANDLED) | 1947 | if (ret != TRACE_TYPE_HANDLED) |
1855 | return ret; | 1948 | return ret; |
1856 | } | 1949 | } |
@@ -1875,10 +1968,11 @@ static enum print_line_t print_bin_fmt(struct trace_iterator *iter) | |||
1875 | } | 1968 | } |
1876 | 1969 | ||
1877 | event = ftrace_find_event(entry->type); | 1970 | event = ftrace_find_event(entry->type); |
1878 | return event ? event->binary(iter, 0) : TRACE_TYPE_HANDLED; | 1971 | return event ? event->funcs->binary(iter, 0, event) : |
1972 | TRACE_TYPE_HANDLED; | ||
1879 | } | 1973 | } |
1880 | 1974 | ||
1881 | static int trace_empty(struct trace_iterator *iter) | 1975 | int trace_empty(struct trace_iterator *iter) |
1882 | { | 1976 | { |
1883 | int cpu; | 1977 | int cpu; |
1884 | 1978 | ||
@@ -1909,10 +2003,14 @@ static int trace_empty(struct trace_iterator *iter) | |||
1909 | } | 2003 | } |
1910 | 2004 | ||
1911 | /* Called with trace_event_read_lock() held. */ | 2005 | /* Called with trace_event_read_lock() held. */ |
1912 | static enum print_line_t print_trace_line(struct trace_iterator *iter) | 2006 | enum print_line_t print_trace_line(struct trace_iterator *iter) |
1913 | { | 2007 | { |
1914 | enum print_line_t ret; | 2008 | enum print_line_t ret; |
1915 | 2009 | ||
2010 | if (iter->lost_events) | ||
2011 | trace_seq_printf(&iter->seq, "CPU:%d [LOST %lu EVENTS]\n", | ||
2012 | iter->cpu, iter->lost_events); | ||
2013 | |||
1916 | if (iter->trace && iter->trace->print_line) { | 2014 | if (iter->trace && iter->trace->print_line) { |
1917 | ret = iter->trace->print_line(iter); | 2015 | ret = iter->trace->print_line(iter); |
1918 | if (ret != TRACE_TYPE_UNHANDLED) | 2016 | if (ret != TRACE_TYPE_UNHANDLED) |
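With the lost_events plumbing above, ring_buffer_peek()/ring_buffer_consume() report how many events were overwritten before the one being returned, the count rides along in the iterator, and print_trace_line() emits a "CPU:%d [LOST %lu EVENTS]" marker ahead of the entry. A simplified model of that reader-side reporting (all names hypothetical):

#include <stdio.h>

struct iter {
        int cpu;
        unsigned long lost_events;      /* filled in by the peek/consume step */
        const char *ent;
};

static void print_line(struct iter *it)
{
        if (it->lost_events)
                printf("CPU:%d [LOST %lu EVENTS]\n", it->cpu, it->lost_events);
        printf("%s\n", it->ent);
}

int main(void)
{
        struct iter it = { .cpu = 1, .lost_events = 42, .ent = "sched_switch ..." };

        print_line(&it);
        return 0;
}
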
@@ -1941,6 +2039,23 @@ static enum print_line_t print_trace_line(struct trace_iterator *iter) | |||
1941 | return print_trace_fmt(iter); | 2039 | return print_trace_fmt(iter); |
1942 | } | 2040 | } |
1943 | 2041 | ||
2042 | void trace_default_header(struct seq_file *m) | ||
2043 | { | ||
2044 | struct trace_iterator *iter = m->private; | ||
2045 | |||
2046 | if (iter->iter_flags & TRACE_FILE_LAT_FMT) { | ||
2047 | /* print nothing if the buffers are empty */ | ||
2048 | if (trace_empty(iter)) | ||
2049 | return; | ||
2050 | print_trace_header(m, iter); | ||
2051 | if (!(trace_flags & TRACE_ITER_VERBOSE)) | ||
2052 | print_lat_help_header(m); | ||
2053 | } else { | ||
2054 | if (!(trace_flags & TRACE_ITER_VERBOSE)) | ||
2055 | print_func_help_header(m); | ||
2056 | } | ||
2057 | } | ||
2058 | |||
1944 | static int s_show(struct seq_file *m, void *v) | 2059 | static int s_show(struct seq_file *m, void *v) |
1945 | { | 2060 | { |
1946 | struct trace_iterator *iter = v; | 2061 | struct trace_iterator *iter = v; |
@@ -1953,17 +2068,9 @@ static int s_show(struct seq_file *m, void *v) | |||
1953 | } | 2068 | } |
1954 | if (iter->trace && iter->trace->print_header) | 2069 | if (iter->trace && iter->trace->print_header) |
1955 | iter->trace->print_header(m); | 2070 | iter->trace->print_header(m); |
1956 | else if (iter->iter_flags & TRACE_FILE_LAT_FMT) { | 2071 | else |
1957 | /* print nothing if the buffers are empty */ | 2072 | trace_default_header(m); |
1958 | if (trace_empty(iter)) | 2073 | |
1959 | return 0; | ||
1960 | print_trace_header(m, iter); | ||
1961 | if (!(trace_flags & TRACE_ITER_VERBOSE)) | ||
1962 | print_lat_help_header(m); | ||
1963 | } else { | ||
1964 | if (!(trace_flags & TRACE_ITER_VERBOSE)) | ||
1965 | print_func_help_header(m); | ||
1966 | } | ||
1967 | } else if (iter->leftover) { | 2074 | } else if (iter->leftover) { |
1968 | /* | 2075 | /* |
1969 | * If we filled the seq_file buffer earlier, we | 2076 | * If we filled the seq_file buffer earlier, we |
@@ -2049,15 +2156,20 @@ __tracing_open(struct inode *inode, struct file *file) | |||
2049 | 2156 | ||
2050 | if (iter->cpu_file == TRACE_PIPE_ALL_CPU) { | 2157 | if (iter->cpu_file == TRACE_PIPE_ALL_CPU) { |
2051 | for_each_tracing_cpu(cpu) { | 2158 | for_each_tracing_cpu(cpu) { |
2052 | |||
2053 | iter->buffer_iter[cpu] = | 2159 | iter->buffer_iter[cpu] = |
2054 | ring_buffer_read_start(iter->tr->buffer, cpu); | 2160 | ring_buffer_read_prepare(iter->tr->buffer, cpu); |
2161 | } | ||
2162 | ring_buffer_read_prepare_sync(); | ||
2163 | for_each_tracing_cpu(cpu) { | ||
2164 | ring_buffer_read_start(iter->buffer_iter[cpu]); | ||
2055 | tracing_iter_reset(iter, cpu); | 2165 | tracing_iter_reset(iter, cpu); |
2056 | } | 2166 | } |
2057 | } else { | 2167 | } else { |
2058 | cpu = iter->cpu_file; | 2168 | cpu = iter->cpu_file; |
2059 | iter->buffer_iter[cpu] = | 2169 | iter->buffer_iter[cpu] = |
2060 | ring_buffer_read_start(iter->tr->buffer, cpu); | 2170 | ring_buffer_read_prepare(iter->tr->buffer, cpu); |
2171 | ring_buffer_read_prepare_sync(); | ||
2172 | ring_buffer_read_start(iter->buffer_iter[cpu]); | ||
2061 | tracing_iter_reset(iter, cpu); | 2173 | tracing_iter_reset(iter, cpu); |
2062 | } | 2174 | } |
2063 | 2175 | ||
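The __tracing_open() change above splits iterator creation into ring_buffer_read_prepare() per CPU, one ring_buffer_read_prepare_sync() covering all of them, and then ring_buffer_read_start(), so a single synchronization point serves every CPU instead of one per read_start call. A toy userspace model of that prepare/sync/start split (stub functions, hypothetical names):

#include <stdio.h>

#define NR_CPUS 4

static int  prepare(int cpu)   { printf("prepare cpu%d\n", cpu); return cpu; }
static void prepare_sync(void) { puts("one synchronization for all cpus"); }
static void start(int it)      { printf("start iterator %d\n", it); }

int main(void)
{
        int it[NR_CPUS], cpu;

        for (cpu = 0; cpu < NR_CPUS; cpu++)
                it[cpu] = prepare(cpu);         /* cheap, no synchronization yet */
        prepare_sync();                         /* single expensive step */
        for (cpu = 0; cpu < NR_CPUS; cpu++)
                start(it[cpu]);
        return 0;
}
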
@@ -2100,7 +2212,7 @@ int tracing_open_generic(struct inode *inode, struct file *filp) | |||
2100 | 2212 | ||
2101 | static int tracing_release(struct inode *inode, struct file *file) | 2213 | static int tracing_release(struct inode *inode, struct file *file) |
2102 | { | 2214 | { |
2103 | struct seq_file *m = (struct seq_file *)file->private_data; | 2215 | struct seq_file *m = file->private_data; |
2104 | struct trace_iterator *iter; | 2216 | struct trace_iterator *iter; |
2105 | int cpu; | 2217 | int cpu; |
2106 | 2218 | ||
@@ -2224,11 +2336,19 @@ tracing_write_stub(struct file *filp, const char __user *ubuf, | |||
2224 | return count; | 2336 | return count; |
2225 | } | 2337 | } |
2226 | 2338 | ||
2339 | static loff_t tracing_seek(struct file *file, loff_t offset, int origin) | ||
2340 | { | ||
2341 | if (file->f_mode & FMODE_READ) | ||
2342 | return seq_lseek(file, offset, origin); | ||
2343 | else | ||
2344 | return 0; | ||
2345 | } | ||
2346 | |||
2227 | static const struct file_operations tracing_fops = { | 2347 | static const struct file_operations tracing_fops = { |
2228 | .open = tracing_open, | 2348 | .open = tracing_open, |
2229 | .read = seq_read, | 2349 | .read = seq_read, |
2230 | .write = tracing_write_stub, | 2350 | .write = tracing_write_stub, |
2231 | .llseek = seq_lseek, | 2351 | .llseek = tracing_seek, |
2232 | .release = tracing_release, | 2352 | .release = tracing_release, |
2233 | }; | 2353 | }; |
2234 | 2354 | ||
@@ -2236,6 +2356,7 @@ static const struct file_operations show_traces_fops = { | |||
2236 | .open = show_traces_open, | 2356 | .open = show_traces_open, |
2237 | .read = seq_read, | 2357 | .read = seq_read, |
2238 | .release = seq_release, | 2358 | .release = seq_release, |
2359 | .llseek = seq_lseek, | ||
2239 | }; | 2360 | }; |
2240 | 2361 | ||
2241 | /* | 2362 | /* |
@@ -2329,6 +2450,7 @@ static const struct file_operations tracing_cpumask_fops = { | |||
2329 | .open = tracing_open_generic, | 2450 | .open = tracing_open_generic, |
2330 | .read = tracing_cpumask_read, | 2451 | .read = tracing_cpumask_read, |
2331 | .write = tracing_cpumask_write, | 2452 | .write = tracing_cpumask_write, |
2453 | .llseek = generic_file_llseek, | ||
2332 | }; | 2454 | }; |
2333 | 2455 | ||
2334 | static int tracing_trace_options_show(struct seq_file *m, void *v) | 2456 | static int tracing_trace_options_show(struct seq_file *m, void *v) |
@@ -2404,6 +2526,9 @@ static void set_tracer_flags(unsigned int mask, int enabled) | |||
2404 | trace_flags |= mask; | 2526 | trace_flags |= mask; |
2405 | else | 2527 | else |
2406 | trace_flags &= ~mask; | 2528 | trace_flags &= ~mask; |
2529 | |||
2530 | if (mask == TRACE_ITER_RECORD_CMD) | ||
2531 | trace_event_enable_cmd_record(enabled); | ||
2407 | } | 2532 | } |
2408 | 2533 | ||
2409 | static ssize_t | 2534 | static ssize_t |
@@ -2495,6 +2620,7 @@ tracing_readme_read(struct file *filp, char __user *ubuf, | |||
2495 | static const struct file_operations tracing_readme_fops = { | 2620 | static const struct file_operations tracing_readme_fops = { |
2496 | .open = tracing_open_generic, | 2621 | .open = tracing_open_generic, |
2497 | .read = tracing_readme_read, | 2622 | .read = tracing_readme_read, |
2623 | .llseek = generic_file_llseek, | ||
2498 | }; | 2624 | }; |
2499 | 2625 | ||
2500 | static ssize_t | 2626 | static ssize_t |
@@ -2545,6 +2671,7 @@ tracing_saved_cmdlines_read(struct file *file, char __user *ubuf, | |||
2545 | static const struct file_operations tracing_saved_cmdlines_fops = { | 2671 | static const struct file_operations tracing_saved_cmdlines_fops = { |
2546 | .open = tracing_open_generic, | 2672 | .open = tracing_open_generic, |
2547 | .read = tracing_saved_cmdlines_read, | 2673 | .read = tracing_saved_cmdlines_read, |
2674 | .llseek = generic_file_llseek, | ||
2548 | }; | 2675 | }; |
2549 | 2676 | ||
2550 | static ssize_t | 2677 | static ssize_t |
@@ -2640,6 +2767,9 @@ static int tracing_resize_ring_buffer(unsigned long size) | |||
2640 | if (ret < 0) | 2767 | if (ret < 0) |
2641 | return ret; | 2768 | return ret; |
2642 | 2769 | ||
2770 | if (!current_trace->use_max_tr) | ||
2771 | goto out; | ||
2772 | |||
2643 | ret = ring_buffer_resize(max_tr.buffer, size); | 2773 | ret = ring_buffer_resize(max_tr.buffer, size); |
2644 | if (ret < 0) { | 2774 | if (ret < 0) { |
2645 | int r; | 2775 | int r; |
@@ -2667,11 +2797,14 @@ static int tracing_resize_ring_buffer(unsigned long size) | |||
2667 | return ret; | 2797 | return ret; |
2668 | } | 2798 | } |
2669 | 2799 | ||
2800 | max_tr.entries = size; | ||
2801 | out: | ||
2670 | global_trace.entries = size; | 2802 | global_trace.entries = size; |
2671 | 2803 | ||
2672 | return ret; | 2804 | return ret; |
2673 | } | 2805 | } |
2674 | 2806 | ||
2807 | |||
2675 | /** | 2808 | /** |
2676 | * tracing_update_buffers - used by tracing facility to expand ring buffers | 2809 | * tracing_update_buffers - used by tracing facility to expand ring buffers |
2677 | * | 2810 | * |
@@ -2732,12 +2865,26 @@ static int tracing_set_tracer(const char *buf) | |||
2732 | trace_branch_disable(); | 2865 | trace_branch_disable(); |
2733 | if (current_trace && current_trace->reset) | 2866 | if (current_trace && current_trace->reset) |
2734 | current_trace->reset(tr); | 2867 | current_trace->reset(tr); |
2735 | 2868 | if (current_trace && current_trace->use_max_tr) { | |
2869 | /* | ||
2870 | * We don't free the ring buffer. instead, resize it because | ||
2871 | * The max_tr ring buffer has some state (e.g. ring->clock) and | ||
2872 | * we want preserve it. | ||
2873 | */ | ||
2874 | ring_buffer_resize(max_tr.buffer, 1); | ||
2875 | max_tr.entries = 1; | ||
2876 | } | ||
2736 | destroy_trace_option_files(topts); | 2877 | destroy_trace_option_files(topts); |
2737 | 2878 | ||
2738 | current_trace = t; | 2879 | current_trace = t; |
2739 | 2880 | ||
2740 | topts = create_trace_option_files(current_trace); | 2881 | topts = create_trace_option_files(current_trace); |
2882 | if (current_trace->use_max_tr) { | ||
2883 | ret = ring_buffer_resize(max_tr.buffer, global_trace.entries); | ||
2884 | if (ret < 0) | ||
2885 | goto out; | ||
2886 | max_tr.entries = global_trace.entries; | ||
2887 | } | ||
2741 | 2888 | ||
2742 | if (t->init) { | 2889 | if (t->init) { |
2743 | ret = tracer_init(t, tr); | 2890 | ret = tracer_init(t, tr); |
@@ -2836,22 +2983,6 @@ static int tracing_open_pipe(struct inode *inode, struct file *filp) | |||
2836 | 2983 | ||
2837 | mutex_lock(&trace_types_lock); | 2984 | mutex_lock(&trace_types_lock); |
2838 | 2985 | ||
2839 | /* We only allow one reader per cpu */ | ||
2840 | if (cpu_file == TRACE_PIPE_ALL_CPU) { | ||
2841 | if (!cpumask_empty(tracing_reader_cpumask)) { | ||
2842 | ret = -EBUSY; | ||
2843 | goto out; | ||
2844 | } | ||
2845 | cpumask_setall(tracing_reader_cpumask); | ||
2846 | } else { | ||
2847 | if (!cpumask_test_cpu(cpu_file, tracing_reader_cpumask)) | ||
2848 | cpumask_set_cpu(cpu_file, tracing_reader_cpumask); | ||
2849 | else { | ||
2850 | ret = -EBUSY; | ||
2851 | goto out; | ||
2852 | } | ||
2853 | } | ||
2854 | |||
2855 | /* create a buffer to store the information to pass to userspace */ | 2986 | /* create a buffer to store the information to pass to userspace */ |
2856 | iter = kzalloc(sizeof(*iter), GFP_KERNEL); | 2987 | iter = kzalloc(sizeof(*iter), GFP_KERNEL); |
2857 | if (!iter) { | 2988 | if (!iter) { |
@@ -2890,6 +3021,7 @@ static int tracing_open_pipe(struct inode *inode, struct file *filp) | |||
2890 | if (iter->trace->pipe_open) | 3021 | if (iter->trace->pipe_open) |
2891 | iter->trace->pipe_open(iter); | 3022 | iter->trace->pipe_open(iter); |
2892 | 3023 | ||
3024 | nonseekable_open(inode, filp); | ||
2893 | out: | 3025 | out: |
2894 | mutex_unlock(&trace_types_lock); | 3026 | mutex_unlock(&trace_types_lock); |
2895 | return ret; | 3027 | return ret; |
@@ -2907,12 +3039,6 @@ static int tracing_release_pipe(struct inode *inode, struct file *file) | |||
2907 | 3039 | ||
2908 | mutex_lock(&trace_types_lock); | 3040 | mutex_lock(&trace_types_lock); |
2909 | 3041 | ||
2910 | if (iter->cpu_file == TRACE_PIPE_ALL_CPU) | ||
2911 | cpumask_clear(tracing_reader_cpumask); | ||
2912 | else | ||
2913 | cpumask_clear_cpu(iter->cpu_file, tracing_reader_cpumask); | ||
2914 | |||
2915 | |||
2916 | if (iter->trace->pipe_close) | 3042 | if (iter->trace->pipe_close) |
2917 | iter->trace->pipe_close(iter); | 3043 | iter->trace->pipe_close(iter); |
2918 | 3044 | ||
@@ -3074,7 +3200,8 @@ waitagain: | |||
3074 | iter->pos = -1; | 3200 | iter->pos = -1; |
3075 | 3201 | ||
3076 | trace_event_read_lock(); | 3202 | trace_event_read_lock(); |
3077 | while (find_next_entry_inc(iter) != NULL) { | 3203 | trace_access_lock(iter->cpu_file); |
3204 | while (trace_find_next_entry_inc(iter) != NULL) { | ||
3078 | enum print_line_t ret; | 3205 | enum print_line_t ret; |
3079 | int len = iter->seq.len; | 3206 | int len = iter->seq.len; |
3080 | 3207 | ||
@@ -3090,6 +3217,7 @@ waitagain: | |||
3090 | if (iter->seq.len >= cnt) | 3217 | if (iter->seq.len >= cnt) |
3091 | break; | 3218 | break; |
3092 | } | 3219 | } |
3220 | trace_access_unlock(iter->cpu_file); | ||
3093 | trace_event_read_unlock(); | 3221 | trace_event_read_unlock(); |
3094 | 3222 | ||
3095 | /* Now copy what we have to the user */ | 3223 | /* Now copy what we have to the user */ |
@@ -3156,7 +3284,7 @@ tracing_fill_pipe_page(size_t rem, struct trace_iterator *iter) | |||
3156 | if (ret != TRACE_TYPE_NO_CONSUME) | 3284 | if (ret != TRACE_TYPE_NO_CONSUME) |
3157 | trace_consume(iter); | 3285 | trace_consume(iter); |
3158 | rem -= count; | 3286 | rem -= count; |
3159 | if (!find_next_entry_inc(iter)) { | 3287 | if (!trace_find_next_entry_inc(iter)) { |
3160 | rem = 0; | 3288 | rem = 0; |
3161 | iter->ent = NULL; | 3289 | iter->ent = NULL; |
3162 | break; | 3290 | break; |
@@ -3172,12 +3300,12 @@ static ssize_t tracing_splice_read_pipe(struct file *filp, | |||
3172 | size_t len, | 3300 | size_t len, |
3173 | unsigned int flags) | 3301 | unsigned int flags) |
3174 | { | 3302 | { |
3175 | struct page *pages[PIPE_BUFFERS]; | 3303 | struct page *pages_def[PIPE_DEF_BUFFERS]; |
3176 | struct partial_page partial[PIPE_BUFFERS]; | 3304 | struct partial_page partial_def[PIPE_DEF_BUFFERS]; |
3177 | struct trace_iterator *iter = filp->private_data; | 3305 | struct trace_iterator *iter = filp->private_data; |
3178 | struct splice_pipe_desc spd = { | 3306 | struct splice_pipe_desc spd = { |
3179 | .pages = pages, | 3307 | .pages = pages_def, |
3180 | .partial = partial, | 3308 | .partial = partial_def, |
3181 | .nr_pages = 0, /* This gets updated below. */ | 3309 | .nr_pages = 0, /* This gets updated below. */ |
3182 | .flags = flags, | 3310 | .flags = flags, |
3183 | .ops = &tracing_pipe_buf_ops, | 3311 | .ops = &tracing_pipe_buf_ops, |
@@ -3188,6 +3316,9 @@ static ssize_t tracing_splice_read_pipe(struct file *filp, | |||
3188 | size_t rem; | 3316 | size_t rem; |
3189 | unsigned int i; | 3317 | unsigned int i; |
3190 | 3318 | ||
3319 | if (splice_grow_spd(pipe, &spd)) | ||
3320 | return -ENOMEM; | ||
3321 | |||
3191 | /* copy the tracer to avoid using a global lock all around */ | 3322 | /* copy the tracer to avoid using a global lock all around */ |
3192 | mutex_lock(&trace_types_lock); | 3323 | mutex_lock(&trace_types_lock); |
3193 | if (unlikely(old_tracer != current_trace && current_trace)) { | 3324 | if (unlikely(old_tracer != current_trace && current_trace)) { |
@@ -3209,46 +3340,50 @@ static ssize_t tracing_splice_read_pipe(struct file *filp, | |||
3209 | if (ret <= 0) | 3340 | if (ret <= 0) |
3210 | goto out_err; | 3341 | goto out_err; |
3211 | 3342 | ||
3212 | if (!iter->ent && !find_next_entry_inc(iter)) { | 3343 | if (!iter->ent && !trace_find_next_entry_inc(iter)) { |
3213 | ret = -EFAULT; | 3344 | ret = -EFAULT; |
3214 | goto out_err; | 3345 | goto out_err; |
3215 | } | 3346 | } |
3216 | 3347 | ||
3217 | trace_event_read_lock(); | 3348 | trace_event_read_lock(); |
3349 | trace_access_lock(iter->cpu_file); | ||
3218 | 3350 | ||
3219 | /* Fill as many pages as possible. */ | 3351 | /* Fill as many pages as possible. */ |
3220 | for (i = 0, rem = len; i < PIPE_BUFFERS && rem; i++) { | 3352 | for (i = 0, rem = len; i < pipe->buffers && rem; i++) { |
3221 | pages[i] = alloc_page(GFP_KERNEL); | 3353 | spd.pages[i] = alloc_page(GFP_KERNEL); |
3222 | if (!pages[i]) | 3354 | if (!spd.pages[i]) |
3223 | break; | 3355 | break; |
3224 | 3356 | ||
3225 | rem = tracing_fill_pipe_page(rem, iter); | 3357 | rem = tracing_fill_pipe_page(rem, iter); |
3226 | 3358 | ||
3227 | /* Copy the data into the page, so we can start over. */ | 3359 | /* Copy the data into the page, so we can start over. */ |
3228 | ret = trace_seq_to_buffer(&iter->seq, | 3360 | ret = trace_seq_to_buffer(&iter->seq, |
3229 | page_address(pages[i]), | 3361 | page_address(spd.pages[i]), |
3230 | iter->seq.len); | 3362 | iter->seq.len); |
3231 | if (ret < 0) { | 3363 | if (ret < 0) { |
3232 | __free_page(pages[i]); | 3364 | __free_page(spd.pages[i]); |
3233 | break; | 3365 | break; |
3234 | } | 3366 | } |
3235 | partial[i].offset = 0; | 3367 | spd.partial[i].offset = 0; |
3236 | partial[i].len = iter->seq.len; | 3368 | spd.partial[i].len = iter->seq.len; |
3237 | 3369 | ||
3238 | trace_seq_init(&iter->seq); | 3370 | trace_seq_init(&iter->seq); |
3239 | } | 3371 | } |
3240 | 3372 | ||
3373 | trace_access_unlock(iter->cpu_file); | ||
3241 | trace_event_read_unlock(); | 3374 | trace_event_read_unlock(); |
3242 | mutex_unlock(&iter->mutex); | 3375 | mutex_unlock(&iter->mutex); |
3243 | 3376 | ||
3244 | spd.nr_pages = i; | 3377 | spd.nr_pages = i; |
3245 | 3378 | ||
3246 | return splice_to_pipe(pipe, &spd); | 3379 | ret = splice_to_pipe(pipe, &spd); |
3380 | out: | ||
3381 | splice_shrink_spd(pipe, &spd); | ||
3382 | return ret; | ||
3247 | 3383 | ||
3248 | out_err: | 3384 | out_err: |
3249 | mutex_unlock(&iter->mutex); | 3385 | mutex_unlock(&iter->mutex); |
3250 | 3386 | goto out; | |
3251 | return ret; | ||
3252 | } | 3387 | } |
3253 | 3388 | ||
3254 | static ssize_t | 3389 | static ssize_t |
@@ -3332,7 +3467,6 @@ tracing_entries_write(struct file *filp, const char __user *ubuf, | |||
3332 | } | 3467 | } |
3333 | 3468 | ||
3334 | tracing_start(); | 3469 | tracing_start(); |
3335 | max_tr.entries = global_trace.entries; | ||
3336 | mutex_unlock(&trace_types_lock); | 3470 | mutex_unlock(&trace_types_lock); |
3337 | 3471 | ||
3338 | return cnt; | 3472 | return cnt; |
@@ -3353,6 +3487,7 @@ tracing_mark_write(struct file *filp, const char __user *ubuf, | |||
3353 | size_t cnt, loff_t *fpos) | 3487 | size_t cnt, loff_t *fpos) |
3354 | { | 3488 | { |
3355 | char *buf; | 3489 | char *buf; |
3490 | size_t written; | ||
3356 | 3491 | ||
3357 | if (tracing_disabled) | 3492 | if (tracing_disabled) |
3358 | return -EINVAL; | 3493 | return -EINVAL; |
@@ -3374,11 +3509,15 @@ tracing_mark_write(struct file *filp, const char __user *ubuf, | |||
3374 | } else | 3509 | } else |
3375 | buf[cnt] = '\0'; | 3510 | buf[cnt] = '\0'; |
3376 | 3511 | ||
3377 | cnt = mark_printk("%s", buf); | 3512 | written = mark_printk("%s", buf); |
3378 | kfree(buf); | 3513 | kfree(buf); |
3379 | *fpos += cnt; | 3514 | *fpos += written; |
3380 | 3515 | ||
3381 | return cnt; | 3516 | /* don't tell userspace we wrote more - it might confuse them */ |
3517 | if (written > cnt) | ||
3518 | written = cnt; | ||
3519 | |||
3520 | return written; | ||
3382 | } | 3521 | } |
3383 | 3522 | ||
3384 | static int tracing_clock_show(struct seq_file *m, void *v) | 3523 | static int tracing_clock_show(struct seq_file *m, void *v) |
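[Annotation] tracing_mark_write() previously returned whatever mark_printk() reported, which could exceed the byte count the caller passed in; the file position still advances by the amount actually emitted, but the return value is now capped at cnt so write() never claims to have consumed more than it was given. A minimal userspace illustration of the contract this restores (marker_fd is assumed to be an open fd on the trace_marker file):

	/* Illustrative only: the return of write(2) must satisfy 0 <= n <= len. */
	ssize_t n = write(marker_fd, msg, len);
	assert(n <= (ssize_t)len);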
@@ -3445,18 +3584,21 @@ static const struct file_operations tracing_max_lat_fops = { | |||
3445 | .open = tracing_open_generic, | 3584 | .open = tracing_open_generic, |
3446 | .read = tracing_max_lat_read, | 3585 | .read = tracing_max_lat_read, |
3447 | .write = tracing_max_lat_write, | 3586 | .write = tracing_max_lat_write, |
3587 | .llseek = generic_file_llseek, | ||
3448 | }; | 3588 | }; |
3449 | 3589 | ||
3450 | static const struct file_operations tracing_ctrl_fops = { | 3590 | static const struct file_operations tracing_ctrl_fops = { |
3451 | .open = tracing_open_generic, | 3591 | .open = tracing_open_generic, |
3452 | .read = tracing_ctrl_read, | 3592 | .read = tracing_ctrl_read, |
3453 | .write = tracing_ctrl_write, | 3593 | .write = tracing_ctrl_write, |
3594 | .llseek = generic_file_llseek, | ||
3454 | }; | 3595 | }; |
3455 | 3596 | ||
3456 | static const struct file_operations set_tracer_fops = { | 3597 | static const struct file_operations set_tracer_fops = { |
3457 | .open = tracing_open_generic, | 3598 | .open = tracing_open_generic, |
3458 | .read = tracing_set_trace_read, | 3599 | .read = tracing_set_trace_read, |
3459 | .write = tracing_set_trace_write, | 3600 | .write = tracing_set_trace_write, |
3601 | .llseek = generic_file_llseek, | ||
3460 | }; | 3602 | }; |
3461 | 3603 | ||
3462 | static const struct file_operations tracing_pipe_fops = { | 3604 | static const struct file_operations tracing_pipe_fops = { |
@@ -3465,17 +3607,20 @@ static const struct file_operations tracing_pipe_fops = { | |||
3465 | .read = tracing_read_pipe, | 3607 | .read = tracing_read_pipe, |
3466 | .splice_read = tracing_splice_read_pipe, | 3608 | .splice_read = tracing_splice_read_pipe, |
3467 | .release = tracing_release_pipe, | 3609 | .release = tracing_release_pipe, |
3610 | .llseek = no_llseek, | ||
3468 | }; | 3611 | }; |
3469 | 3612 | ||
3470 | static const struct file_operations tracing_entries_fops = { | 3613 | static const struct file_operations tracing_entries_fops = { |
3471 | .open = tracing_open_generic, | 3614 | .open = tracing_open_generic, |
3472 | .read = tracing_entries_read, | 3615 | .read = tracing_entries_read, |
3473 | .write = tracing_entries_write, | 3616 | .write = tracing_entries_write, |
3617 | .llseek = generic_file_llseek, | ||
3474 | }; | 3618 | }; |
3475 | 3619 | ||
3476 | static const struct file_operations tracing_mark_fops = { | 3620 | static const struct file_operations tracing_mark_fops = { |
3477 | .open = tracing_open_generic, | 3621 | .open = tracing_open_generic, |
3478 | .write = tracing_mark_write, | 3622 | .write = tracing_mark_write, |
3623 | .llseek = generic_file_llseek, | ||
3479 | }; | 3624 | }; |
3480 | 3625 | ||
3481 | static const struct file_operations trace_clock_fops = { | 3626 | static const struct file_operations trace_clock_fops = { |
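[Annotation] Each file_operations in this series gains an explicit .llseek: seekable control files use generic_file_llseek(), while the streaming trace_pipe uses no_llseek. Presumably this makes seek behavior explicit instead of relying on the old implicit default. A minimal, hedged sketch of the convention; the read/write handler names are placeholders, not functions from this file:

	/* Seekable control file: the file position is meaningful. */
	static const struct file_operations example_ctl_fops = {
		.open	= tracing_open_generic,
		.read	= example_ctl_read,		/* placeholder */
		.write	= example_ctl_write,		/* placeholder */
		.llseek	= generic_file_llseek,
	};

	/* Live stream: no position to seek; lseek() fails with -ESPIPE. */
	static const struct file_operations example_stream_fops = {
		.open		= tracing_open_generic,
		.read		= example_stream_read,	/* placeholder */
		.llseek		= no_llseek,
	};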
@@ -3521,7 +3666,6 @@ tracing_buffers_read(struct file *filp, char __user *ubuf, | |||
3521 | size_t count, loff_t *ppos) | 3666 | size_t count, loff_t *ppos) |
3522 | { | 3667 | { |
3523 | struct ftrace_buffer_info *info = filp->private_data; | 3668 | struct ftrace_buffer_info *info = filp->private_data; |
3524 | unsigned int pos; | ||
3525 | ssize_t ret; | 3669 | ssize_t ret; |
3526 | size_t size; | 3670 | size_t size; |
3527 | 3671 | ||
@@ -3539,18 +3683,15 @@ tracing_buffers_read(struct file *filp, char __user *ubuf, | |||
3539 | 3683 | ||
3540 | info->read = 0; | 3684 | info->read = 0; |
3541 | 3685 | ||
3686 | trace_access_lock(info->cpu); | ||
3542 | ret = ring_buffer_read_page(info->tr->buffer, | 3687 | ret = ring_buffer_read_page(info->tr->buffer, |
3543 | &info->spare, | 3688 | &info->spare, |
3544 | count, | 3689 | count, |
3545 | info->cpu, 0); | 3690 | info->cpu, 0); |
3691 | trace_access_unlock(info->cpu); | ||
3546 | if (ret < 0) | 3692 | if (ret < 0) |
3547 | return 0; | 3693 | return 0; |
3548 | 3694 | ||
3549 | pos = ring_buffer_page_len(info->spare); | ||
3550 | |||
3551 | if (pos < PAGE_SIZE) | ||
3552 | memset(info->spare + pos, 0, PAGE_SIZE - pos); | ||
3553 | |||
3554 | read: | 3695 | read: |
3555 | size = PAGE_SIZE - info->read; | 3696 | size = PAGE_SIZE - info->read; |
3556 | if (size > count) | 3697 | if (size > count) |
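[Annotation] tracing_buffers_read() now serializes with other readers by wrapping ring_buffer_read_page() in trace_access_lock(info->cpu)/trace_access_unlock(info->cpu), replacing the old tracing_reader_cpumask bookkeeping (whose allocation is dropped from tracer_alloc_buffers() further down). The helpers themselves are introduced earlier in this patch; a rough, hedged sketch of the exclusion they are expected to provide, with the internals assumed rather than taken from this hunk:

	/* Sketch: a whole-buffer reader (TRACE_PIPE_ALL_CPU) takes the rwsem for
	 * write and excludes everyone; per-cpu readers share it for read and only
	 * serialize among readers of the same cpu.  The per-cpu mutexes would be
	 * set up by trace_access_lock_init(), called from tracer_init_debugfs()
	 * below. */
	static DECLARE_RWSEM(all_cpu_access_lock);
	static DEFINE_PER_CPU(struct mutex, cpu_access_lock);

	static void trace_access_lock(int cpu)
	{
		if (cpu == TRACE_PIPE_ALL_CPU) {
			down_write(&all_cpu_access_lock);
		} else {
			down_read(&all_cpu_access_lock);
			mutex_lock(&per_cpu(cpu_access_lock, cpu));
		}
	}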
@@ -3645,11 +3786,11 @@ tracing_buffers_splice_read(struct file *file, loff_t *ppos, | |||
3645 | unsigned int flags) | 3786 | unsigned int flags) |
3646 | { | 3787 | { |
3647 | struct ftrace_buffer_info *info = file->private_data; | 3788 | struct ftrace_buffer_info *info = file->private_data; |
3648 | struct partial_page partial[PIPE_BUFFERS]; | 3789 | struct partial_page partial_def[PIPE_DEF_BUFFERS]; |
3649 | struct page *pages[PIPE_BUFFERS]; | 3790 | struct page *pages_def[PIPE_DEF_BUFFERS]; |
3650 | struct splice_pipe_desc spd = { | 3791 | struct splice_pipe_desc spd = { |
3651 | .pages = pages, | 3792 | .pages = pages_def, |
3652 | .partial = partial, | 3793 | .partial = partial_def, |
3653 | .flags = flags, | 3794 | .flags = flags, |
3654 | .ops = &buffer_pipe_buf_ops, | 3795 | .ops = &buffer_pipe_buf_ops, |
3655 | .spd_release = buffer_spd_release, | 3796 | .spd_release = buffer_spd_release, |
@@ -3658,21 +3799,28 @@ tracing_buffers_splice_read(struct file *file, loff_t *ppos, | |||
3658 | int entries, size, i; | 3799 | int entries, size, i; |
3659 | size_t ret; | 3800 | size_t ret; |
3660 | 3801 | ||
3802 | if (splice_grow_spd(pipe, &spd)) | ||
3803 | return -ENOMEM; | ||
3804 | |||
3661 | if (*ppos & (PAGE_SIZE - 1)) { | 3805 | if (*ppos & (PAGE_SIZE - 1)) { |
3662 | WARN_ONCE(1, "Ftrace: previous read must page-align\n"); | 3806 | WARN_ONCE(1, "Ftrace: previous read must page-align\n"); |
3663 | return -EINVAL; | 3807 | ret = -EINVAL; |
3808 | goto out; | ||
3664 | } | 3809 | } |
3665 | 3810 | ||
3666 | if (len & (PAGE_SIZE - 1)) { | 3811 | if (len & (PAGE_SIZE - 1)) { |
3667 | WARN_ONCE(1, "Ftrace: splice_read should page-align\n"); | 3812 | WARN_ONCE(1, "Ftrace: splice_read should page-align\n"); |
3668 | if (len < PAGE_SIZE) | 3813 | if (len < PAGE_SIZE) { |
3669 | return -EINVAL; | 3814 | ret = -EINVAL; |
3815 | goto out; | ||
3816 | } | ||
3670 | len &= PAGE_MASK; | 3817 | len &= PAGE_MASK; |
3671 | } | 3818 | } |
3672 | 3819 | ||
3820 | trace_access_lock(info->cpu); | ||
3673 | entries = ring_buffer_entries_cpu(info->tr->buffer, info->cpu); | 3821 | entries = ring_buffer_entries_cpu(info->tr->buffer, info->cpu); |
3674 | 3822 | ||
3675 | for (i = 0; i < PIPE_BUFFERS && len && entries; i++, len -= PAGE_SIZE) { | 3823 | for (i = 0; i < pipe->buffers && len && entries; i++, len -= PAGE_SIZE) { |
3676 | struct page *page; | 3824 | struct page *page; |
3677 | int r; | 3825 | int r; |
3678 | 3826 | ||
@@ -3717,6 +3865,7 @@ tracing_buffers_splice_read(struct file *file, loff_t *ppos, | |||
3717 | entries = ring_buffer_entries_cpu(info->tr->buffer, info->cpu); | 3865 | entries = ring_buffer_entries_cpu(info->tr->buffer, info->cpu); |
3718 | } | 3866 | } |
3719 | 3867 | ||
3868 | trace_access_unlock(info->cpu); | ||
3720 | spd.nr_pages = i; | 3869 | spd.nr_pages = i; |
3721 | 3870 | ||
3722 | /* did we read anything? */ | 3871 | /* did we read anything? */ |
@@ -3726,11 +3875,12 @@ tracing_buffers_splice_read(struct file *file, loff_t *ppos, | |||
3726 | else | 3875 | else |
3727 | ret = 0; | 3876 | ret = 0; |
3728 | /* TODO: block */ | 3877 | /* TODO: block */ |
3729 | return ret; | 3878 | goto out; |
3730 | } | 3879 | } |
3731 | 3880 | ||
3732 | ret = splice_to_pipe(pipe, &spd); | 3881 | ret = splice_to_pipe(pipe, &spd); |
3733 | 3882 | splice_shrink_spd(pipe, &spd); | |
3883 | out: | ||
3734 | return ret; | 3884 | return ret; |
3735 | } | 3885 | } |
3736 | 3886 | ||
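[Annotation] The same conversion is applied to tracing_buffers_splice_read(): the on-stack arrays shrink to PIPE_DEF_BUFFERS entries that serve only as defaults in the splice descriptor, and the fill loop runs up to pipe->buffers, a per-pipe value rather than the old PIPE_BUFFERS constant. Presumably this ties in with the resizable-pipe work: a reader that enlarges its pipe can move more than the old 16-page limit per splice() call. A hedged userspace sketch (path and sizes are illustrative assumptions, and error checking is omitted):

	#define _GNU_SOURCE
	#include <fcntl.h>
	#include <unistd.h>

	int main(void)
	{
		/* illustrative path: per-cpu raw buffer reader */
		int fd = open("/sys/kernel/debug/tracing/per_cpu/cpu0/trace_pipe_raw",
			      O_RDONLY);
		int pfd[2];

		pipe(pfd);
		/* grow the pipe to 64 slots (assuming 4K pages); the kernel side
		 * now sizes the splice descriptor to pipe->buffers, not a fixed 16 */
		fcntl(pfd[1], F_SETPIPE_SZ, 64 * 4096);
		splice(fd, NULL, pfd[1], NULL, 64 * 4096, SPLICE_F_MOVE);
		return 0;
	}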
@@ -3776,6 +3926,7 @@ tracing_stats_read(struct file *filp, char __user *ubuf, | |||
3776 | static const struct file_operations tracing_stats_fops = { | 3926 | static const struct file_operations tracing_stats_fops = { |
3777 | .open = tracing_open_generic, | 3927 | .open = tracing_open_generic, |
3778 | .read = tracing_stats_read, | 3928 | .read = tracing_stats_read, |
3929 | .llseek = generic_file_llseek, | ||
3779 | }; | 3930 | }; |
3780 | 3931 | ||
3781 | #ifdef CONFIG_DYNAMIC_FTRACE | 3932 | #ifdef CONFIG_DYNAMIC_FTRACE |
@@ -3812,6 +3963,7 @@ tracing_read_dyn_info(struct file *filp, char __user *ubuf, | |||
3812 | static const struct file_operations tracing_dyn_info_fops = { | 3963 | static const struct file_operations tracing_dyn_info_fops = { |
3813 | .open = tracing_open_generic, | 3964 | .open = tracing_open_generic, |
3814 | .read = tracing_read_dyn_info, | 3965 | .read = tracing_read_dyn_info, |
3966 | .llseek = generic_file_llseek, | ||
3815 | }; | 3967 | }; |
3816 | #endif | 3968 | #endif |
3817 | 3969 | ||
@@ -3868,13 +4020,9 @@ static void tracing_init_debugfs_percpu(long cpu) | |||
3868 | { | 4020 | { |
3869 | struct dentry *d_percpu = tracing_dentry_percpu(); | 4021 | struct dentry *d_percpu = tracing_dentry_percpu(); |
3870 | struct dentry *d_cpu; | 4022 | struct dentry *d_cpu; |
3871 | /* strlen(cpu) + MAX(log10(cpu)) + '\0' */ | 4023 | char cpu_dir[30]; /* 30 characters should be more than enough */ |
3872 | char cpu_dir[7]; | ||
3873 | 4024 | ||
3874 | if (cpu > 999 || cpu < 0) | 4025 | snprintf(cpu_dir, 30, "cpu%ld", cpu); |
3875 | return; | ||
3876 | |||
3877 | sprintf(cpu_dir, "cpu%ld", cpu); | ||
3878 | d_cpu = debugfs_create_dir(cpu_dir, d_percpu); | 4026 | d_cpu = debugfs_create_dir(cpu_dir, d_percpu); |
3879 | if (!d_cpu) { | 4027 | if (!d_cpu) { |
3880 | pr_warning("Could not create debugfs '%s' entry\n", cpu_dir); | 4028 | pr_warning("Could not create debugfs '%s' entry\n", cpu_dir); |
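[Annotation] The per-cpu debugfs directory name is now built with snprintf() into a generously sized buffer, dropping the artificial cpu > 999 cap that the old 7-byte buffer required. A quick bound check, assuming cpu is a non-negative long:

	char cpu_dir[30];

	/* "cpu" (3) + at most 20 decimal digits of a 64-bit value + '\0' = 24
	 * bytes worst case; snprintf() would truncate rather than overflow even
	 * if that estimate were wrong. */
	snprintf(cpu_dir, sizeof(cpu_dir), "cpu%ld", cpu);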
@@ -3965,6 +4113,7 @@ static const struct file_operations trace_options_fops = { | |||
3965 | .open = tracing_open_generic, | 4113 | .open = tracing_open_generic, |
3966 | .read = trace_options_read, | 4114 | .read = trace_options_read, |
3967 | .write = trace_options_write, | 4115 | .write = trace_options_write, |
4116 | .llseek = generic_file_llseek, | ||
3968 | }; | 4117 | }; |
3969 | 4118 | ||
3970 | static ssize_t | 4119 | static ssize_t |
@@ -4016,6 +4165,7 @@ static const struct file_operations trace_options_core_fops = { | |||
4016 | .open = tracing_open_generic, | 4165 | .open = tracing_open_generic, |
4017 | .read = trace_options_core_read, | 4166 | .read = trace_options_core_read, |
4018 | .write = trace_options_core_write, | 4167 | .write = trace_options_core_write, |
4168 | .llseek = generic_file_llseek, | ||
4019 | }; | 4169 | }; |
4020 | 4170 | ||
4021 | struct dentry *trace_create_file(const char *name, | 4171 | struct dentry *trace_create_file(const char *name, |
@@ -4153,6 +4303,8 @@ static __init int tracer_init_debugfs(void) | |||
4153 | struct dentry *d_tracer; | 4303 | struct dentry *d_tracer; |
4154 | int cpu; | 4304 | int cpu; |
4155 | 4305 | ||
4306 | trace_access_lock_init(); | ||
4307 | |||
4156 | d_tracer = tracing_init_dentry(); | 4308 | d_tracer = tracing_init_dentry(); |
4157 | 4309 | ||
4158 | trace_create_file("tracing_enabled", 0644, d_tracer, | 4310 | trace_create_file("tracing_enabled", 0644, d_tracer, |
@@ -4176,10 +4328,10 @@ static __init int tracer_init_debugfs(void) | |||
4176 | #ifdef CONFIG_TRACER_MAX_TRACE | 4328 | #ifdef CONFIG_TRACER_MAX_TRACE |
4177 | trace_create_file("tracing_max_latency", 0644, d_tracer, | 4329 | trace_create_file("tracing_max_latency", 0644, d_tracer, |
4178 | &tracing_max_latency, &tracing_max_lat_fops); | 4330 | &tracing_max_latency, &tracing_max_lat_fops); |
4331 | #endif | ||
4179 | 4332 | ||
4180 | trace_create_file("tracing_thresh", 0644, d_tracer, | 4333 | trace_create_file("tracing_thresh", 0644, d_tracer, |
4181 | &tracing_thresh, &tracing_max_lat_fops); | 4334 | &tracing_thresh, &tracing_max_lat_fops); |
4182 | #endif | ||
4183 | 4335 | ||
4184 | trace_create_file("README", 0444, d_tracer, | 4336 | trace_create_file("README", 0444, d_tracer, |
4185 | NULL, &tracing_readme_fops); | 4337 | NULL, &tracing_readme_fops); |
@@ -4203,9 +4355,6 @@ static __init int tracer_init_debugfs(void) | |||
4203 | trace_create_file("dyn_ftrace_total_info", 0444, d_tracer, | 4355 | trace_create_file("dyn_ftrace_total_info", 0444, d_tracer, |
4204 | &ftrace_update_tot_cnt, &tracing_dyn_info_fops); | 4356 | &ftrace_update_tot_cnt, &tracing_dyn_info_fops); |
4205 | #endif | 4357 | #endif |
4206 | #ifdef CONFIG_SYSPROF_TRACER | ||
4207 | init_tracer_sysprof_debugfs(d_tracer); | ||
4208 | #endif | ||
4209 | 4358 | ||
4210 | create_trace_options_dir(); | 4359 | create_trace_options_dir(); |
4211 | 4360 | ||
@@ -4219,7 +4368,7 @@ static int trace_panic_handler(struct notifier_block *this, | |||
4219 | unsigned long event, void *unused) | 4368 | unsigned long event, void *unused) |
4220 | { | 4369 | { |
4221 | if (ftrace_dump_on_oops) | 4370 | if (ftrace_dump_on_oops) |
4222 | ftrace_dump(); | 4371 | ftrace_dump(ftrace_dump_on_oops); |
4223 | return NOTIFY_OK; | 4372 | return NOTIFY_OK; |
4224 | } | 4373 | } |
4225 | 4374 | ||
@@ -4236,7 +4385,7 @@ static int trace_die_handler(struct notifier_block *self, | |||
4236 | switch (val) { | 4385 | switch (val) { |
4237 | case DIE_OOPS: | 4386 | case DIE_OOPS: |
4238 | if (ftrace_dump_on_oops) | 4387 | if (ftrace_dump_on_oops) |
4239 | ftrace_dump(); | 4388 | ftrace_dump(ftrace_dump_on_oops); |
4240 | break; | 4389 | break; |
4241 | default: | 4390 | default: |
4242 | break; | 4391 | break; |
@@ -4262,7 +4411,7 @@ static struct notifier_block trace_die_notifier = { | |||
4262 | */ | 4411 | */ |
4263 | #define KERN_TRACE KERN_EMERG | 4412 | #define KERN_TRACE KERN_EMERG |
4264 | 4413 | ||
4265 | static void | 4414 | void |
4266 | trace_printk_seq(struct trace_seq *s) | 4415 | trace_printk_seq(struct trace_seq *s) |
4267 | { | 4416 | { |
4268 | /* Probably should print a warning here. */ | 4417 | /* Probably should print a warning here. */ |
@@ -4277,7 +4426,15 @@ trace_printk_seq(struct trace_seq *s) | |||
4277 | trace_seq_init(s); | 4426 | trace_seq_init(s); |
4278 | } | 4427 | } |
4279 | 4428 | ||
4280 | static void __ftrace_dump(bool disable_tracing) | 4429 | void trace_init_global_iter(struct trace_iterator *iter) |
4430 | { | ||
4431 | iter->tr = &global_trace; | ||
4432 | iter->trace = current_trace; | ||
4433 | iter->cpu_file = TRACE_PIPE_ALL_CPU; | ||
4434 | } | ||
4435 | |||
4436 | static void | ||
4437 | __ftrace_dump(bool disable_tracing, enum ftrace_dump_mode oops_dump_mode) | ||
4281 | { | 4438 | { |
4282 | static arch_spinlock_t ftrace_dump_lock = | 4439 | static arch_spinlock_t ftrace_dump_lock = |
4283 | (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED; | 4440 | (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED; |
@@ -4301,8 +4458,10 @@ static void __ftrace_dump(bool disable_tracing) | |||
4301 | if (disable_tracing) | 4458 | if (disable_tracing) |
4302 | ftrace_kill(); | 4459 | ftrace_kill(); |
4303 | 4460 | ||
4461 | trace_init_global_iter(&iter); | ||
4462 | |||
4304 | for_each_tracing_cpu(cpu) { | 4463 | for_each_tracing_cpu(cpu) { |
4305 | atomic_inc(&global_trace.data[cpu]->disabled); | 4464 | atomic_inc(&iter.tr->data[cpu]->disabled); |
4306 | } | 4465 | } |
4307 | 4466 | ||
4308 | old_userobj = trace_flags & TRACE_ITER_SYM_USEROBJ; | 4467 | old_userobj = trace_flags & TRACE_ITER_SYM_USEROBJ; |
@@ -4310,12 +4469,25 @@ static void __ftrace_dump(bool disable_tracing) | |||
4310 | /* don't look at user memory in panic mode */ | 4469 | /* don't look at user memory in panic mode */ |
4311 | trace_flags &= ~TRACE_ITER_SYM_USEROBJ; | 4470 | trace_flags &= ~TRACE_ITER_SYM_USEROBJ; |
4312 | 4471 | ||
4313 | printk(KERN_TRACE "Dumping ftrace buffer:\n"); | ||
4314 | |||
4315 | /* Simulate the iterator */ | 4472 | /* Simulate the iterator */ |
4316 | iter.tr = &global_trace; | 4473 | iter.tr = &global_trace; |
4317 | iter.trace = current_trace; | 4474 | iter.trace = current_trace; |
4318 | iter.cpu_file = TRACE_PIPE_ALL_CPU; | 4475 | |
4476 | switch (oops_dump_mode) { | ||
4477 | case DUMP_ALL: | ||
4478 | iter.cpu_file = TRACE_PIPE_ALL_CPU; | ||
4479 | break; | ||
4480 | case DUMP_ORIG: | ||
4481 | iter.cpu_file = raw_smp_processor_id(); | ||
4482 | break; | ||
4483 | case DUMP_NONE: | ||
4484 | goto out_enable; | ||
4485 | default: | ||
4486 | printk(KERN_TRACE "Bad dumping mode, switching to all CPUs dump\n"); | ||
4487 | iter.cpu_file = TRACE_PIPE_ALL_CPU; | ||
4488 | } | ||
4489 | |||
4490 | printk(KERN_TRACE "Dumping ftrace buffer:\n"); | ||
4319 | 4491 | ||
4320 | /* | 4492 | /* |
4321 | * We need to stop all tracing on all CPUS to read the | 4493 | * We need to stop all tracing on all CPUS to read the |
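[Annotation] __ftrace_dump() now takes an oops_dump_mode and derives the iterator's cpu_file from it: DUMP_ALL dumps every CPU's buffer, DUMP_ORIG restricts the dump to the CPU reported by raw_smp_processor_id() (the one that oopsed), and DUMP_NONE jumps straight to re-enabling. The panic and die notifiers above simply forward ftrace_dump_on_oops as the mode. The enum presumably lives in a shared header so other code can call ftrace_dump(); a hedged sketch of its shape, inferred from this switch:

	/* Assumed declaration (shared header, not part of this hunk): */
	enum ftrace_dump_mode {
		DUMP_NONE,	/* 0: nothing to dump */
		DUMP_ALL,	/* 1: dump every CPU's buffer */
		DUMP_ORIG,	/* 2: dump only the CPU that triggered the oops */
	};

	/* e.g. a caller elsewhere in the kernel: */
	ftrace_dump(DUMP_ORIG);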
@@ -4338,7 +4510,7 @@ static void __ftrace_dump(bool disable_tracing) | |||
4338 | iter.iter_flags |= TRACE_FILE_LAT_FMT; | 4510 | iter.iter_flags |= TRACE_FILE_LAT_FMT; |
4339 | iter.pos = -1; | 4511 | iter.pos = -1; |
4340 | 4512 | ||
4341 | if (find_next_entry_inc(&iter) != NULL) { | 4513 | if (trace_find_next_entry_inc(&iter) != NULL) { |
4342 | int ret; | 4514 | int ret; |
4343 | 4515 | ||
4344 | ret = print_trace_line(&iter); | 4516 | ret = print_trace_line(&iter); |
@@ -4354,12 +4526,13 @@ static void __ftrace_dump(bool disable_tracing) | |||
4354 | else | 4526 | else |
4355 | printk(KERN_TRACE "---------------------------------\n"); | 4527 | printk(KERN_TRACE "---------------------------------\n"); |
4356 | 4528 | ||
4529 | out_enable: | ||
4357 | /* Re-enable tracing if requested */ | 4530 | /* Re-enable tracing if requested */ |
4358 | if (!disable_tracing) { | 4531 | if (!disable_tracing) { |
4359 | trace_flags |= old_userobj; | 4532 | trace_flags |= old_userobj; |
4360 | 4533 | ||
4361 | for_each_tracing_cpu(cpu) { | 4534 | for_each_tracing_cpu(cpu) { |
4362 | atomic_dec(&global_trace.data[cpu]->disabled); | 4535 | atomic_dec(&iter.tr->data[cpu]->disabled); |
4363 | } | 4536 | } |
4364 | tracing_on(); | 4537 | tracing_on(); |
4365 | } | 4538 | } |
@@ -4370,9 +4543,9 @@ static void __ftrace_dump(bool disable_tracing) | |||
4370 | } | 4543 | } |
4371 | 4544 | ||
4372 | /* By default: disable tracing after the dump */ | 4545 | /* By default: disable tracing after the dump */ |
4373 | void ftrace_dump(void) | 4546 | void ftrace_dump(enum ftrace_dump_mode oops_dump_mode) |
4374 | { | 4547 | { |
4375 | __ftrace_dump(true); | 4548 | __ftrace_dump(true, oops_dump_mode); |
4376 | } | 4549 | } |
4377 | 4550 | ||
4378 | __init static int tracer_alloc_buffers(void) | 4551 | __init static int tracer_alloc_buffers(void) |
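[Annotation] trace_printk_seq() loses its static, and the new trace_init_global_iter() packages the boilerplate of pointing an iterator at the global buffer; together with trace_find_next_entry_inc() this gives code outside this file enough to walk and print the trace much as __ftrace_dump() does above. A rough, hedged sketch of such a consumer (not code from this patch; it omits the per-cpu disable and locking that __ftrace_dump() performs):

	static void dump_cpu_buffer(int cpu)
	{
		struct trace_iterator iter;

		memset(&iter, 0, sizeof(iter));
		trace_init_global_iter(&iter);
		iter.cpu_file = cpu;		/* or TRACE_PIPE_ALL_CPU for every cpu */
		iter.iter_flags |= TRACE_FILE_LAT_FMT;
		iter.pos = -1;

		while (trace_find_next_entry_inc(&iter)) {
			if (print_trace_line(&iter) != TRACE_TYPE_NO_CONSUME)
				trace_consume(&iter);
			trace_printk_seq(&iter.seq);	/* print and reset the seq */
		}
	}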
@@ -4387,9 +4560,6 @@ __init static int tracer_alloc_buffers(void) | |||
4387 | if (!alloc_cpumask_var(&tracing_cpumask, GFP_KERNEL)) | 4560 | if (!alloc_cpumask_var(&tracing_cpumask, GFP_KERNEL)) |
4388 | goto out_free_buffer_mask; | 4561 | goto out_free_buffer_mask; |
4389 | 4562 | ||
4390 | if (!zalloc_cpumask_var(&tracing_reader_cpumask, GFP_KERNEL)) | ||
4391 | goto out_free_tracing_cpumask; | ||
4392 | |||
4393 | /* To save memory, keep the ring buffer size to its minimum */ | 4563 | /* To save memory, keep the ring buffer size to its minimum */ |
4394 | if (ring_buffer_expanded) | 4564 | if (ring_buffer_expanded) |
4395 | ring_buf_size = trace_buf_size; | 4565 | ring_buf_size = trace_buf_size; |
@@ -4411,16 +4581,14 @@ __init static int tracer_alloc_buffers(void) | |||
4411 | 4581 | ||
4412 | 4582 | ||
4413 | #ifdef CONFIG_TRACER_MAX_TRACE | 4583 | #ifdef CONFIG_TRACER_MAX_TRACE |
4414 | max_tr.buffer = ring_buffer_alloc(ring_buf_size, | 4584 | max_tr.buffer = ring_buffer_alloc(1, TRACE_BUFFER_FLAGS); |
4415 | TRACE_BUFFER_FLAGS); | ||
4416 | if (!max_tr.buffer) { | 4585 | if (!max_tr.buffer) { |
4417 | printk(KERN_ERR "tracer: failed to allocate max ring buffer!\n"); | 4586 | printk(KERN_ERR "tracer: failed to allocate max ring buffer!\n"); |
4418 | WARN_ON(1); | 4587 | WARN_ON(1); |
4419 | ring_buffer_free(global_trace.buffer); | 4588 | ring_buffer_free(global_trace.buffer); |
4420 | goto out_free_cpumask; | 4589 | goto out_free_cpumask; |
4421 | } | 4590 | } |
4422 | max_tr.entries = ring_buffer_size(max_tr.buffer); | 4591 | max_tr.entries = 1; |
4423 | WARN_ON(max_tr.entries != global_trace.entries); | ||
4424 | #endif | 4592 | #endif |
4425 | 4593 | ||
4426 | /* Allocate the first page for all buffers */ | 4594 | /* Allocate the first page for all buffers */ |
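[Annotation] max_tr, the snapshot buffer used by the latency tracers, is now created with a single entry instead of mirroring the live buffer's size, and the matching max_tr.entries update in tracing_entries_write() above is gone. The implication, presumably handled elsewhere in this series (tracing_set_tracer() is a likely spot, but that is an assumption), is that the snapshot buffer is grown to the live buffer's size only when a tracer that actually uses it is selected, and shrunk back afterwards. A hedged sketch of that idea; names beyond those visible in this diff are assumptions:

	/* Sketch only: grow the snapshot buffer on demand, shrink it when unused. */
	if (tracer_needs_snapshot) {
		ret = ring_buffer_resize(max_tr.buffer, global_trace.entries);
		if (ret < 0)
			return ret;
		max_tr.entries = global_trace.entries;
	} else {
		ring_buffer_resize(max_tr.buffer, 1);
		max_tr.entries = 1;
	}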
@@ -4433,9 +4601,6 @@ __init static int tracer_alloc_buffers(void) | |||
4433 | 4601 | ||
4434 | register_tracer(&nop_trace); | 4602 | register_tracer(&nop_trace); |
4435 | current_trace = &nop_trace; | 4603 | current_trace = &nop_trace; |
4436 | #ifdef CONFIG_BOOT_TRACER | ||
4437 | register_tracer(&boot_tracer); | ||
4438 | #endif | ||
4439 | /* All seems OK, enable tracing */ | 4604 | /* All seems OK, enable tracing */ |
4440 | tracing_disabled = 0; | 4605 | tracing_disabled = 0; |
4441 | 4606 | ||
@@ -4447,8 +4612,6 @@ __init static int tracer_alloc_buffers(void) | |||
4447 | return 0; | 4612 | return 0; |
4448 | 4613 | ||
4449 | out_free_cpumask: | 4614 | out_free_cpumask: |
4450 | free_cpumask_var(tracing_reader_cpumask); | ||
4451 | out_free_tracing_cpumask: | ||
4452 | free_cpumask_var(tracing_cpumask); | 4615 | free_cpumask_var(tracing_cpumask); |
4453 | out_free_buffer_mask: | 4616 | out_free_buffer_mask: |
4454 | free_cpumask_var(tracing_buffer_mask); | 4617 | free_cpumask_var(tracing_buffer_mask); |