Diffstat (limited to 'kernel/trace')
 kernel/trace/Kconfig                 |  6
 kernel/trace/blktrace.c              |  1
 kernel/trace/ftrace.c                | 28
 kernel/trace/ring_buffer.c           | 15
 kernel/trace/trace.c                 | 14
 kernel/trace/trace.h                 |  4
 kernel/trace/trace_event_profile.c   |  2
 kernel/trace/trace_event_types.h     |  3
 kernel/trace/trace_events.c          |  4
 kernel/trace/trace_events_filter.c   | 20
 kernel/trace/trace_functions.c       |  2
 kernel/trace/trace_functions_graph.c | 11
 kernel/trace/trace_output.c          |  3
 kernel/trace/trace_printk.c          |  2
 kernel/trace/trace_stack.c           | 11
 kernel/trace/trace_stat.c            | 34
 16 files changed, 98 insertions(+), 62 deletions(-)
diff --git a/kernel/trace/Kconfig b/kernel/trace/Kconfig
index 1551f47e7669..019f380fd764 100644
--- a/kernel/trace/Kconfig
+++ b/kernel/trace/Kconfig
@@ -226,13 +226,13 @@ config BOOT_TRACER
           the timings of the initcalls and traces key events and the identity
           of tasks that can cause boot delays, such as context-switches.
 
-          Its aim is to be parsed by the /scripts/bootgraph.pl tool to
+          Its aim is to be parsed by the scripts/bootgraph.pl tool to
           produce pretty graphics about boot inefficiencies, giving a visual
           representation of the delays during initcalls - but the raw
           /debug/tracing/trace text output is readable too.
 
-          You must pass in ftrace=initcall to the kernel command line
-          to enable this on bootup.
+          You must pass in initcall_debug and ftrace=initcall to the kernel
+          command line to enable this on bootup.
 
 config TRACE_BRANCH_PROFILING
         bool
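
The corrected help text spells out that two boot parameters are now needed. As a concrete illustration (the kernel image name and root device below are placeholders, not part of this patch), the boot entry would carry both:

    linux /boot/vmlinuz root=/dev/sda1 ro initcall_debug ftrace=initcall

After boot, the result is typically post-processed with "dmesg | perl scripts/bootgraph.pl > boot.svg", or read directly from /debug/tracing/trace as the help text notes.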
diff --git a/kernel/trace/blktrace.c b/kernel/trace/blktrace.c
index 39af8af6fc30..1090b0aed9ba 100644
--- a/kernel/trace/blktrace.c
+++ b/kernel/trace/blktrace.c
@@ -22,6 +22,7 @@
 #include <linux/init.h>
 #include <linux/mutex.h>
 #include <linux/debugfs.h>
+#include <linux/smp_lock.h>
 #include <linux/time.h>
 #include <linux/uaccess.h>
 
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index f3716bf04df6..1e1d23c26308 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -768,7 +768,7 @@ static struct tracer_stat function_stats __initdata = {
         .stat_show      = function_stat_show
 };
 
-static void ftrace_profile_debugfs(struct dentry *d_tracer)
+static __init void ftrace_profile_debugfs(struct dentry *d_tracer)
 {
         struct ftrace_profile_stat *stat;
         struct dentry *entry;
@@ -786,7 +786,6 @@ static void ftrace_profile_debugfs(struct dentry *d_tracer)
                          * The files created are permanent, if something happens
                          * we still do not free memory.
                          */
-                        kfree(stat);
                         WARN(1,
                              "Could not allocate stat file for cpu %d\n",
                              cpu);
@@ -813,7 +812,7 @@ static void ftrace_profile_debugfs(struct dentry *d_tracer)
 }
 
 #else /* CONFIG_FUNCTION_PROFILER */
-static void ftrace_profile_debugfs(struct dentry *d_tracer)
+static __init void ftrace_profile_debugfs(struct dentry *d_tracer)
 {
 }
 #endif /* CONFIG_FUNCTION_PROFILER */
@@ -1663,7 +1662,7 @@ ftrace_regex_open(struct inode *inode, struct file *file, int enable)
 
         mutex_lock(&ftrace_regex_lock);
         if ((file->f_mode & FMODE_WRITE) &&
-            !(file->f_flags & O_APPEND))
+            (file->f_flags & O_TRUNC))
                 ftrace_filter_reset(enable);
 
         if (file->f_mode & FMODE_READ) {
@@ -2578,7 +2577,7 @@ ftrace_graph_open(struct inode *inode, struct file *file)
 
         mutex_lock(&graph_lock);
         if ((file->f_mode & FMODE_WRITE) &&
-            !(file->f_flags & O_APPEND)) {
+            (file->f_flags & O_TRUNC)) {
                 ftrace_graph_count = 0;
                 memset(ftrace_graph_funcs, 0, sizeof(ftrace_graph_funcs));
         }
@@ -2597,6 +2596,14 @@ ftrace_graph_open(struct inode *inode, struct file *file)
 }
 
 static int
+ftrace_graph_release(struct inode *inode, struct file *file)
+{
+        if (file->f_mode & FMODE_READ)
+                seq_release(inode, file);
+        return 0;
+}
+
+static int
 ftrace_set_func(unsigned long *array, int *idx, char *buffer)
 {
         struct dyn_ftrace *rec;
@@ -2725,9 +2732,10 @@ ftrace_graph_write(struct file *file, const char __user *ubuf,
 }
 
 static const struct file_operations ftrace_graph_fops = {
         .open           = ftrace_graph_open,
         .read           = seq_read,
         .write          = ftrace_graph_write,
+        .release        = ftrace_graph_release,
 };
 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
 
@@ -3160,10 +3168,10 @@ ftrace_enable_sysctl(struct ctl_table *table, int write,
 
         ret = proc_dointvec(table, write, file, buffer, lenp, ppos);
 
-        if (ret || !write || (last_ftrace_enabled == ftrace_enabled))
+        if (ret || !write || (last_ftrace_enabled == !!ftrace_enabled))
                 goto out;
 
-        last_ftrace_enabled = ftrace_enabled;
+        last_ftrace_enabled = !!ftrace_enabled;
 
         if (ftrace_enabled) {
 
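
The O_APPEND checks replaced above (and the matching ones in trace.c and trace_events.c further down) all follow the same rule: clear the old state only when the writer explicitly truncates the file. A minimal sketch of that pattern, with my_state_open() and my_state_reset() as hypothetical stand-ins rather than anything from this patch:

#include <linux/fs.h>

/* Hypothetical: drop whatever list the file controls. */
static void my_state_reset(void)
{
}

static int my_state_open(struct inode *inode, struct file *file)
{
        /*
         * "echo x > file" opens with O_TRUNC, "echo x >> file" with
         * O_APPEND, and a plain read-write open sets neither flag.
         * Keying the reset on O_TRUNC (rather than on !O_APPEND) means
         * only an explicit truncating open wipes the existing state.
         */
        if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC))
                my_state_reset();

        return 0;
}

In ftrace terms: "echo func > set_ftrace_filter" replaces the filter list, while "echo func >> set_ftrace_filter" appends to it.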
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index bf27bb7a63e2..a330513d96ce 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -735,6 +735,7 @@ ring_buffer_free(struct ring_buffer *buffer)
 
         put_online_cpus();
 
+        kfree(buffer->buffers);
         free_cpumask_var(buffer->cpumask);
 
         kfree(buffer);
@@ -1785,7 +1786,7 @@ void ring_buffer_discard_commit(struct ring_buffer *buffer,
          */
         RB_WARN_ON(buffer, !local_read(&cpu_buffer->committing));
 
-        if (!rb_try_to_discard(cpu_buffer, event))
+        if (rb_try_to_discard(cpu_buffer, event))
                 goto out;
 
         /*
@@ -2383,7 +2384,6 @@ rb_buffer_peek(struct ring_buffer *buffer, int cpu, u64 *ts)
                  * the box. Return the padding, and we will release
                  * the current locks, and try again.
                  */
-                rb_advance_reader(cpu_buffer);
                 return event;
 
         case RINGBUF_TYPE_TIME_EXTEND:
@@ -2486,7 +2486,7 @@ static inline int rb_ok_to_lock(void)
          * buffer too. A one time deal is all you get from reading
          * the ring buffer from an NMI.
          */
-        if (likely(!in_nmi() && !oops_in_progress))
+        if (likely(!in_nmi()))
                 return 1;
 
         tracing_off_permanent();
@@ -2519,6 +2519,8 @@ ring_buffer_peek(struct ring_buffer *buffer, int cpu, u64 *ts)
         if (dolock)
                 spin_lock(&cpu_buffer->reader_lock);
         event = rb_buffer_peek(buffer, cpu, ts);
+        if (event && event->type_len == RINGBUF_TYPE_PADDING)
+                rb_advance_reader(cpu_buffer);
         if (dolock)
                 spin_unlock(&cpu_buffer->reader_lock);
         local_irq_restore(flags);
@@ -2590,12 +2592,9 @@ ring_buffer_consume(struct ring_buffer *buffer, int cpu, u64 *ts)
                 spin_lock(&cpu_buffer->reader_lock);
 
         event = rb_buffer_peek(buffer, cpu, ts);
-        if (!event)
-                goto out_unlock;
-
-        rb_advance_reader(cpu_buffer);
+        if (event)
+                rb_advance_reader(cpu_buffer);
 
- out_unlock:
         if (dolock)
                 spin_unlock(&cpu_buffer->reader_lock);
         local_irq_restore(flags);
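
With rb_advance_reader() moved out of rb_buffer_peek(), a peek no longer consumes entries behind the caller's back: ring_buffer_peek() steps past padding itself, and ring_buffer_consume() advances exactly once per returned event, all under one hold of the reader lock. A minimal usage sketch of the consuming side, assuming a hypothetical struct my_payload that some producer wrote with the reserve/commit API (none of the my_* names are from this patch):

#include <linux/kernel.h>
#include <linux/ring_buffer.h>

struct my_payload {
        int value;
};

static void my_drain_cpu(struct ring_buffer *buffer, int cpu)
{
        struct ring_buffer_event *event;
        u64 ts;

        /* Each successful consume returns one event and advances the reader. */
        while ((event = ring_buffer_consume(buffer, cpu, &ts)) != NULL) {
                struct my_payload *p = ring_buffer_event_data(event);

                pr_info("cpu%d @ %llu: %d\n", cpu,
                        (unsigned long long)ts, p->value);
        }
}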
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 3aa0a0dfdfa8..c22b40f8f576 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -17,6 +17,7 @@
 #include <linux/writeback.h>
 #include <linux/kallsyms.h>
 #include <linux/seq_file.h>
+#include <linux/smp_lock.h>
 #include <linux/notifier.h>
 #include <linux/irqflags.h>
 #include <linux/debugfs.h>
@@ -847,6 +848,7 @@ tracing_generic_entry_update(struct trace_entry *entry, unsigned long flags,
                 ((pc & SOFTIRQ_MASK) ? TRACE_FLAG_SOFTIRQ : 0) |
                 (need_resched() ? TRACE_FLAG_NEED_RESCHED : 0);
 }
+EXPORT_SYMBOL_GPL(tracing_generic_entry_update);
 
 struct ring_buffer_event *trace_buffer_lock_reserve(struct trace_array *tr,
                                                     int type,
@@ -2030,7 +2032,7 @@ static int tracing_open(struct inode *inode, struct file *file)
 
         /* If this file was open for write, then erase contents */
         if ((file->f_mode & FMODE_WRITE) &&
-            !(file->f_flags & O_APPEND)) {
+            (file->f_flags & O_TRUNC)) {
                 long cpu = (long) inode->i_private;
 
                 if (cpu == TRACE_PIPE_ALL_CPU)
@@ -3084,7 +3086,8 @@ tracing_fill_pipe_page(size_t rem, struct trace_iterator *iter)
                         break;
         }
 
-        trace_consume(iter);
+        if (ret != TRACE_TYPE_NO_CONSUME)
+                trace_consume(iter);
         rem -= count;
         if (!find_next_entry_inc(iter)) {
                 rem = 0;
@@ -4232,8 +4235,11 @@ static void __ftrace_dump(bool disable_tracing)
         iter.pos = -1;
 
         if (find_next_entry_inc(&iter) != NULL) {
-                print_trace_line(&iter);
-                trace_consume(&iter);
+                int ret;
+
+                ret = print_trace_line(&iter);
+                if (ret != TRACE_TYPE_NO_CONSUME)
+                        trace_consume(&iter);
         }
 
         trace_printk_seq(&iter.seq);
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
index 3548ae5cc780..8b9f4f6e9559 100644
--- a/kernel/trace/trace.h
+++ b/kernel/trace/trace.h
@@ -438,10 +438,6 @@ struct trace_entry *tracing_get_trace_entry(struct trace_array *tr,
 struct trace_entry *trace_find_next_entry(struct trace_iterator *iter,
                                           int *ent_cpu, u64 *ent_ts);
 
-void tracing_generic_entry_update(struct trace_entry *entry,
-                                  unsigned long flags,
-                                  int pc);
-
 void default_wait_pipe(struct trace_iterator *iter);
 void poll_wait_pipe(struct trace_iterator *iter);
 
diff --git a/kernel/trace/trace_event_profile.c b/kernel/trace/trace_event_profile.c
index 5b5895afecfe..11ba5bb4ed0a 100644
--- a/kernel/trace/trace_event_profile.c
+++ b/kernel/trace/trace_event_profile.c
@@ -14,7 +14,7 @@ int ftrace_profile_enable(int event_id)
 
         mutex_lock(&event_mutex);
         list_for_each_entry(event, &ftrace_events, list) {
-                if (event->id == event_id) {
+                if (event->id == event_id && event->profile_enable) {
                         ret = event->profile_enable(event);
                         break;
                 }
diff --git a/kernel/trace/trace_event_types.h b/kernel/trace/trace_event_types.h
index 5e32e375134d..6db005e12487 100644
--- a/kernel/trace/trace_event_types.h
+++ b/kernel/trace/trace_event_types.h
@@ -26,6 +26,9 @@ TRACE_EVENT_FORMAT(funcgraph_exit, TRACE_GRAPH_RET,
         ftrace_graph_ret_entry, ignore,
         TRACE_STRUCT(
                 TRACE_FIELD(unsigned long, ret.func, func)
+                TRACE_FIELD(unsigned long long, ret.calltime, calltime)
+                TRACE_FIELD(unsigned long long, ret.rettime, rettime)
+                TRACE_FIELD(unsigned long, ret.overrun, overrun)
                 TRACE_FIELD(int, ret.depth, depth)
         ),
         TP_RAW_FMT("<-- %lx (%d)")
diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
index 53c8fd376a88..e75276a49cf5 100644
--- a/kernel/trace/trace_events.c
+++ b/kernel/trace/trace_events.c
@@ -376,7 +376,7 @@ ftrace_event_seq_open(struct inode *inode, struct file *file)
         const struct seq_operations *seq_ops;
 
         if ((file->f_mode & FMODE_WRITE) &&
-            !(file->f_flags & O_APPEND))
+            (file->f_flags & O_TRUNC))
                 ftrace_clear_events();
 
         seq_ops = inode->i_private;
@@ -940,7 +940,7 @@ event_create_dir(struct ftrace_event_call *call, struct dentry *d_events,
         entry = trace_create_file("enable", 0644, call->dir, call,
                                   enable);
 
-        if (call->id)
+        if (call->id && call->profile_enable)
                 entry = trace_create_file("id", 0444, call->dir, call,
                                           id);
 
diff --git a/kernel/trace/trace_events_filter.c b/kernel/trace/trace_events_filter.c
index 936c621bbf46..f32dc9d1ea7b 100644
--- a/kernel/trace/trace_events_filter.c
+++ b/kernel/trace/trace_events_filter.c
@@ -624,9 +624,6 @@ static int filter_add_subsystem_pred(struct filter_parse_state *ps,
                 return -ENOSPC;
         }
 
-        filter->preds[filter->n_preds] = pred;
-        filter->n_preds++;
-
         list_for_each_entry(call, &ftrace_events, list) {
 
                 if (!call->define_fields)
@@ -643,6 +640,9 @@ static int filter_add_subsystem_pred(struct filter_parse_state *ps,
                 }
                 replace_filter_string(call->filter, filter_string);
         }
+
+        filter->preds[filter->n_preds] = pred;
+        filter->n_preds++;
 out:
         return err;
 }
@@ -1029,12 +1029,17 @@ static int replace_preds(struct event_subsystem *system,
 
                 if (elt->op == OP_AND || elt->op == OP_OR) {
                         pred = create_logical_pred(elt->op);
+                        if (!pred)
+                                return -ENOMEM;
                         if (call) {
                                 err = filter_add_pred(ps, call, pred);
                                 filter_free_pred(pred);
-                        } else
+                        } else {
                                 err = filter_add_subsystem_pred(ps, system,
                                                                 pred, filter_string);
+                                if (err)
+                                        filter_free_pred(pred);
+                        }
                         if (err)
                                 return err;
 
@@ -1048,12 +1053,17 @@ static int replace_preds(struct event_subsystem *system,
                 }
 
                 pred = create_pred(elt->op, operand1, operand2);
+                if (!pred)
+                        return -ENOMEM;
                 if (call) {
                         err = filter_add_pred(ps, call, pred);
                         filter_free_pred(pred);
-                } else
+                } else {
                         err = filter_add_subsystem_pred(ps, system, pred,
                                                         filter_string);
+                        if (err)
+                                filter_free_pred(pred);
+                }
                 if (err)
                         return err;
 
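
Both replace_preds() branches now follow the same ownership rule: check the allocation before using the predicate, and free it if the hand-off fails (in the per-call branch the predicate is always freed after filter_add_pred(), while the subsystem branch only keeps it on success). A generic sketch of that rule, with every name hypothetical rather than the tracer's real API:

#include <linux/errno.h>

struct pred;
struct parser;

struct pred *create_pred(int op);                 /* may return NULL */
int add_pred(struct parser *ps, struct pred *p);  /* takes ownership only on success */
void free_pred(struct pred *p);

static int install_pred(struct parser *ps, int op)
{
        struct pred *pred = create_pred(op);
        int err;

        if (!pred)
                return -ENOMEM;        /* first fix: check the allocation */

        err = add_pred(ps, pred);
        if (err)
                free_pred(pred);       /* second fix: do not leak on failure */

        return err;
}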
diff --git a/kernel/trace/trace_functions.c b/kernel/trace/trace_functions.c
index 7402144bff21..75ef000613c3 100644
--- a/kernel/trace/trace_functions.c
+++ b/kernel/trace/trace_functions.c
@@ -363,7 +363,7 @@ ftrace_trace_onoff_callback(char *glob, char *cmd, char *param, int enable)
  out_reg:
         ret = register_ftrace_function_probe(glob, ops, count);
 
-        return ret;
+        return ret < 0 ? ret : 0;
 }
 
 static struct ftrace_func_command ftrace_traceon_cmd = {
diff --git a/kernel/trace/trace_functions_graph.c b/kernel/trace/trace_functions_graph.c
index d2249abafb53..420ec3487579 100644
--- a/kernel/trace/trace_functions_graph.c
+++ b/kernel/trace/trace_functions_graph.c
@@ -843,9 +843,16 @@ print_graph_function(struct trace_iterator *iter)
 
         switch (entry->type) {
         case TRACE_GRAPH_ENT: {
-                struct ftrace_graph_ent_entry *field;
+                /*
+                 * print_graph_entry() may consume the current event,
+                 * thus @field may become invalid, so we need to save it.
+                 * sizeof(struct ftrace_graph_ent_entry) is very small,
+                 * it can be safely saved at the stack.
+                 */
+                struct ftrace_graph_ent_entry *field, saved;
                 trace_assign_type(field, entry);
-                return print_graph_entry(field, s, iter);
+                saved = *field;
+                return print_graph_entry(&saved, s, iter);
         }
         case TRACE_GRAPH_RET: {
                 struct ftrace_graph_ret_entry *field;
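
The new comment carries the reasoning: print_graph_entry() may consume the current event, so the ring-buffer slot that field points into can be recycled while it is still being formatted; copying the small entry onto the stack first keeps the later accesses safe. The same idea in isolation (all names below are hypothetical):

struct ent {
        unsigned long func;
        int depth;
};

void consume_slot(struct ent *slot);      /* may recycle the slot's storage */
int do_print(const struct ent *e);

static int handle_entry(struct ent *slot_in_buffer)
{
        struct ent saved = *slot_in_buffer;   /* struct copy onto the stack */

        consume_slot(slot_in_buffer);         /* the slot may now be reused... */
        return do_print(&saved);              /* ...but the copy stays valid */
}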
diff --git a/kernel/trace/trace_output.c b/kernel/trace/trace_output.c
index 7938f3ae93e3..e0c2545622e8 100644
--- a/kernel/trace/trace_output.c
+++ b/kernel/trace/trace_output.c
@@ -27,8 +27,7 @@ void trace_print_seq(struct seq_file *m, struct trace_seq *s)
 {
         int len = s->len >= PAGE_SIZE ? PAGE_SIZE - 1 : s->len;
 
-        s->buffer[len] = 0;
-        seq_puts(m, s->buffer);
+        seq_write(m, s->buffer, len);
 
         trace_seq_init(s);
 }
diff --git a/kernel/trace/trace_printk.c b/kernel/trace/trace_printk.c
index 7b6278110827..687699d365ae 100644
--- a/kernel/trace/trace_printk.c
+++ b/kernel/trace/trace_printk.c
@@ -176,7 +176,7 @@ static int t_show(struct seq_file *m, void *v)
         const char *str = *fmt;
         int i;
 
-        seq_printf(m, "0x%lx : \"", (unsigned long)fmt);
+        seq_printf(m, "0x%lx : \"", *(unsigned long *)fmt);
 
         /*
          * Tabs and new lines need to be converted.
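
In t_show() the iterator value is a pointer into a table of format-string pointers (note the existing "const char *str = *fmt;" line), so the address worth printing is the one stored in the slot, not the address of the slot itself, hence the extra dereference. A standalone illustration of the difference:

#include <stdio.h>

int main(void)
{
        const char *fmt = "hello %d\n";
        const char **pos = &fmt;        /* what the seq iterator hands out */

        printf("0x%lx\n", (unsigned long)pos);    /* old: address of the slot */
        printf("0x%lx\n", *(unsigned long *)pos); /* new: address of the string */
        printf("0x%lx\n", (unsigned long)*pos);   /* same value, plainer cast */
        return 0;
}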
diff --git a/kernel/trace/trace_stack.c b/kernel/trace/trace_stack.c
index 2d7aebd71dbd..6a2a9d484cd6 100644
--- a/kernel/trace/trace_stack.c
+++ b/kernel/trace/trace_stack.c
@@ -301,17 +301,14 @@ static const struct seq_operations stack_trace_seq_ops = {
 
 static int stack_trace_open(struct inode *inode, struct file *file)
 {
-        int ret;
-
-        ret = seq_open(file, &stack_trace_seq_ops);
-
-        return ret;
+        return seq_open(file, &stack_trace_seq_ops);
 }
 
 static const struct file_operations stack_trace_fops = {
         .open = stack_trace_open,
         .read = seq_read,
         .llseek = seq_lseek,
+        .release = seq_release,
 };
 
 int
@@ -326,10 +323,10 @@ stack_trace_sysctl(struct ctl_table *table, int write,
         ret = proc_dointvec(table, write, file, buffer, lenp, ppos);
 
         if (ret || !write ||
-            (last_stack_tracer_enabled == stack_tracer_enabled))
+            (last_stack_tracer_enabled == !!stack_tracer_enabled))
                 goto out;
 
-        last_stack_tracer_enabled = stack_tracer_enabled;
+        last_stack_tracer_enabled = !!stack_tracer_enabled;
 
         if (stack_tracer_enabled)
                 register_ftrace_function(&trace_ops);
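
Both this handler and ftrace_enable_sysctl() above now cache the proc_dointvec() result after normalizing it with !!, so any non-zero write is treated as 1: writing 2 after 1 becomes a no-op instead of being mistaken for a state change that re-registers the ftrace ops. A minimal sketch of the idiom, with the my_* names as hypothetical stand-ins:

static int my_enabled;      /* written by proc_dointvec(), can be any int */
static int last_enabled;    /* cached strictly as 0 or 1 */

static void my_register(void);
static void my_unregister(void);

static void my_apply_sysctl(void)
{
        if (last_enabled == !!my_enabled)   /* e.g. 5 after 1: no real change */
                return;

        last_enabled = !!my_enabled;

        if (my_enabled)
                my_register();              /* enable hook */
        else
                my_unregister();            /* disable hook */
}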
diff --git a/kernel/trace/trace_stat.c b/kernel/trace/trace_stat.c
index e66f5e493342..aea321c82fa0 100644
--- a/kernel/trace/trace_stat.c
+++ b/kernel/trace/trace_stat.c
@@ -73,7 +73,7 @@ static struct rb_node *release_next(struct rb_node *node)
         }
 }
 
-static void reset_stat_session(struct stat_session *session)
+static void __reset_stat_session(struct stat_session *session)
 {
         struct rb_node *node = session->stat_root.rb_node;
 
@@ -83,10 +83,17 @@ static void reset_stat_session(struct stat_session *session)
         session->stat_root = RB_ROOT;
 }
 
+static void reset_stat_session(struct stat_session *session)
+{
+        mutex_lock(&session->stat_mutex);
+        __reset_stat_session(session);
+        mutex_unlock(&session->stat_mutex);
+}
+
 static void destroy_session(struct stat_session *session)
 {
         debugfs_remove(session->file);
-        reset_stat_session(session);
+        __reset_stat_session(session);
         mutex_destroy(&session->stat_mutex);
         kfree(session);
 }
@@ -150,7 +157,7 @@ static int stat_seq_init(struct stat_session *session)
         int i;
 
         mutex_lock(&session->stat_mutex);
-        reset_stat_session(session);
+        __reset_stat_session(session);
 
         if (!ts->stat_cmp)
                 ts->stat_cmp = dummy_cmp;
@@ -183,7 +190,7 @@ exit:
         return ret;
 
 exit_free_rbtree:
-        reset_stat_session(session);
+        __reset_stat_session(session);
         mutex_unlock(&session->stat_mutex);
         return ret;
 }
@@ -250,16 +257,21 @@ static const struct seq_operations trace_stat_seq_ops = {
 static int tracing_stat_open(struct inode *inode, struct file *file)
 {
         int ret;
-
+        struct seq_file *m;
         struct stat_session *session = inode->i_private;
 
+        ret = stat_seq_init(session);
+        if (ret)
+                return ret;
+
         ret = seq_open(file, &trace_stat_seq_ops);
-        if (!ret) {
-                struct seq_file *m = file->private_data;
-                m->private = session;
-                ret = stat_seq_init(session);
+        if (ret) {
+                reset_stat_session(session);
+                return ret;
         }
 
+        m = file->private_data;
+        m->private = session;
         return ret;
 }
 
@@ -270,11 +282,9 @@ static int tracing_stat_release(struct inode *i, struct file *f)
 {
         struct stat_session *session = i->i_private;
 
-        mutex_lock(&session->stat_mutex);
         reset_stat_session(session);
-        mutex_unlock(&session->stat_mutex);
 
-        return 0;
+        return seq_release(i, f);
 }
 
 static const struct file_operations tracing_stat_fops = {
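
Several of the leaks fixed in this series (ftrace_graph_fops, stack_trace_fops, tracing_stat_fops) reduce to the same rule: an .open that calls seq_open() allocates a struct seq_file in file->private_data, and must be paired with seq_release() in .release, either directly or from a wrapper as tracing_stat_release() now does, so that allocation is freed on close. A minimal sketch of the pairing, with the my_* names hypothetical and my_seq_ops assumed to be defined elsewhere:

#include <linux/fs.h>
#include <linux/seq_file.h>

extern const struct seq_operations my_seq_ops;  /* hypothetical iterator */

static int my_open(struct inode *inode, struct file *file)
{
        return seq_open(file, &my_seq_ops);     /* allocates file->private_data */
}

static const struct file_operations my_fops = {
        .open    = my_open,
        .read    = seq_read,
        .llseek  = seq_lseek,
        .release = seq_release,                 /* frees what seq_open() allocated */
};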