Diffstat (limited to 'kernel/trace')
-rw-r--r--  kernel/trace/ftrace.c                | 19
-rw-r--r--  kernel/trace/ring_buffer.c           | 15
-rw-r--r--  kernel/trace/trace.c                 | 13
-rw-r--r--  kernel/trace/trace.h                 |  4
-rw-r--r--  kernel/trace/trace_event_profile.c   |  2
-rw-r--r--  kernel/trace/trace_events.c          |  4
-rw-r--r--  kernel/trace/trace_functions_graph.c | 11
-rw-r--r--  kernel/trace/trace_printk.c          |  2
-rw-r--r--  kernel/trace/trace_stack.c           |  7
-rw-r--r--  kernel/trace/trace_stat.c            | 34
10 files changed, 67 insertions, 44 deletions
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index 24e3ff53b24b..094863416b2e 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -1617,7 +1617,7 @@ ftrace_regex_open(struct inode *inode, struct file *file, int enable)
 
 	mutex_lock(&ftrace_regex_lock);
 	if ((file->f_mode & FMODE_WRITE) &&
-	    !(file->f_flags & O_APPEND))
+	    (file->f_flags & O_TRUNC))
 		ftrace_filter_reset(enable);
 
 	if (file->f_mode & FMODE_READ) {
@@ -2527,7 +2527,7 @@ ftrace_graph_open(struct inode *inode, struct file *file)
 
 	mutex_lock(&graph_lock);
 	if ((file->f_mode & FMODE_WRITE) &&
-	    !(file->f_flags & O_APPEND)) {
+	    (file->f_flags & O_TRUNC)) {
 		ftrace_graph_count = 0;
 		memset(ftrace_graph_funcs, 0, sizeof(ftrace_graph_funcs));
 	}
@@ -2546,6 +2546,14 @@ ftrace_graph_open(struct inode *inode, struct file *file)
 }
 
 static int
+ftrace_graph_release(struct inode *inode, struct file *file)
+{
+	if (file->f_mode & FMODE_READ)
+		seq_release(inode, file);
+	return 0;
+}
+
+static int
 ftrace_set_func(unsigned long *array, int *idx, char *buffer)
 {
 	struct dyn_ftrace *rec;
@@ -2674,9 +2682,10 @@ ftrace_graph_write(struct file *file, const char __user *ubuf,
 }
 
 static const struct file_operations ftrace_graph_fops = {
 	.open = ftrace_graph_open,
 	.read = seq_read,
 	.write = ftrace_graph_write,
+	.release = ftrace_graph_release,
 };
 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
 
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index 51633d74a21e..da2c59d8f486 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -1170,6 +1170,7 @@ ring_buffer_free(struct ring_buffer *buffer)
 
 	put_online_cpus();
 
+	kfree(buffer->buffers);
 	free_cpumask_var(buffer->cpumask);
 
 	kfree(buffer);
@@ -2379,7 +2380,7 @@ void ring_buffer_discard_commit(struct ring_buffer *buffer,
 	 */
 	RB_WARN_ON(buffer, !local_read(&cpu_buffer->committing));
 
-	if (!rb_try_to_discard(cpu_buffer, event))
+	if (rb_try_to_discard(cpu_buffer, event))
 		goto out;
 
 	/*
@@ -2990,7 +2991,6 @@ rb_buffer_peek(struct ring_buffer *buffer, int cpu, u64 *ts)
 		 * the box. Return the padding, and we will release
 		 * the current locks, and try again.
 		 */
-		rb_advance_reader(cpu_buffer);
 		return event;
 
 	case RINGBUF_TYPE_TIME_EXTEND:
@@ -3093,7 +3093,7 @@ static inline int rb_ok_to_lock(void)
 	 * buffer too. A one time deal is all you get from reading
 	 * the ring buffer from an NMI.
 	 */
-	if (likely(!in_nmi() && !oops_in_progress))
+	if (likely(!in_nmi()))
 		return 1;
 
 	tracing_off_permanent();
@@ -3126,6 +3126,8 @@ ring_buffer_peek(struct ring_buffer *buffer, int cpu, u64 *ts)
 	if (dolock)
 		spin_lock(&cpu_buffer->reader_lock);
 	event = rb_buffer_peek(buffer, cpu, ts);
+	if (event && event->type_len == RINGBUF_TYPE_PADDING)
+		rb_advance_reader(cpu_buffer);
 	if (dolock)
 		spin_unlock(&cpu_buffer->reader_lock);
 	local_irq_restore(flags);
@@ -3197,12 +3199,9 @@ ring_buffer_consume(struct ring_buffer *buffer, int cpu, u64 *ts)
 		spin_lock(&cpu_buffer->reader_lock);
 
 	event = rb_buffer_peek(buffer, cpu, ts);
-	if (!event)
-		goto out_unlock;
-
-	rb_advance_reader(cpu_buffer);
+	if (event)
+		rb_advance_reader(cpu_buffer);
 
- out_unlock:
 	if (dolock)
 		spin_unlock(&cpu_buffer->reader_lock);
 	local_irq_restore(flags);
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 0cfd1a62def1..e793cda91dd3 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -848,6 +848,7 @@ tracing_generic_entry_update(struct trace_entry *entry, unsigned long flags,
 		((pc & SOFTIRQ_MASK) ? TRACE_FLAG_SOFTIRQ : 0) |
 		(need_resched() ? TRACE_FLAG_NEED_RESCHED : 0);
 }
+EXPORT_SYMBOL_GPL(tracing_generic_entry_update);
 
 struct ring_buffer_event *trace_buffer_lock_reserve(struct trace_array *tr,
 						    int type,
@@ -1857,7 +1858,7 @@ static int tracing_open(struct inode *inode, struct file *file)
 
 	/* If this file was open for write, then erase contents */
 	if ((file->f_mode & FMODE_WRITE) &&
-	    !(file->f_flags & O_APPEND)) {
+	    (file->f_flags & O_TRUNC)) {
 		long cpu = (long) inode->i_private;
 
 		if (cpu == TRACE_PIPE_ALL_CPU)
@@ -2911,7 +2912,8 @@ tracing_fill_pipe_page(size_t rem, struct trace_iterator *iter)
 			break;
 		}
 
-		trace_consume(iter);
+		if (ret != TRACE_TYPE_NO_CONSUME)
+			trace_consume(iter);
 		rem -= count;
 		if (!find_next_entry_inc(iter)) {
 			rem = 0;
@@ -4056,8 +4058,11 @@ static void __ftrace_dump(bool disable_tracing)
 		iter.pos = -1;
 
 		if (find_next_entry_inc(&iter) != NULL) {
-			print_trace_line(&iter);
-			trace_consume(&iter);
+			int ret;
+
+			ret = print_trace_line(&iter);
+			if (ret != TRACE_TYPE_NO_CONSUME)
+				trace_consume(&iter);
 		}
 
 		trace_printk_seq(&iter.seq);
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
index 9301f1263c5c..d682357e4b1f 100644
--- a/kernel/trace/trace.h
+++ b/kernel/trace/trace.h
@@ -438,10 +438,6 @@ struct trace_entry *tracing_get_trace_entry(struct trace_array *tr,
 struct trace_entry *trace_find_next_entry(struct trace_iterator *iter,
 					  int *ent_cpu, u64 *ent_ts);
 
-void tracing_generic_entry_update(struct trace_entry *entry,
-				  unsigned long flags,
-				  int pc);
-
 void default_wait_pipe(struct trace_iterator *iter);
 void poll_wait_pipe(struct trace_iterator *iter);
 
diff --git a/kernel/trace/trace_event_profile.c b/kernel/trace/trace_event_profile.c
index 5b5895afecfe..11ba5bb4ed0a 100644
--- a/kernel/trace/trace_event_profile.c
+++ b/kernel/trace/trace_event_profile.c
@@ -14,7 +14,7 @@ int ftrace_profile_enable(int event_id)
 
 	mutex_lock(&event_mutex);
 	list_for_each_entry(event, &ftrace_events, list) {
-		if (event->id == event_id) {
+		if (event->id == event_id && event->profile_enable) {
 			ret = event->profile_enable(event);
 			break;
 		}
diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
index 70ecb7653b46..e0cbede96783 100644
--- a/kernel/trace/trace_events.c
+++ b/kernel/trace/trace_events.c
@@ -378,7 +378,7 @@ ftrace_event_seq_open(struct inode *inode, struct file *file)
 	const struct seq_operations *seq_ops;
 
 	if ((file->f_mode & FMODE_WRITE) &&
-	    !(file->f_flags & O_APPEND))
+	    (file->f_flags & O_TRUNC))
 		ftrace_clear_events();
 
 	seq_ops = inode->i_private;
@@ -945,7 +945,7 @@ event_create_dir(struct ftrace_event_call *call, struct dentry *d_events,
 	entry = trace_create_file("enable", 0644, call->dir, call,
 				  enable);
 
-	if (call->id)
+	if (call->id && call->profile_enable)
 		entry = trace_create_file("id", 0444, call->dir, call,
 					  id);
 
diff --git a/kernel/trace/trace_functions_graph.c b/kernel/trace/trace_functions_graph.c
index f97244a41a4f..3f4a251b7d16 100644
--- a/kernel/trace/trace_functions_graph.c
+++ b/kernel/trace/trace_functions_graph.c
@@ -927,9 +927,16 @@ print_graph_function(struct trace_iterator *iter)
 
 	switch (entry->type) {
 	case TRACE_GRAPH_ENT: {
-		struct ftrace_graph_ent_entry *field;
+		/*
+		 * print_graph_entry() may consume the current event,
+		 * thus @field may become invalid, so we need to save it.
+		 * sizeof(struct ftrace_graph_ent_entry) is very small,
+		 * it can be safely saved at the stack.
+		 */
+		struct ftrace_graph_ent_entry *field, saved;
 		trace_assign_type(field, entry);
-		return print_graph_entry(field, s, iter);
+		saved = *field;
+		return print_graph_entry(&saved, s, iter);
 	}
 	case TRACE_GRAPH_RET: {
 		struct ftrace_graph_ret_entry *field;
diff --git a/kernel/trace/trace_printk.c b/kernel/trace/trace_printk.c
index 7b6278110827..687699d365ae 100644
--- a/kernel/trace/trace_printk.c
+++ b/kernel/trace/trace_printk.c
@@ -176,7 +176,7 @@ static int t_show(struct seq_file *m, void *v)
 	const char *str = *fmt;
 	int i;
 
-	seq_printf(m, "0x%lx : \"", (unsigned long)fmt);
+	seq_printf(m, "0x%lx : \"", *(unsigned long *)fmt);
 
 	/*
 	 * Tabs and new lines need to be converted.
diff --git a/kernel/trace/trace_stack.c b/kernel/trace/trace_stack.c
index a4dc8d9ad1b1..0da1cff08d67 100644
--- a/kernel/trace/trace_stack.c
+++ b/kernel/trace/trace_stack.c
@@ -294,17 +294,14 @@ static const struct seq_operations stack_trace_seq_ops = {
 
 static int stack_trace_open(struct inode *inode, struct file *file)
 {
-	int ret;
-
-	ret = seq_open(file, &stack_trace_seq_ops);
-
-	return ret;
+	return seq_open(file, &stack_trace_seq_ops);
 }
 
 static const struct file_operations stack_trace_fops = {
 	.open = stack_trace_open,
 	.read = seq_read,
 	.llseek = seq_lseek,
+	.release = seq_release,
 };
 
 int
diff --git a/kernel/trace/trace_stat.c b/kernel/trace/trace_stat.c
index f069461f10bd..07c60b09258f 100644
--- a/kernel/trace/trace_stat.c
+++ b/kernel/trace/trace_stat.c
@@ -76,7 +76,7 @@ static struct rb_node *release_next(struct tracer_stat *ts,
 	}
 }
 
-static void reset_stat_session(struct stat_session *session)
+static void __reset_stat_session(struct stat_session *session)
 {
 	struct rb_node *node = session->stat_root.rb_node;
 
@@ -86,10 +86,17 @@ static void reset_stat_session(struct stat_session *session)
 	session->stat_root = RB_ROOT;
 }
 
+static void reset_stat_session(struct stat_session *session)
+{
+	mutex_lock(&session->stat_mutex);
+	__reset_stat_session(session);
+	mutex_unlock(&session->stat_mutex);
+}
+
 static void destroy_session(struct stat_session *session)
 {
 	debugfs_remove(session->file);
-	reset_stat_session(session);
+	__reset_stat_session(session);
 	mutex_destroy(&session->stat_mutex);
 	kfree(session);
 }
@@ -153,7 +160,7 @@ static int stat_seq_init(struct stat_session *session)
 	int i;
 
 	mutex_lock(&session->stat_mutex);
-	reset_stat_session(session);
+	__reset_stat_session(session);
 
 	if (!ts->stat_cmp)
 		ts->stat_cmp = dummy_cmp;
@@ -186,7 +193,7 @@ exit:
 	return ret;
 
 exit_free_rbtree:
-	reset_stat_session(session);
+	__reset_stat_session(session);
 	mutex_unlock(&session->stat_mutex);
 	return ret;
 }
@@ -253,16 +260,21 @@ static const struct seq_operations trace_stat_seq_ops = {
 static int tracing_stat_open(struct inode *inode, struct file *file)
 {
 	int ret;
-
+	struct seq_file *m;
 	struct stat_session *session = inode->i_private;
 
+	ret = stat_seq_init(session);
+	if (ret)
+		return ret;
+
 	ret = seq_open(file, &trace_stat_seq_ops);
-	if (!ret) {
-		struct seq_file *m = file->private_data;
-		m->private = session;
-		ret = stat_seq_init(session);
+	if (ret) {
+		reset_stat_session(session);
+		return ret;
 	}
 
+	m = file->private_data;
+	m->private = session;
 	return ret;
 }
 
@@ -273,11 +285,9 @@ static int tracing_stat_release(struct inode *i, struct file *f)
 {
 	struct stat_session *session = i->i_private;
 
-	mutex_lock(&session->stat_mutex);
 	reset_stat_session(session);
-	mutex_unlock(&session->stat_mutex);
 
-	return 0;
+	return seq_release(i, f);
 }
 
 static const struct file_operations tracing_stat_fops = {