diff options
-rw-r--r-- | include/trace/ftrace.h | 4 | ||||
-rw-r--r-- | kernel/trace/ring_buffer.c | 26 | ||||
-rw-r--r-- | kernel/trace/trace.c | 34 | ||||
-rw-r--r-- | kernel/trace/trace.h | 10 | ||||
-rw-r--r-- | kernel/trace/trace_event_perf.c | 10 | ||||
-rw-r--r-- | kernel/trace/trace_events.c | 98 | ||||
-rw-r--r-- | kernel/trace/trace_events_filter.c | 4 | ||||
-rw-r--r-- | kernel/trace/trace_functions.c | 2 | ||||
-rw-r--r-- | kernel/trace/trace_functions_graph.c | 54 | ||||
-rw-r--r-- | kernel/trace/trace_kprobe.c | 29 | ||||
-rw-r--r-- | kernel/trace/trace_mmiotrace.c | 8 | ||||
-rw-r--r-- | kernel/trace/trace_output.c | 14 | ||||
-rw-r--r-- | kernel/trace/trace_syscalls.c | 26 | ||||
-rw-r--r-- | kernel/trace/trace_uprobe.c | 2 |
14 files changed, 166 insertions(+), 155 deletions(-)
diff --git a/include/trace/ftrace.h b/include/trace/ftrace.h index d615f78cc6b6..41a6643e2136 100644 --- a/include/trace/ftrace.h +++ b/include/trace/ftrace.h | |||
@@ -670,10 +670,6 @@ perf_trace_##call(void *__data, proto) \ | |||
670 | sizeof(u64)); \ | 670 | sizeof(u64)); \ |
671 | __entry_size -= sizeof(u32); \ | 671 | __entry_size -= sizeof(u32); \ |
672 | \ | 672 | \ |
673 | if (WARN_ONCE(__entry_size > PERF_MAX_TRACE_SIZE, \ | ||
674 | "profile buffer not large enough")) \ | ||
675 | return; \ | ||
676 | \ | ||
677 | entry = (struct ftrace_raw_##call *)perf_trace_buf_prepare( \ | 673 | entry = (struct ftrace_raw_##call *)perf_trace_buf_prepare( \ |
678 | __entry_size, event_call->event.type, &__regs, &rctx); \ | 674 | __entry_size, event_call->event.type, &__regs, &rctx); \ |
679 | if (!entry) \ | 675 | if (!entry) \ |
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c index e444ff88f0a4..cc2f66f68dc5 100644 --- a/kernel/trace/ring_buffer.c +++ b/kernel/trace/ring_buffer.c | |||
@@ -36,11 +36,11 @@ int ring_buffer_print_entry_header(struct trace_seq *s) | |||
36 | { | 36 | { |
37 | int ret; | 37 | int ret; |
38 | 38 | ||
39 | ret = trace_seq_printf(s, "# compressed entry header\n"); | 39 | ret = trace_seq_puts(s, "# compressed entry header\n"); |
40 | ret = trace_seq_printf(s, "\ttype_len : 5 bits\n"); | 40 | ret = trace_seq_puts(s, "\ttype_len : 5 bits\n"); |
41 | ret = trace_seq_printf(s, "\ttime_delta : 27 bits\n"); | 41 | ret = trace_seq_puts(s, "\ttime_delta : 27 bits\n"); |
42 | ret = trace_seq_printf(s, "\tarray : 32 bits\n"); | 42 | ret = trace_seq_puts(s, "\tarray : 32 bits\n"); |
43 | ret = trace_seq_printf(s, "\n"); | 43 | ret = trace_seq_putc(s, '\n'); |
44 | ret = trace_seq_printf(s, "\tpadding : type == %d\n", | 44 | ret = trace_seq_printf(s, "\tpadding : type == %d\n", |
45 | RINGBUF_TYPE_PADDING); | 45 | RINGBUF_TYPE_PADDING); |
46 | ret = trace_seq_printf(s, "\ttime_extend : type == %d\n", | 46 | ret = trace_seq_printf(s, "\ttime_extend : type == %d\n", |
@@ -1066,7 +1066,7 @@ static int rb_check_list(struct ring_buffer_per_cpu *cpu_buffer, | |||
1066 | } | 1066 | } |
1067 | 1067 | ||
1068 | /** | 1068 | /** |
1069 | * check_pages - integrity check of buffer pages | 1069 | * rb_check_pages - integrity check of buffer pages |
1070 | * @cpu_buffer: CPU buffer with pages to test | 1070 | * @cpu_buffer: CPU buffer with pages to test |
1071 | * | 1071 | * |
1072 | * As a safety measure we check to make sure the data pages have not | 1072 | * As a safety measure we check to make sure the data pages have not |
@@ -1258,7 +1258,7 @@ static int rb_cpu_notify(struct notifier_block *self, | |||
1258 | #endif | 1258 | #endif |
1259 | 1259 | ||
1260 | /** | 1260 | /** |
1261 | * ring_buffer_alloc - allocate a new ring_buffer | 1261 | * __ring_buffer_alloc - allocate a new ring_buffer |
1262 | * @size: the size in bytes per cpu that is needed. | 1262 | * @size: the size in bytes per cpu that is needed. |
1263 | * @flags: attributes to set for the ring buffer. | 1263 | * @flags: attributes to set for the ring buffer. |
1264 | * | 1264 | * |
@@ -1607,6 +1607,7 @@ static void update_pages_handler(struct work_struct *work) | |||
1607 | * ring_buffer_resize - resize the ring buffer | 1607 | * ring_buffer_resize - resize the ring buffer |
1608 | * @buffer: the buffer to resize. | 1608 | * @buffer: the buffer to resize. |
1609 | * @size: the new size. | 1609 | * @size: the new size. |
1610 | * @cpu_id: the cpu buffer to resize | ||
1610 | * | 1611 | * |
1611 | * Minimum size is 2 * BUF_PAGE_SIZE. | 1612 | * Minimum size is 2 * BUF_PAGE_SIZE. |
1612 | * | 1613 | * |
@@ -3956,11 +3957,11 @@ EXPORT_SYMBOL_GPL(ring_buffer_consume); | |||
3956 | * expected. | 3957 | * expected. |
3957 | * | 3958 | * |
3958 | * After a sequence of ring_buffer_read_prepare calls, the user is | 3959 | * After a sequence of ring_buffer_read_prepare calls, the user is |
3959 | * expected to make at least one call to ring_buffer_prepare_sync. | 3960 | * expected to make at least one call to ring_buffer_read_prepare_sync. |
3960 | * Afterwards, ring_buffer_read_start is invoked to get things going | 3961 | * Afterwards, ring_buffer_read_start is invoked to get things going |
3961 | * for real. | 3962 | * for real. |
3962 | * | 3963 | * |
3963 | * This overall must be paired with ring_buffer_finish. | 3964 | * This overall must be paired with ring_buffer_read_finish. |
3964 | */ | 3965 | */ |
3965 | struct ring_buffer_iter * | 3966 | struct ring_buffer_iter * |
3966 | ring_buffer_read_prepare(struct ring_buffer *buffer, int cpu) | 3967 | ring_buffer_read_prepare(struct ring_buffer *buffer, int cpu) |
@@ -4009,7 +4010,7 @@ EXPORT_SYMBOL_GPL(ring_buffer_read_prepare_sync); | |||
4009 | * an intervening ring_buffer_read_prepare_sync must have been | 4010 | * an intervening ring_buffer_read_prepare_sync must have been |
4010 | * performed. | 4011 | * performed. |
4011 | * | 4012 | * |
4012 | * Must be paired with ring_buffer_finish. | 4013 | * Must be paired with ring_buffer_read_finish. |
4013 | */ | 4014 | */ |
4014 | void | 4015 | void |
4015 | ring_buffer_read_start(struct ring_buffer_iter *iter) | 4016 | ring_buffer_read_start(struct ring_buffer_iter *iter) |
@@ -4031,7 +4032,7 @@ ring_buffer_read_start(struct ring_buffer_iter *iter) | |||
4031 | EXPORT_SYMBOL_GPL(ring_buffer_read_start); | 4032 | EXPORT_SYMBOL_GPL(ring_buffer_read_start); |
4032 | 4033 | ||
4033 | /** | 4034 | /** |
4034 | * ring_buffer_finish - finish reading the iterator of the buffer | 4035 | * ring_buffer_read_finish - finish reading the iterator of the buffer |
4035 | * @iter: The iterator retrieved by ring_buffer_start | 4036 | * @iter: The iterator retrieved by ring_buffer_start |
4036 | * | 4037 | * |
4037 | * This re-enables the recording to the buffer, and frees the | 4038 | * This re-enables the recording to the buffer, and frees the |
@@ -4346,6 +4347,7 @@ EXPORT_SYMBOL_GPL(ring_buffer_swap_cpu); | |||
4346 | /** | 4347 | /** |
4347 | * ring_buffer_alloc_read_page - allocate a page to read from buffer | 4348 | * ring_buffer_alloc_read_page - allocate a page to read from buffer |
4348 | * @buffer: the buffer to allocate for. | 4349 | * @buffer: the buffer to allocate for. |
4350 | * @cpu: the cpu buffer to allocate. | ||
4349 | * | 4351 | * |
4350 | * This function is used in conjunction with ring_buffer_read_page. | 4352 | * This function is used in conjunction with ring_buffer_read_page. |
4351 | * When reading a full page from the ring buffer, these functions | 4353 | * When reading a full page from the ring buffer, these functions |
@@ -4403,7 +4405,7 @@ EXPORT_SYMBOL_GPL(ring_buffer_free_read_page); | |||
4403 | * to swap with a page in the ring buffer. | 4405 | * to swap with a page in the ring buffer. |
4404 | * | 4406 | * |
4405 | * for example: | 4407 | * for example: |
4406 | * rpage = ring_buffer_alloc_read_page(buffer); | 4408 | * rpage = ring_buffer_alloc_read_page(buffer, cpu); |
4407 | * if (!rpage) | 4409 | * if (!rpage) |
4408 | * return error; | 4410 | * return error; |
4409 | * ret = ring_buffer_read_page(buffer, &rpage, len, cpu, 0); | 4411 | * ret = ring_buffer_read_page(buffer, &rpage, len, cpu, 0); |
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c index 0cd500bffd9b..3f2477713aca 100644 --- a/kernel/trace/trace.c +++ b/kernel/trace/trace.c | |||
@@ -3008,7 +3008,6 @@ static int tracing_release(struct inode *inode, struct file *file) | |||
3008 | 3008 | ||
3009 | iter = m->private; | 3009 | iter = m->private; |
3010 | tr = iter->tr; | 3010 | tr = iter->tr; |
3011 | trace_array_put(tr); | ||
3012 | 3011 | ||
3013 | mutex_lock(&trace_types_lock); | 3012 | mutex_lock(&trace_types_lock); |
3014 | 3013 | ||
@@ -3023,6 +3022,9 @@ static int tracing_release(struct inode *inode, struct file *file) | |||
3023 | if (!iter->snapshot) | 3022 | if (!iter->snapshot) |
3024 | /* reenable tracing if it was previously enabled */ | 3023 | /* reenable tracing if it was previously enabled */ |
3025 | tracing_start_tr(tr); | 3024 | tracing_start_tr(tr); |
3025 | |||
3026 | __trace_array_put(tr); | ||
3027 | |||
3026 | mutex_unlock(&trace_types_lock); | 3028 | mutex_unlock(&trace_types_lock); |
3027 | 3029 | ||
3028 | mutex_destroy(&iter->mutex); | 3030 | mutex_destroy(&iter->mutex); |
@@ -3447,6 +3449,7 @@ tracing_trace_options_write(struct file *filp, const char __user *ubuf, | |||
3447 | static int tracing_trace_options_open(struct inode *inode, struct file *file) | 3449 | static int tracing_trace_options_open(struct inode *inode, struct file *file) |
3448 | { | 3450 | { |
3449 | struct trace_array *tr = inode->i_private; | 3451 | struct trace_array *tr = inode->i_private; |
3452 | int ret; | ||
3450 | 3453 | ||
3451 | if (tracing_disabled) | 3454 | if (tracing_disabled) |
3452 | return -ENODEV; | 3455 | return -ENODEV; |
@@ -3454,7 +3457,11 @@ static int tracing_trace_options_open(struct inode *inode, struct file *file) | |||
3454 | if (trace_array_get(tr) < 0) | 3457 | if (trace_array_get(tr) < 0) |
3455 | return -ENODEV; | 3458 | return -ENODEV; |
3456 | 3459 | ||
3457 | return single_open(file, tracing_trace_options_show, inode->i_private); | 3460 | ret = single_open(file, tracing_trace_options_show, inode->i_private); |
3461 | if (ret < 0) | ||
3462 | trace_array_put(tr); | ||
3463 | |||
3464 | return ret; | ||
3458 | } | 3465 | } |
3459 | 3466 | ||
3460 | static const struct file_operations tracing_iter_fops = { | 3467 | static const struct file_operations tracing_iter_fops = { |
@@ -3537,14 +3544,14 @@ static const char readme_msg[] = | |||
3537 | "\n snapshot\t\t- Like 'trace' but shows the content of the static snapshot buffer\n" | 3544 | "\n snapshot\t\t- Like 'trace' but shows the content of the static snapshot buffer\n" |
3538 | "\t\t\t Read the contents for more information\n" | 3545 | "\t\t\t Read the contents for more information\n" |
3539 | #endif | 3546 | #endif |
3540 | #ifdef CONFIG_STACKTRACE | 3547 | #ifdef CONFIG_STACK_TRACER |
3541 | " stack_trace\t\t- Shows the max stack trace when active\n" | 3548 | " stack_trace\t\t- Shows the max stack trace when active\n" |
3542 | " stack_max_size\t- Shows current max stack size that was traced\n" | 3549 | " stack_max_size\t- Shows current max stack size that was traced\n" |
3543 | "\t\t\t Write into this file to reset the max size (trigger a new trace)\n" | 3550 | "\t\t\t Write into this file to reset the max size (trigger a new trace)\n" |
3544 | #ifdef CONFIG_DYNAMIC_FTRACE | 3551 | #ifdef CONFIG_DYNAMIC_FTRACE |
3545 | " stack_trace_filter\t- Like set_ftrace_filter but limits what stack_trace traces\n" | 3552 | " stack_trace_filter\t- Like set_ftrace_filter but limits what stack_trace traces\n" |
3546 | #endif | 3553 | #endif |
3547 | #endif /* CONFIG_STACKTRACE */ | 3554 | #endif /* CONFIG_STACK_TRACER */ |
3548 | ; | 3555 | ; |
3549 | 3556 | ||
3550 | static ssize_t | 3557 | static ssize_t |
@@ -3958,6 +3965,7 @@ static int tracing_open_pipe(struct inode *inode, struct file *filp) | |||
3958 | iter = kzalloc(sizeof(*iter), GFP_KERNEL); | 3965 | iter = kzalloc(sizeof(*iter), GFP_KERNEL); |
3959 | if (!iter) { | 3966 | if (!iter) { |
3960 | ret = -ENOMEM; | 3967 | ret = -ENOMEM; |
3968 | __trace_array_put(tr); | ||
3961 | goto out; | 3969 | goto out; |
3962 | } | 3970 | } |
3963 | 3971 | ||
@@ -4704,21 +4712,24 @@ static int tracing_snapshot_open(struct inode *inode, struct file *file) | |||
4704 | ret = PTR_ERR(iter); | 4712 | ret = PTR_ERR(iter); |
4705 | } else { | 4713 | } else { |
4706 | /* Writes still need the seq_file to hold the private data */ | 4714 | /* Writes still need the seq_file to hold the private data */ |
4715 | ret = -ENOMEM; | ||
4707 | m = kzalloc(sizeof(*m), GFP_KERNEL); | 4716 | m = kzalloc(sizeof(*m), GFP_KERNEL); |
4708 | if (!m) | 4717 | if (!m) |
4709 | return -ENOMEM; | 4718 | goto out; |
4710 | iter = kzalloc(sizeof(*iter), GFP_KERNEL); | 4719 | iter = kzalloc(sizeof(*iter), GFP_KERNEL); |
4711 | if (!iter) { | 4720 | if (!iter) { |
4712 | kfree(m); | 4721 | kfree(m); |
4713 | return -ENOMEM; | 4722 | goto out; |
4714 | } | 4723 | } |
4724 | ret = 0; | ||
4725 | |||
4715 | iter->tr = tr; | 4726 | iter->tr = tr; |
4716 | iter->trace_buffer = &tc->tr->max_buffer; | 4727 | iter->trace_buffer = &tc->tr->max_buffer; |
4717 | iter->cpu_file = tc->cpu; | 4728 | iter->cpu_file = tc->cpu; |
4718 | m->private = iter; | 4729 | m->private = iter; |
4719 | file->private_data = m; | 4730 | file->private_data = m; |
4720 | } | 4731 | } |
4721 | 4732 | out: | |
4722 | if (ret < 0) | 4733 | if (ret < 0) |
4723 | trace_array_put(tr); | 4734 | trace_array_put(tr); |
4724 | 4735 | ||
@@ -4948,8 +4959,6 @@ static int tracing_buffers_open(struct inode *inode, struct file *filp) | |||
4948 | 4959 | ||
4949 | mutex_lock(&trace_types_lock); | 4960 | mutex_lock(&trace_types_lock); |
4950 | 4961 | ||
4951 | tr->ref++; | ||
4952 | |||
4953 | info->iter.tr = tr; | 4962 | info->iter.tr = tr; |
4954 | info->iter.cpu_file = tc->cpu; | 4963 | info->iter.cpu_file = tc->cpu; |
4955 | info->iter.trace = tr->current_trace; | 4964 | info->iter.trace = tr->current_trace; |
@@ -5328,9 +5337,10 @@ tracing_stats_read(struct file *filp, char __user *ubuf, | |||
5328 | } | 5337 | } |
5329 | 5338 | ||
5330 | static const struct file_operations tracing_stats_fops = { | 5339 | static const struct file_operations tracing_stats_fops = { |
5331 | .open = tracing_open_generic, | 5340 | .open = tracing_open_generic_tc, |
5332 | .read = tracing_stats_read, | 5341 | .read = tracing_stats_read, |
5333 | .llseek = generic_file_llseek, | 5342 | .llseek = generic_file_llseek, |
5343 | .release = tracing_release_generic_tc, | ||
5334 | }; | 5344 | }; |
5335 | 5345 | ||
5336 | #ifdef CONFIG_DYNAMIC_FTRACE | 5346 | #ifdef CONFIG_DYNAMIC_FTRACE |
@@ -5973,8 +5983,10 @@ static int new_instance_create(const char *name) | |||
5973 | goto out_free_tr; | 5983 | goto out_free_tr; |
5974 | 5984 | ||
5975 | ret = event_trace_add_tracer(tr->dir, tr); | 5985 | ret = event_trace_add_tracer(tr->dir, tr); |
5976 | if (ret) | 5986 | if (ret) { |
5987 | debugfs_remove_recursive(tr->dir); | ||
5977 | goto out_free_tr; | 5988 | goto out_free_tr; |
5989 | } | ||
5978 | 5990 | ||
5979 | init_tracer_debugfs(tr, tr->dir); | 5991 | init_tracer_debugfs(tr, tr->dir); |
5980 | 5992 | ||
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h index 4a4f6e1828b6..e7d643b8a907 100644 --- a/kernel/trace/trace.h +++ b/kernel/trace/trace.h | |||
@@ -214,7 +214,6 @@ struct trace_array { | |||
214 | struct dentry *event_dir; | 214 | struct dentry *event_dir; |
215 | struct list_head systems; | 215 | struct list_head systems; |
216 | struct list_head events; | 216 | struct list_head events; |
217 | struct task_struct *waiter; | ||
218 | int ref; | 217 | int ref; |
219 | }; | 218 | }; |
220 | 219 | ||
@@ -680,6 +679,15 @@ extern int trace_selftest_startup_sched_switch(struct tracer *trace, | |||
680 | struct trace_array *tr); | 679 | struct trace_array *tr); |
681 | extern int trace_selftest_startup_branch(struct tracer *trace, | 680 | extern int trace_selftest_startup_branch(struct tracer *trace, |
682 | struct trace_array *tr); | 681 | struct trace_array *tr); |
682 | /* | ||
683 | * Tracer data references selftest functions that only occur | ||
684 | * on boot up. These can be __init functions. Thus, when selftests | ||
685 | * are enabled, then the tracers need to reference __init functions. | ||
686 | */ | ||
687 | #define __tracer_data __refdata | ||
688 | #else | ||
689 | /* Tracers are seldom changed. Optimize when selftests are disabled. */ | ||
690 | #define __tracer_data __read_mostly | ||
683 | #endif /* CONFIG_FTRACE_STARTUP_TEST */ | 691 | #endif /* CONFIG_FTRACE_STARTUP_TEST */ |
684 | 692 | ||
685 | extern void *head_page(struct trace_array_cpu *data); | 693 | extern void *head_page(struct trace_array_cpu *data); |
diff --git a/kernel/trace/trace_event_perf.c b/kernel/trace/trace_event_perf.c index 84b1e045faba..80c36bcf66e8 100644 --- a/kernel/trace/trace_event_perf.c +++ b/kernel/trace/trace_event_perf.c | |||
@@ -236,6 +236,10 @@ __kprobes void *perf_trace_buf_prepare(int size, unsigned short type, | |||
236 | 236 | ||
237 | BUILD_BUG_ON(PERF_MAX_TRACE_SIZE % sizeof(unsigned long)); | 237 | BUILD_BUG_ON(PERF_MAX_TRACE_SIZE % sizeof(unsigned long)); |
238 | 238 | ||
239 | if (WARN_ONCE(size > PERF_MAX_TRACE_SIZE, | ||
240 | "perf buffer not large enough")) | ||
241 | return NULL; | ||
242 | |||
239 | pc = preempt_count(); | 243 | pc = preempt_count(); |
240 | 244 | ||
241 | *rctxp = perf_swevent_get_recursion_context(); | 245 | *rctxp = perf_swevent_get_recursion_context(); |
@@ -266,6 +270,10 @@ perf_ftrace_function_call(unsigned long ip, unsigned long parent_ip, | |||
266 | struct pt_regs regs; | 270 | struct pt_regs regs; |
267 | int rctx; | 271 | int rctx; |
268 | 272 | ||
273 | head = this_cpu_ptr(event_function.perf_events); | ||
274 | if (hlist_empty(head)) | ||
275 | return; | ||
276 | |||
269 | #define ENTRY_SIZE (ALIGN(sizeof(struct ftrace_entry) + sizeof(u32), \ | 277 | #define ENTRY_SIZE (ALIGN(sizeof(struct ftrace_entry) + sizeof(u32), \ |
270 | sizeof(u64)) - sizeof(u32)) | 278 | sizeof(u64)) - sizeof(u32)) |
271 | 279 | ||
@@ -279,8 +287,6 @@ perf_ftrace_function_call(unsigned long ip, unsigned long parent_ip, | |||
279 | 287 | ||
280 | entry->ip = ip; | 288 | entry->ip = ip; |
281 | entry->parent_ip = parent_ip; | 289 | entry->parent_ip = parent_ip; |
282 | |||
283 | head = this_cpu_ptr(event_function.perf_events); | ||
284 | perf_trace_buf_submit(entry, ENTRY_SIZE, rctx, 0, | 290 | perf_trace_buf_submit(entry, ENTRY_SIZE, rctx, 0, |
285 | 1, ®s, head, NULL); | 291 | 1, ®s, head, NULL); |
286 | 292 | ||
diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c index 7d854290bf81..898f868833f2 100644 --- a/kernel/trace/trace_events.c +++ b/kernel/trace/trace_events.c | |||
@@ -826,59 +826,33 @@ enum { | |||
826 | static void *f_next(struct seq_file *m, void *v, loff_t *pos) | 826 | static void *f_next(struct seq_file *m, void *v, loff_t *pos) |
827 | { | 827 | { |
828 | struct ftrace_event_call *call = m->private; | 828 | struct ftrace_event_call *call = m->private; |
829 | struct ftrace_event_field *field; | ||
830 | struct list_head *common_head = &ftrace_common_fields; | 829 | struct list_head *common_head = &ftrace_common_fields; |
831 | struct list_head *head = trace_get_fields(call); | 830 | struct list_head *head = trace_get_fields(call); |
831 | struct list_head *node = v; | ||
832 | 832 | ||
833 | (*pos)++; | 833 | (*pos)++; |
834 | 834 | ||
835 | switch ((unsigned long)v) { | 835 | switch ((unsigned long)v) { |
836 | case FORMAT_HEADER: | 836 | case FORMAT_HEADER: |
837 | if (unlikely(list_empty(common_head))) | 837 | node = common_head; |
838 | return NULL; | 838 | break; |
839 | |||
840 | field = list_entry(common_head->prev, | ||
841 | struct ftrace_event_field, link); | ||
842 | return field; | ||
843 | 839 | ||
844 | case FORMAT_FIELD_SEPERATOR: | 840 | case FORMAT_FIELD_SEPERATOR: |
845 | if (unlikely(list_empty(head))) | 841 | node = head; |
846 | return NULL; | 842 | break; |
847 | |||
848 | field = list_entry(head->prev, struct ftrace_event_field, link); | ||
849 | return field; | ||
850 | 843 | ||
851 | case FORMAT_PRINTFMT: | 844 | case FORMAT_PRINTFMT: |
852 | /* all done */ | 845 | /* all done */ |
853 | return NULL; | 846 | return NULL; |
854 | } | 847 | } |
855 | 848 | ||
856 | field = v; | 849 | node = node->prev; |
857 | if (field->link.prev == common_head) | 850 | if (node == common_head) |
858 | return (void *)FORMAT_FIELD_SEPERATOR; | 851 | return (void *)FORMAT_FIELD_SEPERATOR; |
859 | else if (field->link.prev == head) | 852 | else if (node == head) |
860 | return (void *)FORMAT_PRINTFMT; | 853 | return (void *)FORMAT_PRINTFMT; |
861 | 854 | else | |
862 | field = list_entry(field->link.prev, struct ftrace_event_field, link); | 855 | return node; |
863 | |||
864 | return field; | ||
865 | } | ||
866 | |||
867 | static void *f_start(struct seq_file *m, loff_t *pos) | ||
868 | { | ||
869 | loff_t l = 0; | ||
870 | void *p; | ||
871 | |||
872 | /* Start by showing the header */ | ||
873 | if (!*pos) | ||
874 | return (void *)FORMAT_HEADER; | ||
875 | |||
876 | p = (void *)FORMAT_HEADER; | ||
877 | do { | ||
878 | p = f_next(m, p, &l); | ||
879 | } while (p && l < *pos); | ||
880 | |||
881 | return p; | ||
882 | } | 856 | } |
883 | 857 | ||
884 | static int f_show(struct seq_file *m, void *v) | 858 | static int f_show(struct seq_file *m, void *v) |
@@ -904,8 +878,7 @@ static int f_show(struct seq_file *m, void *v) | |||
904 | return 0; | 878 | return 0; |
905 | } | 879 | } |
906 | 880 | ||
907 | field = v; | 881 | field = list_entry(v, struct ftrace_event_field, link); |
908 | |||
909 | /* | 882 | /* |
910 | * Smartly shows the array type(except dynamic array). | 883 | * Smartly shows the array type(except dynamic array). |
911 | * Normal: | 884 | * Normal: |
@@ -932,6 +905,17 @@ static int f_show(struct seq_file *m, void *v) | |||
932 | return 0; | 905 | return 0; |
933 | } | 906 | } |
934 | 907 | ||
908 | static void *f_start(struct seq_file *m, loff_t *pos) | ||
909 | { | ||
910 | void *p = (void *)FORMAT_HEADER; | ||
911 | loff_t l = 0; | ||
912 | |||
913 | while (l < *pos && p) | ||
914 | p = f_next(m, p, &l); | ||
915 | |||
916 | return p; | ||
917 | } | ||
918 | |||
935 | static void f_stop(struct seq_file *m, void *p) | 919 | static void f_stop(struct seq_file *m, void *p) |
936 | { | 920 | { |
937 | } | 921 | } |
@@ -963,23 +947,14 @@ static ssize_t | |||
963 | event_id_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos) | 947 | event_id_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos) |
964 | { | 948 | { |
965 | struct ftrace_event_call *call = filp->private_data; | 949 | struct ftrace_event_call *call = filp->private_data; |
966 | struct trace_seq *s; | 950 | char buf[32]; |
967 | int r; | 951 | int len; |
968 | 952 | ||
969 | if (*ppos) | 953 | if (*ppos) |
970 | return 0; | 954 | return 0; |
971 | 955 | ||
972 | s = kmalloc(sizeof(*s), GFP_KERNEL); | 956 | len = sprintf(buf, "%d\n", call->event.type); |
973 | if (!s) | 957 | return simple_read_from_buffer(ubuf, cnt, ppos, buf, len); |
974 | return -ENOMEM; | ||
975 | |||
976 | trace_seq_init(s); | ||
977 | trace_seq_printf(s, "%d\n", call->event.type); | ||
978 | |||
979 | r = simple_read_from_buffer(ubuf, cnt, ppos, | ||
980 | s->buffer, s->len); | ||
981 | kfree(s); | ||
982 | return r; | ||
983 | } | 958 | } |
984 | 959 | ||
985 | static ssize_t | 960 | static ssize_t |
@@ -1218,6 +1193,7 @@ show_header(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos) | |||
1218 | 1193 | ||
1219 | static int ftrace_event_avail_open(struct inode *inode, struct file *file); | 1194 | static int ftrace_event_avail_open(struct inode *inode, struct file *file); |
1220 | static int ftrace_event_set_open(struct inode *inode, struct file *file); | 1195 | static int ftrace_event_set_open(struct inode *inode, struct file *file); |
1196 | static int ftrace_event_release(struct inode *inode, struct file *file); | ||
1221 | 1197 | ||
1222 | static const struct seq_operations show_event_seq_ops = { | 1198 | static const struct seq_operations show_event_seq_ops = { |
1223 | .start = t_start, | 1199 | .start = t_start, |
@@ -1245,7 +1221,7 @@ static const struct file_operations ftrace_set_event_fops = { | |||
1245 | .read = seq_read, | 1221 | .read = seq_read, |
1246 | .write = ftrace_event_write, | 1222 | .write = ftrace_event_write, |
1247 | .llseek = seq_lseek, | 1223 | .llseek = seq_lseek, |
1248 | .release = seq_release, | 1224 | .release = ftrace_event_release, |
1249 | }; | 1225 | }; |
1250 | 1226 | ||
1251 | static const struct file_operations ftrace_enable_fops = { | 1227 | static const struct file_operations ftrace_enable_fops = { |
@@ -1323,6 +1299,15 @@ ftrace_event_open(struct inode *inode, struct file *file, | |||
1323 | return ret; | 1299 | return ret; |
1324 | } | 1300 | } |
1325 | 1301 | ||
1302 | static int ftrace_event_release(struct inode *inode, struct file *file) | ||
1303 | { | ||
1304 | struct trace_array *tr = inode->i_private; | ||
1305 | |||
1306 | trace_array_put(tr); | ||
1307 | |||
1308 | return seq_release(inode, file); | ||
1309 | } | ||
1310 | |||
1326 | static int | 1311 | static int |
1327 | ftrace_event_avail_open(struct inode *inode, struct file *file) | 1312 | ftrace_event_avail_open(struct inode *inode, struct file *file) |
1328 | { | 1313 | { |
@@ -1336,12 +1321,19 @@ ftrace_event_set_open(struct inode *inode, struct file *file) | |||
1336 | { | 1321 | { |
1337 | const struct seq_operations *seq_ops = &show_set_event_seq_ops; | 1322 | const struct seq_operations *seq_ops = &show_set_event_seq_ops; |
1338 | struct trace_array *tr = inode->i_private; | 1323 | struct trace_array *tr = inode->i_private; |
1324 | int ret; | ||
1325 | |||
1326 | if (trace_array_get(tr) < 0) | ||
1327 | return -ENODEV; | ||
1339 | 1328 | ||
1340 | if ((file->f_mode & FMODE_WRITE) && | 1329 | if ((file->f_mode & FMODE_WRITE) && |
1341 | (file->f_flags & O_TRUNC)) | 1330 | (file->f_flags & O_TRUNC)) |
1342 | ftrace_clear_events(tr); | 1331 | ftrace_clear_events(tr); |
1343 | 1332 | ||
1344 | return ftrace_event_open(inode, file, seq_ops); | 1333 | ret = ftrace_event_open(inode, file, seq_ops); |
1334 | if (ret < 0) | ||
1335 | trace_array_put(tr); | ||
1336 | return ret; | ||
1345 | } | 1337 | } |
1346 | 1338 | ||
1347 | static struct event_subsystem * | 1339 | static struct event_subsystem * |
diff --git a/kernel/trace/trace_events_filter.c b/kernel/trace/trace_events_filter.c index 0d883dc057d6..0c7b75a8acc8 100644 --- a/kernel/trace/trace_events_filter.c +++ b/kernel/trace/trace_events_filter.c | |||
@@ -646,7 +646,7 @@ void print_event_filter(struct ftrace_event_call *call, struct trace_seq *s) | |||
646 | if (filter && filter->filter_string) | 646 | if (filter && filter->filter_string) |
647 | trace_seq_printf(s, "%s\n", filter->filter_string); | 647 | trace_seq_printf(s, "%s\n", filter->filter_string); |
648 | else | 648 | else |
649 | trace_seq_printf(s, "none\n"); | 649 | trace_seq_puts(s, "none\n"); |
650 | mutex_unlock(&event_mutex); | 650 | mutex_unlock(&event_mutex); |
651 | } | 651 | } |
652 | 652 | ||
@@ -660,7 +660,7 @@ void print_subsystem_event_filter(struct event_subsystem *system, | |||
660 | if (filter && filter->filter_string) | 660 | if (filter && filter->filter_string) |
661 | trace_seq_printf(s, "%s\n", filter->filter_string); | 661 | trace_seq_printf(s, "%s\n", filter->filter_string); |
662 | else | 662 | else |
663 | trace_seq_printf(s, DEFAULT_SYS_FILTER_MESSAGE "\n"); | 663 | trace_seq_puts(s, DEFAULT_SYS_FILTER_MESSAGE "\n"); |
664 | mutex_unlock(&event_mutex); | 664 | mutex_unlock(&event_mutex); |
665 | } | 665 | } |
666 | 666 | ||
diff --git a/kernel/trace/trace_functions.c b/kernel/trace/trace_functions.c index b863f93b30f3..38fe1483c508 100644 --- a/kernel/trace/trace_functions.c +++ b/kernel/trace/trace_functions.c | |||
@@ -199,7 +199,7 @@ static int func_set_flag(u32 old_flags, u32 bit, int set) | |||
199 | return 0; | 199 | return 0; |
200 | } | 200 | } |
201 | 201 | ||
202 | static struct tracer function_trace __read_mostly = | 202 | static struct tracer function_trace __tracer_data = |
203 | { | 203 | { |
204 | .name = "function", | 204 | .name = "function", |
205 | .init = function_trace_init, | 205 | .init = function_trace_init, |
diff --git a/kernel/trace/trace_functions_graph.c b/kernel/trace/trace_functions_graph.c index 8388bc99f2ee..b5c09242683d 100644 --- a/kernel/trace/trace_functions_graph.c +++ b/kernel/trace/trace_functions_graph.c | |||
@@ -446,7 +446,7 @@ print_graph_proc(struct trace_seq *s, pid_t pid) | |||
446 | 446 | ||
447 | /* First spaces to align center */ | 447 | /* First spaces to align center */ |
448 | for (i = 0; i < spaces / 2; i++) { | 448 | for (i = 0; i < spaces / 2; i++) { |
449 | ret = trace_seq_printf(s, " "); | 449 | ret = trace_seq_putc(s, ' '); |
450 | if (!ret) | 450 | if (!ret) |
451 | return TRACE_TYPE_PARTIAL_LINE; | 451 | return TRACE_TYPE_PARTIAL_LINE; |
452 | } | 452 | } |
@@ -457,7 +457,7 @@ print_graph_proc(struct trace_seq *s, pid_t pid) | |||
457 | 457 | ||
458 | /* Last spaces to align center */ | 458 | /* Last spaces to align center */ |
459 | for (i = 0; i < spaces - (spaces / 2); i++) { | 459 | for (i = 0; i < spaces - (spaces / 2); i++) { |
460 | ret = trace_seq_printf(s, " "); | 460 | ret = trace_seq_putc(s, ' '); |
461 | if (!ret) | 461 | if (!ret) |
462 | return TRACE_TYPE_PARTIAL_LINE; | 462 | return TRACE_TYPE_PARTIAL_LINE; |
463 | } | 463 | } |
@@ -503,7 +503,7 @@ verif_pid(struct trace_seq *s, pid_t pid, int cpu, struct fgraph_data *data) | |||
503 | ------------------------------------------ | 503 | ------------------------------------------ |
504 | 504 | ||
505 | */ | 505 | */ |
506 | ret = trace_seq_printf(s, | 506 | ret = trace_seq_puts(s, |
507 | " ------------------------------------------\n"); | 507 | " ------------------------------------------\n"); |
508 | if (!ret) | 508 | if (!ret) |
509 | return TRACE_TYPE_PARTIAL_LINE; | 509 | return TRACE_TYPE_PARTIAL_LINE; |
@@ -516,7 +516,7 @@ verif_pid(struct trace_seq *s, pid_t pid, int cpu, struct fgraph_data *data) | |||
516 | if (ret == TRACE_TYPE_PARTIAL_LINE) | 516 | if (ret == TRACE_TYPE_PARTIAL_LINE) |
517 | return TRACE_TYPE_PARTIAL_LINE; | 517 | return TRACE_TYPE_PARTIAL_LINE; |
518 | 518 | ||
519 | ret = trace_seq_printf(s, " => "); | 519 | ret = trace_seq_puts(s, " => "); |
520 | if (!ret) | 520 | if (!ret) |
521 | return TRACE_TYPE_PARTIAL_LINE; | 521 | return TRACE_TYPE_PARTIAL_LINE; |
522 | 522 | ||
@@ -524,7 +524,7 @@ verif_pid(struct trace_seq *s, pid_t pid, int cpu, struct fgraph_data *data) | |||
524 | if (ret == TRACE_TYPE_PARTIAL_LINE) | 524 | if (ret == TRACE_TYPE_PARTIAL_LINE) |
525 | return TRACE_TYPE_PARTIAL_LINE; | 525 | return TRACE_TYPE_PARTIAL_LINE; |
526 | 526 | ||
527 | ret = trace_seq_printf(s, | 527 | ret = trace_seq_puts(s, |
528 | "\n ------------------------------------------\n\n"); | 528 | "\n ------------------------------------------\n\n"); |
529 | if (!ret) | 529 | if (!ret) |
530 | return TRACE_TYPE_PARTIAL_LINE; | 530 | return TRACE_TYPE_PARTIAL_LINE; |
@@ -645,7 +645,7 @@ print_graph_irq(struct trace_iterator *iter, unsigned long addr, | |||
645 | ret = print_graph_proc(s, pid); | 645 | ret = print_graph_proc(s, pid); |
646 | if (ret == TRACE_TYPE_PARTIAL_LINE) | 646 | if (ret == TRACE_TYPE_PARTIAL_LINE) |
647 | return TRACE_TYPE_PARTIAL_LINE; | 647 | return TRACE_TYPE_PARTIAL_LINE; |
648 | ret = trace_seq_printf(s, " | "); | 648 | ret = trace_seq_puts(s, " | "); |
649 | if (!ret) | 649 | if (!ret) |
650 | return TRACE_TYPE_PARTIAL_LINE; | 650 | return TRACE_TYPE_PARTIAL_LINE; |
651 | } | 651 | } |
@@ -657,9 +657,9 @@ print_graph_irq(struct trace_iterator *iter, unsigned long addr, | |||
657 | return ret; | 657 | return ret; |
658 | 658 | ||
659 | if (type == TRACE_GRAPH_ENT) | 659 | if (type == TRACE_GRAPH_ENT) |
660 | ret = trace_seq_printf(s, "==========>"); | 660 | ret = trace_seq_puts(s, "==========>"); |
661 | else | 661 | else |
662 | ret = trace_seq_printf(s, "<=========="); | 662 | ret = trace_seq_puts(s, "<=========="); |
663 | 663 | ||
664 | if (!ret) | 664 | if (!ret) |
665 | return TRACE_TYPE_PARTIAL_LINE; | 665 | return TRACE_TYPE_PARTIAL_LINE; |
@@ -668,7 +668,7 @@ print_graph_irq(struct trace_iterator *iter, unsigned long addr, | |||
668 | if (ret != TRACE_TYPE_HANDLED) | 668 | if (ret != TRACE_TYPE_HANDLED) |
669 | return ret; | 669 | return ret; |
670 | 670 | ||
671 | ret = trace_seq_printf(s, "\n"); | 671 | ret = trace_seq_putc(s, '\n'); |
672 | 672 | ||
673 | if (!ret) | 673 | if (!ret) |
674 | return TRACE_TYPE_PARTIAL_LINE; | 674 | return TRACE_TYPE_PARTIAL_LINE; |
@@ -705,13 +705,13 @@ trace_print_graph_duration(unsigned long long duration, struct trace_seq *s) | |||
705 | len += strlen(nsecs_str); | 705 | len += strlen(nsecs_str); |
706 | } | 706 | } |
707 | 707 | ||
708 | ret = trace_seq_printf(s, " us "); | 708 | ret = trace_seq_puts(s, " us "); |
709 | if (!ret) | 709 | if (!ret) |
710 | return TRACE_TYPE_PARTIAL_LINE; | 710 | return TRACE_TYPE_PARTIAL_LINE; |
711 | 711 | ||
712 | /* Print remaining spaces to fit the row's width */ | 712 | /* Print remaining spaces to fit the row's width */ |
713 | for (i = len; i < 7; i++) { | 713 | for (i = len; i < 7; i++) { |
714 | ret = trace_seq_printf(s, " "); | 714 | ret = trace_seq_putc(s, ' '); |
715 | if (!ret) | 715 | if (!ret) |
716 | return TRACE_TYPE_PARTIAL_LINE; | 716 | return TRACE_TYPE_PARTIAL_LINE; |
717 | } | 717 | } |
@@ -731,13 +731,13 @@ print_graph_duration(unsigned long long duration, struct trace_seq *s, | |||
731 | /* No real adata, just filling the column with spaces */ | 731 | /* No real adata, just filling the column with spaces */ |
732 | switch (duration) { | 732 | switch (duration) { |
733 | case DURATION_FILL_FULL: | 733 | case DURATION_FILL_FULL: |
734 | ret = trace_seq_printf(s, " | "); | 734 | ret = trace_seq_puts(s, " | "); |
735 | return ret ? TRACE_TYPE_HANDLED : TRACE_TYPE_PARTIAL_LINE; | 735 | return ret ? TRACE_TYPE_HANDLED : TRACE_TYPE_PARTIAL_LINE; |
736 | case DURATION_FILL_START: | 736 | case DURATION_FILL_START: |
737 | ret = trace_seq_printf(s, " "); | 737 | ret = trace_seq_puts(s, " "); |
738 | return ret ? TRACE_TYPE_HANDLED : TRACE_TYPE_PARTIAL_LINE; | 738 | return ret ? TRACE_TYPE_HANDLED : TRACE_TYPE_PARTIAL_LINE; |
739 | case DURATION_FILL_END: | 739 | case DURATION_FILL_END: |
740 | ret = trace_seq_printf(s, " |"); | 740 | ret = trace_seq_puts(s, " |"); |
741 | return ret ? TRACE_TYPE_HANDLED : TRACE_TYPE_PARTIAL_LINE; | 741 | return ret ? TRACE_TYPE_HANDLED : TRACE_TYPE_PARTIAL_LINE; |
742 | } | 742 | } |
743 | 743 | ||
@@ -745,10 +745,10 @@ print_graph_duration(unsigned long long duration, struct trace_seq *s, | |||
745 | if (flags & TRACE_GRAPH_PRINT_OVERHEAD) { | 745 | if (flags & TRACE_GRAPH_PRINT_OVERHEAD) { |
746 | /* Duration exceeded 100 msecs */ | 746 | /* Duration exceeded 100 msecs */ |
747 | if (duration > 100000ULL) | 747 | if (duration > 100000ULL) |
748 | ret = trace_seq_printf(s, "! "); | 748 | ret = trace_seq_puts(s, "! "); |
749 | /* Duration exceeded 10 msecs */ | 749 | /* Duration exceeded 10 msecs */ |
750 | else if (duration > 10000ULL) | 750 | else if (duration > 10000ULL) |
751 | ret = trace_seq_printf(s, "+ "); | 751 | ret = trace_seq_puts(s, "+ "); |
752 | } | 752 | } |
753 | 753 | ||
754 | /* | 754 | /* |
@@ -757,7 +757,7 @@ print_graph_duration(unsigned long long duration, struct trace_seq *s, | |||
757 | * to fill out the space. | 757 | * to fill out the space. |
758 | */ | 758 | */ |
759 | if (ret == -1) | 759 | if (ret == -1) |
760 | ret = trace_seq_printf(s, " "); | 760 | ret = trace_seq_puts(s, " "); |
761 | 761 | ||
762 | /* Catching here any failure happenned above */ | 762 | /* Catching here any failure happenned above */ |
763 | if (!ret) | 763 | if (!ret) |
@@ -767,7 +767,7 @@ print_graph_duration(unsigned long long duration, struct trace_seq *s, | |||
767 | if (ret != TRACE_TYPE_HANDLED) | 767 | if (ret != TRACE_TYPE_HANDLED) |
768 | return ret; | 768 | return ret; |
769 | 769 | ||
770 | ret = trace_seq_printf(s, "| "); | 770 | ret = trace_seq_puts(s, "| "); |
771 | if (!ret) | 771 | if (!ret) |
772 | return TRACE_TYPE_PARTIAL_LINE; | 772 | return TRACE_TYPE_PARTIAL_LINE; |
773 | 773 | ||
@@ -817,7 +817,7 @@ print_graph_entry_leaf(struct trace_iterator *iter, | |||
817 | 817 | ||
818 | /* Function */ | 818 | /* Function */ |
819 | for (i = 0; i < call->depth * TRACE_GRAPH_INDENT; i++) { | 819 | for (i = 0; i < call->depth * TRACE_GRAPH_INDENT; i++) { |
820 | ret = trace_seq_printf(s, " "); | 820 | ret = trace_seq_putc(s, ' '); |
821 | if (!ret) | 821 | if (!ret) |
822 | return TRACE_TYPE_PARTIAL_LINE; | 822 | return TRACE_TYPE_PARTIAL_LINE; |
823 | } | 823 | } |
@@ -858,7 +858,7 @@ print_graph_entry_nested(struct trace_iterator *iter, | |||
858 | 858 | ||
859 | /* Function */ | 859 | /* Function */ |
860 | for (i = 0; i < call->depth * TRACE_GRAPH_INDENT; i++) { | 860 | for (i = 0; i < call->depth * TRACE_GRAPH_INDENT; i++) { |
861 | ret = trace_seq_printf(s, " "); | 861 | ret = trace_seq_putc(s, ' '); |
862 | if (!ret) | 862 | if (!ret) |
863 | return TRACE_TYPE_PARTIAL_LINE; | 863 | return TRACE_TYPE_PARTIAL_LINE; |
864 | } | 864 | } |
@@ -917,7 +917,7 @@ print_graph_prologue(struct trace_iterator *iter, struct trace_seq *s, | |||
917 | if (ret == TRACE_TYPE_PARTIAL_LINE) | 917 | if (ret == TRACE_TYPE_PARTIAL_LINE) |
918 | return TRACE_TYPE_PARTIAL_LINE; | 918 | return TRACE_TYPE_PARTIAL_LINE; |
919 | 919 | ||
920 | ret = trace_seq_printf(s, " | "); | 920 | ret = trace_seq_puts(s, " | "); |
921 | if (!ret) | 921 | if (!ret) |
922 | return TRACE_TYPE_PARTIAL_LINE; | 922 | return TRACE_TYPE_PARTIAL_LINE; |
923 | } | 923 | } |
@@ -1117,7 +1117,7 @@ print_graph_return(struct ftrace_graph_ret *trace, struct trace_seq *s, | |||
1117 | 1117 | ||
1118 | /* Closing brace */ | 1118 | /* Closing brace */ |
1119 | for (i = 0; i < trace->depth * TRACE_GRAPH_INDENT; i++) { | 1119 | for (i = 0; i < trace->depth * TRACE_GRAPH_INDENT; i++) { |
1120 | ret = trace_seq_printf(s, " "); | 1120 | ret = trace_seq_putc(s, ' '); |
1121 | if (!ret) | 1121 | if (!ret) |
1122 | return TRACE_TYPE_PARTIAL_LINE; | 1122 | return TRACE_TYPE_PARTIAL_LINE; |
1123 | } | 1123 | } |
@@ -1129,7 +1129,7 @@ print_graph_return(struct ftrace_graph_ret *trace, struct trace_seq *s, | |||
1129 | * belongs to, write out the function name. | 1129 | * belongs to, write out the function name. |
1130 | */ | 1130 | */ |
1131 | if (func_match) { | 1131 | if (func_match) { |
1132 | ret = trace_seq_printf(s, "}\n"); | 1132 | ret = trace_seq_puts(s, "}\n"); |
1133 | if (!ret) | 1133 | if (!ret) |
1134 | return TRACE_TYPE_PARTIAL_LINE; | 1134 | return TRACE_TYPE_PARTIAL_LINE; |
1135 | } else { | 1135 | } else { |
@@ -1179,13 +1179,13 @@ print_graph_comment(struct trace_seq *s, struct trace_entry *ent, | |||
1179 | /* Indentation */ | 1179 | /* Indentation */ |
1180 | if (depth > 0) | 1180 | if (depth > 0) |
1181 | for (i = 0; i < (depth + 1) * TRACE_GRAPH_INDENT; i++) { | 1181 | for (i = 0; i < (depth + 1) * TRACE_GRAPH_INDENT; i++) { |
1182 | ret = trace_seq_printf(s, " "); | 1182 | ret = trace_seq_putc(s, ' '); |
1183 | if (!ret) | 1183 | if (!ret) |
1184 | return TRACE_TYPE_PARTIAL_LINE; | 1184 | return TRACE_TYPE_PARTIAL_LINE; |
1185 | } | 1185 | } |
1186 | 1186 | ||
1187 | /* The comment */ | 1187 | /* The comment */ |
1188 | ret = trace_seq_printf(s, "/* "); | 1188 | ret = trace_seq_puts(s, "/* "); |
1189 | if (!ret) | 1189 | if (!ret) |
1190 | return TRACE_TYPE_PARTIAL_LINE; | 1190 | return TRACE_TYPE_PARTIAL_LINE; |
1191 | 1191 | ||
@@ -1216,7 +1216,7 @@ print_graph_comment(struct trace_seq *s, struct trace_entry *ent, | |||
1216 | s->len--; | 1216 | s->len--; |
1217 | } | 1217 | } |
1218 | 1218 | ||
1219 | ret = trace_seq_printf(s, " */\n"); | 1219 | ret = trace_seq_puts(s, " */\n"); |
1220 | if (!ret) | 1220 | if (!ret) |
1221 | return TRACE_TYPE_PARTIAL_LINE; | 1221 | return TRACE_TYPE_PARTIAL_LINE; |
1222 | 1222 | ||
@@ -1448,7 +1448,7 @@ static struct trace_event graph_trace_ret_event = { | |||
1448 | .funcs = &graph_functions | 1448 | .funcs = &graph_functions |
1449 | }; | 1449 | }; |
1450 | 1450 | ||
1451 | static struct tracer graph_trace __read_mostly = { | 1451 | static struct tracer graph_trace __tracer_data = { |
1452 | .name = "function_graph", | 1452 | .name = "function_graph", |
1453 | .open = graph_trace_open, | 1453 | .open = graph_trace_open, |
1454 | .pipe_open = graph_trace_open, | 1454 | .pipe_open = graph_trace_open, |
diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c index 7ed6976493c8..3811487e7a7a 100644 --- a/kernel/trace/trace_kprobe.c +++ b/kernel/trace/trace_kprobe.c | |||
@@ -243,11 +243,11 @@ find_event_file_link(struct trace_probe *tp, struct ftrace_event_file *file) | |||
243 | static int | 243 | static int |
244 | disable_trace_probe(struct trace_probe *tp, struct ftrace_event_file *file) | 244 | disable_trace_probe(struct trace_probe *tp, struct ftrace_event_file *file) |
245 | { | 245 | { |
246 | struct event_file_link *link = NULL; | ||
247 | int wait = 0; | ||
246 | int ret = 0; | 248 | int ret = 0; |
247 | 249 | ||
248 | if (file) { | 250 | if (file) { |
249 | struct event_file_link *link; | ||
250 | |||
251 | link = find_event_file_link(tp, file); | 251 | link = find_event_file_link(tp, file); |
252 | if (!link) { | 252 | if (!link) { |
253 | ret = -EINVAL; | 253 | ret = -EINVAL; |
@@ -255,10 +255,7 @@ disable_trace_probe(struct trace_probe *tp, struct ftrace_event_file *file) | |||
255 | } | 255 | } |
256 | 256 | ||
257 | list_del_rcu(&link->list); | 257 | list_del_rcu(&link->list); |
258 | /* synchronize with kprobe_trace_func/kretprobe_trace_func */ | 258 | wait = 1; |
259 | synchronize_sched(); | ||
260 | kfree(link); | ||
261 | |||
262 | if (!list_empty(&tp->files)) | 259 | if (!list_empty(&tp->files)) |
263 | goto out; | 260 | goto out; |
264 | 261 | ||
@@ -271,8 +268,22 @@ disable_trace_probe(struct trace_probe *tp, struct ftrace_event_file *file) | |||
271 | disable_kretprobe(&tp->rp); | 268 | disable_kretprobe(&tp->rp); |
272 | else | 269 | else |
273 | disable_kprobe(&tp->rp.kp); | 270 | disable_kprobe(&tp->rp.kp); |
271 | wait = 1; | ||
274 | } | 272 | } |
275 | out: | 273 | out: |
274 | if (wait) { | ||
275 | /* | ||
276 | * Synchronize with kprobe_trace_func/kretprobe_trace_func | ||
277 | * to ensure disabled (all running handlers are finished). | ||
278 | * This is not only for kfree(), but also the caller, | ||
279 | * trace_remove_event_call() supposes it for releasing | ||
280 | * event_call related objects, which will be accessed in | ||
281 | * the kprobe_trace_func/kretprobe_trace_func. | ||
282 | */ | ||
283 | synchronize_sched(); | ||
284 | kfree(link); /* Ignored if link == NULL */ | ||
285 | } | ||
286 | |||
276 | return ret; | 287 | return ret; |
277 | } | 288 | } |
278 | 289 | ||
@@ -1087,9 +1098,6 @@ kprobe_perf_func(struct trace_probe *tp, struct pt_regs *regs) | |||
1087 | __size = sizeof(*entry) + tp->size + dsize; | 1098 | __size = sizeof(*entry) + tp->size + dsize; |
1088 | size = ALIGN(__size + sizeof(u32), sizeof(u64)); | 1099 | size = ALIGN(__size + sizeof(u32), sizeof(u64)); |
1089 | size -= sizeof(u32); | 1100 | size -= sizeof(u32); |
1090 | if (WARN_ONCE(size > PERF_MAX_TRACE_SIZE, | ||
1091 | "profile buffer not large enough")) | ||
1092 | return; | ||
1093 | 1101 | ||
1094 | entry = perf_trace_buf_prepare(size, call->event.type, regs, &rctx); | 1102 | entry = perf_trace_buf_prepare(size, call->event.type, regs, &rctx); |
1095 | if (!entry) | 1103 | if (!entry) |
@@ -1120,9 +1128,6 @@ kretprobe_perf_func(struct trace_probe *tp, struct kretprobe_instance *ri, | |||
1120 | __size = sizeof(*entry) + tp->size + dsize; | 1128 | __size = sizeof(*entry) + tp->size + dsize; |
1121 | size = ALIGN(__size + sizeof(u32), sizeof(u64)); | 1129 | size = ALIGN(__size + sizeof(u32), sizeof(u64)); |
1122 | size -= sizeof(u32); | 1130 | size -= sizeof(u32); |
1123 | if (WARN_ONCE(size > PERF_MAX_TRACE_SIZE, | ||
1124 | "profile buffer not large enough")) | ||
1125 | return; | ||
1126 | 1131 | ||
1127 | entry = perf_trace_buf_prepare(size, call->event.type, regs, &rctx); | 1132 | entry = perf_trace_buf_prepare(size, call->event.type, regs, &rctx); |
1128 | if (!entry) | 1133 | if (!entry) |
diff --git a/kernel/trace/trace_mmiotrace.c b/kernel/trace/trace_mmiotrace.c index a5e8f4878bfa..b3dcfb2f0fef 100644 --- a/kernel/trace/trace_mmiotrace.c +++ b/kernel/trace/trace_mmiotrace.c | |||
@@ -90,7 +90,7 @@ static int mmio_print_pcidev(struct trace_seq *s, const struct pci_dev *dev) | |||
90 | if (drv) | 90 | if (drv) |
91 | ret += trace_seq_printf(s, " %s\n", drv->name); | 91 | ret += trace_seq_printf(s, " %s\n", drv->name); |
92 | else | 92 | else |
93 | ret += trace_seq_printf(s, " \n"); | 93 | ret += trace_seq_puts(s, " \n"); |
94 | return ret; | 94 | return ret; |
95 | } | 95 | } |
96 | 96 | ||
@@ -107,7 +107,7 @@ static void mmio_pipe_open(struct trace_iterator *iter) | |||
107 | struct header_iter *hiter; | 107 | struct header_iter *hiter; |
108 | struct trace_seq *s = &iter->seq; | 108 | struct trace_seq *s = &iter->seq; |
109 | 109 | ||
110 | trace_seq_printf(s, "VERSION 20070824\n"); | 110 | trace_seq_puts(s, "VERSION 20070824\n"); |
111 | 111 | ||
112 | hiter = kzalloc(sizeof(*hiter), GFP_KERNEL); | 112 | hiter = kzalloc(sizeof(*hiter), GFP_KERNEL); |
113 | if (!hiter) | 113 | if (!hiter) |
@@ -209,7 +209,7 @@ static enum print_line_t mmio_print_rw(struct trace_iterator *iter) | |||
209 | (rw->value >> 0) & 0xff, rw->pc, 0); | 209 | (rw->value >> 0) & 0xff, rw->pc, 0); |
210 | break; | 210 | break; |
211 | default: | 211 | default: |
212 | ret = trace_seq_printf(s, "rw what?\n"); | 212 | ret = trace_seq_puts(s, "rw what?\n"); |
213 | break; | 213 | break; |
214 | } | 214 | } |
215 | if (ret) | 215 | if (ret) |
@@ -245,7 +245,7 @@ static enum print_line_t mmio_print_map(struct trace_iterator *iter) | |||
245 | secs, usec_rem, m->map_id, 0UL, 0); | 245 | secs, usec_rem, m->map_id, 0UL, 0); |
246 | break; | 246 | break; |
247 | default: | 247 | default: |
248 | ret = trace_seq_printf(s, "map what?\n"); | 248 | ret = trace_seq_puts(s, "map what?\n"); |
249 | break; | 249 | break; |
250 | } | 250 | } |
251 | if (ret) | 251 | if (ret) |
diff --git a/kernel/trace/trace_output.c b/kernel/trace/trace_output.c index bb922d9ee51b..34e7cbac0c9c 100644 --- a/kernel/trace/trace_output.c +++ b/kernel/trace/trace_output.c | |||
@@ -78,7 +78,7 @@ enum print_line_t trace_print_printk_msg_only(struct trace_iterator *iter) | |||
78 | 78 | ||
79 | trace_assign_type(field, entry); | 79 | trace_assign_type(field, entry); |
80 | 80 | ||
81 | ret = trace_seq_printf(s, "%s", field->buf); | 81 | ret = trace_seq_puts(s, field->buf); |
82 | if (!ret) | 82 | if (!ret) |
83 | return TRACE_TYPE_PARTIAL_LINE; | 83 | return TRACE_TYPE_PARTIAL_LINE; |
84 | 84 | ||
@@ -558,14 +558,14 @@ seq_print_userip_objs(const struct userstack_entry *entry, struct trace_seq *s, | |||
558 | if (ret) | 558 | if (ret) |
559 | ret = trace_seq_puts(s, "??"); | 559 | ret = trace_seq_puts(s, "??"); |
560 | if (ret) | 560 | if (ret) |
561 | ret = trace_seq_puts(s, "\n"); | 561 | ret = trace_seq_putc(s, '\n'); |
562 | continue; | 562 | continue; |
563 | } | 563 | } |
564 | if (!ret) | 564 | if (!ret) |
565 | break; | 565 | break; |
566 | if (ret) | 566 | if (ret) |
567 | ret = seq_print_user_ip(s, mm, ip, sym_flags); | 567 | ret = seq_print_user_ip(s, mm, ip, sym_flags); |
568 | ret = trace_seq_puts(s, "\n"); | 568 | ret = trace_seq_putc(s, '\n'); |
569 | } | 569 | } |
570 | 570 | ||
571 | if (mm) | 571 | if (mm) |
@@ -579,7 +579,7 @@ seq_print_ip_sym(struct trace_seq *s, unsigned long ip, unsigned long sym_flags) | |||
579 | int ret; | 579 | int ret; |
580 | 580 | ||
581 | if (!ip) | 581 | if (!ip) |
582 | return trace_seq_printf(s, "0"); | 582 | return trace_seq_putc(s, '0'); |
583 | 583 | ||
584 | if (sym_flags & TRACE_ITER_SYM_OFFSET) | 584 | if (sym_flags & TRACE_ITER_SYM_OFFSET) |
585 | ret = seq_print_sym_offset(s, "%s", ip); | 585 | ret = seq_print_sym_offset(s, "%s", ip); |
@@ -964,14 +964,14 @@ static enum print_line_t trace_fn_trace(struct trace_iterator *iter, int flags, | |||
964 | goto partial; | 964 | goto partial; |
965 | 965 | ||
966 | if ((flags & TRACE_ITER_PRINT_PARENT) && field->parent_ip) { | 966 | if ((flags & TRACE_ITER_PRINT_PARENT) && field->parent_ip) { |
967 | if (!trace_seq_printf(s, " <-")) | 967 | if (!trace_seq_puts(s, " <-")) |
968 | goto partial; | 968 | goto partial; |
969 | if (!seq_print_ip_sym(s, | 969 | if (!seq_print_ip_sym(s, |
970 | field->parent_ip, | 970 | field->parent_ip, |
971 | flags)) | 971 | flags)) |
972 | goto partial; | 972 | goto partial; |
973 | } | 973 | } |
974 | if (!trace_seq_printf(s, "\n")) | 974 | if (!trace_seq_putc(s, '\n')) |
975 | goto partial; | 975 | goto partial; |
976 | 976 | ||
977 | return TRACE_TYPE_HANDLED; | 977 | return TRACE_TYPE_HANDLED; |
@@ -1210,7 +1210,7 @@ static enum print_line_t trace_stack_print(struct trace_iterator *iter, | |||
1210 | 1210 | ||
1211 | if (!seq_print_ip_sym(s, *p, flags)) | 1211 | if (!seq_print_ip_sym(s, *p, flags)) |
1212 | goto partial; | 1212 | goto partial; |
1213 | if (!trace_seq_puts(s, "\n")) | 1213 | if (!trace_seq_putc(s, '\n')) |
1214 | goto partial; | 1214 | goto partial; |
1215 | } | 1215 | } |
1216 | 1216 | ||
diff --git a/kernel/trace/trace_syscalls.c b/kernel/trace/trace_syscalls.c index 322e16461072..8fd03657bc7d 100644 --- a/kernel/trace/trace_syscalls.c +++ b/kernel/trace/trace_syscalls.c | |||
@@ -175,7 +175,7 @@ print_syscall_exit(struct trace_iterator *iter, int flags, | |||
175 | entry = syscall_nr_to_meta(syscall); | 175 | entry = syscall_nr_to_meta(syscall); |
176 | 176 | ||
177 | if (!entry) { | 177 | if (!entry) { |
178 | trace_seq_printf(s, "\n"); | 178 | trace_seq_putc(s, '\n'); |
179 | return TRACE_TYPE_HANDLED; | 179 | return TRACE_TYPE_HANDLED; |
180 | } | 180 | } |
181 | 181 | ||
@@ -566,15 +566,15 @@ static void perf_syscall_enter(void *ignore, struct pt_regs *regs, long id) | |||
566 | if (!sys_data) | 566 | if (!sys_data) |
567 | return; | 567 | return; |
568 | 568 | ||
569 | head = this_cpu_ptr(sys_data->enter_event->perf_events); | ||
570 | if (hlist_empty(head)) | ||
571 | return; | ||
572 | |||
569 | /* get the size after alignment with the u32 buffer size field */ | 573 | /* get the size after alignment with the u32 buffer size field */ |
570 | size = sizeof(unsigned long) * sys_data->nb_args + sizeof(*rec); | 574 | size = sizeof(unsigned long) * sys_data->nb_args + sizeof(*rec); |
571 | size = ALIGN(size + sizeof(u32), sizeof(u64)); | 575 | size = ALIGN(size + sizeof(u32), sizeof(u64)); |
572 | size -= sizeof(u32); | 576 | size -= sizeof(u32); |
573 | 577 | ||
574 | if (WARN_ONCE(size > PERF_MAX_TRACE_SIZE, | ||
575 | "perf buffer not large enough")) | ||
576 | return; | ||
577 | |||
578 | rec = (struct syscall_trace_enter *)perf_trace_buf_prepare(size, | 578 | rec = (struct syscall_trace_enter *)perf_trace_buf_prepare(size, |
579 | sys_data->enter_event->event.type, regs, &rctx); | 579 | sys_data->enter_event->event.type, regs, &rctx); |
580 | if (!rec) | 580 | if (!rec) |
@@ -583,8 +583,6 @@ static void perf_syscall_enter(void *ignore, struct pt_regs *regs, long id) | |||
583 | rec->nr = syscall_nr; | 583 | rec->nr = syscall_nr; |
584 | syscall_get_arguments(current, regs, 0, sys_data->nb_args, | 584 | syscall_get_arguments(current, regs, 0, sys_data->nb_args, |
585 | (unsigned long *)&rec->args); | 585 | (unsigned long *)&rec->args); |
586 | |||
587 | head = this_cpu_ptr(sys_data->enter_event->perf_events); | ||
588 | perf_trace_buf_submit(rec, size, rctx, 0, 1, regs, head, NULL); | 586 | perf_trace_buf_submit(rec, size, rctx, 0, 1, regs, head, NULL); |
589 | } | 587 | } |
590 | 588 | ||
@@ -642,18 +640,14 @@ static void perf_syscall_exit(void *ignore, struct pt_regs *regs, long ret) | |||
642 | if (!sys_data) | 640 | if (!sys_data) |
643 | return; | 641 | return; |
644 | 642 | ||
643 | head = this_cpu_ptr(sys_data->exit_event->perf_events); | ||
644 | if (hlist_empty(head)) | ||
645 | return; | ||
646 | |||
645 | /* We can probably do that at build time */ | 647 | /* We can probably do that at build time */ |
646 | size = ALIGN(sizeof(*rec) + sizeof(u32), sizeof(u64)); | 648 | size = ALIGN(sizeof(*rec) + sizeof(u32), sizeof(u64)); |
647 | size -= sizeof(u32); | 649 | size -= sizeof(u32); |
648 | 650 | ||
649 | /* | ||
650 | * Impossible, but be paranoid with the future | ||
651 | * How to put this check outside runtime? | ||
652 | */ | ||
653 | if (WARN_ONCE(size > PERF_MAX_TRACE_SIZE, | ||
654 | "exit event has grown above perf buffer size")) | ||
655 | return; | ||
656 | |||
657 | rec = (struct syscall_trace_exit *)perf_trace_buf_prepare(size, | 651 | rec = (struct syscall_trace_exit *)perf_trace_buf_prepare(size, |
658 | sys_data->exit_event->event.type, regs, &rctx); | 652 | sys_data->exit_event->event.type, regs, &rctx); |
659 | if (!rec) | 653 | if (!rec) |
@@ -661,8 +655,6 @@ static void perf_syscall_exit(void *ignore, struct pt_regs *regs, long ret) | |||
661 | 655 | ||
662 | rec->nr = syscall_nr; | 656 | rec->nr = syscall_nr; |
663 | rec->ret = syscall_get_return_value(current, regs); | 657 | rec->ret = syscall_get_return_value(current, regs); |
664 | |||
665 | head = this_cpu_ptr(sys_data->exit_event->perf_events); | ||
666 | perf_trace_buf_submit(rec, size, rctx, 0, 1, regs, head, NULL); | 658 | perf_trace_buf_submit(rec, size, rctx, 0, 1, regs, head, NULL); |
667 | } | 659 | } |
668 | 660 | ||
diff --git a/kernel/trace/trace_uprobe.c b/kernel/trace/trace_uprobe.c index d5d0cd368a56..a23d2d71188e 100644 --- a/kernel/trace/trace_uprobe.c +++ b/kernel/trace/trace_uprobe.c | |||
@@ -818,8 +818,6 @@ static void uprobe_perf_print(struct trace_uprobe *tu, | |||
818 | 818 | ||
819 | size = SIZEOF_TRACE_ENTRY(is_ret_probe(tu)); | 819 | size = SIZEOF_TRACE_ENTRY(is_ret_probe(tu)); |
820 | size = ALIGN(size + tu->size + sizeof(u32), sizeof(u64)) - sizeof(u32); | 820 | size = ALIGN(size + tu->size + sizeof(u32), sizeof(u64)) - sizeof(u32); |
821 | if (WARN_ONCE(size > PERF_MAX_TRACE_SIZE, "profile buffer not large enough")) | ||
822 | return; | ||
823 | 821 | ||
824 | preempt_disable(); | 822 | preempt_disable(); |
825 | head = this_cpu_ptr(call->perf_events); | 823 | head = this_cpu_ptr(call->perf_events); |