author     Ingo Molnar <mingo@elte.hu>	2010-07-21 15:44:18 -0400
committer  Ingo Molnar <mingo@elte.hu>	2010-07-21 15:44:18 -0400
commit     23c2875725ebb03e58e598754309e198648f608a (patch)
tree       35f9ffe9e7b52624642cb651687dea5faf5dc6a3 /kernel
parent     9dcdbf7a33d9018ac5d45debcf261be648bdd56a (diff)
parent     b444786f1a797a7f84e2561346a670649f9c7b3c (diff)
Merge branch 'perf/core' of git://git.kernel.org/pub/scm/linux/kernel/git/frederic/random-tracing into perf/core
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/trace/Kconfig            9
-rw-r--r--  kernel/trace/Makefile           1
-rw-r--r--  kernel/trace/trace.c           73
-rw-r--r--  kernel/trace/trace.h           10
-rw-r--r--  kernel/trace/trace_entries.h   17
-rw-r--r--  kernel/trace/trace_output.c    66
-rw-r--r--  kernel/trace/trace_selftest.c  33
-rw-r--r--  kernel/trace/trace_sysprof.c  330
8 files changed, 15 insertions, 524 deletions
diff --git a/kernel/trace/Kconfig b/kernel/trace/Kconfig
index f5306cb0afb1..c7683fd8a03a 100644
--- a/kernel/trace/Kconfig
+++ b/kernel/trace/Kconfig
@@ -194,15 +194,6 @@ config PREEMPT_TRACER
 	  enabled. This option and the irqs-off timing option can be
 	  used together or separately.)
 
-config SYSPROF_TRACER
-	bool "Sysprof Tracer"
-	depends on X86
-	select GENERIC_TRACER
-	select CONTEXT_SWITCH_TRACER
-	help
-	  This tracer provides the trace needed by the 'Sysprof' userspace
-	  tool.
-
 config SCHED_TRACER
 	bool "Scheduling Latency Tracer"
 	select GENERIC_TRACER
diff --git a/kernel/trace/Makefile b/kernel/trace/Makefile
index 84b2c9908dae..438e84a56ab3 100644
--- a/kernel/trace/Makefile
+++ b/kernel/trace/Makefile
@@ -30,7 +30,6 @@ obj-$(CONFIG_TRACING) += trace_output.o
 obj-$(CONFIG_TRACING) += trace_stat.o
 obj-$(CONFIG_TRACING) += trace_printk.o
 obj-$(CONFIG_CONTEXT_SWITCH_TRACER) += trace_sched_switch.o
-obj-$(CONFIG_SYSPROF_TRACER) += trace_sysprof.o
 obj-$(CONFIG_FUNCTION_TRACER) += trace_functions.o
 obj-$(CONFIG_IRQSOFF_TRACER) += trace_irqsoff.o
 obj-$(CONFIG_PREEMPT_TRACER) += trace_irqsoff.o
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 8683dec6946b..c1752dac613e 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -1331,61 +1331,6 @@ static void __trace_userstack(struct trace_array *tr, unsigned long flags)
 
 #endif /* CONFIG_STACKTRACE */
 
-static void
-ftrace_trace_special(void *__tr,
-		     unsigned long arg1, unsigned long arg2, unsigned long arg3,
-		     int pc)
-{
-	struct ftrace_event_call *call = &event_special;
-	struct ring_buffer_event *event;
-	struct trace_array *tr = __tr;
-	struct ring_buffer *buffer = tr->buffer;
-	struct special_entry *entry;
-
-	event = trace_buffer_lock_reserve(buffer, TRACE_SPECIAL,
-					  sizeof(*entry), 0, pc);
-	if (!event)
-		return;
-	entry = ring_buffer_event_data(event);
-	entry->arg1 = arg1;
-	entry->arg2 = arg2;
-	entry->arg3 = arg3;
-
-	if (!filter_check_discard(call, entry, buffer, event))
-		trace_buffer_unlock_commit(buffer, event, 0, pc);
-}
-
-void
-__trace_special(void *__tr, void *__data,
-		unsigned long arg1, unsigned long arg2, unsigned long arg3)
-{
-	ftrace_trace_special(__tr, arg1, arg2, arg3, preempt_count());
-}
-
-void
-ftrace_special(unsigned long arg1, unsigned long arg2, unsigned long arg3)
-{
-	struct trace_array *tr = &global_trace;
-	struct trace_array_cpu *data;
-	unsigned long flags;
-	int cpu;
-	int pc;
-
-	if (tracing_disabled)
-		return;
-
-	pc = preempt_count();
-	local_irq_save(flags);
-	cpu = raw_smp_processor_id();
-	data = tr->data[cpu];
-
-	if (likely(atomic_inc_return(&data->disabled) == 1))
-		ftrace_trace_special(tr, arg1, arg2, arg3, pc);
-
-	atomic_dec(&data->disabled);
-	local_irq_restore(flags);
-}
-
 /**
  * trace_vbprintk - write binary msg to tracing buffer
  *
@@ -2393,6 +2338,7 @@ static const struct file_operations show_traces_fops = {
 	.open		= show_traces_open,
 	.read		= seq_read,
 	.release	= seq_release,
+	.llseek		= seq_lseek,
 };
 
 /*
@@ -2486,6 +2432,7 @@ static const struct file_operations tracing_cpumask_fops = {
 	.open		= tracing_open_generic,
 	.read		= tracing_cpumask_read,
 	.write		= tracing_cpumask_write,
+	.llseek		= generic_file_llseek,
 };
 
 static int tracing_trace_options_show(struct seq_file *m, void *v)
@@ -2652,6 +2599,7 @@ tracing_readme_read(struct file *filp, char __user *ubuf,
 static const struct file_operations tracing_readme_fops = {
 	.open		= tracing_open_generic,
 	.read		= tracing_readme_read,
+	.llseek		= generic_file_llseek,
 };
 
 static ssize_t
@@ -2702,6 +2650,7 @@ tracing_saved_cmdlines_read(struct file *file, char __user *ubuf,
 static const struct file_operations tracing_saved_cmdlines_fops = {
 	.open		= tracing_open_generic,
 	.read		= tracing_saved_cmdlines_read,
+	.llseek		= generic_file_llseek,
 };
 
 static ssize_t
@@ -3031,6 +2980,7 @@ static int tracing_open_pipe(struct inode *inode, struct file *filp)
 	if (iter->trace->pipe_open)
 		iter->trace->pipe_open(iter);
 
+	nonseekable_open(inode, filp);
 out:
 	mutex_unlock(&trace_types_lock);
 	return ret;
@@ -3589,18 +3539,21 @@ static const struct file_operations tracing_max_lat_fops = {
 	.open		= tracing_open_generic,
 	.read		= tracing_max_lat_read,
 	.write		= tracing_max_lat_write,
+	.llseek		= generic_file_llseek,
 };
 
 static const struct file_operations tracing_ctrl_fops = {
 	.open		= tracing_open_generic,
 	.read		= tracing_ctrl_read,
 	.write		= tracing_ctrl_write,
+	.llseek		= generic_file_llseek,
 };
 
 static const struct file_operations set_tracer_fops = {
 	.open		= tracing_open_generic,
 	.read		= tracing_set_trace_read,
 	.write		= tracing_set_trace_write,
+	.llseek		= generic_file_llseek,
 };
 
 static const struct file_operations tracing_pipe_fops = {
@@ -3609,17 +3562,20 @@ static const struct file_operations tracing_pipe_fops = {
 	.read		= tracing_read_pipe,
 	.splice_read	= tracing_splice_read_pipe,
 	.release	= tracing_release_pipe,
+	.llseek		= no_llseek,
 };
 
 static const struct file_operations tracing_entries_fops = {
 	.open		= tracing_open_generic,
 	.read		= tracing_entries_read,
 	.write		= tracing_entries_write,
+	.llseek		= generic_file_llseek,
 };
 
 static const struct file_operations tracing_mark_fops = {
 	.open		= tracing_open_generic,
 	.write		= tracing_mark_write,
+	.llseek		= generic_file_llseek,
 };
 
 static const struct file_operations trace_clock_fops = {
@@ -3925,6 +3881,7 @@ tracing_stats_read(struct file *filp, char __user *ubuf,
 static const struct file_operations tracing_stats_fops = {
 	.open		= tracing_open_generic,
 	.read		= tracing_stats_read,
+	.llseek		= generic_file_llseek,
 };
 
 #ifdef CONFIG_DYNAMIC_FTRACE
@@ -3961,6 +3918,7 @@ tracing_read_dyn_info(struct file *filp, char __user *ubuf,
 static const struct file_operations tracing_dyn_info_fops = {
 	.open		= tracing_open_generic,
 	.read		= tracing_read_dyn_info,
+	.llseek		= generic_file_llseek,
 };
 #endif
 
@@ -4114,6 +4072,7 @@ static const struct file_operations trace_options_fops = {
 	.open		= tracing_open_generic,
 	.read		= trace_options_read,
 	.write		= trace_options_write,
+	.llseek		= generic_file_llseek,
 };
 
 static ssize_t
@@ -4165,6 +4124,7 @@ static const struct file_operations trace_options_core_fops = {
 	.open		= tracing_open_generic,
 	.read		= trace_options_core_read,
 	.write		= trace_options_core_write,
+	.llseek		= generic_file_llseek,
 };
 
 struct dentry *trace_create_file(const char *name,
@@ -4354,9 +4314,6 @@ static __init int tracer_init_debugfs(void)
 	trace_create_file("dyn_ftrace_total_info", 0444, d_tracer,
 			&ftrace_update_tot_cnt, &tracing_dyn_info_fops);
 #endif
-#ifdef CONFIG_SYSPROF_TRACER
-	init_tracer_sysprof_debugfs(d_tracer);
-#endif
 
 	create_trace_options_dir();
 
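Note: the trace.c hunks above all apply one pattern. Each debugfs control file gets an explicit .llseek method instead of silently inheriting default_llseek, and stream-style files such as trace_pipe are marked non-seekable. Below is a minimal, self-contained sketch of that pattern as a standalone module; every name in it (llseek_demo, demo_read, and so on) is hypothetical and not part of this commit.

#include <linux/debugfs.h>
#include <linux/fs.h>
#include <linux/module.h>

static struct dentry *llseek_demo_dir;

static int demo_open(struct inode *inode, struct file *filp)
{
	/* Same job as tracing_open_generic(): stash the inode's private data. */
	filp->private_data = inode->i_private;
	return 0;
}

static ssize_t demo_read(struct file *filp, char __user *ubuf,
			 size_t cnt, loff_t *ppos)
{
	static const char msg[] = "hello\n";

	/* simple_read_from_buffer() honours *ppos, so llseek is meaningful here. */
	return simple_read_from_buffer(ubuf, cnt, ppos, msg, sizeof(msg) - 1);
}

/* Regular control file: state the seek behaviour explicitly. */
static const struct file_operations demo_fops = {
	.open	= demo_open,
	.read	= demo_read,
	.llseek	= generic_file_llseek,
};

/* Stream-like file: refuse to seek, as tracing_pipe_fops now does. */
static const struct file_operations demo_stream_fops = {
	.open	= nonseekable_open,
	.read	= demo_read,
	.llseek	= no_llseek,
};

static int __init llseek_demo_init(void)
{
	llseek_demo_dir = debugfs_create_dir("llseek_demo", NULL);
	debugfs_create_file("ctl", 0444, llseek_demo_dir, NULL, &demo_fops);
	debugfs_create_file("stream", 0444, llseek_demo_dir, NULL,
			    &demo_stream_fops);
	return 0;
}

static void __exit llseek_demo_exit(void)
{
	debugfs_remove_recursive(llseek_demo_dir);
}

module_init(llseek_demo_init);
module_exit(llseek_demo_exit);
MODULE_LICENSE("GPL");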
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
index 84d3f123e86f..638a5887e2ec 100644
--- a/kernel/trace/trace.h
+++ b/kernel/trace/trace.h
@@ -22,7 +22,6 @@ enum trace_type {
 	TRACE_STACK,
 	TRACE_PRINT,
 	TRACE_BPRINT,
-	TRACE_SPECIAL,
 	TRACE_MMIO_RW,
 	TRACE_MMIO_MAP,
 	TRACE_BRANCH,
@@ -189,7 +188,6 @@ extern void __ftrace_bad_type(void);
 		IF_ASSIGN(var, ent, struct userstack_entry, TRACE_USER_STACK);\
 		IF_ASSIGN(var, ent, struct print_entry, TRACE_PRINT);	\
 		IF_ASSIGN(var, ent, struct bprint_entry, TRACE_BPRINT);	\
-		IF_ASSIGN(var, ent, struct special_entry, 0);		\
 		IF_ASSIGN(var, ent, struct trace_mmiotrace_rw,		\
 			  TRACE_MMIO_RW);				\
 		IF_ASSIGN(var, ent, struct trace_mmiotrace_map,		\
@@ -296,7 +294,6 @@ struct dentry *trace_create_file(const char *name,
 				 const struct file_operations *fops);
 
 struct dentry *tracing_init_dentry(void);
-void init_tracer_sysprof_debugfs(struct dentry *d_tracer);
 
 struct ring_buffer_event;
 
@@ -333,11 +330,6 @@ void tracing_sched_wakeup_trace(struct trace_array *tr,
 				struct task_struct *wakee,
 				struct task_struct *cur,
 				unsigned long flags, int pc);
-void trace_special(struct trace_array *tr,
-		   struct trace_array_cpu *data,
-		   unsigned long arg1,
-		   unsigned long arg2,
-		   unsigned long arg3, int pc);
 void trace_function(struct trace_array *tr,
 		    unsigned long ip,
 		    unsigned long parent_ip,
@@ -428,8 +420,6 @@ extern int trace_selftest_startup_nop(struct tracer *trace,
 					       struct trace_array *tr);
 extern int trace_selftest_startup_sched_switch(struct tracer *trace,
 					       struct trace_array *tr);
-extern int trace_selftest_startup_sysprof(struct tracer *trace,
-				       struct trace_array *tr);
 extern int trace_selftest_startup_branch(struct tracer *trace,
 					    struct trace_array *tr);
 #endif /* CONFIG_FTRACE_STARTUP_TEST */
diff --git a/kernel/trace/trace_entries.h b/kernel/trace/trace_entries.h
index 84128371f254..e3dfecaf13e6 100644
--- a/kernel/trace/trace_entries.h
+++ b/kernel/trace/trace_entries.h
@@ -151,23 +151,6 @@ FTRACE_ENTRY_DUP(wakeup, ctx_switch_entry,
 );
 
 /*
- * Special (free-form) trace entry:
- */
-FTRACE_ENTRY(special, special_entry,
-
-	TRACE_SPECIAL,
-
-	F_STRUCT(
-		__field(	unsigned long,	arg1	)
-		__field(	unsigned long,	arg2	)
-		__field(	unsigned long,	arg3	)
-	),
-
-	F_printk("(%08lx) (%08lx) (%08lx)",
-		 __entry->arg1, __entry->arg2, __entry->arg3)
-);
-
-/*
  * Stack-trace entry:
  */
 
diff --git a/kernel/trace/trace_output.c b/kernel/trace/trace_output.c
index 57c1b4596470..a46197b80b7f 100644
--- a/kernel/trace/trace_output.c
+++ b/kernel/trace/trace_output.c
@@ -1069,65 +1069,6 @@ static struct trace_event trace_wake_event = {
 	.funcs		= &trace_wake_funcs,
 };
 
-/* TRACE_SPECIAL */
-static enum print_line_t trace_special_print(struct trace_iterator *iter,
-					     int flags, struct trace_event *event)
-{
-	struct special_entry *field;
-
-	trace_assign_type(field, iter->ent);
-
-	if (!trace_seq_printf(&iter->seq, "# %ld %ld %ld\n",
-			      field->arg1,
-			      field->arg2,
-			      field->arg3))
-		return TRACE_TYPE_PARTIAL_LINE;
-
-	return TRACE_TYPE_HANDLED;
-}
-
-static enum print_line_t trace_special_hex(struct trace_iterator *iter,
-					   int flags, struct trace_event *event)
-{
-	struct special_entry *field;
-	struct trace_seq *s = &iter->seq;
-
-	trace_assign_type(field, iter->ent);
-
-	SEQ_PUT_HEX_FIELD_RET(s, field->arg1);
-	SEQ_PUT_HEX_FIELD_RET(s, field->arg2);
-	SEQ_PUT_HEX_FIELD_RET(s, field->arg3);
-
-	return TRACE_TYPE_HANDLED;
-}
-
-static enum print_line_t trace_special_bin(struct trace_iterator *iter,
-					   int flags, struct trace_event *event)
-{
-	struct special_entry *field;
-	struct trace_seq *s = &iter->seq;
-
-	trace_assign_type(field, iter->ent);
-
-	SEQ_PUT_FIELD_RET(s, field->arg1);
-	SEQ_PUT_FIELD_RET(s, field->arg2);
-	SEQ_PUT_FIELD_RET(s, field->arg3);
-
-	return TRACE_TYPE_HANDLED;
-}
-
-static struct trace_event_functions trace_special_funcs = {
-	.trace		= trace_special_print,
-	.raw		= trace_special_print,
-	.hex		= trace_special_hex,
-	.binary		= trace_special_bin,
-};
-
-static struct trace_event trace_special_event = {
-	.type		= TRACE_SPECIAL,
-	.funcs		= &trace_special_funcs,
-};
-
 /* TRACE_STACK */
 
 static enum print_line_t trace_stack_print(struct trace_iterator *iter,
@@ -1161,9 +1102,6 @@ static enum print_line_t trace_stack_print(struct trace_iterator *iter,
 
 static struct trace_event_functions trace_stack_funcs = {
 	.trace		= trace_stack_print,
-	.raw		= trace_special_print,
-	.hex		= trace_special_hex,
-	.binary		= trace_special_bin,
 };
 
 static struct trace_event trace_stack_event = {
@@ -1194,9 +1132,6 @@ static enum print_line_t trace_user_stack_print(struct trace_iterator *iter,
 
 static struct trace_event_functions trace_user_stack_funcs = {
 	.trace		= trace_user_stack_print,
-	.raw		= trace_special_print,
-	.hex		= trace_special_hex,
-	.binary		= trace_special_bin,
 };
 
 static struct trace_event trace_user_stack_event = {
@@ -1314,7 +1249,6 @@ static struct trace_event *events[] __initdata = {
 	&trace_fn_event,
 	&trace_ctx_event,
 	&trace_wake_event,
-	&trace_special_event,
 	&trace_stack_event,
 	&trace_user_stack_event,
 	&trace_bprint_event,
diff --git a/kernel/trace/trace_selftest.c b/kernel/trace/trace_selftest.c
index 39a5ca4cf15b..155a415b3209 100644
--- a/kernel/trace/trace_selftest.c
+++ b/kernel/trace/trace_selftest.c
@@ -13,7 +13,6 @@ static inline int trace_valid_entry(struct trace_entry *entry)
 	case TRACE_WAKE:
 	case TRACE_STACK:
 	case TRACE_PRINT:
-	case TRACE_SPECIAL:
 	case TRACE_BRANCH:
 	case TRACE_GRAPH_ENT:
 	case TRACE_GRAPH_RET:
@@ -690,38 +689,6 @@ trace_selftest_startup_sched_switch(struct tracer *trace, struct trace_array *tr
 }
 #endif /* CONFIG_CONTEXT_SWITCH_TRACER */
 
-#ifdef CONFIG_SYSPROF_TRACER
-int
-trace_selftest_startup_sysprof(struct tracer *trace, struct trace_array *tr)
-{
-	unsigned long count;
-	int ret;
-
-	/* start the tracing */
-	ret = tracer_init(trace, tr);
-	if (ret) {
-		warn_failed_init_tracer(trace, ret);
-		return ret;
-	}
-
-	/* Sleep for a 1/10 of a second */
-	msleep(100);
-	/* stop the tracing. */
-	tracing_stop();
-	/* check the trace buffer */
-	ret = trace_test_buffer(tr, &count);
-	trace->reset(tr);
-	tracing_start();
-
-	if (!ret && !count) {
-		printk(KERN_CONT ".. no entries found ..");
-		ret = -1;
-	}
-
-	return ret;
-}
-#endif /* CONFIG_SYSPROF_TRACER */
-
 #ifdef CONFIG_BRANCH_TRACER
 int
 trace_selftest_startup_branch(struct tracer *trace, struct trace_array *tr)
diff --git a/kernel/trace/trace_sysprof.c b/kernel/trace/trace_sysprof.c
deleted file mode 100644
index c080956f4d8e..000000000000
--- a/kernel/trace/trace_sysprof.c
+++ /dev/null
@@ -1,330 +0,0 @@
-/*
- * trace stack traces
- *
- * Copyright (C) 2004-2008, Soeren Sandmann
- * Copyright (C) 2007 Steven Rostedt <srostedt@redhat.com>
- * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
- */
-#include <linux/kallsyms.h>
-#include <linux/debugfs.h>
-#include <linux/hrtimer.h>
-#include <linux/uaccess.h>
-#include <linux/ftrace.h>
-#include <linux/module.h>
-#include <linux/irq.h>
-#include <linux/fs.h>
-
-#include <asm/stacktrace.h>
-
-#include "trace.h"
-
-static struct trace_array	*sysprof_trace;
-static int __read_mostly	tracer_enabled;
-
-/*
- * 1 msec sample interval by default:
- */
-static unsigned long sample_period = 1000000;
-static const unsigned int sample_max_depth = 512;
-
-static DEFINE_MUTEX(sample_timer_lock);
-/*
- * Per CPU hrtimers that do the profiling:
- */
-static DEFINE_PER_CPU(struct hrtimer, stack_trace_hrtimer);
-
-struct stack_frame_user {
-	const void __user	*next_fp;
-	unsigned long		return_address;
-};
-
-static int
-copy_stack_frame(const void __user *fp, struct stack_frame_user *frame)
-{
-	int ret;
-
-	if (!access_ok(VERIFY_READ, fp, sizeof(*frame)))
-		return 0;
-
-	ret = 1;
-	pagefault_disable();
-	if (__copy_from_user_inatomic(frame, fp, sizeof(*frame)))
-		ret = 0;
-	pagefault_enable();
-
-	return ret;
-}
-
-struct backtrace_info {
-	struct trace_array_cpu	*data;
-	struct trace_array	*tr;
-	int			pos;
-};
-
-static void
-backtrace_warning_symbol(void *data, char *msg, unsigned long symbol)
-{
-	/* Ignore warnings */
-}
-
-static void backtrace_warning(void *data, char *msg)
-{
-	/* Ignore warnings */
-}
-
-static int backtrace_stack(void *data, char *name)
-{
-	/* Don't bother with IRQ stacks for now */
-	return -1;
-}
-
-static void backtrace_address(void *data, unsigned long addr, int reliable)
-{
-	struct backtrace_info *info = data;
-
-	if (info->pos < sample_max_depth && reliable) {
-		__trace_special(info->tr, info->data, 1, addr, 0);
-
-		info->pos++;
-	}
-}
-
-static const struct stacktrace_ops backtrace_ops = {
-	.warning		= backtrace_warning,
-	.warning_symbol		= backtrace_warning_symbol,
-	.stack			= backtrace_stack,
-	.address		= backtrace_address,
-	.walk_stack		= print_context_stack,
-};
-
-static int
-trace_kernel(struct pt_regs *regs, struct trace_array *tr,
-	     struct trace_array_cpu *data)
-{
-	struct backtrace_info info;
-	unsigned long bp;
-	char *stack;
-
-	info.tr = tr;
-	info.data = data;
-	info.pos = 1;
-
-	__trace_special(info.tr, info.data, 1, regs->ip, 0);
-
-	stack = ((char *)regs + sizeof(struct pt_regs));
-#ifdef CONFIG_FRAME_POINTER
-	bp = regs->bp;
-#else
-	bp = 0;
-#endif
-
-	dump_trace(NULL, regs, (void *)stack, bp, &backtrace_ops, &info);
-
-	return info.pos;
-}
-
-static void timer_notify(struct pt_regs *regs, int cpu)
-{
-	struct trace_array_cpu *data;
-	struct stack_frame_user frame;
-	struct trace_array *tr;
-	const void __user *fp;
-	int is_user;
-	int i;
-
-	if (!regs)
-		return;
-
-	tr = sysprof_trace;
-	data = tr->data[cpu];
-	is_user = user_mode(regs);
-
-	if (!current || current->pid == 0)
-		return;
-
-	if (is_user && current->state != TASK_RUNNING)
-		return;
-
-	__trace_special(tr, data, 0, 0, current->pid);
-
-	if (!is_user)
-		i = trace_kernel(regs, tr, data);
-	else
-		i = 0;
-
-	/*
-	 * Trace user stack if we are not a kernel thread
-	 */
-	if (current->mm && i < sample_max_depth) {
-		regs = (struct pt_regs *)current->thread.sp0 - 1;
-
-		fp = (void __user *)regs->bp;
-
-		__trace_special(tr, data, 2, regs->ip, 0);
-
-		while (i < sample_max_depth) {
-			frame.next_fp = NULL;
-			frame.return_address = 0;
-			if (!copy_stack_frame(fp, &frame))
-				break;
-			if ((unsigned long)fp < regs->sp)
-				break;
-
-			__trace_special(tr, data, 2, frame.return_address,
-					(unsigned long)fp);
-			fp = frame.next_fp;
-
-			i++;
-		}
-
-	}
-
-	/*
-	 * Special trace entry if we overflow the max depth:
-	 */
-	if (i == sample_max_depth)
-		__trace_special(tr, data, -1, -1, -1);
-
-	__trace_special(tr, data, 3, current->pid, i);
-}
-
-static enum hrtimer_restart stack_trace_timer_fn(struct hrtimer *hrtimer)
-{
-	/* trace here */
-	timer_notify(get_irq_regs(), smp_processor_id());
-
-	hrtimer_forward_now(hrtimer, ns_to_ktime(sample_period));
-
-	return HRTIMER_RESTART;
-}
-
-static void start_stack_timer(void *unused)
-{
-	struct hrtimer *hrtimer = &__get_cpu_var(stack_trace_hrtimer);
-
-	hrtimer_init(hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
-	hrtimer->function = stack_trace_timer_fn;
-
-	hrtimer_start(hrtimer, ns_to_ktime(sample_period),
-		      HRTIMER_MODE_REL_PINNED);
-}
-
-static void start_stack_timers(void)
-{
-	on_each_cpu(start_stack_timer, NULL, 1);
-}
-
-static void stop_stack_timer(int cpu)
-{
-	struct hrtimer *hrtimer = &per_cpu(stack_trace_hrtimer, cpu);
-
-	hrtimer_cancel(hrtimer);
-}
-
-static void stop_stack_timers(void)
-{
-	int cpu;
-
-	for_each_online_cpu(cpu)
-		stop_stack_timer(cpu);
-}
-
-static void stop_stack_trace(struct trace_array *tr)
-{
-	mutex_lock(&sample_timer_lock);
-	stop_stack_timers();
-	tracer_enabled = 0;
-	mutex_unlock(&sample_timer_lock);
-}
-
-static int stack_trace_init(struct trace_array *tr)
-{
-	sysprof_trace = tr;
-
-	tracing_start_cmdline_record();
-
-	mutex_lock(&sample_timer_lock);
-	start_stack_timers();
-	tracer_enabled = 1;
-	mutex_unlock(&sample_timer_lock);
-	return 0;
-}
-
-static void stack_trace_reset(struct trace_array *tr)
-{
-	tracing_stop_cmdline_record();
-	stop_stack_trace(tr);
-}
-
-static struct tracer stack_trace __read_mostly =
-{
-	.name		= "sysprof",
-	.init		= stack_trace_init,
-	.reset		= stack_trace_reset,
-#ifdef CONFIG_FTRACE_SELFTEST
-	.selftest	= trace_selftest_startup_sysprof,
-#endif
-};
-
-__init static int init_stack_trace(void)
-{
-	return register_tracer(&stack_trace);
-}
-device_initcall(init_stack_trace);
-
-#define MAX_LONG_DIGITS 22
-
-static ssize_t
-sysprof_sample_read(struct file *filp, char __user *ubuf,
-		    size_t cnt, loff_t *ppos)
-{
-	char buf[MAX_LONG_DIGITS];
-	int r;
-
-	r = sprintf(buf, "%ld\n", nsecs_to_usecs(sample_period));
-
-	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
-}
-
-static ssize_t
-sysprof_sample_write(struct file *filp, const char __user *ubuf,
-		     size_t cnt, loff_t *ppos)
-{
-	char buf[MAX_LONG_DIGITS];
-	unsigned long val;
-
-	if (cnt > MAX_LONG_DIGITS-1)
-		cnt = MAX_LONG_DIGITS-1;
-
-	if (copy_from_user(&buf, ubuf, cnt))
-		return -EFAULT;
-
-	buf[cnt] = 0;
-
-	val = simple_strtoul(buf, NULL, 10);
-	/*
-	 * Enforce a minimum sample period of 100 usecs:
-	 */
-	if (val < 100)
-		val = 100;
-
-	mutex_lock(&sample_timer_lock);
-	stop_stack_timers();
-	sample_period = val * 1000;
-	start_stack_timers();
-	mutex_unlock(&sample_timer_lock);
-
-	return cnt;
-}
-
-static const struct file_operations sysprof_sample_fops = {
-	.read		= sysprof_sample_read,
-	.write		= sysprof_sample_write,
-};
-
-void init_tracer_sysprof_debugfs(struct dentry *d_tracer)
-{
-
-	trace_create_file("sysprof_sample_period", 0644,
-			d_tracer, NULL, &sysprof_sample_fops);
-}
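Note: the core of the deleted tracer is a pinned per-CPU hrtimer that fires every sample_period nanoseconds in hard-irq context and records one sample per tick. The following is a stripped-down, self-contained sketch of just that timer skeleton; the names are illustrative, and the stack-walking and trace-recording work of the original is replaced here by a per-CPU counter.

#include <linux/cpumask.h>
#include <linux/hrtimer.h>
#include <linux/ktime.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/smp.h>

/* 1 ms period, matching the deleted file's default. */
static unsigned long sample_period_ns = 1000000;

static DEFINE_PER_CPU(struct hrtimer, sample_hrtimer);
static DEFINE_PER_CPU(unsigned long, sample_count);

static enum hrtimer_restart sample_timer_fn(struct hrtimer *hrtimer)
{
	/* Runs in hardirq context; the real tracer walked stacks here. */
	__this_cpu_inc(sample_count);

	hrtimer_forward_now(hrtimer, ns_to_ktime(sample_period_ns));
	return HRTIMER_RESTART;
}

static void start_sample_timer(void *unused)
{
	struct hrtimer *hrtimer = this_cpu_ptr(&sample_hrtimer);

	hrtimer_init(hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	hrtimer->function = sample_timer_fn;
	hrtimer_start(hrtimer, ns_to_ktime(sample_period_ns),
		      HRTIMER_MODE_REL_PINNED);
}

static int __init sample_init(void)
{
	/* Arm one pinned timer on every online CPU, as the tracer did. */
	on_each_cpu(start_sample_timer, NULL, 1);
	return 0;
}

static void __exit sample_exit(void)
{
	int cpu;

	for_each_online_cpu(cpu)
		hrtimer_cancel(per_cpu_ptr(&sample_hrtimer, cpu));
}

module_init(sample_init);
module_exit(sample_exit);
MODULE_LICENSE("GPL");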