Diffstat (limited to 'kernel')
-rw-r--r--  kernel/early_res.c            |   6
-rw-r--r--  kernel/trace/Kconfig          |   9
-rw-r--r--  kernel/trace/Makefile         |   1
-rw-r--r--  kernel/trace/trace.c          |  73
-rw-r--r--  kernel/trace/trace.h          |  10
-rw-r--r--  kernel/trace/trace_entries.h  |  17
-rw-r--r--  kernel/trace/trace_output.c   |  66
-rw-r--r--  kernel/trace/trace_selftest.c |  33
-rw-r--r--  kernel/trace/trace_sysprof.c  | 330
9 files changed, 21 insertions(+), 524 deletions(-)
diff --git a/kernel/early_res.c b/kernel/early_res.c
index 31aa9332ef3f..7bfae887f211 100644
--- a/kernel/early_res.c
+++ b/kernel/early_res.c
@@ -7,6 +7,8 @@
 #include <linux/bootmem.h>
 #include <linux/mm.h>
 #include <linux/early_res.h>
+#include <linux/slab.h>
+#include <linux/kmemleak.h>
 
 /*
  * Early reserved memory areas.
@@ -319,6 +321,8 @@ void __init free_early(u64 start, u64 end)
 	struct early_res *r;
 	int i;
 
+	kmemleak_free_part(__va(start), end - start);
+
 	i = find_overlapped_early(start, end);
 	r = &early_res[i];
 	if (i >= max_early_res || r->end != end || r->start != start)
@@ -333,6 +337,8 @@ void __init free_early_partial(u64 start, u64 end)
 	struct early_res *r;
 	int i;
 
+	kmemleak_free_part(__va(start), end - start);
+
 	if (start == end)
 		return;
 
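Note: both early_res.c hunks follow the same pattern. Just before an early-reserved physical range [start, end) is released, kmemleak is told to forget it so it no longer treats that range as a live, scannable object. kmemleak_free_part() takes a kernel virtual address and a length, hence the __va(start) translation and the end - start size. A minimal sketch of the pattern, with a made-up helper name (release_early_range() is not part of this patch):

#include <linux/kmemleak.h>

/* Hypothetical helper, for illustration only: release part of an
 * early-reserved physical range and keep kmemleak's view consistent. */
static void __init release_early_range(u64 start, u64 end)
{
	/* kmemleak tracks virtual addresses, so translate the physical start */
	kmemleak_free_part(__va(start), end - start);

	/* ...the actual early_res[] bookkeeping would follow here... */
}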
diff --git a/kernel/trace/Kconfig b/kernel/trace/Kconfig
index f5306cb0afb1..c7683fd8a03a 100644
--- a/kernel/trace/Kconfig
+++ b/kernel/trace/Kconfig
@@ -194,15 +194,6 @@ config PREEMPT_TRACER
 	  enabled. This option and the irqs-off timing option can be
 	  used together or separately.)
 
-config SYSPROF_TRACER
-	bool "Sysprof Tracer"
-	depends on X86
-	select GENERIC_TRACER
-	select CONTEXT_SWITCH_TRACER
-	help
-	  This tracer provides the trace needed by the 'Sysprof' userspace
-	  tool.
-
 config SCHED_TRACER
 	bool "Scheduling Latency Tracer"
 	select GENERIC_TRACER
diff --git a/kernel/trace/Makefile b/kernel/trace/Makefile
index 84b2c9908dae..438e84a56ab3 100644
--- a/kernel/trace/Makefile
+++ b/kernel/trace/Makefile
@@ -30,7 +30,6 @@ obj-$(CONFIG_TRACING) += trace_output.o
 obj-$(CONFIG_TRACING) += trace_stat.o
 obj-$(CONFIG_TRACING) += trace_printk.o
 obj-$(CONFIG_CONTEXT_SWITCH_TRACER) += trace_sched_switch.o
-obj-$(CONFIG_SYSPROF_TRACER) += trace_sysprof.o
 obj-$(CONFIG_FUNCTION_TRACER) += trace_functions.o
 obj-$(CONFIG_IRQSOFF_TRACER) += trace_irqsoff.o
 obj-$(CONFIG_PREEMPT_TRACER) += trace_irqsoff.o
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index cacb6f083ecb..4b1122d0df37 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -1341,61 +1341,6 @@ static void __trace_userstack(struct trace_array *tr, unsigned long flags)
 
 #endif /* CONFIG_STACKTRACE */
 
-static void
-ftrace_trace_special(void *__tr,
-		     unsigned long arg1, unsigned long arg2, unsigned long arg3,
-		     int pc)
-{
-	struct ftrace_event_call *call = &event_special;
-	struct ring_buffer_event *event;
-	struct trace_array *tr = __tr;
-	struct ring_buffer *buffer = tr->buffer;
-	struct special_entry *entry;
-
-	event = trace_buffer_lock_reserve(buffer, TRACE_SPECIAL,
-					  sizeof(*entry), 0, pc);
-	if (!event)
-		return;
-	entry	= ring_buffer_event_data(event);
-	entry->arg1			= arg1;
-	entry->arg2			= arg2;
-	entry->arg3			= arg3;
-
-	if (!filter_check_discard(call, entry, buffer, event))
-		trace_buffer_unlock_commit(buffer, event, 0, pc);
-}
-
-void
-__trace_special(void *__tr, void *__data,
-		unsigned long arg1, unsigned long arg2, unsigned long arg3)
-{
-	ftrace_trace_special(__tr, arg1, arg2, arg3, preempt_count());
-}
-
-void
-ftrace_special(unsigned long arg1, unsigned long arg2, unsigned long arg3)
-{
-	struct trace_array *tr = &global_trace;
-	struct trace_array_cpu *data;
-	unsigned long flags;
-	int cpu;
-	int pc;
-
-	if (tracing_disabled)
-		return;
-
-	pc = preempt_count();
-	local_irq_save(flags);
-	cpu = raw_smp_processor_id();
-	data = tr->data[cpu];
-
-	if (likely(atomic_inc_return(&data->disabled) == 1))
-		ftrace_trace_special(tr, arg1, arg2, arg3, pc);
-
-	atomic_dec(&data->disabled);
-	local_irq_restore(flags);
-}
-
 /**
  * trace_vbprintk - write binary msg to tracing buffer
  *
@@ -2403,6 +2348,7 @@ static const struct file_operations show_traces_fops = {
 	.open		= show_traces_open,
 	.read		= seq_read,
 	.release	= seq_release,
+	.llseek		= seq_lseek,
 };
 
 /*
@@ -2496,6 +2442,7 @@ static const struct file_operations tracing_cpumask_fops = {
 	.open		= tracing_open_generic,
 	.read		= tracing_cpumask_read,
 	.write		= tracing_cpumask_write,
+	.llseek		= generic_file_llseek,
 };
 
 static int tracing_trace_options_show(struct seq_file *m, void *v)
@@ -2665,6 +2612,7 @@ tracing_readme_read(struct file *filp, char __user *ubuf,
 static const struct file_operations tracing_readme_fops = {
 	.open		= tracing_open_generic,
 	.read		= tracing_readme_read,
+	.llseek		= generic_file_llseek,
 };
 
 static ssize_t
@@ -2715,6 +2663,7 @@ tracing_saved_cmdlines_read(struct file *file, char __user *ubuf,
 static const struct file_operations tracing_saved_cmdlines_fops = {
 	.open		= tracing_open_generic,
 	.read		= tracing_saved_cmdlines_read,
+	.llseek		= generic_file_llseek,
 };
 
 static ssize_t
@@ -3064,6 +3013,7 @@ static int tracing_open_pipe(struct inode *inode, struct file *filp)
 	if (iter->trace->pipe_open)
 		iter->trace->pipe_open(iter);
 
+	nonseekable_open(inode, filp);
 out:
 	mutex_unlock(&trace_types_lock);
 	return ret;
@@ -3621,18 +3571,21 @@ static const struct file_operations tracing_max_lat_fops = {
 	.open		= tracing_open_generic,
 	.read		= tracing_max_lat_read,
 	.write		= tracing_max_lat_write,
+	.llseek		= generic_file_llseek,
 };
 
 static const struct file_operations tracing_ctrl_fops = {
 	.open		= tracing_open_generic,
 	.read		= tracing_ctrl_read,
 	.write		= tracing_ctrl_write,
+	.llseek		= generic_file_llseek,
 };
 
 static const struct file_operations set_tracer_fops = {
 	.open		= tracing_open_generic,
 	.read		= tracing_set_trace_read,
 	.write		= tracing_set_trace_write,
+	.llseek		= generic_file_llseek,
 };
 
 static const struct file_operations tracing_pipe_fops = {
@@ -3641,17 +3594,20 @@ static const struct file_operations tracing_pipe_fops = {
 	.read		= tracing_read_pipe,
 	.splice_read	= tracing_splice_read_pipe,
 	.release	= tracing_release_pipe,
+	.llseek		= no_llseek,
 };
 
 static const struct file_operations tracing_entries_fops = {
 	.open		= tracing_open_generic,
 	.read		= tracing_entries_read,
 	.write		= tracing_entries_write,
+	.llseek		= generic_file_llseek,
 };
 
 static const struct file_operations tracing_mark_fops = {
 	.open		= tracing_open_generic,
 	.write		= tracing_mark_write,
+	.llseek		= generic_file_llseek,
 };
 
 static const struct file_operations trace_clock_fops = {
@@ -3957,6 +3913,7 @@ tracing_stats_read(struct file *filp, char __user *ubuf,
 static const struct file_operations tracing_stats_fops = {
 	.open		= tracing_open_generic,
 	.read		= tracing_stats_read,
+	.llseek		= generic_file_llseek,
 };
 
 #ifdef CONFIG_DYNAMIC_FTRACE
@@ -3993,6 +3950,7 @@ tracing_read_dyn_info(struct file *filp, char __user *ubuf,
 static const struct file_operations tracing_dyn_info_fops = {
 	.open		= tracing_open_generic,
 	.read		= tracing_read_dyn_info,
+	.llseek		= generic_file_llseek,
 };
 #endif
 
@@ -4146,6 +4104,7 @@ static const struct file_operations trace_options_fops = {
 	.open		= tracing_open_generic,
 	.read		= trace_options_read,
 	.write		= trace_options_write,
+	.llseek		= generic_file_llseek,
 };
 
 static ssize_t
@@ -4197,6 +4156,7 @@ static const struct file_operations trace_options_core_fops = {
 	.open		= tracing_open_generic,
 	.read		= trace_options_core_read,
 	.write		= trace_options_core_write,
+	.llseek		= generic_file_llseek,
 };
 
 struct dentry *trace_create_file(const char *name,
@@ -4386,9 +4346,6 @@ static __init int tracer_init_debugfs(void)
 	trace_create_file("dyn_ftrace_total_info", 0444, d_tracer,
 			&ftrace_update_tot_cnt, &tracing_dyn_info_fops);
 #endif
-#ifdef CONFIG_SYSPROF_TRACER
-	init_tracer_sysprof_debugfs(d_tracer);
-#endif
 
 	create_trace_options_dir();
 
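Note: the trace.c additions above give every tracing debugfs file an explicit .llseek method instead of relying on the default: plain read/write control files get generic_file_llseek, the seq_file-backed show_traces_fops gets seq_lseek, and the streaming trace_pipe file is marked non-seekable with no_llseek plus a nonseekable_open() call in its open handler. A small sketch of the two resulting patterns, assuming hypothetical example_read()/example_write() handlers that are not part of this patch:

#include <linux/fs.h>

/* Hypothetical handlers, named only for illustration. */
static ssize_t example_read(struct file *filp, char __user *ubuf,
			    size_t cnt, loff_t *ppos);
static ssize_t example_write(struct file *filp, const char __user *ubuf,
			     size_t cnt, loff_t *ppos);

/* Seekable control file: the file position is meaningful, so say so explicitly. */
static const struct file_operations example_ctl_fops = {
	.read	= example_read,
	.write	= example_write,
	.llseek	= generic_file_llseek,
};

/* Streaming file: seeking makes no sense, so refuse it on both paths. */
static int example_stream_open(struct inode *inode, struct file *filp)
{
	return nonseekable_open(inode, filp);	/* clears the seekable f_mode bits */
}

static const struct file_operations example_stream_fops = {
	.open	= example_stream_open,
	.read	= example_read,
	.llseek	= no_llseek,			/* lseek() returns -ESPIPE */
};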
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
index cb629b3b108c..d05c873dd4b2 100644
--- a/kernel/trace/trace.h
+++ b/kernel/trace/trace.h
@@ -22,7 +22,6 @@ enum trace_type {
 	TRACE_STACK,
 	TRACE_PRINT,
 	TRACE_BPRINT,
-	TRACE_SPECIAL,
 	TRACE_MMIO_RW,
 	TRACE_MMIO_MAP,
 	TRACE_BRANCH,
@@ -189,7 +188,6 @@ extern void __ftrace_bad_type(void);
 		IF_ASSIGN(var, ent, struct userstack_entry, TRACE_USER_STACK);\
 		IF_ASSIGN(var, ent, struct print_entry, TRACE_PRINT);	\
 		IF_ASSIGN(var, ent, struct bprint_entry, TRACE_BPRINT);\
-		IF_ASSIGN(var, ent, struct special_entry, 0);		\
 		IF_ASSIGN(var, ent, struct trace_mmiotrace_rw,		\
 			  TRACE_MMIO_RW);				\
 		IF_ASSIGN(var, ent, struct trace_mmiotrace_map,		\
@@ -297,7 +295,6 @@ struct dentry *trace_create_file(const char *name,
 				 const struct file_operations *fops);
 
 struct dentry *tracing_init_dentry(void);
-void init_tracer_sysprof_debugfs(struct dentry *d_tracer);
 
 struct ring_buffer_event;
 
@@ -334,11 +331,6 @@ void tracing_sched_wakeup_trace(struct trace_array *tr,
 				struct task_struct *wakee,
 				struct task_struct *cur,
 				unsigned long flags, int pc);
-void trace_special(struct trace_array *tr,
-		   struct trace_array_cpu *data,
-		   unsigned long arg1,
-		   unsigned long arg2,
-		   unsigned long arg3, int pc);
 void trace_function(struct trace_array *tr,
 		    unsigned long ip,
 		    unsigned long parent_ip,
@@ -429,8 +421,6 @@ extern int trace_selftest_startup_nop(struct tracer *trace,
 				       struct trace_array *tr);
 extern int trace_selftest_startup_sched_switch(struct tracer *trace,
 					       struct trace_array *tr);
-extern int trace_selftest_startup_sysprof(struct tracer *trace,
-					  struct trace_array *tr);
 extern int trace_selftest_startup_branch(struct tracer *trace,
 					 struct trace_array *tr);
 #endif /* CONFIG_FTRACE_STARTUP_TEST */
diff --git a/kernel/trace/trace_entries.h b/kernel/trace/trace_entries.h
index 84128371f254..e3dfecaf13e6 100644
--- a/kernel/trace/trace_entries.h
+++ b/kernel/trace/trace_entries.h
@@ -151,23 +151,6 @@ FTRACE_ENTRY_DUP(wakeup, ctx_switch_entry,
 );
 
 /*
- * Special (free-form) trace entry:
- */
-FTRACE_ENTRY(special, special_entry,
-
-	TRACE_SPECIAL,
-
-	F_STRUCT(
-		__field(	unsigned long,	arg1	)
-		__field(	unsigned long,	arg2	)
-		__field(	unsigned long,	arg3	)
-	),
-
-	F_printk("(%08lx) (%08lx) (%08lx)",
-		 __entry->arg1, __entry->arg2, __entry->arg3)
-);
-
-/*
  * Stack-trace entry:
  */
 
diff --git a/kernel/trace/trace_output.c b/kernel/trace/trace_output.c
index 1ba64d3cc567..02272baa2206 100644
--- a/kernel/trace/trace_output.c
+++ b/kernel/trace/trace_output.c
@@ -1066,65 +1066,6 @@ static struct trace_event trace_wake_event = {
 	.funcs		= &trace_wake_funcs,
 };
 
-/* TRACE_SPECIAL */
-static enum print_line_t trace_special_print(struct trace_iterator *iter,
-					     int flags, struct trace_event *event)
-{
-	struct special_entry *field;
-
-	trace_assign_type(field, iter->ent);
-
-	if (!trace_seq_printf(&iter->seq, "# %ld %ld %ld\n",
-			      field->arg1,
-			      field->arg2,
-			      field->arg3))
-		return TRACE_TYPE_PARTIAL_LINE;
-
-	return TRACE_TYPE_HANDLED;
-}
-
-static enum print_line_t trace_special_hex(struct trace_iterator *iter,
-					   int flags, struct trace_event *event)
-{
-	struct special_entry *field;
-	struct trace_seq *s = &iter->seq;
-
-	trace_assign_type(field, iter->ent);
-
-	SEQ_PUT_HEX_FIELD_RET(s, field->arg1);
-	SEQ_PUT_HEX_FIELD_RET(s, field->arg2);
-	SEQ_PUT_HEX_FIELD_RET(s, field->arg3);
-
-	return TRACE_TYPE_HANDLED;
-}
-
-static enum print_line_t trace_special_bin(struct trace_iterator *iter,
-					   int flags, struct trace_event *event)
-{
-	struct special_entry *field;
-	struct trace_seq *s = &iter->seq;
-
-	trace_assign_type(field, iter->ent);
-
-	SEQ_PUT_FIELD_RET(s, field->arg1);
-	SEQ_PUT_FIELD_RET(s, field->arg2);
-	SEQ_PUT_FIELD_RET(s, field->arg3);
-
-	return TRACE_TYPE_HANDLED;
-}
-
-static struct trace_event_functions trace_special_funcs = {
-	.trace		= trace_special_print,
-	.raw		= trace_special_print,
-	.hex		= trace_special_hex,
-	.binary		= trace_special_bin,
-};
-
-static struct trace_event trace_special_event = {
-	.type		= TRACE_SPECIAL,
-	.funcs		= &trace_special_funcs,
-};
-
 /* TRACE_STACK */
 
 static enum print_line_t trace_stack_print(struct trace_iterator *iter,
@@ -1158,9 +1099,6 @@ static enum print_line_t trace_stack_print(struct trace_iterator *iter,
 
 static struct trace_event_functions trace_stack_funcs = {
 	.trace		= trace_stack_print,
-	.raw		= trace_special_print,
-	.hex		= trace_special_hex,
-	.binary		= trace_special_bin,
 };
 
 static struct trace_event trace_stack_event = {
@@ -1191,9 +1129,6 @@ static enum print_line_t trace_user_stack_print(struct trace_iterator *iter,
 
 static struct trace_event_functions trace_user_stack_funcs = {
 	.trace		= trace_user_stack_print,
-	.raw		= trace_special_print,
-	.hex		= trace_special_hex,
-	.binary		= trace_special_bin,
 };
 
 static struct trace_event trace_user_stack_event = {
@@ -1311,7 +1246,6 @@ static struct trace_event *events[] __initdata = {
 	&trace_fn_event,
 	&trace_ctx_event,
 	&trace_wake_event,
-	&trace_special_event,
 	&trace_stack_event,
 	&trace_user_stack_event,
 	&trace_bprint_event,
diff --git a/kernel/trace/trace_selftest.c b/kernel/trace/trace_selftest.c
index 39a5ca4cf15b..155a415b3209 100644
--- a/kernel/trace/trace_selftest.c
+++ b/kernel/trace/trace_selftest.c
@@ -13,7 +13,6 @@ static inline int trace_valid_entry(struct trace_entry *entry)
 	case TRACE_WAKE:
 	case TRACE_STACK:
 	case TRACE_PRINT:
-	case TRACE_SPECIAL:
 	case TRACE_BRANCH:
 	case TRACE_GRAPH_ENT:
 	case TRACE_GRAPH_RET:
@@ -690,38 +689,6 @@ trace_selftest_startup_sched_switch(struct tracer *trace, struct trace_array *tr
 }
 #endif /* CONFIG_CONTEXT_SWITCH_TRACER */
 
-#ifdef CONFIG_SYSPROF_TRACER
-int
-trace_selftest_startup_sysprof(struct tracer *trace, struct trace_array *tr)
-{
-	unsigned long count;
-	int ret;
-
-	/* start the tracing */
-	ret = tracer_init(trace, tr);
-	if (ret) {
-		warn_failed_init_tracer(trace, ret);
-		return ret;
-	}
-
-	/* Sleep for a 1/10 of a second */
-	msleep(100);
-	/* stop the tracing. */
-	tracing_stop();
-	/* check the trace buffer */
-	ret = trace_test_buffer(tr, &count);
-	trace->reset(tr);
-	tracing_start();
-
-	if (!ret && !count) {
-		printk(KERN_CONT ".. no entries found ..");
-		ret = -1;
-	}
-
-	return ret;
-}
-#endif /* CONFIG_SYSPROF_TRACER */
-
 #ifdef CONFIG_BRANCH_TRACER
 int
 trace_selftest_startup_branch(struct tracer *trace, struct trace_array *tr)
diff --git a/kernel/trace/trace_sysprof.c b/kernel/trace/trace_sysprof.c
deleted file mode 100644
index c080956f4d8e..000000000000
--- a/kernel/trace/trace_sysprof.c
+++ /dev/null
@@ -1,330 +0,0 @@
-/*
- * trace stack traces
- *
- * Copyright (C) 2004-2008, Soeren Sandmann
- * Copyright (C) 2007 Steven Rostedt <srostedt@redhat.com>
- * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
- */
-#include <linux/kallsyms.h>
-#include <linux/debugfs.h>
-#include <linux/hrtimer.h>
-#include <linux/uaccess.h>
-#include <linux/ftrace.h>
-#include <linux/module.h>
-#include <linux/irq.h>
-#include <linux/fs.h>
-
-#include <asm/stacktrace.h>
-
-#include "trace.h"
-
-static struct trace_array	*sysprof_trace;
-static int __read_mostly	tracer_enabled;
-
-/*
- * 1 msec sample interval by default:
- */
-static unsigned long sample_period = 1000000;
-static const unsigned int sample_max_depth = 512;
-
-static DEFINE_MUTEX(sample_timer_lock);
-/*
- * Per CPU hrtimers that do the profiling:
- */
-static DEFINE_PER_CPU(struct hrtimer, stack_trace_hrtimer);
-
-struct stack_frame_user {
-	const void __user	*next_fp;
-	unsigned long		return_address;
-};
-
-static int
-copy_stack_frame(const void __user *fp, struct stack_frame_user *frame)
-{
-	int ret;
-
-	if (!access_ok(VERIFY_READ, fp, sizeof(*frame)))
-		return 0;
-
-	ret = 1;
-	pagefault_disable();
-	if (__copy_from_user_inatomic(frame, fp, sizeof(*frame)))
-		ret = 0;
-	pagefault_enable();
-
-	return ret;
-}
-
-struct backtrace_info {
-	struct trace_array_cpu	*data;
-	struct trace_array	*tr;
-	int			pos;
-};
-
-static void
-backtrace_warning_symbol(void *data, char *msg, unsigned long symbol)
-{
-	/* Ignore warnings */
-}
-
-static void backtrace_warning(void *data, char *msg)
-{
-	/* Ignore warnings */
-}
-
-static int backtrace_stack(void *data, char *name)
-{
-	/* Don't bother with IRQ stacks for now */
-	return -1;
-}
-
-static void backtrace_address(void *data, unsigned long addr, int reliable)
-{
-	struct backtrace_info *info = data;
-
-	if (info->pos < sample_max_depth && reliable) {
-		__trace_special(info->tr, info->data, 1, addr, 0);
-
-		info->pos++;
-	}
-}
-
-static const struct stacktrace_ops backtrace_ops = {
-	.warning		= backtrace_warning,
-	.warning_symbol		= backtrace_warning_symbol,
-	.stack			= backtrace_stack,
-	.address		= backtrace_address,
-	.walk_stack		= print_context_stack,
-};
-
-static int
-trace_kernel(struct pt_regs *regs, struct trace_array *tr,
-	     struct trace_array_cpu *data)
-{
-	struct backtrace_info info;
-	unsigned long bp;
-	char *stack;
-
-	info.tr = tr;
-	info.data = data;
-	info.pos = 1;
-
-	__trace_special(info.tr, info.data, 1, regs->ip, 0);
-
-	stack = ((char *)regs + sizeof(struct pt_regs));
-#ifdef CONFIG_FRAME_POINTER
-	bp = regs->bp;
-#else
-	bp = 0;
-#endif
-
-	dump_trace(NULL, regs, (void *)stack, bp, &backtrace_ops, &info);
-
-	return info.pos;
-}
-
-static void timer_notify(struct pt_regs *regs, int cpu)
-{
-	struct trace_array_cpu *data;
-	struct stack_frame_user frame;
-	struct trace_array *tr;
-	const void __user *fp;
-	int is_user;
-	int i;
-
-	if (!regs)
-		return;
-
-	tr = sysprof_trace;
-	data = tr->data[cpu];
-	is_user = user_mode(regs);
-
-	if (!current || current->pid == 0)
-		return;
-
-	if (is_user && current->state != TASK_RUNNING)
-		return;
-
-	__trace_special(tr, data, 0, 0, current->pid);
-
-	if (!is_user)
-		i = trace_kernel(regs, tr, data);
-	else
-		i = 0;
-
-	/*
-	 * Trace user stack if we are not a kernel thread
-	 */
-	if (current->mm && i < sample_max_depth) {
-		regs = (struct pt_regs *)current->thread.sp0 - 1;
-
-		fp = (void __user *)regs->bp;
-
-		__trace_special(tr, data, 2, regs->ip, 0);
-
-		while (i < sample_max_depth) {
-			frame.next_fp = NULL;
-			frame.return_address = 0;
-			if (!copy_stack_frame(fp, &frame))
-				break;
-			if ((unsigned long)fp < regs->sp)
-				break;
-
-			__trace_special(tr, data, 2, frame.return_address,
-					(unsigned long)fp);
-			fp = frame.next_fp;
-
-			i++;
-		}
-
-	}
-
-	/*
-	 * Special trace entry if we overflow the max depth:
-	 */
-	if (i == sample_max_depth)
-		__trace_special(tr, data, -1, -1, -1);
-
-	__trace_special(tr, data, 3, current->pid, i);
-}
-
-static enum hrtimer_restart stack_trace_timer_fn(struct hrtimer *hrtimer)
-{
-	/* trace here */
-	timer_notify(get_irq_regs(), smp_processor_id());
-
-	hrtimer_forward_now(hrtimer, ns_to_ktime(sample_period));
-
-	return HRTIMER_RESTART;
-}
-
-static void start_stack_timer(void *unused)
-{
-	struct hrtimer *hrtimer = &__get_cpu_var(stack_trace_hrtimer);
-
-	hrtimer_init(hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
-	hrtimer->function = stack_trace_timer_fn;
-
-	hrtimer_start(hrtimer, ns_to_ktime(sample_period),
-		      HRTIMER_MODE_REL_PINNED);
-}
-
-static void start_stack_timers(void)
-{
-	on_each_cpu(start_stack_timer, NULL, 1);
-}
-
-static void stop_stack_timer(int cpu)
-{
-	struct hrtimer *hrtimer = &per_cpu(stack_trace_hrtimer, cpu);
-
-	hrtimer_cancel(hrtimer);
-}
-
-static void stop_stack_timers(void)
-{
-	int cpu;
-
-	for_each_online_cpu(cpu)
-		stop_stack_timer(cpu);
-}
-
-static void stop_stack_trace(struct trace_array *tr)
-{
-	mutex_lock(&sample_timer_lock);
-	stop_stack_timers();
-	tracer_enabled = 0;
-	mutex_unlock(&sample_timer_lock);
-}
-
-static int stack_trace_init(struct trace_array *tr)
-{
-	sysprof_trace = tr;
-
-	tracing_start_cmdline_record();
-
-	mutex_lock(&sample_timer_lock);
-	start_stack_timers();
-	tracer_enabled = 1;
-	mutex_unlock(&sample_timer_lock);
-	return 0;
-}
-
-static void stack_trace_reset(struct trace_array *tr)
-{
-	tracing_stop_cmdline_record();
-	stop_stack_trace(tr);
-}
-
-static struct tracer stack_trace __read_mostly =
-{
-	.name		= "sysprof",
-	.init		= stack_trace_init,
-	.reset		= stack_trace_reset,
-#ifdef CONFIG_FTRACE_SELFTEST
-	.selftest	= trace_selftest_startup_sysprof,
-#endif
-};
-
-__init static int init_stack_trace(void)
-{
-	return register_tracer(&stack_trace);
-}
-device_initcall(init_stack_trace);
-
-#define MAX_LONG_DIGITS 22
-
-static ssize_t
-sysprof_sample_read(struct file *filp, char __user *ubuf,
-		    size_t cnt, loff_t *ppos)
-{
-	char buf[MAX_LONG_DIGITS];
-	int r;
-
-	r = sprintf(buf, "%ld\n", nsecs_to_usecs(sample_period));
-
-	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
-}
-
-static ssize_t
-sysprof_sample_write(struct file *filp, const char __user *ubuf,
-		     size_t cnt, loff_t *ppos)
-{
-	char buf[MAX_LONG_DIGITS];
-	unsigned long val;
-
-	if (cnt > MAX_LONG_DIGITS-1)
-		cnt = MAX_LONG_DIGITS-1;
-
-	if (copy_from_user(&buf, ubuf, cnt))
-		return -EFAULT;
-
-	buf[cnt] = 0;
-
-	val = simple_strtoul(buf, NULL, 10);
-	/*
-	 * Enforce a minimum sample period of 100 usecs:
-	 */
-	if (val < 100)
-		val = 100;
-
-	mutex_lock(&sample_timer_lock);
-	stop_stack_timers();
-	sample_period = val * 1000;
-	start_stack_timers();
-	mutex_unlock(&sample_timer_lock);
-
-	return cnt;
-}
-
-static const struct file_operations sysprof_sample_fops = {
-	.read		= sysprof_sample_read,
-	.write		= sysprof_sample_write,
-};
-
-void init_tracer_sysprof_debugfs(struct dentry *d_tracer)
-{
-
-	trace_create_file("sysprof_sample_period", 0644,
-			d_tracer, NULL, &sysprof_sample_fops);
-}