author		Ingo Molnar <mingo@kernel.org>	2012-11-13 13:21:32 -0500
committer	Ingo Molnar <mingo@kernel.org>	2012-11-13 13:21:32 -0500
commit		84e53ff77cb1e005f49966cd6789109d84acc9e2
tree		4d1426dc45c8af73e78279bd737eb1dd63909f11
parent		ccf59d8da119ab03dcbdf95fb5e5adcef6ba51f2
parent		7bcfaf54f591a0775254c4ea679faf615152ee3a
Merge branch 'tip/perf/core-2' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-trace into perf/core
Pull tracing updates from Steven Rostedt.
Signed-off-by: Ingo Molnar <mingo@kernel.org>
-rw-r--r--	Documentation/kernel-parameters.txt	|  16
-rw-r--r--	include/linux/ftrace_event.h		|  14
-rw-r--r--	include/linux/kernel.h			|   7
-rw-r--r--	include/linux/ring_buffer.h		|   3
-rw-r--r--	include/trace/ftrace.h			|   3
-rw-r--r--	include/trace/syscall.h			|  23
-rw-r--r--	kernel/trace/Kconfig			|   1
-rw-r--r--	kernel/trace/ftrace.c			|   6
-rw-r--r--	kernel/trace/ring_buffer.c		|  51
-rw-r--r--	kernel/trace/trace.c			| 372
-rw-r--r--	kernel/trace/trace.h			|  14
-rw-r--r--	kernel/trace/trace_branch.c		|   4
-rw-r--r--	kernel/trace/trace_events.c		|  51
-rw-r--r--	kernel/trace/trace_events_filter.c	|   4
-rw-r--r--	kernel/trace/trace_functions.c		|   5
-rw-r--r--	kernel/trace/trace_functions_graph.c	|   6
-rw-r--r--	kernel/trace/trace_irqsoff.c		|  14
-rw-r--r--	kernel/trace/trace_kprobe.c		|  10
-rw-r--r--	kernel/trace/trace_probe.c		|  14
-rw-r--r--	kernel/trace/trace_sched_switch.c	|   4
-rw-r--r--	kernel/trace/trace_sched_wakeup.c	|  10
-rw-r--r--	kernel/trace/trace_selftest.c		|  13
-rw-r--r--	kernel/trace/trace_syscalls.c		|  61
-rw-r--r--	kernel/trace/trace_uprobe.c		|   2
24 files changed, 365 insertions(+), 343 deletions(-)
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
index 9776f068306..2b48c52464a 100644
--- a/Documentation/kernel-parameters.txt
+++ b/Documentation/kernel-parameters.txt
@@ -2859,6 +2859,22 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
 			to facilitate early boot debugging.
 			See also Documentation/trace/events.txt
 
+	trace_options=[option-list]
+			[FTRACE] Enable or disable tracer options at boot.
+			The option-list is a comma delimited list of options
+			that can be enabled or disabled just as if you were
+			to echo the option name into
+
+			    /sys/kernel/debug/tracing/trace_options
+
+			For example, to enable stacktrace option (to dump the
+			stack trace of each event), add to the command line:
+
+			      trace_options=stacktrace
+
+			See also Documentation/trace/ftrace.txt "trace options"
+			section.
+
 	transparent_hugepage=
 			[KNL]
 			Format: [always|madvise|never]
diff --git a/include/linux/ftrace_event.h b/include/linux/ftrace_event.h
index 642928cf57b..b80c8ddfbbd 100644
--- a/include/linux/ftrace_event.h
+++ b/include/linux/ftrace_event.h
@@ -127,13 +127,13 @@ trace_current_buffer_lock_reserve(struct ring_buffer **current_buffer,
 void trace_current_buffer_unlock_commit(struct ring_buffer *buffer,
 					struct ring_buffer_event *event,
 					unsigned long flags, int pc);
-void trace_nowake_buffer_unlock_commit(struct ring_buffer *buffer,
+void trace_buffer_unlock_commit(struct ring_buffer *buffer,
 					struct ring_buffer_event *event,
 					unsigned long flags, int pc);
-void trace_nowake_buffer_unlock_commit_regs(struct ring_buffer *buffer,
+void trace_buffer_unlock_commit_regs(struct ring_buffer *buffer,
 					struct ring_buffer_event *event,
 					unsigned long flags, int pc,
 					struct pt_regs *regs);
 void trace_current_buffer_discard_commit(struct ring_buffer *buffer,
 					 struct ring_buffer_event *event);
 
diff --git a/include/linux/kernel.h b/include/linux/kernel.h
index a123b13b70f..7785d5df6d8 100644
--- a/include/linux/kernel.h
+++ b/include/linux/kernel.h
@@ -527,9 +527,6 @@ __ftrace_vprintk(unsigned long ip, const char *fmt, va_list ap);
 
 extern void ftrace_dump(enum ftrace_dump_mode oops_dump_mode);
 #else
-static inline __printf(1, 2)
-int trace_printk(const char *fmt, ...);
-
 static inline void tracing_start(void) { }
 static inline void tracing_stop(void) { }
 static inline void ftrace_off_permanent(void) { }
@@ -539,8 +536,8 @@ static inline void tracing_on(void) { }
 static inline void tracing_off(void) { }
 static inline int tracing_is_on(void) { return 0; }
 
-static inline int
-trace_printk(const char *fmt, ...)
+static inline __printf(1, 2)
+int trace_printk(const char *fmt, ...)
 {
 	return 0;
 }
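The hunks above fix a real bug: with CONFIG_TRACING off, the old header declared a static inline trace_printk() with no body, so any translation unit that actually called it failed to build. The fix folds the __printf() attribute into the existing stub definition instead. A minimal sketch of what callers get from the fixed stub (the demo() function is hypothetical, not part of the patch):

	#include <linux/kernel.h>

	static int demo(void)
	{
		/*
		 * With CONFIG_TRACING off this compiles down to "return 0",
		 * but the __printf(1, 2) attribute kept on the stub still
		 * lets gcc type-check the format string against arguments.
		 */
		return trace_printk("comm %s, pid %d\n", "demo", 42);
	}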
diff --git a/include/linux/ring_buffer.h b/include/linux/ring_buffer.h
index 6c8835f74f7..519777e3fa0 100644
--- a/include/linux/ring_buffer.h
+++ b/include/linux/ring_buffer.h
@@ -159,13 +159,14 @@ int ring_buffer_record_is_on(struct ring_buffer *buffer);
 void ring_buffer_record_disable_cpu(struct ring_buffer *buffer, int cpu);
 void ring_buffer_record_enable_cpu(struct ring_buffer *buffer, int cpu);
 
-unsigned long ring_buffer_oldest_event_ts(struct ring_buffer *buffer, int cpu);
+u64 ring_buffer_oldest_event_ts(struct ring_buffer *buffer, int cpu);
 unsigned long ring_buffer_bytes_cpu(struct ring_buffer *buffer, int cpu);
 unsigned long ring_buffer_entries(struct ring_buffer *buffer);
 unsigned long ring_buffer_overruns(struct ring_buffer *buffer);
 unsigned long ring_buffer_entries_cpu(struct ring_buffer *buffer, int cpu);
 unsigned long ring_buffer_overrun_cpu(struct ring_buffer *buffer, int cpu);
 unsigned long ring_buffer_commit_overrun_cpu(struct ring_buffer *buffer, int cpu);
+unsigned long ring_buffer_dropped_events_cpu(struct ring_buffer *buffer, int cpu);
 
 u64 ring_buffer_time_stamp(struct ring_buffer *buffer, int cpu);
 void ring_buffer_normalize_time_stamp(struct ring_buffer *buffer,
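The new ring_buffer_dropped_events_cpu() accessor is per-CPU, like the other counters around it. A hedged sketch (not from the patch) of how a caller might total dropped events across CPUs; total_dropped_events() is a hypothetical helper, and "buffer" is assumed to be a valid struct ring_buffer as used in kernel/trace/trace.c:

	#include <linux/ring_buffer.h>
	#include <linux/cpumask.h>

	static unsigned long total_dropped_events(struct ring_buffer *buffer)
	{
		unsigned long total = 0;
		int cpu;

		/* each per-cpu buffer keeps its own dropped_events counter */
		for_each_online_cpu(cpu)
			total += ring_buffer_dropped_events_cpu(buffer, cpu);

		return total;
	}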
diff --git a/include/trace/ftrace.h b/include/trace/ftrace.h
index a763888a36f..698f2a89032 100644
--- a/include/trace/ftrace.h
+++ b/include/trace/ftrace.h
@@ -545,8 +545,7 @@ ftrace_raw_event_##call(void *__data, proto)			\
 	{ assign; }							\
 									\
 	if (!filter_current_check_discard(buffer, event_call, entry, event)) \
-		trace_nowake_buffer_unlock_commit(buffer,		\
-						  event, irq_flags, pc); \
+		trace_buffer_unlock_commit(buffer, event, irq_flags, pc); \
 }
 /*
  * The ftrace_test_probe is compiled out, it is only here as a build time check
diff --git a/include/trace/syscall.h b/include/trace/syscall.h
index 31966a4fb8c..84bc4197e73 100644
--- a/include/trace/syscall.h
+++ b/include/trace/syscall.h
@@ -31,27 +31,4 @@ struct syscall_metadata {
 	struct ftrace_event_call *exit_event;
 };
 
-#ifdef CONFIG_FTRACE_SYSCALLS
-extern unsigned long arch_syscall_addr(int nr);
-extern int init_syscall_trace(struct ftrace_event_call *call);
-
-extern int reg_event_syscall_enter(struct ftrace_event_call *call);
-extern void unreg_event_syscall_enter(struct ftrace_event_call *call);
-extern int reg_event_syscall_exit(struct ftrace_event_call *call);
-extern void unreg_event_syscall_exit(struct ftrace_event_call *call);
-extern int
-ftrace_format_syscall(struct ftrace_event_call *call, struct trace_seq *s);
-enum print_line_t print_syscall_enter(struct trace_iterator *iter, int flags,
-				      struct trace_event *event);
-enum print_line_t print_syscall_exit(struct trace_iterator *iter, int flags,
-				     struct trace_event *event);
-#endif
-
-#ifdef CONFIG_PERF_EVENTS
-int perf_sysenter_enable(struct ftrace_event_call *call);
-void perf_sysenter_disable(struct ftrace_event_call *call);
-int perf_sysexit_enable(struct ftrace_event_call *call);
-void perf_sysexit_disable(struct ftrace_event_call *call);
-#endif
-
 #endif /* _TRACE_SYSCALL_H */
diff --git a/kernel/trace/Kconfig b/kernel/trace/Kconfig
index 4cea4f41c1d..5d89335a485 100644
--- a/kernel/trace/Kconfig
+++ b/kernel/trace/Kconfig
@@ -119,6 +119,7 @@ config TRACING
 	select BINARY_PRINTF
 	select EVENT_TRACING
 	select TRACE_CLOCK
+	select IRQ_WORK
 
 config GENERIC_TRACER
 	bool
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index 9dcf15d3838..4451aa3a55a 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -2868,7 +2868,7 @@ static int __init ftrace_mod_cmd_init(void)
 {
 	return register_ftrace_command(&ftrace_mod_cmd);
 }
-device_initcall(ftrace_mod_cmd_init);
+core_initcall(ftrace_mod_cmd_init);
 
 static void function_trace_probe_call(unsigned long ip, unsigned long parent_ip,
 				      struct ftrace_ops *op, struct pt_regs *pt_regs)
@@ -4055,7 +4055,7 @@ static int __init ftrace_nodyn_init(void)
 	ftrace_enabled = 1;
 	return 0;
 }
-device_initcall(ftrace_nodyn_init);
+core_initcall(ftrace_nodyn_init);
 
 static inline int ftrace_init_dyn_debugfs(struct dentry *d_tracer) { return 0; }
 static inline void ftrace_startup_enable(int command) { }
@@ -4381,7 +4381,7 @@ ftrace_pid_write(struct file *filp, const char __user *ubuf,
 	if (strlen(tmp) == 0)
 		return 1;
 
-	ret = strict_strtol(tmp, 10, &val);
+	ret = kstrtol(tmp, 10, &val);
 	if (ret < 0)
 		return ret;
 
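The last hunk is part of the tree-wide move from strict_strtol() to kstrtol(), which keeps the same (string, base, result-pointer) shape but reports errors as a plain negative errno. A hedged sketch of the calling convention; parse_pid() is a hypothetical wrapper, not kernel code:

	#include <linux/kernel.h>

	static int parse_pid(const char *buf, long *pid)
	{
		int ret;

		ret = kstrtol(buf, 10, pid);	/* base 10, result via pointer */
		if (ret < 0)
			return ret;		/* e.g. -EINVAL or -ERANGE */

		return 0;
	}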
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index b979426d16c..3c7834c24e5 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -460,9 +460,10 @@ struct ring_buffer_per_cpu {
 	unsigned long			lost_events;
 	unsigned long			last_overrun;
 	local_t				entries_bytes;
-	local_t				commit_overrun;
-	local_t				overrun;
 	local_t				entries;
+	local_t				overrun;
+	local_t				commit_overrun;
+	local_t				dropped_events;
 	local_t				committing;
 	local_t				commits;
 	unsigned long			read;
@@ -1820,7 +1821,7 @@ rb_add_time_stamp(struct ring_buffer_event *event, u64 delta)
 }
 
 /**
- * ring_buffer_update_event - update event type and data
+ * rb_update_event - update event type and data
  * @event: the even to update
  * @type: the type of event
  * @length: the size of the event field in the ring buffer
@@ -2155,8 +2156,10 @@ rb_move_tail(struct ring_buffer_per_cpu *cpu_buffer,
 		 * If we are not in overwrite mode,
 		 * this is easy, just stop here.
 		 */
-		if (!(buffer->flags & RB_FL_OVERWRITE))
+		if (!(buffer->flags & RB_FL_OVERWRITE)) {
+			local_inc(&cpu_buffer->dropped_events);
 			goto out_reset;
+		}
 
 		ret = rb_handle_head_page(cpu_buffer,
 					  tail_page,
@@ -2720,8 +2723,8 @@ EXPORT_SYMBOL_GPL(ring_buffer_discard_commit);
  * and not the length of the event which would hold the header.
  */
 int ring_buffer_write(struct ring_buffer *buffer,
-			unsigned long length,
-			void *data)
+		      unsigned long length,
+		      void *data)
 {
 	struct ring_buffer_per_cpu *cpu_buffer;
 	struct ring_buffer_event *event;
@@ -2929,12 +2932,12 @@ rb_num_of_entries(struct ring_buffer_per_cpu *cpu_buffer)
  * @buffer: The ring buffer
  * @cpu: The per CPU buffer to read from.
  */
-unsigned long ring_buffer_oldest_event_ts(struct ring_buffer *buffer, int cpu)
+u64 ring_buffer_oldest_event_ts(struct ring_buffer *buffer, int cpu)
 {
 	unsigned long flags;
 	struct ring_buffer_per_cpu *cpu_buffer;
 	struct buffer_page *bpage;
-	unsigned long ret;
+	u64 ret;
 
 	if (!cpumask_test_cpu(cpu, buffer->cpumask))
 		return 0;
@@ -2995,7 +2998,8 @@ unsigned long ring_buffer_entries_cpu(struct ring_buffer *buffer, int cpu)
 EXPORT_SYMBOL_GPL(ring_buffer_entries_cpu);
 
 /**
- * ring_buffer_overrun_cpu - get the number of overruns in a cpu_buffer
+ * ring_buffer_overrun_cpu - get the number of overruns caused by the ring
+ * buffer wrapping around (only if RB_FL_OVERWRITE is on).
  * @buffer: The ring buffer
  * @cpu: The per CPU buffer to get the number of overruns from
  */
@@ -3015,7 +3019,9 @@ unsigned long ring_buffer_overrun_cpu(struct ring_buffer *buffer, int cpu)
 EXPORT_SYMBOL_GPL(ring_buffer_overrun_cpu);
 
 /**
- * ring_buffer_commit_overrun_cpu - get the number of overruns caused by commits
+ * ring_buffer_commit_overrun_cpu - get the number of overruns caused by
+ * commits failing due to the buffer wrapping around while there are uncommitted
+ * events, such as during an interrupt storm.
  * @buffer: The ring buffer
  * @cpu: The per CPU buffer to get the number of overruns from
  */
@@ -3036,6 +3042,28 @@ EXPORT_SYMBOL_GPL(ring_buffer_commit_overrun_cpu);
 EXPORT_SYMBOL_GPL(ring_buffer_commit_overrun_cpu);
 
 /**
+ * ring_buffer_dropped_events_cpu - get the number of dropped events caused by
+ * the ring buffer filling up (only if RB_FL_OVERWRITE is off).
+ * @buffer: The ring buffer
+ * @cpu: The per CPU buffer to get the number of overruns from
+ */
+unsigned long
+ring_buffer_dropped_events_cpu(struct ring_buffer *buffer, int cpu)
+{
+	struct ring_buffer_per_cpu *cpu_buffer;
+	unsigned long ret;
+
+	if (!cpumask_test_cpu(cpu, buffer->cpumask))
+		return 0;
+
+	cpu_buffer = buffer->buffers[cpu];
+	ret = local_read(&cpu_buffer->dropped_events);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(ring_buffer_dropped_events_cpu);
+
+/**
  * ring_buffer_entries - get the number of entries in a buffer
  * @buffer: The ring buffer
  *
@@ -3864,9 +3892,10 @@ rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer)
 	local_set(&cpu_buffer->reader_page->page->commit, 0);
 	cpu_buffer->reader_page->read = 0;
 
-	local_set(&cpu_buffer->commit_overrun, 0);
 	local_set(&cpu_buffer->entries_bytes, 0);
 	local_set(&cpu_buffer->overrun, 0);
+	local_set(&cpu_buffer->commit_overrun, 0);
+	local_set(&cpu_buffer->dropped_events, 0);
 	local_set(&cpu_buffer->entries, 0);
 	local_set(&cpu_buffer->committing, 0);
 	local_set(&cpu_buffer->commits, 0);
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 31e4f55773f..c1434b5ce4d 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -19,6 +19,7 @@
 #include <linux/seq_file.h>
 #include <linux/notifier.h>
 #include <linux/irqflags.h>
+#include <linux/irq_work.h>
 #include <linux/debugfs.h>
 #include <linux/pagemap.h>
 #include <linux/hardirq.h>
@@ -78,6 +79,21 @@ static int dummy_set_flag(u32 old_flags, u32 bit, int set)
 }
 
 /*
+ * To prevent the comm cache from being overwritten when no
+ * tracing is active, only save the comm when a trace event
+ * occurred.
+ */
+static DEFINE_PER_CPU(bool, trace_cmdline_save);
+
+/*
+ * When a reader is waiting for data, then this variable is
+ * set to true.
+ */
+static bool trace_wakeup_needed;
+
+static struct irq_work trace_work_wakeup;
+
+/*
  * Kill all tracing for good (never come back).
  * It is initialized to 1 but will turn to zero if the initialization
  * of the tracer is successful. But that is the only place that sets
@@ -139,6 +155,18 @@ static int __init set_ftrace_dump_on_oops(char *str)
 }
 __setup("ftrace_dump_on_oops", set_ftrace_dump_on_oops);
 
+
+static char trace_boot_options_buf[MAX_TRACER_SIZE] __initdata;
+static char *trace_boot_options __initdata;
+
+static int __init set_trace_boot_options(char *str)
+{
+	strncpy(trace_boot_options_buf, str, MAX_TRACER_SIZE);
+	trace_boot_options = trace_boot_options_buf;
+	return 0;
+}
+__setup("trace_options=", set_trace_boot_options);
+
 unsigned long long ns2usecs(cycle_t nsec)
 {
 	nsec += 500;
@@ -198,20 +226,9 @@ static struct trace_array max_tr;
 
 static DEFINE_PER_CPU(struct trace_array_cpu, max_tr_data);
 
-/* tracer_enabled is used to toggle activation of a tracer */
-static int tracer_enabled = 1;
-
-/**
- * tracing_is_enabled - return tracer_enabled status
- *
- * This function is used by other tracers to know the status
- * of the tracer_enabled flag.  Tracers may use this function
- * to know if it should enable their features when starting
- * up. See irqsoff tracer for an example (start_irqsoff_tracer).
- */
 int tracing_is_enabled(void)
 {
-	return tracer_enabled;
+	return tracing_is_on();
 }
 
 /*
@@ -333,12 +350,18 @@ unsigned long trace_flags = TRACE_ITER_PRINT_PARENT | TRACE_ITER_PRINTK |
 static int trace_stop_count;
 static DEFINE_RAW_SPINLOCK(tracing_start_lock);
 
-static void wakeup_work_handler(struct work_struct *work)
+/**
+ * trace_wake_up - wake up tasks waiting for trace input
+ *
+ * Schedules a delayed work to wake up any task that is blocked on the
+ * trace_wait queue. These is used with trace_poll for tasks polling the
+ * trace.
+ */
+static void trace_wake_up(struct irq_work *work)
 {
-	wake_up(&trace_wait);
-}
+	wake_up_all(&trace_wait);
 
-static DECLARE_DELAYED_WORK(wakeup_work, wakeup_work_handler);
+}
 
 /**
  * tracing_on - enable tracing buffers
@@ -393,22 +416,6 @@ int tracing_is_on(void)
 }
 EXPORT_SYMBOL_GPL(tracing_is_on);
 
-/**
- * trace_wake_up - wake up tasks waiting for trace input
- *
- * Schedules a delayed work to wake up any task that is blocked on the
- * trace_wait queue. These is used with trace_poll for tasks polling the
- * trace.
- */
-void trace_wake_up(void)
-{
-	const unsigned long delay = msecs_to_jiffies(2);
-
-	if (trace_flags & TRACE_ITER_BLOCK)
-		return;
-	schedule_delayed_work(&wakeup_work, delay);
-}
-
 static int __init set_buf_size(char *str)
 {
 	unsigned long buf_size;
@@ -431,7 +438,7 @@ static int __init set_tracing_thresh(char *str)
 
 	if (!str)
 		return 0;
-	ret = strict_strtoul(str, 0, &threshold);
+	ret = kstrtoul(str, 0, &threshold);
 	if (ret < 0)
 		return 0;
 	tracing_thresh = threshold * 1000;
@@ -757,6 +764,40 @@ update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)
 }
 #endif /* CONFIG_TRACER_MAX_TRACE */
 
+static void default_wait_pipe(struct trace_iterator *iter)
+{
+	DEFINE_WAIT(wait);
+
+	prepare_to_wait(&trace_wait, &wait, TASK_INTERRUPTIBLE);
+
+	/*
+	 * The events can happen in critical sections where
+	 * checking a work queue can cause deadlocks.
+	 * After adding a task to the queue, this flag is set
+	 * only to notify events to try to wake up the queue
+	 * using irq_work.
+	 *
+	 * We don't clear it even if the buffer is no longer
+	 * empty. The flag only causes the next event to run
+	 * irq_work to do the work queue wake up. The worse
+	 * that can happen if we race with !trace_empty() is that
+	 * an event will cause an irq_work to try to wake up
+	 * an empty queue.
+	 *
+	 * There's no reason to protect this flag either, as
+	 * the work queue and irq_work logic will do the necessary
+	 * synchronization for the wake ups. The only thing
+	 * that is necessary is that the wake up happens after
+	 * a task has been queued. It's OK for spurious wake ups.
+	 */
+	trace_wakeup_needed = true;
+
+	if (trace_empty(iter))
+		schedule();
+
+	finish_wait(&trace_wait, &wait);
+}
+
 /**
  * register_tracer - register a tracer with the ftrace system.
  * @type - the plugin for the tracer
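This hunk is the reader half of the irq_work scheme the merge introduces: the reader sets trace_wakeup_needed before sleeping, and a later event commit queues an irq_work whose callback performs the wake-up from a safe context. A self-contained sketch of the pattern under those assumptions (the demo_* names are illustrative only; the trace.c hunks below show the real producer side):

	#include <linux/irq_work.h>
	#include <linux/wait.h>

	static DECLARE_WAIT_QUEUE_HEAD(demo_wait);
	static bool demo_wakeup_needed;
	static struct irq_work demo_work;

	/* Runs later, from a context where waking tasks is safe */
	static void demo_wake(struct irq_work *work)
	{
		wake_up_all(&demo_wait);
	}

	/*
	 * Called from the event-producing path, possibly inside a
	 * critical section where calling wake_up() directly could
	 * deadlock; queueing the irq_work defers the wake-up.
	 */
	static void demo_event_committed(void)
	{
		if (demo_wakeup_needed) {
			demo_wakeup_needed = false;
			irq_work_queue(&demo_work);
		}
	}

	static int __init demo_init(void)
	{
		init_irq_work(&demo_work, demo_wake);
		return 0;
	}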
@@ -875,32 +916,6 @@ int register_tracer(struct tracer *type)
 	return ret;
 }
 
-void unregister_tracer(struct tracer *type)
-{
-	struct tracer **t;
-
-	mutex_lock(&trace_types_lock);
-	for (t = &trace_types; *t; t = &(*t)->next) {
-		if (*t == type)
-			goto found;
-	}
-	pr_info("Tracer %s not registered\n", type->name);
-	goto out;
-
- found:
-	*t = (*t)->next;
-
-	if (type == current_trace && tracer_enabled) {
-		tracer_enabled = 0;
-		tracing_stop();
-		if (current_trace->stop)
-			current_trace->stop(&global_trace);
-		current_trace = &nop_trace;
-	}
- out:
-	mutex_unlock(&trace_types_lock);
-}
-
 void tracing_reset(struct trace_array *tr, int cpu)
 {
 	struct ring_buffer *buffer = tr->buffer;
@@ -1131,10 +1146,14 @@ void trace_find_cmdline(int pid, char comm[])
 
 void tracing_record_cmdline(struct task_struct *tsk)
 {
-	if (atomic_read(&trace_record_cmdline_disabled) || !tracer_enabled ||
-	    !tracing_is_on())
+	if (atomic_read(&trace_record_cmdline_disabled) || !tracing_is_on())
+		return;
+
+	if (!__this_cpu_read(trace_cmdline_save))
 		return;
 
+	__this_cpu_write(trace_cmdline_save, false);
+
 	trace_save_cmdline(tsk);
 }
 
@@ -1178,27 +1197,36 @@ trace_buffer_lock_reserve(struct ring_buffer *buffer,
 	return event;
 }
 
+void
+__buffer_unlock_commit(struct ring_buffer *buffer, struct ring_buffer_event *event)
+{
+	__this_cpu_write(trace_cmdline_save, true);
+	if (trace_wakeup_needed) {
+		trace_wakeup_needed = false;
+		/* irq_work_queue() supplies it's own memory barriers */
+		irq_work_queue(&trace_work_wakeup);
+	}
+	ring_buffer_unlock_commit(buffer, event);
+}
+
 static inline void
 __trace_buffer_unlock_commit(struct ring_buffer *buffer,
 			     struct ring_buffer_event *event,
-			     unsigned long flags, int pc,
-			     int wake)
+			     unsigned long flags, int pc)
 {
-	ring_buffer_unlock_commit(buffer, event);
+	__buffer_unlock_commit(buffer, event);
 
 	ftrace_trace_stack(buffer, flags, 6, pc);
 	ftrace_trace_userstack(buffer, flags, pc);
-
-	if (wake)
-		trace_wake_up();
 }
 
 void trace_buffer_unlock_commit(struct ring_buffer *buffer,
 				struct ring_buffer_event *event,
 				unsigned long flags, int pc)
 {
-	__trace_buffer_unlock_commit(buffer, event, flags, pc, 1);
+	__trace_buffer_unlock_commit(buffer, event, flags, pc);
 }
+EXPORT_SYMBOL_GPL(trace_buffer_unlock_commit);
 
 struct ring_buffer_event *
 trace_current_buffer_lock_reserve(struct ring_buffer **current_rb,
@@ -1215,29 +1243,21 @@ void trace_current_buffer_unlock_commit(struct ring_buffer *buffer,
 					struct ring_buffer_event *event,
 					unsigned long flags, int pc)
 {
-	__trace_buffer_unlock_commit(buffer, event, flags, pc, 1);
+	__trace_buffer_unlock_commit(buffer, event, flags, pc);
 }
 EXPORT_SYMBOL_GPL(trace_current_buffer_unlock_commit);
 
-void trace_nowake_buffer_unlock_commit(struct ring_buffer *buffer,
-				       struct ring_buffer_event *event,
-				       unsigned long flags, int pc)
-{
-	__trace_buffer_unlock_commit(buffer, event, flags, pc, 0);
-}
-EXPORT_SYMBOL_GPL(trace_nowake_buffer_unlock_commit);
-
-void trace_nowake_buffer_unlock_commit_regs(struct ring_buffer *buffer,
-					    struct ring_buffer_event *event,
-					    unsigned long flags, int pc,
-					    struct pt_regs *regs)
+void trace_buffer_unlock_commit_regs(struct ring_buffer *buffer,
+				     struct ring_buffer_event *event,
+				     unsigned long flags, int pc,
+				     struct pt_regs *regs)
 {
-	ring_buffer_unlock_commit(buffer, event);
+	__buffer_unlock_commit(buffer, event);
 
 	ftrace_trace_stack_regs(buffer, flags, 0, pc, regs);
 	ftrace_trace_userstack(buffer, flags, pc);
 }
-EXPORT_SYMBOL_GPL(trace_nowake_buffer_unlock_commit_regs);
+EXPORT_SYMBOL_GPL(trace_buffer_unlock_commit_regs);
 
 void trace_current_buffer_discard_commit(struct ring_buffer *buffer,
 					 struct ring_buffer_event *event)
@@ -1269,7 +1289,7 @@ trace_function(struct trace_array *tr,
 	entry->parent_ip		= parent_ip;
 
 	if (!filter_check_discard(call, entry, buffer, event))
-		ring_buffer_unlock_commit(buffer, event);
+		__buffer_unlock_commit(buffer, event);
 }
 
 void
@@ -1362,7 +1382,7 @@ static void __ftrace_trace_stack(struct ring_buffer *buffer,
 	entry->size = trace.nr_entries;
 
 	if (!filter_check_discard(call, entry, buffer, event))
-		ring_buffer_unlock_commit(buffer, event);
+		__buffer_unlock_commit(buffer, event);
 
  out:
 	/* Again, don't let gcc optimize things here */
@@ -1458,7 +1478,7 @@ ftrace_trace_userstack(struct ring_buffer *buffer, unsigned long flags, int pc)
 
 	save_stack_trace_user(&trace);
 	if (!filter_check_discard(call, entry, buffer, event))
-		ring_buffer_unlock_commit(buffer, event);
+		__buffer_unlock_commit(buffer, event);
 
  out_drop_count:
 	__this_cpu_dec(user_stack_count);
@@ -1559,10 +1579,10 @@ static int alloc_percpu_trace_buffer(void)
 	return -ENOMEM;
 }
 
+static int buffers_allocated;
+
 void trace_printk_init_buffers(void)
 {
-	static int buffers_allocated;
-
 	if (buffers_allocated)
 		return;
 
@@ -1571,7 +1591,38 @@ void trace_printk_init_buffers(void)
 
 	pr_info("ftrace: Allocated trace_printk buffers\n");
 
+	/* Expand the buffers to set size */
+	tracing_update_buffers();
+
 	buffers_allocated = 1;
+
+	/*
+	 * trace_printk_init_buffers() can be called by modules.
+	 * If that happens, then we need to start cmdline recording
+	 * directly here. If the global_trace.buffer is already
+	 * allocated here, then this was called by module code.
+	 */
+	if (global_trace.buffer)
+		tracing_start_cmdline_record();
+}
+
+void trace_printk_start_comm(void)
+{
+	/* Start tracing comms if trace printk is set */
+	if (!buffers_allocated)
+		return;
+	tracing_start_cmdline_record();
+}
+
+static void trace_printk_start_stop_comm(int enabled)
+{
+	if (!buffers_allocated)
+		return;
+
+	if (enabled)
+		tracing_start_cmdline_record();
+	else
+		tracing_stop_cmdline_record();
 }
 
 /**
@@ -1622,7 +1673,7 @@ int trace_vbprintk(unsigned long ip, const char *fmt, va_list args)
 
 	memcpy(entry->buf, tbuffer, sizeof(u32) * len);
 	if (!filter_check_discard(call, entry, buffer, event)) {
-		ring_buffer_unlock_commit(buffer, event);
+		__buffer_unlock_commit(buffer, event);
 		ftrace_trace_stack(buffer, flags, 6, pc);
 	}
 
@@ -1693,7 +1744,7 @@ int trace_array_vprintk(struct trace_array *tr,
 	memcpy(&entry->buf, tbuffer, len);
 	entry->buf[len] = '\0';
 	if (!filter_check_discard(call, entry, buffer, event)) {
-		ring_buffer_unlock_commit(buffer, event);
+		__buffer_unlock_commit(buffer, event);
 		ftrace_trace_stack(buffer, flags, 6, pc);
 	}
  out:
@@ -2794,26 +2845,19 @@ static void set_tracer_flags(unsigned int mask, int enabled)
 
 	if (mask == TRACE_ITER_OVERWRITE)
 		ring_buffer_change_overwrite(global_trace.buffer, enabled);
+
+	if (mask == TRACE_ITER_PRINTK)
+		trace_printk_start_stop_comm(enabled);
 }
 
-static ssize_t
-tracing_trace_options_write(struct file *filp, const char __user *ubuf,
-			size_t cnt, loff_t *ppos)
+static int trace_set_options(char *option)
 {
-	char buf[64];
 	char *cmp;
 	int neg = 0;
-	int ret;
+	int ret = 0;
 	int i;
 
-	if (cnt >= sizeof(buf))
-		return -EINVAL;
-
-	if (copy_from_user(&buf, ubuf, cnt))
-		return -EFAULT;
-
-	buf[cnt] = 0;
-	cmp = strstrip(buf);
+	cmp = strstrip(option);
 
 	if (strncmp(cmp, "no", 2) == 0) {
 		neg = 1;
@@ -2832,10 +2876,25 @@ tracing_trace_options_write(struct file *filp, const char __user *ubuf,
 		mutex_lock(&trace_types_lock);
 		ret = set_tracer_option(current_trace, cmp, neg);
 		mutex_unlock(&trace_types_lock);
-		if (ret)
-			return ret;
 	}
 
+	return ret;
+}
+
+static ssize_t
+tracing_trace_options_write(struct file *filp, const char __user *ubuf,
+			size_t cnt, loff_t *ppos)
+{
+	char buf[64];
+
+	if (cnt >= sizeof(buf))
+		return -EINVAL;
+
+	if (copy_from_user(&buf, ubuf, cnt))
+		return -EFAULT;
+
+	trace_set_options(buf);
+
 	*ppos += cnt;
 
 	return cnt;
@@ -2940,56 +2999,6 @@ static const struct file_operations tracing_saved_cmdlines_fops = {
 };
 
 static ssize_t
-tracing_ctrl_read(struct file *filp, char __user *ubuf,
-		  size_t cnt, loff_t *ppos)
-{
-	char buf[64];
-	int r;
-
-	r = sprintf(buf, "%u\n", tracer_enabled);
-	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
-}
-
-static ssize_t
-tracing_ctrl_write(struct file *filp, const char __user *ubuf,
-		   size_t cnt, loff_t *ppos)
-{
-	struct trace_array *tr = filp->private_data;
-	unsigned long val;
-	int ret;
-
-	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
-	if (ret)
-		return ret;
-
-	val = !!val;
-
-	mutex_lock(&trace_types_lock);
-	if (tracer_enabled ^ val) {
-
-		/* Only need to warn if this is used to change the state */
-		WARN_ONCE(1, "tracing_enabled is deprecated. Use tracing_on");
-
-		if (val) {
-			tracer_enabled = 1;
-			if (current_trace->start)
-				current_trace->start(tr);
-			tracing_start();
-		} else {
-			tracer_enabled = 0;
-			tracing_stop();
-			if (current_trace->stop)
-				current_trace->stop(tr);
-		}
-	}
-	mutex_unlock(&trace_types_lock);
-
-	*ppos += cnt;
-
-	return cnt;
-}
-
-static ssize_t
 tracing_set_trace_read(struct file *filp, char __user *ubuf,
 		       size_t cnt, loff_t *ppos)
 {
@@ -3030,6 +3039,10 @@ static int __tracing_resize_ring_buffer(unsigned long size, int cpu)
 	 */
 	ring_buffer_expanded = 1;
 
+	/* May be called before buffers are initialized */
+	if (!global_trace.buffer)
+		return 0;
+
 	ret = ring_buffer_resize(global_trace.buffer, size, cpu);
 	if (ret < 0)
 		return ret;
@@ -3385,19 +3398,6 @@ tracing_poll_pipe(struct file *filp, poll_table *poll_table)
 	}
 }
 
-
-void default_wait_pipe(struct trace_iterator *iter)
-{
-	DEFINE_WAIT(wait);
-
-	prepare_to_wait(&trace_wait, &wait, TASK_INTERRUPTIBLE);
-
-	if (trace_empty(iter))
-		schedule();
-
-	finish_wait(&trace_wait, &wait);
-}
-
 /*
  * This is a make-shift waitqueue.
  * A tracer might use this callback on some rare cases:
@@ -3438,7 +3438,7 @@ static int tracing_wait_pipe(struct file *filp)
 			return -EINTR;
 
 		/*
-		 * We block until we read something and tracing is disabled.
+		 * We block until we read something and tracing is enabled.
 		 * We still block if tracing is disabled, but we have never
 		 * read anything. This allows a user to cat this file, and
 		 * then enable tracing. But after we have read something,
@@ -3446,7 +3446,7 @@
 		 *
 		 * iter->pos will be 0 if we haven't read anything.
 		 */
-		if (!tracer_enabled && iter->pos)
+		if (tracing_is_enabled() && iter->pos)
 			break;
 	}
 
@@ -3955,7 +3955,7 @@ tracing_mark_write(struct file *filp, const char __user *ubuf,
 	} else
 		entry->buf[cnt] = '\0';
 
-	ring_buffer_unlock_commit(buffer, event);
+	__buffer_unlock_commit(buffer, event);
 
 	written = cnt;
 
@@ -4016,6 +4016,14 @@ static ssize_t tracing_clock_write(struct file *filp, const char __user *ubuf,
 	if (max_tr.buffer)
 		ring_buffer_set_clock(max_tr.buffer, trace_clocks[i].func);
 
+	/*
+	 * New clock may not be consistent with the previous clock.
+	 * Reset the buffer so that it doesn't have incomparable timestamps.
+	 */
+	tracing_reset_online_cpus(&global_trace);
+	if (max_tr.buffer)
+		tracing_reset_online_cpus(&max_tr);
+
 	mutex_unlock(&trace_types_lock);
 
 	*fpos += cnt;
@@ -4037,13 +4045,6 @@
 	.llseek		= generic_file_llseek,
 };
 
-static const struct file_operations tracing_ctrl_fops = {
-	.open		= tracing_open_generic,
-	.read		= tracing_ctrl_read,
-	.write		= tracing_ctrl_write,
-	.llseek		= generic_file_llseek,
-};
-
 static const struct file_operations set_tracer_fops = {
 	.open		= tracing_open_generic,
 	.read		= tracing_set_trace_read,
@@ -4385,6 +4386,9 @@ tracing_stats_read(struct file *filp, char __user *ubuf,
 	usec_rem = do_div(t, USEC_PER_SEC);
 	trace_seq_printf(s, "now ts: %5llu.%06lu\n", t, usec_rem);
 
+	cnt = ring_buffer_dropped_events_cpu(tr->buffer, cpu);
+	trace_seq_printf(s, "dropped events: %ld\n", cnt);
+
 	count = simple_read_from_buffer(ubuf, count, ppos, s->buffer, s->len);
 
 	kfree(s);
@@ -4815,9 +4819,6 @@ static __init int tracer_init_debugfs(void)
 
 	d_tracer = tracing_init_dentry();
 
-	trace_create_file("tracing_enabled", 0644, d_tracer,
-			&global_trace, &tracing_ctrl_fops);
-
 	trace_create_file("trace_options", 0644, d_tracer,
 			NULL, &tracing_iter_fops);
 
@@ -5089,6 +5090,7 @@ __init static int tracer_alloc_buffers(void)
 
 	/* Only allocate trace_printk buffers if a trace_printk exists */
 	if (__stop___trace_bprintk_fmt != __start___trace_bprintk_fmt)
+		/* Must be called before global_trace.buffer is allocated */
 		trace_printk_init_buffers();
 
 	/* To save memory, keep the ring buffer size to its minimum */
@@ -5136,6 +5138,7 @@
 #endif
 
 	trace_init_cmdlines();
+	init_irq_work(&trace_work_wakeup, trace_wake_up);
 
 	register_tracer(&nop_trace);
 	current_trace = &nop_trace;
@@ -5147,6 +5150,13 @@
 
 	register_die_notifier(&trace_die_notifier);
 
+	while (trace_boot_options) {
+		char *option;
+
+		option = strsep(&trace_boot_options, ",");
+		trace_set_options(option);
+	}
+
 	return 0;
 
 out_free_cpumask:
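The loop added at the end of tracer_alloc_buffers() consumes the comma-separated trace_options= string that set_trace_boot_options() recorded earlier in the file. strsep() tokenizes destructively: it overwrites each ',' with '\0' and advances the cursor, so the __initdata buffer is parsed exactly once. A hedged sketch of the same idiom (apply_boot_options() is a hypothetical wrapper; trace_set_options() is the static helper from the hunks above):

	#include <linux/string.h>

	static void __init apply_boot_options(char *opts)
	{
		char *option;

		while (opts) {
			option = strsep(&opts, ",");	/* next token, or the rest */
			trace_set_options(option);	/* same path as the file write */
		}
	}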
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
index c15f528c1af..55010ed175f 100644
--- a/kernel/trace/trace.h
+++ b/kernel/trace/trace.h
@@ -285,8 +285,8 @@ struct tracer {
 	int			(*set_flag)(u32 old_flags, u32 bit, int set);
 	struct tracer		*next;
 	struct tracer_flags	*flags;
-	int			print_max;
-	int			use_max_tr;
+	bool			print_max;
+	bool			use_max_tr;
 };
 
 
@@ -327,7 +327,6 @@ trace_buffer_iter(struct trace_iterator *iter, int cpu)
 
 int tracer_init(struct tracer *t, struct trace_array *tr);
 int tracing_is_enabled(void);
-void trace_wake_up(void);
 void tracing_reset(struct trace_array *tr, int cpu);
 void tracing_reset_online_cpus(struct trace_array *tr);
 void tracing_reset_current(int cpu);
@@ -349,9 +348,6 @@ trace_buffer_lock_reserve(struct ring_buffer *buffer,
 			  unsigned long len,
 			  unsigned long flags,
 			  int pc);
-void trace_buffer_unlock_commit(struct ring_buffer *buffer,
-				struct ring_buffer_event *event,
-				unsigned long flags, int pc);
 
 struct trace_entry *tracing_get_trace_entry(struct trace_array *tr,
 					    struct trace_array_cpu *data);
@@ -359,6 +355,9 @@ struct trace_entry *tracing_get_trace_entry(struct trace_array *tr,
 struct trace_entry *trace_find_next_entry(struct trace_iterator *iter,
 					  int *ent_cpu, u64 *ent_ts);
 
+void __buffer_unlock_commit(struct ring_buffer *buffer,
+			    struct ring_buffer_event *event);
+
 int trace_empty(struct trace_iterator *iter);
 
 void *trace_find_next_entry_inc(struct trace_iterator *iter);
@@ -367,7 +366,6 @@ void trace_init_global_iter(struct trace_iterator *iter);
 
 void tracing_iter_reset(struct trace_iterator *iter, int cpu);
 
-void default_wait_pipe(struct trace_iterator *iter);
 void poll_wait_pipe(struct trace_iterator *iter);
 
 void ftrace(struct trace_array *tr,
@@ -407,7 +405,6 @@ void tracing_sched_switch_assign_trace(struct trace_array *tr);
 void tracing_stop_sched_switch_record(void);
 void tracing_start_sched_switch_record(void);
 int register_tracer(struct tracer *type);
-void unregister_tracer(struct tracer *type);
 int is_tracing_stopped(void);
 enum trace_file_type {
 	TRACE_FILE_LAT_FMT	= 1,
@@ -841,6 +838,7 @@ extern const char *__start___trace_bprintk_fmt[];
 extern const char *__stop___trace_bprintk_fmt[];
 
 void trace_printk_init_buffers(void);
+void trace_printk_start_comm(void);
 
 #undef FTRACE_ENTRY
 #define FTRACE_ENTRY(call, struct_name, id, tstruct, print, filter)	\
diff --git a/kernel/trace/trace_branch.c b/kernel/trace/trace_branch.c
index 8d3538b4ea5..95e96842ed2 100644
--- a/kernel/trace/trace_branch.c
+++ b/kernel/trace/trace_branch.c
@@ -77,7 +77,7 @@ probe_likely_condition(struct ftrace_branch_data *f, int val, int expect)
 	entry->correct = val == expect;
 
 	if (!filter_check_discard(call, entry, buffer, event))
-		ring_buffer_unlock_commit(buffer, event);
+		__buffer_unlock_commit(buffer, event);
 
  out:
 	atomic_dec(&tr->data[cpu]->disabled);
@@ -199,7 +199,7 @@ __init static int init_branch_tracer(void)
 	}
 	return register_tracer(&branch_trace);
 }
-device_initcall(init_branch_tracer);
+core_initcall(init_branch_tracer);
 
 #else
 static inline
diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
index d608d09d08c..880073d0b94 100644
--- a/kernel/trace/trace_events.c
+++ b/kernel/trace/trace_events.c
@@ -491,19 +491,6 @@ static void t_stop(struct seq_file *m, void *p)
 	mutex_unlock(&event_mutex);
 }
 
-static int
-ftrace_event_seq_open(struct inode *inode, struct file *file)
-{
-	const struct seq_operations *seq_ops;
-
-	if ((file->f_mode & FMODE_WRITE) &&
-	    (file->f_flags & O_TRUNC))
-		ftrace_clear_events();
-
-	seq_ops = inode->i_private;
-	return seq_open(file, seq_ops);
-}
-
 static ssize_t
 event_enable_read(struct file *filp, char __user *ubuf, size_t cnt,
 		  loff_t *ppos)
@@ -980,6 +967,9 @@ show_header(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
 	return r;
 }
 
+static int ftrace_event_avail_open(struct inode *inode, struct file *file);
+static int ftrace_event_set_open(struct inode *inode, struct file *file);
+
 static const struct seq_operations show_event_seq_ops = {
 	.start = t_start,
 	.next = t_next,
@@ -995,14 +985,14 @@ static const struct seq_operations show_set_event_seq_ops = {
 };
 
 static const struct file_operations ftrace_avail_fops = {
-	.open = ftrace_event_seq_open,
+	.open = ftrace_event_avail_open,
 	.read = seq_read,
 	.llseek = seq_lseek,
 	.release = seq_release,
 };
 
 static const struct file_operations ftrace_set_event_fops = {
-	.open = ftrace_event_seq_open,
+	.open = ftrace_event_set_open,
 	.read = seq_read,
 	.write = ftrace_event_write,
 	.llseek = seq_lseek,
@@ -1078,6 +1068,26 @@ static struct dentry *event_trace_events_dir(void) | |||
1078 | return d_events; | 1068 | return d_events; |
1079 | } | 1069 | } |
1080 | 1070 | ||
1071 | static int | ||
1072 | ftrace_event_avail_open(struct inode *inode, struct file *file) | ||
1073 | { | ||
1074 | const struct seq_operations *seq_ops = &show_event_seq_ops; | ||
1075 | |||
1076 | return seq_open(file, seq_ops); | ||
1077 | } | ||
1078 | |||
1079 | static int | ||
1080 | ftrace_event_set_open(struct inode *inode, struct file *file) | ||
1081 | { | ||
1082 | const struct seq_operations *seq_ops = &show_set_event_seq_ops; | ||
1083 | |||
1084 | if ((file->f_mode & FMODE_WRITE) && | ||
1085 | (file->f_flags & O_TRUNC)) | ||
1086 | ftrace_clear_events(); | ||
1087 | |||
1088 | return seq_open(file, seq_ops); | ||
1089 | } | ||
1090 | |||
1081 | static struct dentry * | 1091 | static struct dentry * |
1082 | event_subsystem_dir(const char *name, struct dentry *d_events) | 1092 | event_subsystem_dir(const char *name, struct dentry *d_events) |
1083 | { | 1093 | { |
@@ -1489,6 +1499,9 @@ static __init int event_trace_enable(void) | |||
1489 | if (ret) | 1499 | if (ret) |
1490 | pr_warn("Failed to enable trace event: %s\n", token); | 1500 | pr_warn("Failed to enable trace event: %s\n", token); |
1491 | } | 1501 | } |
1502 | |||
1503 | trace_printk_start_comm(); | ||
1504 | |||
1492 | return 0; | 1505 | return 0; |
1493 | } | 1506 | } |
1494 | 1507 | ||
@@ -1505,15 +1518,13 @@ static __init int event_trace_init(void) | |||
1505 | return 0; | 1518 | return 0; |
1506 | 1519 | ||
1507 | entry = debugfs_create_file("available_events", 0444, d_tracer, | 1520 | entry = debugfs_create_file("available_events", 0444, d_tracer, |
1508 | (void *)&show_event_seq_ops, | 1521 | NULL, &ftrace_avail_fops); |
1509 | &ftrace_avail_fops); | ||
1510 | if (!entry) | 1522 | if (!entry) |
1511 | pr_warning("Could not create debugfs " | 1523 | pr_warning("Could not create debugfs " |
1512 | "'available_events' entry\n"); | 1524 | "'available_events' entry\n"); |
1513 | 1525 | ||
1514 | entry = debugfs_create_file("set_event", 0644, d_tracer, | 1526 | entry = debugfs_create_file("set_event", 0644, d_tracer, |
1515 | (void *)&show_set_event_seq_ops, | 1527 | NULL, &ftrace_set_event_fops); |
1516 | &ftrace_set_event_fops); | ||
1517 | if (!entry) | 1528 | if (!entry) |
1518 | pr_warning("Could not create debugfs " | 1529 | pr_warning("Could not create debugfs " |
1519 | "'set_event' entry\n"); | 1530 | "'set_event' entry\n"); |
@@ -1749,7 +1760,7 @@ function_test_events_call(unsigned long ip, unsigned long parent_ip, | |||
1749 | entry->ip = ip; | 1760 | entry->ip = ip; |
1750 | entry->parent_ip = parent_ip; | 1761 | entry->parent_ip = parent_ip; |
1751 | 1762 | ||
1752 | trace_nowake_buffer_unlock_commit(buffer, event, flags, pc); | 1763 | trace_buffer_unlock_commit(buffer, event, flags, pc); |
1753 | 1764 | ||
1754 | out: | 1765 | out: |
1755 | atomic_dec(&per_cpu(ftrace_test_event_disable, cpu)); | 1766 | atomic_dec(&per_cpu(ftrace_test_event_disable, cpu)); |
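
The reshuffle above fixes a subtle bug: available_events and set_event shared ftrace_event_seq_open(), which cleared the list of enabled events whenever the file was opened for writing with O_TRUNC. That behavior is only correct for set_event; splitting the open functions confines the clearing there, and since each open function now names its seq_operations directly, the debugfs files no longer need to smuggle the seq_ops pointer through inode->i_private (hence the NULL data arguments in the debugfs_create_file() calls above). Under the old code, even this plausible root-only sequence would have wiped the enabled events (a user-space illustration, not from the patch):

    #include <fcntl.h>
    #include <unistd.h>

    int main(void)
    {
            /* truncating open of the read-only file; root can do this,
             * and before the fix it called ftrace_clear_events() */
            int fd = open("/sys/kernel/debug/tracing/available_events",
                          O_WRONLY | O_TRUNC);
            if (fd >= 0)
                    close(fd);
            return 0;
    }
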
diff --git a/kernel/trace/trace_events_filter.c b/kernel/trace/trace_events_filter.c index c154797a7ff..e5b0ca8b8d4 100644 --- a/kernel/trace/trace_events_filter.c +++ b/kernel/trace/trace_events_filter.c | |||
@@ -1000,9 +1000,9 @@ static int init_pred(struct filter_parse_state *ps, | |||
1000 | } | 1000 | } |
1001 | } else { | 1001 | } else { |
1002 | if (field->is_signed) | 1002 | if (field->is_signed) |
1003 | ret = strict_strtoll(pred->regex.pattern, 0, &val); | 1003 | ret = kstrtoll(pred->regex.pattern, 0, &val); |
1004 | else | 1004 | else |
1005 | ret = strict_strtoull(pred->regex.pattern, 0, &val); | 1005 | ret = kstrtoull(pred->regex.pattern, 0, &val); |
1006 | if (ret) { | 1006 | if (ret) { |
1007 | parse_error(ps, FILT_ERR_ILLEGAL_INTVAL, 0); | 1007 | parse_error(ps, FILT_ERR_ILLEGAL_INTVAL, 0); |
1008 | return -EINVAL; | 1008 | return -EINVAL; |
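
strict_strto*() was being phased out of the kernel around this time in favor of the kstrto*() family; the behavior relied on here is unchanged: the whole string must parse (one trailing newline is tolerated), base 0 auto-detects 0x/0 prefixes, and a nonzero return value signals failure. A minimal illustration (values made up):

    long long sval;
    unsigned long long uval;
    int err;

    err = kstrtoll("-42", 0, &sval);     /* 0; sval == -42         */
    err = kstrtoull("0x2a\n", 0, &uval); /* 0; uval == 42          */
    err = kstrtoull("42abc", 0, &uval);  /* -EINVAL: trailing junk */
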
diff --git a/kernel/trace/trace_functions.c b/kernel/trace/trace_functions.c index 507a7a9630b..bb227e380cb 100644 --- a/kernel/trace/trace_functions.c +++ b/kernel/trace/trace_functions.c | |||
@@ -366,7 +366,7 @@ ftrace_trace_onoff_callback(struct ftrace_hash *hash, | |||
366 | * We use the callback data field (which is a pointer) | 366 | * We use the callback data field (which is a pointer) |
367 | * as our counter. | 367 | * as our counter. |
368 | */ | 368 | */ |
369 | ret = strict_strtoul(number, 0, (unsigned long *)&count); | 369 | ret = kstrtoul(number, 0, (unsigned long *)&count); |
370 | if (ret) | 370 | if (ret) |
371 | return ret; | 371 | return ret; |
372 | 372 | ||
@@ -411,5 +411,4 @@ static __init int init_function_trace(void) | |||
411 | init_func_cmd_traceon(); | 411 | init_func_cmd_traceon(); |
412 | return register_tracer(&function_trace); | 412 | return register_tracer(&function_trace); |
413 | } | 413 | } |
414 | device_initcall(init_function_trace); | 414 | core_initcall(init_function_trace); |
415 | |||
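
The initcall promotion recurs across every tracer in this series. Initcalls run in numbered levels, and per include/linux/init.h (abridged; the exact macro plumbing varies by kernel version) core_initcall() is level 1 while device_initcall() is level 6:

    #define core_initcall(fn)        __define_initcall(fn, 1)
    #define postcore_initcall(fn)    __define_initcall(fn, 2)
    #define arch_initcall(fn)        __define_initcall(fn, 3)
    #define subsys_initcall(fn)      __define_initcall(fn, 4)
    #define fs_initcall(fn)          __define_initcall(fn, 5)
    #define device_initcall(fn)      __define_initcall(fn, 6)
    #define late_initcall(fn)        __define_initcall(fn, 7)

Registering tracers at core_initcall() makes them available much earlier in boot, which fits the trace_options= command-line support added in this pull.
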
diff --git a/kernel/trace/trace_functions_graph.c b/kernel/trace/trace_functions_graph.c index 99b4378393d..4edb4b74eb7 100644 --- a/kernel/trace/trace_functions_graph.c +++ b/kernel/trace/trace_functions_graph.c | |||
@@ -223,7 +223,7 @@ int __trace_graph_entry(struct trace_array *tr, | |||
223 | entry = ring_buffer_event_data(event); | 223 | entry = ring_buffer_event_data(event); |
224 | entry->graph_ent = *trace; | 224 | entry->graph_ent = *trace; |
225 | if (!filter_current_check_discard(buffer, call, entry, event)) | 225 | if (!filter_current_check_discard(buffer, call, entry, event)) |
226 | ring_buffer_unlock_commit(buffer, event); | 226 | __buffer_unlock_commit(buffer, event); |
227 | 227 | ||
228 | return 1; | 228 | return 1; |
229 | } | 229 | } |
@@ -327,7 +327,7 @@ void __trace_graph_return(struct trace_array *tr, | |||
327 | entry = ring_buffer_event_data(event); | 327 | entry = ring_buffer_event_data(event); |
328 | entry->ret = *trace; | 328 | entry->ret = *trace; |
329 | if (!filter_current_check_discard(buffer, call, entry, event)) | 329 | if (!filter_current_check_discard(buffer, call, entry, event)) |
330 | ring_buffer_unlock_commit(buffer, event); | 330 | __buffer_unlock_commit(buffer, event); |
331 | } | 331 | } |
332 | 332 | ||
333 | void trace_graph_return(struct ftrace_graph_ret *trace) | 333 | void trace_graph_return(struct ftrace_graph_ret *trace) |
@@ -1474,4 +1474,4 @@ static __init int init_graph_trace(void) | |||
1474 | return register_tracer(&graph_trace); | 1474 | return register_tracer(&graph_trace); |
1475 | } | 1475 | } |
1476 | 1476 | ||
1477 | device_initcall(init_graph_trace); | 1477 | core_initcall(init_graph_trace); |
diff --git a/kernel/trace/trace_irqsoff.c b/kernel/trace/trace_irqsoff.c index d98ee8283b2..5ffce7b0f33 100644 --- a/kernel/trace/trace_irqsoff.c +++ b/kernel/trace/trace_irqsoff.c | |||
@@ -604,7 +604,7 @@ static struct tracer irqsoff_tracer __read_mostly = | |||
604 | .reset = irqsoff_tracer_reset, | 604 | .reset = irqsoff_tracer_reset, |
605 | .start = irqsoff_tracer_start, | 605 | .start = irqsoff_tracer_start, |
606 | .stop = irqsoff_tracer_stop, | 606 | .stop = irqsoff_tracer_stop, |
607 | .print_max = 1, | 607 | .print_max = true, |
608 | .print_header = irqsoff_print_header, | 608 | .print_header = irqsoff_print_header, |
609 | .print_line = irqsoff_print_line, | 609 | .print_line = irqsoff_print_line, |
610 | .flags = &tracer_flags, | 610 | .flags = &tracer_flags, |
@@ -614,7 +614,7 @@ static struct tracer irqsoff_tracer __read_mostly = | |||
614 | #endif | 614 | #endif |
615 | .open = irqsoff_trace_open, | 615 | .open = irqsoff_trace_open, |
616 | .close = irqsoff_trace_close, | 616 | .close = irqsoff_trace_close, |
617 | .use_max_tr = 1, | 617 | .use_max_tr = true, |
618 | }; | 618 | }; |
619 | # define register_irqsoff(trace) register_tracer(&trace) | 619 | # define register_irqsoff(trace) register_tracer(&trace) |
620 | #else | 620 | #else |
@@ -637,7 +637,7 @@ static struct tracer preemptoff_tracer __read_mostly = | |||
637 | .reset = irqsoff_tracer_reset, | 637 | .reset = irqsoff_tracer_reset, |
638 | .start = irqsoff_tracer_start, | 638 | .start = irqsoff_tracer_start, |
639 | .stop = irqsoff_tracer_stop, | 639 | .stop = irqsoff_tracer_stop, |
640 | .print_max = 1, | 640 | .print_max = true, |
641 | .print_header = irqsoff_print_header, | 641 | .print_header = irqsoff_print_header, |
642 | .print_line = irqsoff_print_line, | 642 | .print_line = irqsoff_print_line, |
643 | .flags = &tracer_flags, | 643 | .flags = &tracer_flags, |
@@ -647,7 +647,7 @@ static struct tracer preemptoff_tracer __read_mostly = | |||
647 | #endif | 647 | #endif |
648 | .open = irqsoff_trace_open, | 648 | .open = irqsoff_trace_open, |
649 | .close = irqsoff_trace_close, | 649 | .close = irqsoff_trace_close, |
650 | .use_max_tr = 1, | 650 | .use_max_tr = true, |
651 | }; | 651 | }; |
652 | # define register_preemptoff(trace) register_tracer(&trace) | 652 | # define register_preemptoff(trace) register_tracer(&trace) |
653 | #else | 653 | #else |
@@ -672,7 +672,7 @@ static struct tracer preemptirqsoff_tracer __read_mostly = | |||
672 | .reset = irqsoff_tracer_reset, | 672 | .reset = irqsoff_tracer_reset, |
673 | .start = irqsoff_tracer_start, | 673 | .start = irqsoff_tracer_start, |
674 | .stop = irqsoff_tracer_stop, | 674 | .stop = irqsoff_tracer_stop, |
675 | .print_max = 1, | 675 | .print_max = true, |
676 | .print_header = irqsoff_print_header, | 676 | .print_header = irqsoff_print_header, |
677 | .print_line = irqsoff_print_line, | 677 | .print_line = irqsoff_print_line, |
678 | .flags = &tracer_flags, | 678 | .flags = &tracer_flags, |
@@ -682,7 +682,7 @@ static struct tracer preemptirqsoff_tracer __read_mostly = | |||
682 | #endif | 682 | #endif |
683 | .open = irqsoff_trace_open, | 683 | .open = irqsoff_trace_open, |
684 | .close = irqsoff_trace_close, | 684 | .close = irqsoff_trace_close, |
685 | .use_max_tr = 1, | 685 | .use_max_tr = true, |
686 | }; | 686 | }; |
687 | 687 | ||
688 | # define register_preemptirqsoff(trace) register_tracer(&trace) | 688 | # define register_preemptirqsoff(trace) register_tracer(&trace) |
@@ -698,4 +698,4 @@ __init static int init_irqsoff_tracer(void) | |||
698 | 698 | ||
699 | return 0; | 699 | return 0; |
700 | } | 700 | } |
701 | device_initcall(init_irqsoff_tracer); | 701 | core_initcall(init_irqsoff_tracer); |
diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c index 1a2117043bb..1865d5f7653 100644 --- a/kernel/trace/trace_kprobe.c +++ b/kernel/trace/trace_kprobe.c | |||
@@ -444,7 +444,7 @@ static int create_trace_probe(int argc, char **argv) | |||
444 | return -EINVAL; | 444 | return -EINVAL; |
445 | } | 445 | } |
446 | /* an address specified */ | 446 | /* an address specified */ |
447 | ret = strict_strtoul(&argv[1][0], 0, (unsigned long *)&addr); | 447 | ret = kstrtoul(&argv[1][0], 0, (unsigned long *)&addr); |
448 | if (ret) { | 448 | if (ret) { |
449 | pr_info("Failed to parse address.\n"); | 449 | pr_info("Failed to parse address.\n"); |
450 | return ret; | 450 | return ret; |
@@ -751,8 +751,8 @@ static __kprobes void kprobe_trace_func(struct kprobe *kp, struct pt_regs *regs) | |||
751 | store_trace_args(sizeof(*entry), tp, regs, (u8 *)&entry[1], dsize); | 751 | store_trace_args(sizeof(*entry), tp, regs, (u8 *)&entry[1], dsize); |
752 | 752 | ||
753 | if (!filter_current_check_discard(buffer, call, entry, event)) | 753 | if (!filter_current_check_discard(buffer, call, entry, event)) |
754 | trace_nowake_buffer_unlock_commit_regs(buffer, event, | 754 | trace_buffer_unlock_commit_regs(buffer, event, |
755 | irq_flags, pc, regs); | 755 | irq_flags, pc, regs); |
756 | } | 756 | } |
757 | 757 | ||
758 | /* Kretprobe handler */ | 758 | /* Kretprobe handler */ |
@@ -784,8 +784,8 @@ static __kprobes void kretprobe_trace_func(struct kretprobe_instance *ri, | |||
784 | store_trace_args(sizeof(*entry), tp, regs, (u8 *)&entry[1], dsize); | 784 | store_trace_args(sizeof(*entry), tp, regs, (u8 *)&entry[1], dsize); |
785 | 785 | ||
786 | if (!filter_current_check_discard(buffer, call, entry, event)) | 786 | if (!filter_current_check_discard(buffer, call, entry, event)) |
787 | trace_nowake_buffer_unlock_commit_regs(buffer, event, | 787 | trace_buffer_unlock_commit_regs(buffer, event, |
788 | irq_flags, pc, regs); | 788 | irq_flags, pc, regs); |
789 | } | 789 | } |
790 | 790 | ||
791 | /* Event entry printers */ | 791 | /* Event entry printers */ |
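
The *_nowake_* commit variants disappear here for the same reason as in trace_branch.c: with wake-ups deferred through irq_work, the ordinary commit helpers are safe from kprobe context, so the distinction is gone and the functions were renamed. A sketch of the regs-carrying helper after this change, assuming it mirrors trace_buffer_unlock_commit() plus a stack dump taken from the probed registers (body quoted from memory):

    void trace_buffer_unlock_commit_regs(struct ring_buffer *buffer,
                                         struct ring_buffer_event *event,
                                         unsigned long flags, int pc,
                                         struct pt_regs *regs)
    {
            __buffer_unlock_commit(buffer, event);

            ftrace_trace_stack_regs(buffer, flags, 0, pc, regs);
            ftrace_trace_userstack(buffer, flags, pc);
    }
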
diff --git a/kernel/trace/trace_probe.c b/kernel/trace/trace_probe.c index daa9980153a..412e959709b 100644 --- a/kernel/trace/trace_probe.c +++ b/kernel/trace/trace_probe.c | |||
@@ -441,7 +441,7 @@ static const struct fetch_type *find_fetch_type(const char *type) | |||
441 | goto fail; | 441 | goto fail; |
442 | 442 | ||
443 | type++; | 443 | type++; |
444 | if (strict_strtoul(type, 0, &bs)) | 444 | if (kstrtoul(type, 0, &bs)) |
445 | goto fail; | 445 | goto fail; |
446 | 446 | ||
447 | switch (bs) { | 447 | switch (bs) { |
@@ -501,8 +501,8 @@ int traceprobe_split_symbol_offset(char *symbol, unsigned long *offset) | |||
501 | 501 | ||
502 | tmp = strchr(symbol, '+'); | 502 | tmp = strchr(symbol, '+'); |
503 | if (tmp) { | 503 | if (tmp) { |
504 | /* skip sign because strict_strtol doesn't accept '+' */ | 504 | /* skip sign because kstrtoul doesn't accept '+' */ |
505 | ret = strict_strtoul(tmp + 1, 0, offset); | 505 | ret = kstrtoul(tmp + 1, 0, offset); |
506 | if (ret) | 506 | if (ret) |
507 | return ret; | 507 | return ret; |
508 | 508 | ||
@@ -533,7 +533,7 @@ static int parse_probe_vars(char *arg, const struct fetch_type *t, | |||
533 | else | 533 | else |
534 | ret = -EINVAL; | 534 | ret = -EINVAL; |
535 | } else if (isdigit(arg[5])) { | 535 | } else if (isdigit(arg[5])) { |
536 | ret = strict_strtoul(arg + 5, 10, &param); | 536 | ret = kstrtoul(arg + 5, 10, &param); |
537 | if (ret || param > PARAM_MAX_STACK) | 537 | if (ret || param > PARAM_MAX_STACK) |
538 | ret = -EINVAL; | 538 | ret = -EINVAL; |
539 | else { | 539 | else { |
@@ -579,7 +579,7 @@ static int parse_probe_arg(char *arg, const struct fetch_type *t, | |||
579 | 579 | ||
580 | case '@': /* memory or symbol */ | 580 | case '@': /* memory or symbol */ |
581 | if (isdigit(arg[1])) { | 581 | if (isdigit(arg[1])) { |
582 | ret = strict_strtoul(arg + 1, 0, &param); | 582 | ret = kstrtoul(arg + 1, 0, &param); |
583 | if (ret) | 583 | if (ret) |
584 | break; | 584 | break; |
585 | 585 | ||
@@ -597,14 +597,14 @@ static int parse_probe_arg(char *arg, const struct fetch_type *t, | |||
597 | break; | 597 | break; |
598 | 598 | ||
599 | case '+': /* deref memory */ | 599 | case '+': /* deref memory */ |
600 | arg++; /* Skip '+', because strict_strtol() rejects it. */ | 600 | arg++; /* Skip '+', because kstrtol() rejects it. */ |
601 | case '-': | 601 | case '-': |
602 | tmp = strchr(arg, '('); | 602 | tmp = strchr(arg, '('); |
603 | if (!tmp) | 603 | if (!tmp) |
604 | break; | 604 | break; |
605 | 605 | ||
606 | *tmp = '\0'; | 606 | *tmp = '\0'; |
607 | ret = strict_strtol(arg, 0, &offset); | 607 | ret = kstrtol(arg, 0, &offset); |
608 | 608 | ||
609 | if (ret) | 609 | if (ret) |
610 | break; | 610 | break; |
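
All of these call sites parse pieces of the dynamic-probe argument syntax. A hypothetical event definition showing which string ends up in which parser (the symbol and offsets are made up):

    /*
     *   echo 'p:my_open do_sys_open a1=$stack2 a2=+4(%di):u32' \
     *        > /sys/kernel/debug/tracing/kprobe_events
     *
     * "$stack2" -> parse_probe_vars(): kstrtoul("2", 10, &param)
     * "+4(%di)" -> parse_probe_arg():  the leading '+' is skipped
     *              first, since kstrto*() rejects an explicit plus
     *              sign, then kstrtol("4", 0, &offset)
     */
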
diff --git a/kernel/trace/trace_sched_switch.c b/kernel/trace/trace_sched_switch.c index 7e62c0a1845..3374c792ccd 100644 --- a/kernel/trace/trace_sched_switch.c +++ b/kernel/trace/trace_sched_switch.c | |||
@@ -102,9 +102,7 @@ tracing_sched_wakeup_trace(struct trace_array *tr, | |||
102 | entry->next_cpu = task_cpu(wakee); | 102 | entry->next_cpu = task_cpu(wakee); |
103 | 103 | ||
104 | if (!filter_check_discard(call, entry, buffer, event)) | 104 | if (!filter_check_discard(call, entry, buffer, event)) |
105 | ring_buffer_unlock_commit(buffer, event); | 105 | trace_buffer_unlock_commit(buffer, event, flags, pc); |
106 | ftrace_trace_stack(tr->buffer, flags, 6, pc); | ||
107 | ftrace_trace_userstack(tr->buffer, flags, pc); | ||
108 | } | 106 | } |
109 | 107 | ||
110 | static void | 108 | static void |
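
This hunk is a deduplication: trace_buffer_unlock_commit() already performs the stack and user-stack tracing that was open-coded here, so the explicit calls could be dropped. Roughly what that helper does in kernel/trace/trace.c (from memory, with the series' deferred-wakeup commit underneath; note the same skip depth of 6):

    void trace_buffer_unlock_commit(struct ring_buffer *buffer,
                                    struct ring_buffer_event *event,
                                    unsigned long flags, int pc)
    {
            __buffer_unlock_commit(buffer, event);

            ftrace_trace_stack(buffer, flags, 6, pc);
            ftrace_trace_userstack(buffer, flags, pc);
    }
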
diff --git a/kernel/trace/trace_sched_wakeup.c b/kernel/trace/trace_sched_wakeup.c index 02170c00c41..bc64fc13755 100644 --- a/kernel/trace/trace_sched_wakeup.c +++ b/kernel/trace/trace_sched_wakeup.c | |||
@@ -589,7 +589,7 @@ static struct tracer wakeup_tracer __read_mostly = | |||
589 | .reset = wakeup_tracer_reset, | 589 | .reset = wakeup_tracer_reset, |
590 | .start = wakeup_tracer_start, | 590 | .start = wakeup_tracer_start, |
591 | .stop = wakeup_tracer_stop, | 591 | .stop = wakeup_tracer_stop, |
592 | .print_max = 1, | 592 | .print_max = true, |
593 | .print_header = wakeup_print_header, | 593 | .print_header = wakeup_print_header, |
594 | .print_line = wakeup_print_line, | 594 | .print_line = wakeup_print_line, |
595 | .flags = &tracer_flags, | 595 | .flags = &tracer_flags, |
@@ -599,7 +599,7 @@ static struct tracer wakeup_tracer __read_mostly = | |||
599 | #endif | 599 | #endif |
600 | .open = wakeup_trace_open, | 600 | .open = wakeup_trace_open, |
601 | .close = wakeup_trace_close, | 601 | .close = wakeup_trace_close, |
602 | .use_max_tr = 1, | 602 | .use_max_tr = true, |
603 | }; | 603 | }; |
604 | 604 | ||
605 | static struct tracer wakeup_rt_tracer __read_mostly = | 605 | static struct tracer wakeup_rt_tracer __read_mostly = |
@@ -610,7 +610,7 @@ static struct tracer wakeup_rt_tracer __read_mostly = | |||
610 | .start = wakeup_tracer_start, | 610 | .start = wakeup_tracer_start, |
611 | .stop = wakeup_tracer_stop, | 611 | .stop = wakeup_tracer_stop, |
612 | .wait_pipe = poll_wait_pipe, | 612 | .wait_pipe = poll_wait_pipe, |
613 | .print_max = 1, | 613 | .print_max = true, |
614 | .print_header = wakeup_print_header, | 614 | .print_header = wakeup_print_header, |
615 | .print_line = wakeup_print_line, | 615 | .print_line = wakeup_print_line, |
616 | .flags = &tracer_flags, | 616 | .flags = &tracer_flags, |
@@ -620,7 +620,7 @@ static struct tracer wakeup_rt_tracer __read_mostly = | |||
620 | #endif | 620 | #endif |
621 | .open = wakeup_trace_open, | 621 | .open = wakeup_trace_open, |
622 | .close = wakeup_trace_close, | 622 | .close = wakeup_trace_close, |
623 | .use_max_tr = 1, | 623 | .use_max_tr = true, |
624 | }; | 624 | }; |
625 | 625 | ||
626 | __init static int init_wakeup_tracer(void) | 626 | __init static int init_wakeup_tracer(void) |
@@ -637,4 +637,4 @@ __init static int init_wakeup_tracer(void) | |||
637 | 637 | ||
638 | return 0; | 638 | return 0; |
639 | } | 639 | } |
640 | device_initcall(init_wakeup_tracer); | 640 | core_initcall(init_wakeup_tracer); |
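
As in trace_irqsoff.c, the 0/1 tracer flags become proper booleans; the underlying fields in struct tracer were flipped from int to bool in this series. A sketch of the relevant fields only (the real structure in kernel/trace/trace.h has many more; bool comes from linux/types.h in the kernel):

    struct tracer {
            /* ... */
            bool    print_max;      /* report the max-latency snapshot */
            bool    use_max_tr;     /* keep a max_tr snapshot buffer   */
            /* ... */
    };
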
diff --git a/kernel/trace/trace_selftest.c b/kernel/trace/trace_selftest.c index 2c00a691a54..47623169a81 100644 --- a/kernel/trace/trace_selftest.c +++ b/kernel/trace/trace_selftest.c | |||
@@ -320,7 +320,6 @@ int trace_selftest_startup_dynamic_tracing(struct tracer *trace, | |||
320 | int (*func)(void)) | 320 | int (*func)(void)) |
321 | { | 321 | { |
322 | int save_ftrace_enabled = ftrace_enabled; | 322 | int save_ftrace_enabled = ftrace_enabled; |
323 | int save_tracer_enabled = tracer_enabled; | ||
324 | unsigned long count; | 323 | unsigned long count; |
325 | char *func_name; | 324 | char *func_name; |
326 | int ret; | 325 | int ret; |
@@ -331,7 +330,6 @@ int trace_selftest_startup_dynamic_tracing(struct tracer *trace, | |||
331 | 330 | ||
332 | /* enable tracing, and record the filter function */ | 331 | /* enable tracing, and record the filter function */ |
333 | ftrace_enabled = 1; | 332 | ftrace_enabled = 1; |
334 | tracer_enabled = 1; | ||
335 | 333 | ||
336 | /* passed in by parameter to fool gcc from optimizing */ | 334 | /* passed in by parameter to fool gcc from optimizing */ |
337 | func(); | 335 | func(); |
@@ -395,7 +393,6 @@ int trace_selftest_startup_dynamic_tracing(struct tracer *trace, | |||
395 | 393 | ||
396 | out: | 394 | out: |
397 | ftrace_enabled = save_ftrace_enabled; | 395 | ftrace_enabled = save_ftrace_enabled; |
398 | tracer_enabled = save_tracer_enabled; | ||
399 | 396 | ||
400 | /* Enable tracing on all functions again */ | 397 | /* Enable tracing on all functions again */ |
401 | ftrace_set_global_filter(NULL, 0, 1); | 398 | ftrace_set_global_filter(NULL, 0, 1); |
@@ -452,7 +449,6 @@ static int | |||
452 | trace_selftest_function_recursion(void) | 449 | trace_selftest_function_recursion(void) |
453 | { | 450 | { |
454 | int save_ftrace_enabled = ftrace_enabled; | 451 | int save_ftrace_enabled = ftrace_enabled; |
455 | int save_tracer_enabled = tracer_enabled; | ||
456 | char *func_name; | 452 | char *func_name; |
457 | int len; | 453 | int len; |
458 | int ret; | 454 | int ret; |
@@ -465,7 +461,6 @@ trace_selftest_function_recursion(void) | |||
465 | 461 | ||
466 | /* enable tracing, and record the filter function */ | 462 | /* enable tracing, and record the filter function */ |
467 | ftrace_enabled = 1; | 463 | ftrace_enabled = 1; |
468 | tracer_enabled = 1; | ||
469 | 464 | ||
470 | /* Handle PPC64 '.' name */ | 465 | /* Handle PPC64 '.' name */ |
471 | func_name = "*" __stringify(DYN_FTRACE_TEST_NAME); | 466 | func_name = "*" __stringify(DYN_FTRACE_TEST_NAME); |
@@ -534,7 +529,6 @@ trace_selftest_function_recursion(void) | |||
534 | ret = 0; | 529 | ret = 0; |
535 | out: | 530 | out: |
536 | ftrace_enabled = save_ftrace_enabled; | 531 | ftrace_enabled = save_ftrace_enabled; |
537 | tracer_enabled = save_tracer_enabled; | ||
538 | 532 | ||
539 | return ret; | 533 | return ret; |
540 | } | 534 | } |
@@ -569,7 +563,6 @@ static int | |||
569 | trace_selftest_function_regs(void) | 563 | trace_selftest_function_regs(void) |
570 | { | 564 | { |
571 | int save_ftrace_enabled = ftrace_enabled; | 565 | int save_ftrace_enabled = ftrace_enabled; |
572 | int save_tracer_enabled = tracer_enabled; | ||
573 | char *func_name; | 566 | char *func_name; |
574 | int len; | 567 | int len; |
575 | int ret; | 568 | int ret; |
@@ -586,7 +579,6 @@ trace_selftest_function_regs(void) | |||
586 | 579 | ||
587 | /* enable tracing, and record the filter function */ | 580 | /* enable tracing, and record the filter function */ |
588 | ftrace_enabled = 1; | 581 | ftrace_enabled = 1; |
589 | tracer_enabled = 1; | ||
590 | 582 | ||
591 | /* Handle PPC64 '.' name */ | 583 | /* Handle PPC64 '.' name */ |
592 | func_name = "*" __stringify(DYN_FTRACE_TEST_NAME); | 584 | func_name = "*" __stringify(DYN_FTRACE_TEST_NAME); |
@@ -648,7 +640,6 @@ trace_selftest_function_regs(void) | |||
648 | ret = 0; | 640 | ret = 0; |
649 | out: | 641 | out: |
650 | ftrace_enabled = save_ftrace_enabled; | 642 | ftrace_enabled = save_ftrace_enabled; |
651 | tracer_enabled = save_tracer_enabled; | ||
652 | 643 | ||
653 | return ret; | 644 | return ret; |
654 | } | 645 | } |
@@ -662,7 +653,6 @@ int | |||
662 | trace_selftest_startup_function(struct tracer *trace, struct trace_array *tr) | 653 | trace_selftest_startup_function(struct tracer *trace, struct trace_array *tr) |
663 | { | 654 | { |
664 | int save_ftrace_enabled = ftrace_enabled; | 655 | int save_ftrace_enabled = ftrace_enabled; |
665 | int save_tracer_enabled = tracer_enabled; | ||
666 | unsigned long count; | 656 | unsigned long count; |
667 | int ret; | 657 | int ret; |
668 | 658 | ||
@@ -671,7 +661,6 @@ trace_selftest_startup_function(struct tracer *trace, struct trace_array *tr) | |||
671 | 661 | ||
672 | /* start the tracing */ | 662 | /* start the tracing */ |
673 | ftrace_enabled = 1; | 663 | ftrace_enabled = 1; |
674 | tracer_enabled = 1; | ||
675 | 664 | ||
676 | ret = tracer_init(trace, tr); | 665 | ret = tracer_init(trace, tr); |
677 | if (ret) { | 666 | if (ret) { |
@@ -708,7 +697,6 @@ trace_selftest_startup_function(struct tracer *trace, struct trace_array *tr) | |||
708 | ret = trace_selftest_function_regs(); | 697 | ret = trace_selftest_function_regs(); |
709 | out: | 698 | out: |
710 | ftrace_enabled = save_ftrace_enabled; | 699 | ftrace_enabled = save_ftrace_enabled; |
711 | tracer_enabled = save_tracer_enabled; | ||
712 | 700 | ||
713 | /* kill ftrace totally if we failed */ | 701 | /* kill ftrace totally if we failed */ |
714 | if (ret) | 702 | if (ret) |
@@ -1106,6 +1094,7 @@ trace_selftest_startup_wakeup(struct tracer *trace, struct trace_array *tr) | |||
1106 | tracing_stop(); | 1094 | tracing_stop(); |
1107 | /* check both trace buffers */ | 1095 | /* check both trace buffers */ |
1108 | ret = trace_test_buffer(tr, NULL); | 1096 | ret = trace_test_buffer(tr, NULL); |
1097 | printk("ret = %d\n", ret); | ||
1109 | if (!ret) | 1098 | if (!ret) |
1110 | ret = trace_test_buffer(&max_tr, &count); | 1099 | ret = trace_test_buffer(&max_tr, &count); |
1111 | 1100 | ||
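
Every save_tracer_enabled/tracer_enabled pair vanishes here because this series removes the long-deprecated tracing_enabled debugfs file along with its backing tracer_enabled variable; only the ftrace_enabled global still needs to be saved and restored around a selftest. The surviving pattern, in outline:

    int save_ftrace_enabled = ftrace_enabled;

    ftrace_enabled = 1;                     /* enable for the test  */
    /* ... run the selftest ... */
    ftrace_enabled = save_ftrace_enabled;   /* restore global state */
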
diff --git a/kernel/trace/trace_syscalls.c b/kernel/trace/trace_syscalls.c index 2485a7d09b1..7609dd6714c 100644 --- a/kernel/trace/trace_syscalls.c +++ b/kernel/trace/trace_syscalls.c | |||
@@ -21,9 +21,6 @@ static int syscall_enter_register(struct ftrace_event_call *event, | |||
21 | static int syscall_exit_register(struct ftrace_event_call *event, | 21 | static int syscall_exit_register(struct ftrace_event_call *event, |
22 | enum trace_reg type, void *data); | 22 | enum trace_reg type, void *data); |
23 | 23 | ||
24 | static int syscall_enter_define_fields(struct ftrace_event_call *call); | ||
25 | static int syscall_exit_define_fields(struct ftrace_event_call *call); | ||
26 | |||
27 | static struct list_head * | 24 | static struct list_head * |
28 | syscall_get_enter_fields(struct ftrace_event_call *call) | 25 | syscall_get_enter_fields(struct ftrace_event_call *call) |
29 | { | 26 | { |
@@ -32,30 +29,6 @@ syscall_get_enter_fields(struct ftrace_event_call *call) | |||
32 | return &entry->enter_fields; | 29 | return &entry->enter_fields; |
33 | } | 30 | } |
34 | 31 | ||
35 | struct trace_event_functions enter_syscall_print_funcs = { | ||
36 | .trace = print_syscall_enter, | ||
37 | }; | ||
38 | |||
39 | struct trace_event_functions exit_syscall_print_funcs = { | ||
40 | .trace = print_syscall_exit, | ||
41 | }; | ||
42 | |||
43 | struct ftrace_event_class event_class_syscall_enter = { | ||
44 | .system = "syscalls", | ||
45 | .reg = syscall_enter_register, | ||
46 | .define_fields = syscall_enter_define_fields, | ||
47 | .get_fields = syscall_get_enter_fields, | ||
48 | .raw_init = init_syscall_trace, | ||
49 | }; | ||
50 | |||
51 | struct ftrace_event_class event_class_syscall_exit = { | ||
52 | .system = "syscalls", | ||
53 | .reg = syscall_exit_register, | ||
54 | .define_fields = syscall_exit_define_fields, | ||
55 | .fields = LIST_HEAD_INIT(event_class_syscall_exit.fields), | ||
56 | .raw_init = init_syscall_trace, | ||
57 | }; | ||
58 | |||
59 | extern struct syscall_metadata *__start_syscalls_metadata[]; | 32 | extern struct syscall_metadata *__start_syscalls_metadata[]; |
60 | extern struct syscall_metadata *__stop_syscalls_metadata[]; | 33 | extern struct syscall_metadata *__stop_syscalls_metadata[]; |
61 | 34 | ||
@@ -432,7 +405,7 @@ void unreg_event_syscall_exit(struct ftrace_event_call *call) | |||
432 | mutex_unlock(&syscall_trace_lock); | 405 | mutex_unlock(&syscall_trace_lock); |
433 | } | 406 | } |
434 | 407 | ||
435 | int init_syscall_trace(struct ftrace_event_call *call) | 408 | static int init_syscall_trace(struct ftrace_event_call *call) |
436 | { | 409 | { |
437 | int id; | 410 | int id; |
438 | int num; | 411 | int num; |
@@ -457,6 +430,30 @@ int init_syscall_trace(struct ftrace_event_call *call) | |||
457 | return id; | 430 | return id; |
458 | } | 431 | } |
459 | 432 | ||
433 | struct trace_event_functions enter_syscall_print_funcs = { | ||
434 | .trace = print_syscall_enter, | ||
435 | }; | ||
436 | |||
437 | struct trace_event_functions exit_syscall_print_funcs = { | ||
438 | .trace = print_syscall_exit, | ||
439 | }; | ||
440 | |||
441 | struct ftrace_event_class event_class_syscall_enter = { | ||
442 | .system = "syscalls", | ||
443 | .reg = syscall_enter_register, | ||
444 | .define_fields = syscall_enter_define_fields, | ||
445 | .get_fields = syscall_get_enter_fields, | ||
446 | .raw_init = init_syscall_trace, | ||
447 | }; | ||
448 | |||
449 | struct ftrace_event_class event_class_syscall_exit = { | ||
450 | .system = "syscalls", | ||
451 | .reg = syscall_exit_register, | ||
452 | .define_fields = syscall_exit_define_fields, | ||
453 | .fields = LIST_HEAD_INIT(event_class_syscall_exit.fields), | ||
454 | .raw_init = init_syscall_trace, | ||
455 | }; | ||
456 | |||
460 | unsigned long __init __weak arch_syscall_addr(int nr) | 457 | unsigned long __init __weak arch_syscall_addr(int nr) |
461 | { | 458 | { |
462 | return (unsigned long)sys_call_table[nr]; | 459 | return (unsigned long)sys_call_table[nr]; |
@@ -537,7 +534,7 @@ static void perf_syscall_enter(void *ignore, struct pt_regs *regs, long id) | |||
537 | perf_trace_buf_submit(rec, size, rctx, 0, 1, regs, head, NULL); | 534 | perf_trace_buf_submit(rec, size, rctx, 0, 1, regs, head, NULL); |
538 | } | 535 | } |
539 | 536 | ||
540 | int perf_sysenter_enable(struct ftrace_event_call *call) | 537 | static int perf_sysenter_enable(struct ftrace_event_call *call) |
541 | { | 538 | { |
542 | int ret = 0; | 539 | int ret = 0; |
543 | int num; | 540 | int num; |
@@ -558,7 +555,7 @@ int perf_sysenter_enable(struct ftrace_event_call *call) | |||
558 | return ret; | 555 | return ret; |
559 | } | 556 | } |
560 | 557 | ||
561 | void perf_sysenter_disable(struct ftrace_event_call *call) | 558 | static void perf_sysenter_disable(struct ftrace_event_call *call) |
562 | { | 559 | { |
563 | int num; | 560 | int num; |
564 | 561 | ||
@@ -615,7 +612,7 @@ static void perf_syscall_exit(void *ignore, struct pt_regs *regs, long ret) | |||
615 | perf_trace_buf_submit(rec, size, rctx, 0, 1, regs, head, NULL); | 612 | perf_trace_buf_submit(rec, size, rctx, 0, 1, regs, head, NULL); |
616 | } | 613 | } |
617 | 614 | ||
618 | int perf_sysexit_enable(struct ftrace_event_call *call) | 615 | static int perf_sysexit_enable(struct ftrace_event_call *call) |
619 | { | 616 | { |
620 | int ret = 0; | 617 | int ret = 0; |
621 | int num; | 618 | int num; |
@@ -636,7 +633,7 @@ int perf_sysexit_enable(struct ftrace_event_call *call) | |||
636 | return ret; | 633 | return ret; |
637 | } | 634 | } |
638 | 635 | ||
639 | void perf_sysexit_disable(struct ftrace_event_call *call) | 636 | static void perf_sysexit_disable(struct ftrace_event_call *call) |
640 | { | 637 | { |
641 | int num; | 638 | int num; |
642 | 639 | ||
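
The syscall event plumbing becomes private to this file. C requires a name to be declared before a file-scope initializer references it, so instead of keeping forward declarations for the newly static functions, the patch moves the trace_event_functions and ftrace_event_class definitions below init_syscall_trace() (the same problem is solved the other way in trace_events.c above, with forward declarations of the open functions). A minimal illustration of the rule:

    static int demo_init(void);             /* forward declaration... */

    struct demo_class {
            int (*raw_init)(void);
    };

    /* ...without it (or the definition above), this would not compile: */
    static struct demo_class demo = {
            .raw_init = demo_init,
    };

    static int demo_init(void)
    {
            return 0;
    }
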
diff --git a/kernel/trace/trace_uprobe.c b/kernel/trace/trace_uprobe.c index 03003cd7dd9..4ff9ca4f359 100644 --- a/kernel/trace/trace_uprobe.c +++ b/kernel/trace/trace_uprobe.c | |||
@@ -252,7 +252,7 @@ static int create_trace_uprobe(int argc, char **argv) | |||
252 | if (ret) | 252 | if (ret) |
253 | goto fail_address_parse; | 253 | goto fail_address_parse; |
254 | 254 | ||
255 | ret = strict_strtoul(arg, 0, &offset); | 255 | ret = kstrtoul(arg, 0, &offset); |
256 | if (ret) | 256 | if (ret) |
257 | goto fail_address_parse; | 257 | goto fail_address_parse; |
258 | 258 | ||