Diffstat (limited to 'kernel/trace')
-rw-r--r--  kernel/trace/Kconfig                  |  68
-rw-r--r--  kernel/trace/Makefile                 |   4
-rw-r--r--  kernel/trace/ftrace.c                 |   5
-rw-r--r--  kernel/trace/kmemtrace.c              | 529
-rw-r--r--  kernel/trace/ring_buffer.c            |  40
-rw-r--r--  kernel/trace/trace.c                  | 127
-rw-r--r--  kernel/trace/trace.h                  |  90
-rw-r--r--  kernel/trace/trace_boot.c             | 185
-rw-r--r--  kernel/trace/trace_clock.c            |   5
-rw-r--r--  kernel/trace/trace_entries.h          |  94
-rw-r--r--  kernel/trace/trace_event_perf.c       |  27
-rw-r--r--  kernel/trace/trace_events.c           | 299
-rw-r--r--  kernel/trace/trace_events_filter.c    |  27
-rw-r--r--  kernel/trace/trace_export.c           |   8
-rw-r--r--  kernel/trace/trace_functions.c        |   6
-rw-r--r--  kernel/trace/trace_functions_graph.c  |   3
-rw-r--r--  kernel/trace/trace_irqsoff.c          |   3
-rw-r--r--  kernel/trace/trace_kprobe.c           | 383
-rw-r--r--  kernel/trace/trace_ksym.c             | 508
-rw-r--r--  kernel/trace/trace_output.c           |  69
-rw-r--r--  kernel/trace/trace_sched_wakeup.c     |   7
-rw-r--r--  kernel/trace/trace_selftest.c         |  87
-rw-r--r--  kernel/trace/trace_stack.c            |   6
-rw-r--r--  kernel/trace/trace_syscalls.c         |   7
-rw-r--r--  kernel/trace/trace_sysprof.c          | 329
25 files changed, 572 insertions, 2344 deletions
diff --git a/kernel/trace/Kconfig b/kernel/trace/Kconfig
index 8b1797c4545b..c7683fd8a03a 100644
--- a/kernel/trace/Kconfig
+++ b/kernel/trace/Kconfig
@@ -194,15 +194,6 @@ config PREEMPT_TRACER | |||
194 | enabled. This option and the irqs-off timing option can be | 194 | enabled. This option and the irqs-off timing option can be |
195 | used together or separately.) | 195 | used together or separately.) |
196 | 196 | ||
197 | config SYSPROF_TRACER | ||
198 | bool "Sysprof Tracer" | ||
199 | depends on X86 | ||
200 | select GENERIC_TRACER | ||
201 | select CONTEXT_SWITCH_TRACER | ||
202 | help | ||
203 | This tracer provides the trace needed by the 'Sysprof' userspace | ||
204 | tool. | ||
205 | |||
206 | config SCHED_TRACER | 197 | config SCHED_TRACER |
207 | bool "Scheduling Latency Tracer" | 198 | bool "Scheduling Latency Tracer" |
208 | select GENERIC_TRACER | 199 | select GENERIC_TRACER |
@@ -229,23 +220,6 @@ config FTRACE_SYSCALLS | |||
229 | help | 220 | help |
230 | Basic tracer to catch the syscall entry and exit events. | 221 | Basic tracer to catch the syscall entry and exit events. |
231 | 222 | ||
232 | config BOOT_TRACER | ||
233 | bool "Trace boot initcalls" | ||
234 | select GENERIC_TRACER | ||
235 | select CONTEXT_SWITCH_TRACER | ||
236 | help | ||
237 | This tracer helps developers to optimize boot times: it records | ||
238 | the timings of the initcalls and traces key events and the identity | ||
239 | of tasks that can cause boot delays, such as context-switches. | ||
240 | |||
241 | Its aim is to be parsed by the scripts/bootgraph.pl tool to | ||
242 | produce pretty graphics about boot inefficiencies, giving a visual | ||
243 | representation of the delays during initcalls - but the raw | ||
244 | /debug/tracing/trace text output is readable too. | ||
245 | |||
246 | You must pass in initcall_debug and ftrace=initcall to the kernel | ||
247 | command line to enable this on bootup. | ||
248 | |||
249 | config TRACE_BRANCH_PROFILING | 223 | config TRACE_BRANCH_PROFILING |
250 | bool | 224 | bool |
251 | select GENERIC_TRACER | 225 | select GENERIC_TRACER |
@@ -325,28 +299,6 @@ config BRANCH_TRACER | |||
325 | 299 | ||
326 | Say N if unsure. | 300 | Say N if unsure. |
327 | 301 | ||
328 | config KSYM_TRACER | ||
329 | bool "Trace read and write access on kernel memory locations" | ||
330 | depends on HAVE_HW_BREAKPOINT | ||
331 | select TRACING | ||
332 | help | ||
333 | This tracer helps find read and write operations on any given kernel | ||
334 | symbol i.e. /proc/kallsyms. | ||
335 | |||
336 | config PROFILE_KSYM_TRACER | ||
337 | bool "Profile all kernel memory accesses on 'watched' variables" | ||
338 | depends on KSYM_TRACER | ||
339 | help | ||
340 | This tracer profiles kernel accesses on variables watched through the | ||
341 | ksym tracer ftrace plugin. Depending upon the hardware, all read | ||
342 | and write operations on kernel variables can be monitored for | ||
343 | accesses. | ||
344 | |||
345 | The results will be displayed in: | ||
346 | /debugfs/tracing/profile_ksym | ||
347 | |||
348 | Say N if unsure. | ||
349 | |||
350 | config STACK_TRACER | 302 | config STACK_TRACER |
351 | bool "Trace max stack" | 303 | bool "Trace max stack" |
352 | depends on HAVE_FUNCTION_TRACER | 304 | depends on HAVE_FUNCTION_TRACER |
@@ -371,26 +323,6 @@ config STACK_TRACER | |||
371 | 323 | ||
372 | Say N if unsure. | 324 | Say N if unsure. |
373 | 325 | ||
374 | config KMEMTRACE | ||
375 | bool "Trace SLAB allocations" | ||
376 | select GENERIC_TRACER | ||
377 | help | ||
378 | kmemtrace provides tracing for slab allocator functions, such as | ||
379 | kmalloc, kfree, kmem_cache_alloc, kmem_cache_free, etc. Collected | ||
380 | data is then fed to the userspace application in order to analyse | ||
381 | allocation hotspots, internal fragmentation and so on, making it | ||
382 | possible to see how well an allocator performs, as well as debug | ||
383 | and profile kernel code. | ||
384 | |||
385 | This requires an userspace application to use. See | ||
386 | Documentation/trace/kmemtrace.txt for more information. | ||
387 | |||
388 | Saying Y will make the kernel somewhat larger and slower. However, | ||
389 | if you disable kmemtrace at run-time or boot-time, the performance | ||
390 | impact is minimal (depending on the arch the kernel is built for). | ||
391 | |||
392 | If unsure, say N. | ||
393 | |||
394 | config WORKQUEUE_TRACER | 326 | config WORKQUEUE_TRACER |
395 | bool "Trace workqueues" | 327 | bool "Trace workqueues" |
396 | select GENERIC_TRACER | 328 | select GENERIC_TRACER |
diff --git a/kernel/trace/Makefile b/kernel/trace/Makefile
index 4215530b490b..53f338190b26 100644
--- a/kernel/trace/Makefile
+++ b/kernel/trace/Makefile
@@ -30,7 +30,6 @@ obj-$(CONFIG_TRACING) += trace_output.o | |||
30 | obj-$(CONFIG_TRACING) += trace_stat.o | 30 | obj-$(CONFIG_TRACING) += trace_stat.o |
31 | obj-$(CONFIG_TRACING) += trace_printk.o | 31 | obj-$(CONFIG_TRACING) += trace_printk.o |
32 | obj-$(CONFIG_CONTEXT_SWITCH_TRACER) += trace_sched_switch.o | 32 | obj-$(CONFIG_CONTEXT_SWITCH_TRACER) += trace_sched_switch.o |
33 | obj-$(CONFIG_SYSPROF_TRACER) += trace_sysprof.o | ||
34 | obj-$(CONFIG_FUNCTION_TRACER) += trace_functions.o | 33 | obj-$(CONFIG_FUNCTION_TRACER) += trace_functions.o |
35 | obj-$(CONFIG_IRQSOFF_TRACER) += trace_irqsoff.o | 34 | obj-$(CONFIG_IRQSOFF_TRACER) += trace_irqsoff.o |
36 | obj-$(CONFIG_PREEMPT_TRACER) += trace_irqsoff.o | 35 | obj-$(CONFIG_PREEMPT_TRACER) += trace_irqsoff.o |
@@ -38,10 +37,8 @@ obj-$(CONFIG_SCHED_TRACER) += trace_sched_wakeup.o | |||
38 | obj-$(CONFIG_NOP_TRACER) += trace_nop.o | 37 | obj-$(CONFIG_NOP_TRACER) += trace_nop.o |
39 | obj-$(CONFIG_STACK_TRACER) += trace_stack.o | 38 | obj-$(CONFIG_STACK_TRACER) += trace_stack.o |
40 | obj-$(CONFIG_MMIOTRACE) += trace_mmiotrace.o | 39 | obj-$(CONFIG_MMIOTRACE) += trace_mmiotrace.o |
41 | obj-$(CONFIG_BOOT_TRACER) += trace_boot.o | ||
42 | obj-$(CONFIG_FUNCTION_GRAPH_TRACER) += trace_functions_graph.o | 40 | obj-$(CONFIG_FUNCTION_GRAPH_TRACER) += trace_functions_graph.o |
43 | obj-$(CONFIG_TRACE_BRANCH_PROFILING) += trace_branch.o | 41 | obj-$(CONFIG_TRACE_BRANCH_PROFILING) += trace_branch.o |
44 | obj-$(CONFIG_KMEMTRACE) += kmemtrace.o | ||
45 | obj-$(CONFIG_WORKQUEUE_TRACER) += trace_workqueue.o | 42 | obj-$(CONFIG_WORKQUEUE_TRACER) += trace_workqueue.o |
46 | obj-$(CONFIG_BLK_DEV_IO_TRACE) += blktrace.o | 43 | obj-$(CONFIG_BLK_DEV_IO_TRACE) += blktrace.o |
47 | ifeq ($(CONFIG_BLOCK),y) | 44 | ifeq ($(CONFIG_BLOCK),y) |
@@ -55,7 +52,6 @@ obj-$(CONFIG_EVENT_TRACING) += trace_event_perf.o | |||
55 | endif | 52 | endif |
56 | obj-$(CONFIG_EVENT_TRACING) += trace_events_filter.o | 53 | obj-$(CONFIG_EVENT_TRACING) += trace_events_filter.o |
57 | obj-$(CONFIG_KPROBE_EVENT) += trace_kprobe.o | 54 | obj-$(CONFIG_KPROBE_EVENT) += trace_kprobe.o |
58 | obj-$(CONFIG_KSYM_TRACER) += trace_ksym.o | ||
59 | obj-$(CONFIG_EVENT_TRACING) += power-traces.o | 55 | obj-$(CONFIG_EVENT_TRACING) += power-traces.o |
60 | ifeq ($(CONFIG_TRACING),y) | 56 | ifeq ($(CONFIG_TRACING),y) |
61 | obj-$(CONFIG_KGDB_KDB) += trace_kdb.o | 57 | obj-$(CONFIG_KGDB_KDB) += trace_kdb.o |
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index 6d2cb14f9449..0d88ce9b9fb8 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -1883,7 +1883,6 @@ function_trace_probe_call(unsigned long ip, unsigned long parent_ip) | |||
1883 | struct hlist_head *hhd; | 1883 | struct hlist_head *hhd; |
1884 | struct hlist_node *n; | 1884 | struct hlist_node *n; |
1885 | unsigned long key; | 1885 | unsigned long key; |
1886 | int resched; | ||
1887 | 1886 | ||
1888 | key = hash_long(ip, FTRACE_HASH_BITS); | 1887 | key = hash_long(ip, FTRACE_HASH_BITS); |
1889 | 1888 | ||
@@ -1897,12 +1896,12 @@ function_trace_probe_call(unsigned long ip, unsigned long parent_ip) | |||
1897 | * period. This syncs the hash iteration and freeing of items | 1896 | * period. This syncs the hash iteration and freeing of items |
1898 | * on the hash. rcu_read_lock is too dangerous here. | 1897 | * on the hash. rcu_read_lock is too dangerous here. |
1899 | */ | 1898 | */ |
1900 | resched = ftrace_preempt_disable(); | 1899 | preempt_disable_notrace(); |
1901 | hlist_for_each_entry_rcu(entry, n, hhd, node) { | 1900 | hlist_for_each_entry_rcu(entry, n, hhd, node) { |
1902 | if (entry->ip == ip) | 1901 | if (entry->ip == ip) |
1903 | entry->ops->func(ip, parent_ip, &entry->data); | 1902 | entry->ops->func(ip, parent_ip, &entry->data); |
1904 | } | 1903 | } |
1905 | ftrace_preempt_enable(resched); | 1904 | preempt_enable_notrace(); |
1906 | } | 1905 | } |
1907 | 1906 | ||
1908 | static struct ftrace_ops trace_probe_ops __read_mostly = | 1907 | static struct ftrace_ops trace_probe_ops __read_mostly = |
diff --git a/kernel/trace/kmemtrace.c b/kernel/trace/kmemtrace.c
deleted file mode 100644
index bbfc1bb1660b..000000000000
--- a/kernel/trace/kmemtrace.c
+++ /dev/null
@@ -1,529 +0,0 @@ | |||
1 | /* | ||
2 | * Memory allocator tracing | ||
3 | * | ||
4 | * Copyright (C) 2008 Eduard - Gabriel Munteanu | ||
5 | * Copyright (C) 2008 Pekka Enberg <penberg@cs.helsinki.fi> | ||
6 | * Copyright (C) 2008 Frederic Weisbecker <fweisbec@gmail.com> | ||
7 | */ | ||
8 | |||
9 | #include <linux/tracepoint.h> | ||
10 | #include <linux/seq_file.h> | ||
11 | #include <linux/debugfs.h> | ||
12 | #include <linux/dcache.h> | ||
13 | #include <linux/fs.h> | ||
14 | |||
15 | #include <linux/kmemtrace.h> | ||
16 | |||
17 | #include "trace_output.h" | ||
18 | #include "trace.h" | ||
19 | |||
20 | /* Select an alternative, minimalistic output than the original one */ | ||
21 | #define TRACE_KMEM_OPT_MINIMAL 0x1 | ||
22 | |||
23 | static struct tracer_opt kmem_opts[] = { | ||
24 | /* Default disable the minimalistic output */ | ||
25 | { TRACER_OPT(kmem_minimalistic, TRACE_KMEM_OPT_MINIMAL) }, | ||
26 | { } | ||
27 | }; | ||
28 | |||
29 | static struct tracer_flags kmem_tracer_flags = { | ||
30 | .val = 0, | ||
31 | .opts = kmem_opts | ||
32 | }; | ||
33 | |||
34 | static struct trace_array *kmemtrace_array; | ||
35 | |||
36 | /* Trace allocations */ | ||
37 | static inline void kmemtrace_alloc(enum kmemtrace_type_id type_id, | ||
38 | unsigned long call_site, | ||
39 | const void *ptr, | ||
40 | size_t bytes_req, | ||
41 | size_t bytes_alloc, | ||
42 | gfp_t gfp_flags, | ||
43 | int node) | ||
44 | { | ||
45 | struct ftrace_event_call *call = &event_kmem_alloc; | ||
46 | struct trace_array *tr = kmemtrace_array; | ||
47 | struct kmemtrace_alloc_entry *entry; | ||
48 | struct ring_buffer_event *event; | ||
49 | |||
50 | event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry)); | ||
51 | if (!event) | ||
52 | return; | ||
53 | |||
54 | entry = ring_buffer_event_data(event); | ||
55 | tracing_generic_entry_update(&entry->ent, 0, 0); | ||
56 | |||
57 | entry->ent.type = TRACE_KMEM_ALLOC; | ||
58 | entry->type_id = type_id; | ||
59 | entry->call_site = call_site; | ||
60 | entry->ptr = ptr; | ||
61 | entry->bytes_req = bytes_req; | ||
62 | entry->bytes_alloc = bytes_alloc; | ||
63 | entry->gfp_flags = gfp_flags; | ||
64 | entry->node = node; | ||
65 | |||
66 | if (!filter_check_discard(call, entry, tr->buffer, event)) | ||
67 | ring_buffer_unlock_commit(tr->buffer, event); | ||
68 | |||
69 | trace_wake_up(); | ||
70 | } | ||
71 | |||
72 | static inline void kmemtrace_free(enum kmemtrace_type_id type_id, | ||
73 | unsigned long call_site, | ||
74 | const void *ptr) | ||
75 | { | ||
76 | struct ftrace_event_call *call = &event_kmem_free; | ||
77 | struct trace_array *tr = kmemtrace_array; | ||
78 | struct kmemtrace_free_entry *entry; | ||
79 | struct ring_buffer_event *event; | ||
80 | |||
81 | event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry)); | ||
82 | if (!event) | ||
83 | return; | ||
84 | entry = ring_buffer_event_data(event); | ||
85 | tracing_generic_entry_update(&entry->ent, 0, 0); | ||
86 | |||
87 | entry->ent.type = TRACE_KMEM_FREE; | ||
88 | entry->type_id = type_id; | ||
89 | entry->call_site = call_site; | ||
90 | entry->ptr = ptr; | ||
91 | |||
92 | if (!filter_check_discard(call, entry, tr->buffer, event)) | ||
93 | ring_buffer_unlock_commit(tr->buffer, event); | ||
94 | |||
95 | trace_wake_up(); | ||
96 | } | ||
97 | |||
98 | static void kmemtrace_kmalloc(void *ignore, | ||
99 | unsigned long call_site, | ||
100 | const void *ptr, | ||
101 | size_t bytes_req, | ||
102 | size_t bytes_alloc, | ||
103 | gfp_t gfp_flags) | ||
104 | { | ||
105 | kmemtrace_alloc(KMEMTRACE_TYPE_KMALLOC, call_site, ptr, | ||
106 | bytes_req, bytes_alloc, gfp_flags, -1); | ||
107 | } | ||
108 | |||
109 | static void kmemtrace_kmem_cache_alloc(void *ignore, | ||
110 | unsigned long call_site, | ||
111 | const void *ptr, | ||
112 | size_t bytes_req, | ||
113 | size_t bytes_alloc, | ||
114 | gfp_t gfp_flags) | ||
115 | { | ||
116 | kmemtrace_alloc(KMEMTRACE_TYPE_CACHE, call_site, ptr, | ||
117 | bytes_req, bytes_alloc, gfp_flags, -1); | ||
118 | } | ||
119 | |||
120 | static void kmemtrace_kmalloc_node(void *ignore, | ||
121 | unsigned long call_site, | ||
122 | const void *ptr, | ||
123 | size_t bytes_req, | ||
124 | size_t bytes_alloc, | ||
125 | gfp_t gfp_flags, | ||
126 | int node) | ||
127 | { | ||
128 | kmemtrace_alloc(KMEMTRACE_TYPE_KMALLOC, call_site, ptr, | ||
129 | bytes_req, bytes_alloc, gfp_flags, node); | ||
130 | } | ||
131 | |||
132 | static void kmemtrace_kmem_cache_alloc_node(void *ignore, | ||
133 | unsigned long call_site, | ||
134 | const void *ptr, | ||
135 | size_t bytes_req, | ||
136 | size_t bytes_alloc, | ||
137 | gfp_t gfp_flags, | ||
138 | int node) | ||
139 | { | ||
140 | kmemtrace_alloc(KMEMTRACE_TYPE_CACHE, call_site, ptr, | ||
141 | bytes_req, bytes_alloc, gfp_flags, node); | ||
142 | } | ||
143 | |||
144 | static void | ||
145 | kmemtrace_kfree(void *ignore, unsigned long call_site, const void *ptr) | ||
146 | { | ||
147 | kmemtrace_free(KMEMTRACE_TYPE_KMALLOC, call_site, ptr); | ||
148 | } | ||
149 | |||
150 | static void kmemtrace_kmem_cache_free(void *ignore, | ||
151 | unsigned long call_site, const void *ptr) | ||
152 | { | ||
153 | kmemtrace_free(KMEMTRACE_TYPE_CACHE, call_site, ptr); | ||
154 | } | ||
155 | |||
156 | static int kmemtrace_start_probes(void) | ||
157 | { | ||
158 | int err; | ||
159 | |||
160 | err = register_trace_kmalloc(kmemtrace_kmalloc, NULL); | ||
161 | if (err) | ||
162 | return err; | ||
163 | err = register_trace_kmem_cache_alloc(kmemtrace_kmem_cache_alloc, NULL); | ||
164 | if (err) | ||
165 | return err; | ||
166 | err = register_trace_kmalloc_node(kmemtrace_kmalloc_node, NULL); | ||
167 | if (err) | ||
168 | return err; | ||
169 | err = register_trace_kmem_cache_alloc_node(kmemtrace_kmem_cache_alloc_node, NULL); | ||
170 | if (err) | ||
171 | return err; | ||
172 | err = register_trace_kfree(kmemtrace_kfree, NULL); | ||
173 | if (err) | ||
174 | return err; | ||
175 | err = register_trace_kmem_cache_free(kmemtrace_kmem_cache_free, NULL); | ||
176 | |||
177 | return err; | ||
178 | } | ||
179 | |||
180 | static void kmemtrace_stop_probes(void) | ||
181 | { | ||
182 | unregister_trace_kmalloc(kmemtrace_kmalloc, NULL); | ||
183 | unregister_trace_kmem_cache_alloc(kmemtrace_kmem_cache_alloc, NULL); | ||
184 | unregister_trace_kmalloc_node(kmemtrace_kmalloc_node, NULL); | ||
185 | unregister_trace_kmem_cache_alloc_node(kmemtrace_kmem_cache_alloc_node, NULL); | ||
186 | unregister_trace_kfree(kmemtrace_kfree, NULL); | ||
187 | unregister_trace_kmem_cache_free(kmemtrace_kmem_cache_free, NULL); | ||
188 | } | ||
189 | |||
190 | static int kmem_trace_init(struct trace_array *tr) | ||
191 | { | ||
192 | kmemtrace_array = tr; | ||
193 | |||
194 | tracing_reset_online_cpus(tr); | ||
195 | |||
196 | kmemtrace_start_probes(); | ||
197 | |||
198 | return 0; | ||
199 | } | ||
200 | |||
201 | static void kmem_trace_reset(struct trace_array *tr) | ||
202 | { | ||
203 | kmemtrace_stop_probes(); | ||
204 | } | ||
205 | |||
206 | static void kmemtrace_headers(struct seq_file *s) | ||
207 | { | ||
208 | /* Don't need headers for the original kmemtrace output */ | ||
209 | if (!(kmem_tracer_flags.val & TRACE_KMEM_OPT_MINIMAL)) | ||
210 | return; | ||
211 | |||
212 | seq_printf(s, "#\n"); | ||
213 | seq_printf(s, "# ALLOC TYPE REQ GIVEN FLAGS " | ||
214 | " POINTER NODE CALLER\n"); | ||
215 | seq_printf(s, "# FREE | | | | " | ||
216 | " | | | |\n"); | ||
217 | seq_printf(s, "# |\n\n"); | ||
218 | } | ||
219 | |||
220 | /* | ||
221 | * The following functions give the original output from kmemtrace, | ||
222 | * plus the origin CPU, since reordering occurs in-kernel now. | ||
223 | */ | ||
224 | |||
225 | #define KMEMTRACE_USER_ALLOC 0 | ||
226 | #define KMEMTRACE_USER_FREE 1 | ||
227 | |||
228 | struct kmemtrace_user_event { | ||
229 | u8 event_id; | ||
230 | u8 type_id; | ||
231 | u16 event_size; | ||
232 | u32 cpu; | ||
233 | u64 timestamp; | ||
234 | unsigned long call_site; | ||
235 | unsigned long ptr; | ||
236 | }; | ||
237 | |||
238 | struct kmemtrace_user_event_alloc { | ||
239 | size_t bytes_req; | ||
240 | size_t bytes_alloc; | ||
241 | unsigned gfp_flags; | ||
242 | int node; | ||
243 | }; | ||
244 | |||
245 | static enum print_line_t | ||
246 | kmemtrace_print_alloc(struct trace_iterator *iter, int flags, | ||
247 | struct trace_event *event) | ||
248 | { | ||
249 | struct trace_seq *s = &iter->seq; | ||
250 | struct kmemtrace_alloc_entry *entry; | ||
251 | int ret; | ||
252 | |||
253 | trace_assign_type(entry, iter->ent); | ||
254 | |||
255 | ret = trace_seq_printf(s, "type_id %d call_site %pF ptr %lu " | ||
256 | "bytes_req %lu bytes_alloc %lu gfp_flags %lu node %d\n", | ||
257 | entry->type_id, (void *)entry->call_site, (unsigned long)entry->ptr, | ||
258 | (unsigned long)entry->bytes_req, (unsigned long)entry->bytes_alloc, | ||
259 | (unsigned long)entry->gfp_flags, entry->node); | ||
260 | |||
261 | if (!ret) | ||
262 | return TRACE_TYPE_PARTIAL_LINE; | ||
263 | return TRACE_TYPE_HANDLED; | ||
264 | } | ||
265 | |||
266 | static enum print_line_t | ||
267 | kmemtrace_print_free(struct trace_iterator *iter, int flags, | ||
268 | struct trace_event *event) | ||
269 | { | ||
270 | struct trace_seq *s = &iter->seq; | ||
271 | struct kmemtrace_free_entry *entry; | ||
272 | int ret; | ||
273 | |||
274 | trace_assign_type(entry, iter->ent); | ||
275 | |||
276 | ret = trace_seq_printf(s, "type_id %d call_site %pF ptr %lu\n", | ||
277 | entry->type_id, (void *)entry->call_site, | ||
278 | (unsigned long)entry->ptr); | ||
279 | |||
280 | if (!ret) | ||
281 | return TRACE_TYPE_PARTIAL_LINE; | ||
282 | return TRACE_TYPE_HANDLED; | ||
283 | } | ||
284 | |||
285 | static enum print_line_t | ||
286 | kmemtrace_print_alloc_user(struct trace_iterator *iter, int flags, | ||
287 | struct trace_event *event) | ||
288 | { | ||
289 | struct trace_seq *s = &iter->seq; | ||
290 | struct kmemtrace_alloc_entry *entry; | ||
291 | struct kmemtrace_user_event *ev; | ||
292 | struct kmemtrace_user_event_alloc *ev_alloc; | ||
293 | |||
294 | trace_assign_type(entry, iter->ent); | ||
295 | |||
296 | ev = trace_seq_reserve(s, sizeof(*ev)); | ||
297 | if (!ev) | ||
298 | return TRACE_TYPE_PARTIAL_LINE; | ||
299 | |||
300 | ev->event_id = KMEMTRACE_USER_ALLOC; | ||
301 | ev->type_id = entry->type_id; | ||
302 | ev->event_size = sizeof(*ev) + sizeof(*ev_alloc); | ||
303 | ev->cpu = iter->cpu; | ||
304 | ev->timestamp = iter->ts; | ||
305 | ev->call_site = entry->call_site; | ||
306 | ev->ptr = (unsigned long)entry->ptr; | ||
307 | |||
308 | ev_alloc = trace_seq_reserve(s, sizeof(*ev_alloc)); | ||
309 | if (!ev_alloc) | ||
310 | return TRACE_TYPE_PARTIAL_LINE; | ||
311 | |||
312 | ev_alloc->bytes_req = entry->bytes_req; | ||
313 | ev_alloc->bytes_alloc = entry->bytes_alloc; | ||
314 | ev_alloc->gfp_flags = entry->gfp_flags; | ||
315 | ev_alloc->node = entry->node; | ||
316 | |||
317 | return TRACE_TYPE_HANDLED; | ||
318 | } | ||
319 | |||
320 | static enum print_line_t | ||
321 | kmemtrace_print_free_user(struct trace_iterator *iter, int flags, | ||
322 | struct trace_event *event) | ||
323 | { | ||
324 | struct trace_seq *s = &iter->seq; | ||
325 | struct kmemtrace_free_entry *entry; | ||
326 | struct kmemtrace_user_event *ev; | ||
327 | |||
328 | trace_assign_type(entry, iter->ent); | ||
329 | |||
330 | ev = trace_seq_reserve(s, sizeof(*ev)); | ||
331 | if (!ev) | ||
332 | return TRACE_TYPE_PARTIAL_LINE; | ||
333 | |||
334 | ev->event_id = KMEMTRACE_USER_FREE; | ||
335 | ev->type_id = entry->type_id; | ||
336 | ev->event_size = sizeof(*ev); | ||
337 | ev->cpu = iter->cpu; | ||
338 | ev->timestamp = iter->ts; | ||
339 | ev->call_site = entry->call_site; | ||
340 | ev->ptr = (unsigned long)entry->ptr; | ||
341 | |||
342 | return TRACE_TYPE_HANDLED; | ||
343 | } | ||
344 | |||
345 | /* The two other following provide a more minimalistic output */ | ||
346 | static enum print_line_t | ||
347 | kmemtrace_print_alloc_compress(struct trace_iterator *iter) | ||
348 | { | ||
349 | struct kmemtrace_alloc_entry *entry; | ||
350 | struct trace_seq *s = &iter->seq; | ||
351 | int ret; | ||
352 | |||
353 | trace_assign_type(entry, iter->ent); | ||
354 | |||
355 | /* Alloc entry */ | ||
356 | ret = trace_seq_printf(s, " + "); | ||
357 | if (!ret) | ||
358 | return TRACE_TYPE_PARTIAL_LINE; | ||
359 | |||
360 | /* Type */ | ||
361 | switch (entry->type_id) { | ||
362 | case KMEMTRACE_TYPE_KMALLOC: | ||
363 | ret = trace_seq_printf(s, "K "); | ||
364 | break; | ||
365 | case KMEMTRACE_TYPE_CACHE: | ||
366 | ret = trace_seq_printf(s, "C "); | ||
367 | break; | ||
368 | case KMEMTRACE_TYPE_PAGES: | ||
369 | ret = trace_seq_printf(s, "P "); | ||
370 | break; | ||
371 | default: | ||
372 | ret = trace_seq_printf(s, "? "); | ||
373 | } | ||
374 | |||
375 | if (!ret) | ||
376 | return TRACE_TYPE_PARTIAL_LINE; | ||
377 | |||
378 | /* Requested */ | ||
379 | ret = trace_seq_printf(s, "%4zu ", entry->bytes_req); | ||
380 | if (!ret) | ||
381 | return TRACE_TYPE_PARTIAL_LINE; | ||
382 | |||
383 | /* Allocated */ | ||
384 | ret = trace_seq_printf(s, "%4zu ", entry->bytes_alloc); | ||
385 | if (!ret) | ||
386 | return TRACE_TYPE_PARTIAL_LINE; | ||
387 | |||
388 | /* Flags | ||
389 | * TODO: would be better to see the name of the GFP flag names | ||
390 | */ | ||
391 | ret = trace_seq_printf(s, "%08x ", entry->gfp_flags); | ||
392 | if (!ret) | ||
393 | return TRACE_TYPE_PARTIAL_LINE; | ||
394 | |||
395 | /* Pointer to allocated */ | ||
396 | ret = trace_seq_printf(s, "0x%tx ", (ptrdiff_t)entry->ptr); | ||
397 | if (!ret) | ||
398 | return TRACE_TYPE_PARTIAL_LINE; | ||
399 | |||
400 | /* Node and call site*/ | ||
401 | ret = trace_seq_printf(s, "%4d %pf\n", entry->node, | ||
402 | (void *)entry->call_site); | ||
403 | if (!ret) | ||
404 | return TRACE_TYPE_PARTIAL_LINE; | ||
405 | |||
406 | return TRACE_TYPE_HANDLED; | ||
407 | } | ||
408 | |||
409 | static enum print_line_t | ||
410 | kmemtrace_print_free_compress(struct trace_iterator *iter) | ||
411 | { | ||
412 | struct kmemtrace_free_entry *entry; | ||
413 | struct trace_seq *s = &iter->seq; | ||
414 | int ret; | ||
415 | |||
416 | trace_assign_type(entry, iter->ent); | ||
417 | |||
418 | /* Free entry */ | ||
419 | ret = trace_seq_printf(s, " - "); | ||
420 | if (!ret) | ||
421 | return TRACE_TYPE_PARTIAL_LINE; | ||
422 | |||
423 | /* Type */ | ||
424 | switch (entry->type_id) { | ||
425 | case KMEMTRACE_TYPE_KMALLOC: | ||
426 | ret = trace_seq_printf(s, "K "); | ||
427 | break; | ||
428 | case KMEMTRACE_TYPE_CACHE: | ||
429 | ret = trace_seq_printf(s, "C "); | ||
430 | break; | ||
431 | case KMEMTRACE_TYPE_PAGES: | ||
432 | ret = trace_seq_printf(s, "P "); | ||
433 | break; | ||
434 | default: | ||
435 | ret = trace_seq_printf(s, "? "); | ||
436 | } | ||
437 | |||
438 | if (!ret) | ||
439 | return TRACE_TYPE_PARTIAL_LINE; | ||
440 | |||
441 | /* Skip requested/allocated/flags */ | ||
442 | ret = trace_seq_printf(s, " "); | ||
443 | if (!ret) | ||
444 | return TRACE_TYPE_PARTIAL_LINE; | ||
445 | |||
446 | /* Pointer to allocated */ | ||
447 | ret = trace_seq_printf(s, "0x%tx ", (ptrdiff_t)entry->ptr); | ||
448 | if (!ret) | ||
449 | return TRACE_TYPE_PARTIAL_LINE; | ||
450 | |||
451 | /* Skip node and print call site*/ | ||
452 | ret = trace_seq_printf(s, " %pf\n", (void *)entry->call_site); | ||
453 | if (!ret) | ||
454 | return TRACE_TYPE_PARTIAL_LINE; | ||
455 | |||
456 | return TRACE_TYPE_HANDLED; | ||
457 | } | ||
458 | |||
459 | static enum print_line_t kmemtrace_print_line(struct trace_iterator *iter) | ||
460 | { | ||
461 | struct trace_entry *entry = iter->ent; | ||
462 | |||
463 | if (!(kmem_tracer_flags.val & TRACE_KMEM_OPT_MINIMAL)) | ||
464 | return TRACE_TYPE_UNHANDLED; | ||
465 | |||
466 | switch (entry->type) { | ||
467 | case TRACE_KMEM_ALLOC: | ||
468 | return kmemtrace_print_alloc_compress(iter); | ||
469 | case TRACE_KMEM_FREE: | ||
470 | return kmemtrace_print_free_compress(iter); | ||
471 | default: | ||
472 | return TRACE_TYPE_UNHANDLED; | ||
473 | } | ||
474 | } | ||
475 | |||
476 | static struct trace_event_functions kmem_trace_alloc_funcs = { | ||
477 | .trace = kmemtrace_print_alloc, | ||
478 | .binary = kmemtrace_print_alloc_user, | ||
479 | }; | ||
480 | |||
481 | static struct trace_event kmem_trace_alloc = { | ||
482 | .type = TRACE_KMEM_ALLOC, | ||
483 | .funcs = &kmem_trace_alloc_funcs, | ||
484 | }; | ||
485 | |||
486 | static struct trace_event_functions kmem_trace_free_funcs = { | ||
487 | .trace = kmemtrace_print_free, | ||
488 | .binary = kmemtrace_print_free_user, | ||
489 | }; | ||
490 | |||
491 | static struct trace_event kmem_trace_free = { | ||
492 | .type = TRACE_KMEM_FREE, | ||
493 | .funcs = &kmem_trace_free_funcs, | ||
494 | }; | ||
495 | |||
496 | static struct tracer kmem_tracer __read_mostly = { | ||
497 | .name = "kmemtrace", | ||
498 | .init = kmem_trace_init, | ||
499 | .reset = kmem_trace_reset, | ||
500 | .print_line = kmemtrace_print_line, | ||
501 | .print_header = kmemtrace_headers, | ||
502 | .flags = &kmem_tracer_flags | ||
503 | }; | ||
504 | |||
505 | void kmemtrace_init(void) | ||
506 | { | ||
507 | /* earliest opportunity to start kmem tracing */ | ||
508 | } | ||
509 | |||
510 | static int __init init_kmem_tracer(void) | ||
511 | { | ||
512 | if (!register_ftrace_event(&kmem_trace_alloc)) { | ||
513 | pr_warning("Warning: could not register kmem events\n"); | ||
514 | return 1; | ||
515 | } | ||
516 | |||
517 | if (!register_ftrace_event(&kmem_trace_free)) { | ||
518 | pr_warning("Warning: could not register kmem events\n"); | ||
519 | return 1; | ||
520 | } | ||
521 | |||
522 | if (register_tracer(&kmem_tracer) != 0) { | ||
523 | pr_warning("Warning: could not register the kmem tracer\n"); | ||
524 | return 1; | ||
525 | } | ||
526 | |||
527 | return 0; | ||
528 | } | ||
529 | device_initcall(init_kmem_tracer); | ||
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index 1da7b6ea8b85..3632ce87674f 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -443,6 +443,7 @@ int ring_buffer_print_page_header(struct trace_seq *s) | |||
443 | */ | 443 | */ |
444 | struct ring_buffer_per_cpu { | 444 | struct ring_buffer_per_cpu { |
445 | int cpu; | 445 | int cpu; |
446 | atomic_t record_disabled; | ||
446 | struct ring_buffer *buffer; | 447 | struct ring_buffer *buffer; |
447 | spinlock_t reader_lock; /* serialize readers */ | 448 | spinlock_t reader_lock; /* serialize readers */ |
448 | arch_spinlock_t lock; | 449 | arch_spinlock_t lock; |
@@ -462,7 +463,6 @@ struct ring_buffer_per_cpu { | |||
462 | unsigned long read; | 463 | unsigned long read; |
463 | u64 write_stamp; | 464 | u64 write_stamp; |
464 | u64 read_stamp; | 465 | u64 read_stamp; |
465 | atomic_t record_disabled; | ||
466 | }; | 466 | }; |
467 | 467 | ||
468 | struct ring_buffer { | 468 | struct ring_buffer { |
@@ -2242,8 +2242,6 @@ static void trace_recursive_unlock(void) | |||
2242 | 2242 | ||
2243 | #endif | 2243 | #endif |
2244 | 2244 | ||
2245 | static DEFINE_PER_CPU(int, rb_need_resched); | ||
2246 | |||
2247 | /** | 2245 | /** |
2248 | * ring_buffer_lock_reserve - reserve a part of the buffer | 2246 | * ring_buffer_lock_reserve - reserve a part of the buffer |
2249 | * @buffer: the ring buffer to reserve from | 2247 | * @buffer: the ring buffer to reserve from |
@@ -2264,13 +2262,13 @@ ring_buffer_lock_reserve(struct ring_buffer *buffer, unsigned long length) | |||
2264 | { | 2262 | { |
2265 | struct ring_buffer_per_cpu *cpu_buffer; | 2263 | struct ring_buffer_per_cpu *cpu_buffer; |
2266 | struct ring_buffer_event *event; | 2264 | struct ring_buffer_event *event; |
2267 | int cpu, resched; | 2265 | int cpu; |
2268 | 2266 | ||
2269 | if (ring_buffer_flags != RB_BUFFERS_ON) | 2267 | if (ring_buffer_flags != RB_BUFFERS_ON) |
2270 | return NULL; | 2268 | return NULL; |
2271 | 2269 | ||
2272 | /* If we are tracing schedule, we don't want to recurse */ | 2270 | /* If we are tracing schedule, we don't want to recurse */ |
2273 | resched = ftrace_preempt_disable(); | 2271 | preempt_disable_notrace(); |
2274 | 2272 | ||
2275 | if (atomic_read(&buffer->record_disabled)) | 2273 | if (atomic_read(&buffer->record_disabled)) |
2276 | goto out_nocheck; | 2274 | goto out_nocheck; |
@@ -2295,21 +2293,13 @@ ring_buffer_lock_reserve(struct ring_buffer *buffer, unsigned long length) | |||
2295 | if (!event) | 2293 | if (!event) |
2296 | goto out; | 2294 | goto out; |
2297 | 2295 | ||
2298 | /* | ||
2299 | * Need to store resched state on this cpu. | ||
2300 | * Only the first needs to. | ||
2301 | */ | ||
2302 | |||
2303 | if (preempt_count() == 1) | ||
2304 | per_cpu(rb_need_resched, cpu) = resched; | ||
2305 | |||
2306 | return event; | 2296 | return event; |
2307 | 2297 | ||
2308 | out: | 2298 | out: |
2309 | trace_recursive_unlock(); | 2299 | trace_recursive_unlock(); |
2310 | 2300 | ||
2311 | out_nocheck: | 2301 | out_nocheck: |
2312 | ftrace_preempt_enable(resched); | 2302 | preempt_enable_notrace(); |
2313 | return NULL; | 2303 | return NULL; |
2314 | } | 2304 | } |
2315 | EXPORT_SYMBOL_GPL(ring_buffer_lock_reserve); | 2305 | EXPORT_SYMBOL_GPL(ring_buffer_lock_reserve); |
@@ -2355,13 +2345,7 @@ int ring_buffer_unlock_commit(struct ring_buffer *buffer, | |||
2355 | 2345 | ||
2356 | trace_recursive_unlock(); | 2346 | trace_recursive_unlock(); |
2357 | 2347 | ||
2358 | /* | 2348 | preempt_enable_notrace(); |
2359 | * Only the last preempt count needs to restore preemption. | ||
2360 | */ | ||
2361 | if (preempt_count() == 1) | ||
2362 | ftrace_preempt_enable(per_cpu(rb_need_resched, cpu)); | ||
2363 | else | ||
2364 | preempt_enable_no_resched_notrace(); | ||
2365 | 2349 | ||
2366 | return 0; | 2350 | return 0; |
2367 | } | 2351 | } |
@@ -2469,13 +2453,7 @@ void ring_buffer_discard_commit(struct ring_buffer *buffer, | |||
2469 | 2453 | ||
2470 | trace_recursive_unlock(); | 2454 | trace_recursive_unlock(); |
2471 | 2455 | ||
2472 | /* | 2456 | preempt_enable_notrace(); |
2473 | * Only the last preempt count needs to restore preemption. | ||
2474 | */ | ||
2475 | if (preempt_count() == 1) | ||
2476 | ftrace_preempt_enable(per_cpu(rb_need_resched, cpu)); | ||
2477 | else | ||
2478 | preempt_enable_no_resched_notrace(); | ||
2479 | 2457 | ||
2480 | } | 2458 | } |
2481 | EXPORT_SYMBOL_GPL(ring_buffer_discard_commit); | 2459 | EXPORT_SYMBOL_GPL(ring_buffer_discard_commit); |
@@ -2501,12 +2479,12 @@ int ring_buffer_write(struct ring_buffer *buffer, | |||
2501 | struct ring_buffer_event *event; | 2479 | struct ring_buffer_event *event; |
2502 | void *body; | 2480 | void *body; |
2503 | int ret = -EBUSY; | 2481 | int ret = -EBUSY; |
2504 | int cpu, resched; | 2482 | int cpu; |
2505 | 2483 | ||
2506 | if (ring_buffer_flags != RB_BUFFERS_ON) | 2484 | if (ring_buffer_flags != RB_BUFFERS_ON) |
2507 | return -EBUSY; | 2485 | return -EBUSY; |
2508 | 2486 | ||
2509 | resched = ftrace_preempt_disable(); | 2487 | preempt_disable_notrace(); |
2510 | 2488 | ||
2511 | if (atomic_read(&buffer->record_disabled)) | 2489 | if (atomic_read(&buffer->record_disabled)) |
2512 | goto out; | 2490 | goto out; |
@@ -2536,7 +2514,7 @@ int ring_buffer_write(struct ring_buffer *buffer, | |||
2536 | 2514 | ||
2537 | ret = 0; | 2515 | ret = 0; |
2538 | out: | 2516 | out: |
2539 | ftrace_preempt_enable(resched); | 2517 | preempt_enable_notrace(); |
2540 | 2518 | ||
2541 | return ret; | 2519 | return ret; |
2542 | } | 2520 | } |
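Note on the preempt changes above: the ftrace.c and ring_buffer.c hunks replace the old ftrace_preempt_disable()/ftrace_preempt_enable() pair with plain preempt_disable_notrace()/preempt_enable_notrace(). The old helpers handed back a "resched" flag that the matching enable call needed, which is why ring_buffer_lock_reserve() had to park that flag in the per-cpu rb_need_resched slot for ring_buffer_unlock_commit() and ring_buffer_discard_commit() to consume later. The sketch below is illustrative only, reconstructed from the call sites visible in these hunks; do_reserve()/do_commit() are made-up placeholders and the removed helper bodies are not part of this patch.

	/*
	 * Illustrative sketch only, inferred from the hunks above; not the
	 * actual code removed by this patch.
	 */

	/* Before: reserve and commit had to hand a resched flag across calls. */
	void *reserve_old(void)
	{
		int resched = ftrace_preempt_disable();	/* save resched state */

		if (preempt_count() == 1)
			per_cpu(rb_need_resched, smp_processor_id()) = resched;
		return do_reserve();			/* placeholder */
	}

	void commit_old(void)
	{
		do_commit();				/* placeholder */
		if (preempt_count() == 1)
			ftrace_preempt_enable(per_cpu(rb_need_resched,
						      smp_processor_id()));
		else
			preempt_enable_no_resched_notrace();
	}

	/* After: symmetric notrace accessors, no per-cpu bookkeeping. */
	void *reserve_new(void)
	{
		preempt_disable_notrace();
		return do_reserve();			/* placeholder */
	}

	void commit_new(void)
	{
		do_commit();				/* placeholder */
		preempt_enable_notrace();
	}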
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index d6736b93dc2a..ed1032d6f81d 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -341,7 +341,7 @@ static DECLARE_WAIT_QUEUE_HEAD(trace_wait); | |||
341 | /* trace_flags holds trace_options default values */ | 341 | /* trace_flags holds trace_options default values */ |
342 | unsigned long trace_flags = TRACE_ITER_PRINT_PARENT | TRACE_ITER_PRINTK | | 342 | unsigned long trace_flags = TRACE_ITER_PRINT_PARENT | TRACE_ITER_PRINTK | |
343 | TRACE_ITER_ANNOTATE | TRACE_ITER_CONTEXT_INFO | TRACE_ITER_SLEEP_TIME | | 343 | TRACE_ITER_ANNOTATE | TRACE_ITER_CONTEXT_INFO | TRACE_ITER_SLEEP_TIME | |
344 | TRACE_ITER_GRAPH_TIME; | 344 | TRACE_ITER_GRAPH_TIME | TRACE_ITER_RECORD_CMD; |
345 | 345 | ||
346 | static int trace_stop_count; | 346 | static int trace_stop_count; |
347 | static DEFINE_SPINLOCK(tracing_start_lock); | 347 | static DEFINE_SPINLOCK(tracing_start_lock); |
@@ -425,6 +425,7 @@ static const char *trace_options[] = { | |||
425 | "latency-format", | 425 | "latency-format", |
426 | "sleep-time", | 426 | "sleep-time", |
427 | "graph-time", | 427 | "graph-time", |
428 | "record-cmd", | ||
428 | NULL | 429 | NULL |
429 | }; | 430 | }; |
430 | 431 | ||
@@ -656,6 +657,10 @@ update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu) | |||
656 | return; | 657 | return; |
657 | 658 | ||
658 | WARN_ON_ONCE(!irqs_disabled()); | 659 | WARN_ON_ONCE(!irqs_disabled()); |
660 | if (!current_trace->use_max_tr) { | ||
661 | WARN_ON_ONCE(1); | ||
662 | return; | ||
663 | } | ||
659 | arch_spin_lock(&ftrace_max_lock); | 664 | arch_spin_lock(&ftrace_max_lock); |
660 | 665 | ||
661 | tr->buffer = max_tr.buffer; | 666 | tr->buffer = max_tr.buffer; |
@@ -682,6 +687,11 @@ update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu) | |||
682 | return; | 687 | return; |
683 | 688 | ||
684 | WARN_ON_ONCE(!irqs_disabled()); | 689 | WARN_ON_ONCE(!irqs_disabled()); |
690 | if (!current_trace->use_max_tr) { | ||
691 | WARN_ON_ONCE(1); | ||
692 | return; | ||
693 | } | ||
694 | |||
685 | arch_spin_lock(&ftrace_max_lock); | 695 | arch_spin_lock(&ftrace_max_lock); |
686 | 696 | ||
687 | ftrace_disable_cpu(); | 697 | ftrace_disable_cpu(); |
@@ -726,7 +736,7 @@ __acquires(kernel_lock) | |||
726 | return -1; | 736 | return -1; |
727 | } | 737 | } |
728 | 738 | ||
729 | if (strlen(type->name) > MAX_TRACER_SIZE) { | 739 | if (strlen(type->name) >= MAX_TRACER_SIZE) { |
730 | pr_info("Tracer has a name longer than %d\n", MAX_TRACER_SIZE); | 740 | pr_info("Tracer has a name longer than %d\n", MAX_TRACER_SIZE); |
731 | return -1; | 741 | return -1; |
732 | } | 742 | } |
@@ -1328,61 +1338,6 @@ static void __trace_userstack(struct trace_array *tr, unsigned long flags) | |||
1328 | 1338 | ||
1329 | #endif /* CONFIG_STACKTRACE */ | 1339 | #endif /* CONFIG_STACKTRACE */ |
1330 | 1340 | ||
1331 | static void | ||
1332 | ftrace_trace_special(void *__tr, | ||
1333 | unsigned long arg1, unsigned long arg2, unsigned long arg3, | ||
1334 | int pc) | ||
1335 | { | ||
1336 | struct ftrace_event_call *call = &event_special; | ||
1337 | struct ring_buffer_event *event; | ||
1338 | struct trace_array *tr = __tr; | ||
1339 | struct ring_buffer *buffer = tr->buffer; | ||
1340 | struct special_entry *entry; | ||
1341 | |||
1342 | event = trace_buffer_lock_reserve(buffer, TRACE_SPECIAL, | ||
1343 | sizeof(*entry), 0, pc); | ||
1344 | if (!event) | ||
1345 | return; | ||
1346 | entry = ring_buffer_event_data(event); | ||
1347 | entry->arg1 = arg1; | ||
1348 | entry->arg2 = arg2; | ||
1349 | entry->arg3 = arg3; | ||
1350 | |||
1351 | if (!filter_check_discard(call, entry, buffer, event)) | ||
1352 | trace_buffer_unlock_commit(buffer, event, 0, pc); | ||
1353 | } | ||
1354 | |||
1355 | void | ||
1356 | __trace_special(void *__tr, void *__data, | ||
1357 | unsigned long arg1, unsigned long arg2, unsigned long arg3) | ||
1358 | { | ||
1359 | ftrace_trace_special(__tr, arg1, arg2, arg3, preempt_count()); | ||
1360 | } | ||
1361 | |||
1362 | void | ||
1363 | ftrace_special(unsigned long arg1, unsigned long arg2, unsigned long arg3) | ||
1364 | { | ||
1365 | struct trace_array *tr = &global_trace; | ||
1366 | struct trace_array_cpu *data; | ||
1367 | unsigned long flags; | ||
1368 | int cpu; | ||
1369 | int pc; | ||
1370 | |||
1371 | if (tracing_disabled) | ||
1372 | return; | ||
1373 | |||
1374 | pc = preempt_count(); | ||
1375 | local_irq_save(flags); | ||
1376 | cpu = raw_smp_processor_id(); | ||
1377 | data = tr->data[cpu]; | ||
1378 | |||
1379 | if (likely(atomic_inc_return(&data->disabled) == 1)) | ||
1380 | ftrace_trace_special(tr, arg1, arg2, arg3, pc); | ||
1381 | |||
1382 | atomic_dec(&data->disabled); | ||
1383 | local_irq_restore(flags); | ||
1384 | } | ||
1385 | |||
1386 | /** | 1341 | /** |
1387 | * trace_vbprintk - write binary msg to tracing buffer | 1342 | * trace_vbprintk - write binary msg to tracing buffer |
1388 | * | 1343 | * |
@@ -1401,7 +1356,6 @@ int trace_vbprintk(unsigned long ip, const char *fmt, va_list args) | |||
1401 | struct bprint_entry *entry; | 1356 | struct bprint_entry *entry; |
1402 | unsigned long flags; | 1357 | unsigned long flags; |
1403 | int disable; | 1358 | int disable; |
1404 | int resched; | ||
1405 | int cpu, len = 0, size, pc; | 1359 | int cpu, len = 0, size, pc; |
1406 | 1360 | ||
1407 | if (unlikely(tracing_selftest_running || tracing_disabled)) | 1361 | if (unlikely(tracing_selftest_running || tracing_disabled)) |
@@ -1411,7 +1365,7 @@ int trace_vbprintk(unsigned long ip, const char *fmt, va_list args) | |||
1411 | pause_graph_tracing(); | 1365 | pause_graph_tracing(); |
1412 | 1366 | ||
1413 | pc = preempt_count(); | 1367 | pc = preempt_count(); |
1414 | resched = ftrace_preempt_disable(); | 1368 | preempt_disable_notrace(); |
1415 | cpu = raw_smp_processor_id(); | 1369 | cpu = raw_smp_processor_id(); |
1416 | data = tr->data[cpu]; | 1370 | data = tr->data[cpu]; |
1417 | 1371 | ||
@@ -1449,7 +1403,7 @@ out_unlock: | |||
1449 | 1403 | ||
1450 | out: | 1404 | out: |
1451 | atomic_dec_return(&data->disabled); | 1405 | atomic_dec_return(&data->disabled); |
1452 | ftrace_preempt_enable(resched); | 1406 | preempt_enable_notrace(); |
1453 | unpause_graph_tracing(); | 1407 | unpause_graph_tracing(); |
1454 | 1408 | ||
1455 | return len; | 1409 | return len; |
@@ -2386,6 +2340,7 @@ static const struct file_operations show_traces_fops = { | |||
2386 | .open = show_traces_open, | 2340 | .open = show_traces_open, |
2387 | .read = seq_read, | 2341 | .read = seq_read, |
2388 | .release = seq_release, | 2342 | .release = seq_release, |
2343 | .llseek = seq_lseek, | ||
2389 | }; | 2344 | }; |
2390 | 2345 | ||
2391 | /* | 2346 | /* |
@@ -2479,6 +2434,7 @@ static const struct file_operations tracing_cpumask_fops = { | |||
2479 | .open = tracing_open_generic, | 2434 | .open = tracing_open_generic, |
2480 | .read = tracing_cpumask_read, | 2435 | .read = tracing_cpumask_read, |
2481 | .write = tracing_cpumask_write, | 2436 | .write = tracing_cpumask_write, |
2437 | .llseek = generic_file_llseek, | ||
2482 | }; | 2438 | }; |
2483 | 2439 | ||
2484 | static int tracing_trace_options_show(struct seq_file *m, void *v) | 2440 | static int tracing_trace_options_show(struct seq_file *m, void *v) |
@@ -2554,6 +2510,9 @@ static void set_tracer_flags(unsigned int mask, int enabled) | |||
2554 | trace_flags |= mask; | 2510 | trace_flags |= mask; |
2555 | else | 2511 | else |
2556 | trace_flags &= ~mask; | 2512 | trace_flags &= ~mask; |
2513 | |||
2514 | if (mask == TRACE_ITER_RECORD_CMD) | ||
2515 | trace_event_enable_cmd_record(enabled); | ||
2557 | } | 2516 | } |
2558 | 2517 | ||
2559 | static ssize_t | 2518 | static ssize_t |
@@ -2645,6 +2604,7 @@ tracing_readme_read(struct file *filp, char __user *ubuf, | |||
2645 | static const struct file_operations tracing_readme_fops = { | 2604 | static const struct file_operations tracing_readme_fops = { |
2646 | .open = tracing_open_generic, | 2605 | .open = tracing_open_generic, |
2647 | .read = tracing_readme_read, | 2606 | .read = tracing_readme_read, |
2607 | .llseek = generic_file_llseek, | ||
2648 | }; | 2608 | }; |
2649 | 2609 | ||
2650 | static ssize_t | 2610 | static ssize_t |
@@ -2695,6 +2655,7 @@ tracing_saved_cmdlines_read(struct file *file, char __user *ubuf, | |||
2695 | static const struct file_operations tracing_saved_cmdlines_fops = { | 2655 | static const struct file_operations tracing_saved_cmdlines_fops = { |
2696 | .open = tracing_open_generic, | 2656 | .open = tracing_open_generic, |
2697 | .read = tracing_saved_cmdlines_read, | 2657 | .read = tracing_saved_cmdlines_read, |
2658 | .llseek = generic_file_llseek, | ||
2698 | }; | 2659 | }; |
2699 | 2660 | ||
2700 | static ssize_t | 2661 | static ssize_t |
@@ -2790,6 +2751,9 @@ static int tracing_resize_ring_buffer(unsigned long size) | |||
2790 | if (ret < 0) | 2751 | if (ret < 0) |
2791 | return ret; | 2752 | return ret; |
2792 | 2753 | ||
2754 | if (!current_trace->use_max_tr) | ||
2755 | goto out; | ||
2756 | |||
2793 | ret = ring_buffer_resize(max_tr.buffer, size); | 2757 | ret = ring_buffer_resize(max_tr.buffer, size); |
2794 | if (ret < 0) { | 2758 | if (ret < 0) { |
2795 | int r; | 2759 | int r; |
@@ -2817,11 +2781,14 @@ static int tracing_resize_ring_buffer(unsigned long size) | |||
2817 | return ret; | 2781 | return ret; |
2818 | } | 2782 | } |
2819 | 2783 | ||
2784 | max_tr.entries = size; | ||
2785 | out: | ||
2820 | global_trace.entries = size; | 2786 | global_trace.entries = size; |
2821 | 2787 | ||
2822 | return ret; | 2788 | return ret; |
2823 | } | 2789 | } |
2824 | 2790 | ||
2791 | |||
2825 | /** | 2792 | /** |
2826 | * tracing_update_buffers - used by tracing facility to expand ring buffers | 2793 | * tracing_update_buffers - used by tracing facility to expand ring buffers |
2827 | * | 2794 | * |
@@ -2882,12 +2849,26 @@ static int tracing_set_tracer(const char *buf) | |||
2882 | trace_branch_disable(); | 2849 | trace_branch_disable(); |
2883 | if (current_trace && current_trace->reset) | 2850 | if (current_trace && current_trace->reset) |
2884 | current_trace->reset(tr); | 2851 | current_trace->reset(tr); |
2885 | 2852 | if (current_trace && current_trace->use_max_tr) { | |
2853 | /* | ||
2854 | * We don't free the ring buffer. instead, resize it because | ||
2855 | * The max_tr ring buffer has some state (e.g. ring->clock) and | ||
2856 | * we want preserve it. | ||
2857 | */ | ||
2858 | ring_buffer_resize(max_tr.buffer, 1); | ||
2859 | max_tr.entries = 1; | ||
2860 | } | ||
2886 | destroy_trace_option_files(topts); | 2861 | destroy_trace_option_files(topts); |
2887 | 2862 | ||
2888 | current_trace = t; | 2863 | current_trace = t; |
2889 | 2864 | ||
2890 | topts = create_trace_option_files(current_trace); | 2865 | topts = create_trace_option_files(current_trace); |
2866 | if (current_trace->use_max_tr) { | ||
2867 | ret = ring_buffer_resize(max_tr.buffer, global_trace.entries); | ||
2868 | if (ret < 0) | ||
2869 | goto out; | ||
2870 | max_tr.entries = global_trace.entries; | ||
2871 | } | ||
2891 | 2872 | ||
2892 | if (t->init) { | 2873 | if (t->init) { |
2893 | ret = tracer_init(t, tr); | 2874 | ret = tracer_init(t, tr); |
@@ -3024,6 +3005,7 @@ static int tracing_open_pipe(struct inode *inode, struct file *filp) | |||
3024 | if (iter->trace->pipe_open) | 3005 | if (iter->trace->pipe_open) |
3025 | iter->trace->pipe_open(iter); | 3006 | iter->trace->pipe_open(iter); |
3026 | 3007 | ||
3008 | nonseekable_open(inode, filp); | ||
3027 | out: | 3009 | out: |
3028 | mutex_unlock(&trace_types_lock); | 3010 | mutex_unlock(&trace_types_lock); |
3029 | return ret; | 3011 | return ret; |
@@ -3469,7 +3451,6 @@ tracing_entries_write(struct file *filp, const char __user *ubuf, | |||
3469 | } | 3451 | } |
3470 | 3452 | ||
3471 | tracing_start(); | 3453 | tracing_start(); |
3472 | max_tr.entries = global_trace.entries; | ||
3473 | mutex_unlock(&trace_types_lock); | 3454 | mutex_unlock(&trace_types_lock); |
3474 | 3455 | ||
3475 | return cnt; | 3456 | return cnt; |
@@ -3582,18 +3563,21 @@ static const struct file_operations tracing_max_lat_fops = { | |||
3582 | .open = tracing_open_generic, | 3563 | .open = tracing_open_generic, |
3583 | .read = tracing_max_lat_read, | 3564 | .read = tracing_max_lat_read, |
3584 | .write = tracing_max_lat_write, | 3565 | .write = tracing_max_lat_write, |
3566 | .llseek = generic_file_llseek, | ||
3585 | }; | 3567 | }; |
3586 | 3568 | ||
3587 | static const struct file_operations tracing_ctrl_fops = { | 3569 | static const struct file_operations tracing_ctrl_fops = { |
3588 | .open = tracing_open_generic, | 3570 | .open = tracing_open_generic, |
3589 | .read = tracing_ctrl_read, | 3571 | .read = tracing_ctrl_read, |
3590 | .write = tracing_ctrl_write, | 3572 | .write = tracing_ctrl_write, |
3573 | .llseek = generic_file_llseek, | ||
3591 | }; | 3574 | }; |
3592 | 3575 | ||
3593 | static const struct file_operations set_tracer_fops = { | 3576 | static const struct file_operations set_tracer_fops = { |
3594 | .open = tracing_open_generic, | 3577 | .open = tracing_open_generic, |
3595 | .read = tracing_set_trace_read, | 3578 | .read = tracing_set_trace_read, |
3596 | .write = tracing_set_trace_write, | 3579 | .write = tracing_set_trace_write, |
3580 | .llseek = generic_file_llseek, | ||
3597 | }; | 3581 | }; |
3598 | 3582 | ||
3599 | static const struct file_operations tracing_pipe_fops = { | 3583 | static const struct file_operations tracing_pipe_fops = { |
@@ -3602,17 +3586,20 @@ static const struct file_operations tracing_pipe_fops = { | |||
3602 | .read = tracing_read_pipe, | 3586 | .read = tracing_read_pipe, |
3603 | .splice_read = tracing_splice_read_pipe, | 3587 | .splice_read = tracing_splice_read_pipe, |
3604 | .release = tracing_release_pipe, | 3588 | .release = tracing_release_pipe, |
3589 | .llseek = no_llseek, | ||
3605 | }; | 3590 | }; |
3606 | 3591 | ||
3607 | static const struct file_operations tracing_entries_fops = { | 3592 | static const struct file_operations tracing_entries_fops = { |
3608 | .open = tracing_open_generic, | 3593 | .open = tracing_open_generic, |
3609 | .read = tracing_entries_read, | 3594 | .read = tracing_entries_read, |
3610 | .write = tracing_entries_write, | 3595 | .write = tracing_entries_write, |
3596 | .llseek = generic_file_llseek, | ||
3611 | }; | 3597 | }; |
3612 | 3598 | ||
3613 | static const struct file_operations tracing_mark_fops = { | 3599 | static const struct file_operations tracing_mark_fops = { |
3614 | .open = tracing_open_generic, | 3600 | .open = tracing_open_generic, |
3615 | .write = tracing_mark_write, | 3601 | .write = tracing_mark_write, |
3602 | .llseek = generic_file_llseek, | ||
3616 | }; | 3603 | }; |
3617 | 3604 | ||
3618 | static const struct file_operations trace_clock_fops = { | 3605 | static const struct file_operations trace_clock_fops = { |
@@ -3918,6 +3905,7 @@ tracing_stats_read(struct file *filp, char __user *ubuf, | |||
3918 | static const struct file_operations tracing_stats_fops = { | 3905 | static const struct file_operations tracing_stats_fops = { |
3919 | .open = tracing_open_generic, | 3906 | .open = tracing_open_generic, |
3920 | .read = tracing_stats_read, | 3907 | .read = tracing_stats_read, |
3908 | .llseek = generic_file_llseek, | ||
3921 | }; | 3909 | }; |
3922 | 3910 | ||
3923 | #ifdef CONFIG_DYNAMIC_FTRACE | 3911 | #ifdef CONFIG_DYNAMIC_FTRACE |
@@ -3954,6 +3942,7 @@ tracing_read_dyn_info(struct file *filp, char __user *ubuf, | |||
3954 | static const struct file_operations tracing_dyn_info_fops = { | 3942 | static const struct file_operations tracing_dyn_info_fops = { |
3955 | .open = tracing_open_generic, | 3943 | .open = tracing_open_generic, |
3956 | .read = tracing_read_dyn_info, | 3944 | .read = tracing_read_dyn_info, |
3945 | .llseek = generic_file_llseek, | ||
3957 | }; | 3946 | }; |
3958 | #endif | 3947 | #endif |
3959 | 3948 | ||
@@ -4107,6 +4096,7 @@ static const struct file_operations trace_options_fops = { | |||
4107 | .open = tracing_open_generic, | 4096 | .open = tracing_open_generic, |
4108 | .read = trace_options_read, | 4097 | .read = trace_options_read, |
4109 | .write = trace_options_write, | 4098 | .write = trace_options_write, |
4099 | .llseek = generic_file_llseek, | ||
4110 | }; | 4100 | }; |
4111 | 4101 | ||
4112 | static ssize_t | 4102 | static ssize_t |
@@ -4158,6 +4148,7 @@ static const struct file_operations trace_options_core_fops = { | |||
4158 | .open = tracing_open_generic, | 4148 | .open = tracing_open_generic, |
4159 | .read = trace_options_core_read, | 4149 | .read = trace_options_core_read, |
4160 | .write = trace_options_core_write, | 4150 | .write = trace_options_core_write, |
4151 | .llseek = generic_file_llseek, | ||
4161 | }; | 4152 | }; |
4162 | 4153 | ||
4163 | struct dentry *trace_create_file(const char *name, | 4154 | struct dentry *trace_create_file(const char *name, |
@@ -4347,9 +4338,6 @@ static __init int tracer_init_debugfs(void) | |||
4347 | trace_create_file("dyn_ftrace_total_info", 0444, d_tracer, | 4338 | trace_create_file("dyn_ftrace_total_info", 0444, d_tracer, |
4348 | &ftrace_update_tot_cnt, &tracing_dyn_info_fops); | 4339 | &ftrace_update_tot_cnt, &tracing_dyn_info_fops); |
4349 | #endif | 4340 | #endif |
4350 | #ifdef CONFIG_SYSPROF_TRACER | ||
4351 | init_tracer_sysprof_debugfs(d_tracer); | ||
4352 | #endif | ||
4353 | 4341 | ||
4354 | create_trace_options_dir(); | 4342 | create_trace_options_dir(); |
4355 | 4343 | ||
@@ -4576,16 +4564,14 @@ __init static int tracer_alloc_buffers(void) | |||
4576 | 4564 | ||
4577 | 4565 | ||
4578 | #ifdef CONFIG_TRACER_MAX_TRACE | 4566 | #ifdef CONFIG_TRACER_MAX_TRACE |
4579 | max_tr.buffer = ring_buffer_alloc(ring_buf_size, | 4567 | max_tr.buffer = ring_buffer_alloc(1, TRACE_BUFFER_FLAGS); |
4580 | TRACE_BUFFER_FLAGS); | ||
4581 | if (!max_tr.buffer) { | 4568 | if (!max_tr.buffer) { |
4582 | printk(KERN_ERR "tracer: failed to allocate max ring buffer!\n"); | 4569 | printk(KERN_ERR "tracer: failed to allocate max ring buffer!\n"); |
4583 | WARN_ON(1); | 4570 | WARN_ON(1); |
4584 | ring_buffer_free(global_trace.buffer); | 4571 | ring_buffer_free(global_trace.buffer); |
4585 | goto out_free_cpumask; | 4572 | goto out_free_cpumask; |
4586 | } | 4573 | } |
4587 | max_tr.entries = ring_buffer_size(max_tr.buffer); | 4574 | max_tr.entries = 1; |
4588 | WARN_ON(max_tr.entries != global_trace.entries); | ||
4589 | #endif | 4575 | #endif |
4590 | 4576 | ||
4591 | /* Allocate the first page for all buffers */ | 4577 | /* Allocate the first page for all buffers */ |
@@ -4598,9 +4584,6 @@ __init static int tracer_alloc_buffers(void) | |||
4598 | 4584 | ||
4599 | register_tracer(&nop_trace); | 4585 | register_tracer(&nop_trace); |
4600 | current_trace = &nop_trace; | 4586 | current_trace = &nop_trace; |
4601 | #ifdef CONFIG_BOOT_TRACER | ||
4602 | register_tracer(&boot_tracer); | ||
4603 | #endif | ||
4604 | /* All seems OK, enable tracing */ | 4587 | /* All seems OK, enable tracing */ |
4605 | tracing_disabled = 0; | 4588 | tracing_disabled = 0; |
4606 | 4589 | ||
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
index 0605fc00c176..d39b3c5454a5 100644
--- a/kernel/trace/trace.h
+++ b/kernel/trace/trace.h
@@ -9,10 +9,7 @@ | |||
9 | #include <linux/mmiotrace.h> | 9 | #include <linux/mmiotrace.h> |
10 | #include <linux/tracepoint.h> | 10 | #include <linux/tracepoint.h> |
11 | #include <linux/ftrace.h> | 11 | #include <linux/ftrace.h> |
12 | #include <trace/boot.h> | ||
13 | #include <linux/kmemtrace.h> | ||
14 | #include <linux/hw_breakpoint.h> | 12 | #include <linux/hw_breakpoint.h> |
15 | |||
16 | #include <linux/trace_seq.h> | 13 | #include <linux/trace_seq.h> |
17 | #include <linux/ftrace_event.h> | 14 | #include <linux/ftrace_event.h> |
18 | 15 | ||
@@ -25,30 +22,17 @@ enum trace_type { | |||
25 | TRACE_STACK, | 22 | TRACE_STACK, |
26 | TRACE_PRINT, | 23 | TRACE_PRINT, |
27 | TRACE_BPRINT, | 24 | TRACE_BPRINT, |
28 | TRACE_SPECIAL, | ||
29 | TRACE_MMIO_RW, | 25 | TRACE_MMIO_RW, |
30 | TRACE_MMIO_MAP, | 26 | TRACE_MMIO_MAP, |
31 | TRACE_BRANCH, | 27 | TRACE_BRANCH, |
32 | TRACE_BOOT_CALL, | ||
33 | TRACE_BOOT_RET, | ||
34 | TRACE_GRAPH_RET, | 28 | TRACE_GRAPH_RET, |
35 | TRACE_GRAPH_ENT, | 29 | TRACE_GRAPH_ENT, |
36 | TRACE_USER_STACK, | 30 | TRACE_USER_STACK, |
37 | TRACE_KMEM_ALLOC, | ||
38 | TRACE_KMEM_FREE, | ||
39 | TRACE_BLK, | 31 | TRACE_BLK, |
40 | TRACE_KSYM, | ||
41 | 32 | ||
42 | __TRACE_LAST_TYPE, | 33 | __TRACE_LAST_TYPE, |
43 | }; | 34 | }; |
44 | 35 | ||
45 | enum kmemtrace_type_id { | ||
46 | KMEMTRACE_TYPE_KMALLOC = 0, /* kmalloc() or kfree(). */ | ||
47 | KMEMTRACE_TYPE_CACHE, /* kmem_cache_*(). */ | ||
48 | KMEMTRACE_TYPE_PAGES, /* __get_free_pages() and friends. */ | ||
49 | }; | ||
50 | |||
51 | extern struct tracer boot_tracer; | ||
52 | 36 | ||
53 | #undef __field | 37 | #undef __field |
54 | #define __field(type, item) type item; | 38 | #define __field(type, item) type item; |
@@ -204,23 +188,15 @@ extern void __ftrace_bad_type(void); | |||
204 | IF_ASSIGN(var, ent, struct userstack_entry, TRACE_USER_STACK);\ | 188 | IF_ASSIGN(var, ent, struct userstack_entry, TRACE_USER_STACK);\ |
205 | IF_ASSIGN(var, ent, struct print_entry, TRACE_PRINT); \ | 189 | IF_ASSIGN(var, ent, struct print_entry, TRACE_PRINT); \ |
206 | IF_ASSIGN(var, ent, struct bprint_entry, TRACE_BPRINT); \ | 190 | IF_ASSIGN(var, ent, struct bprint_entry, TRACE_BPRINT); \ |
207 | IF_ASSIGN(var, ent, struct special_entry, 0); \ | ||
208 | IF_ASSIGN(var, ent, struct trace_mmiotrace_rw, \ | 191 | IF_ASSIGN(var, ent, struct trace_mmiotrace_rw, \ |
209 | TRACE_MMIO_RW); \ | 192 | TRACE_MMIO_RW); \ |
210 | IF_ASSIGN(var, ent, struct trace_mmiotrace_map, \ | 193 | IF_ASSIGN(var, ent, struct trace_mmiotrace_map, \ |
211 | TRACE_MMIO_MAP); \ | 194 | TRACE_MMIO_MAP); \ |
212 | IF_ASSIGN(var, ent, struct trace_boot_call, TRACE_BOOT_CALL);\ | ||
213 | IF_ASSIGN(var, ent, struct trace_boot_ret, TRACE_BOOT_RET);\ | ||
214 | IF_ASSIGN(var, ent, struct trace_branch, TRACE_BRANCH); \ | 195 | IF_ASSIGN(var, ent, struct trace_branch, TRACE_BRANCH); \ |
215 | IF_ASSIGN(var, ent, struct ftrace_graph_ent_entry, \ | 196 | IF_ASSIGN(var, ent, struct ftrace_graph_ent_entry, \ |
216 | TRACE_GRAPH_ENT); \ | 197 | TRACE_GRAPH_ENT); \ |
217 | IF_ASSIGN(var, ent, struct ftrace_graph_ret_entry, \ | 198 | IF_ASSIGN(var, ent, struct ftrace_graph_ret_entry, \ |
218 | TRACE_GRAPH_RET); \ | 199 | TRACE_GRAPH_RET); \ |
219 | IF_ASSIGN(var, ent, struct kmemtrace_alloc_entry, \ | ||
220 | TRACE_KMEM_ALLOC); \ | ||
221 | IF_ASSIGN(var, ent, struct kmemtrace_free_entry, \ | ||
222 | TRACE_KMEM_FREE); \ | ||
223 | IF_ASSIGN(var, ent, struct ksym_trace_entry, TRACE_KSYM);\ | ||
224 | __ftrace_bad_type(); \ | 200 | __ftrace_bad_type(); \ |
225 | } while (0) | 201 | } while (0) |
226 | 202 | ||
@@ -298,6 +274,7 @@ struct tracer { | |||
298 | struct tracer *next; | 274 | struct tracer *next; |
299 | int print_max; | 275 | int print_max; |
300 | struct tracer_flags *flags; | 276 | struct tracer_flags *flags; |
277 | int use_max_tr; | ||
301 | }; | 278 | }; |
302 | 279 | ||
303 | 280 | ||
@@ -318,7 +295,6 @@ struct dentry *trace_create_file(const char *name, | |||
318 | const struct file_operations *fops); | 295 | const struct file_operations *fops); |
319 | 296 | ||
320 | struct dentry *tracing_init_dentry(void); | 297 | struct dentry *tracing_init_dentry(void); |
321 | void init_tracer_sysprof_debugfs(struct dentry *d_tracer); | ||
322 | 298 | ||
323 | struct ring_buffer_event; | 299 | struct ring_buffer_event; |
324 | 300 | ||
@@ -363,11 +339,6 @@ void tracing_sched_wakeup_trace(struct trace_array *tr, | |||
363 | struct task_struct *wakee, | 339 | struct task_struct *wakee, |
364 | struct task_struct *cur, | 340 | struct task_struct *cur, |
365 | unsigned long flags, int pc); | 341 | unsigned long flags, int pc); |
366 | void trace_special(struct trace_array *tr, | ||
367 | struct trace_array_cpu *data, | ||
368 | unsigned long arg1, | ||
369 | unsigned long arg2, | ||
370 | unsigned long arg3, int pc); | ||
371 | void trace_function(struct trace_array *tr, | 342 | void trace_function(struct trace_array *tr, |
372 | unsigned long ip, | 343 | unsigned long ip, |
373 | unsigned long parent_ip, | 344 | unsigned long parent_ip, |
@@ -398,8 +369,6 @@ extern cpumask_var_t __read_mostly tracing_buffer_mask; | |||
398 | #define for_each_tracing_cpu(cpu) \ | 369 | #define for_each_tracing_cpu(cpu) \ |
399 | for_each_cpu(cpu, tracing_buffer_mask) | 370 | for_each_cpu(cpu, tracing_buffer_mask) |
400 | 371 | ||
401 | extern int process_new_ksym_entry(char *ksymname, int op, unsigned long addr); | ||
402 | |||
403 | extern unsigned long nsecs_to_usecs(unsigned long nsecs); | 372 | extern unsigned long nsecs_to_usecs(unsigned long nsecs); |
404 | 373 | ||
405 | extern unsigned long tracing_thresh; | 374 | extern unsigned long tracing_thresh; |
@@ -469,12 +438,8 @@ extern int trace_selftest_startup_nop(struct tracer *trace, | |||
469 | struct trace_array *tr); | 438 | struct trace_array *tr); |
470 | extern int trace_selftest_startup_sched_switch(struct tracer *trace, | 439 | extern int trace_selftest_startup_sched_switch(struct tracer *trace, |
471 | struct trace_array *tr); | 440 | struct trace_array *tr); |
472 | extern int trace_selftest_startup_sysprof(struct tracer *trace, | ||
473 | struct trace_array *tr); | ||
474 | extern int trace_selftest_startup_branch(struct tracer *trace, | 441 | extern int trace_selftest_startup_branch(struct tracer *trace, |
475 | struct trace_array *tr); | 442 | struct trace_array *tr); |
476 | extern int trace_selftest_startup_ksym(struct tracer *trace, | ||
477 | struct trace_array *tr); | ||
478 | #endif /* CONFIG_FTRACE_STARTUP_TEST */ | 443 | #endif /* CONFIG_FTRACE_STARTUP_TEST */ |
479 | 444 | ||
480 | extern void *head_page(struct trace_array_cpu *data); | 445 | extern void *head_page(struct trace_array_cpu *data); |
@@ -636,6 +601,7 @@ enum trace_iterator_flags { | |||
636 | TRACE_ITER_LATENCY_FMT = 0x20000, | 601 | TRACE_ITER_LATENCY_FMT = 0x20000, |
637 | TRACE_ITER_SLEEP_TIME = 0x40000, | 602 | TRACE_ITER_SLEEP_TIME = 0x40000, |
638 | TRACE_ITER_GRAPH_TIME = 0x80000, | 603 | TRACE_ITER_GRAPH_TIME = 0x80000, |
604 | TRACE_ITER_RECORD_CMD = 0x100000, | ||
639 | }; | 605 | }; |
640 | 606 | ||
641 | /* | 607 | /* |
@@ -647,54 +613,6 @@ enum trace_iterator_flags { | |||
647 | 613 | ||
648 | extern struct tracer nop_trace; | 614 | extern struct tracer nop_trace; |
649 | 615 | ||
650 | /** | ||
651 | * ftrace_preempt_disable - disable preemption scheduler safe | ||
652 | * | ||
653 | * When tracing can happen inside the scheduler, there exists | ||
654 | * cases that the tracing might happen before the need_resched | ||
655 | * flag is checked. If this happens and the tracer calls | ||
656 | * preempt_enable (after a disable), a schedule might take place | ||
657 | * causing an infinite recursion. | ||
658 | * | ||
659 | * To prevent this, we read the need_resched flag before | ||
660 | * disabling preemption. When we want to enable preemption we | ||
661 | * check the flag, if it is set, then we call preempt_enable_no_resched. | ||
662 | * Otherwise, we call preempt_enable. | ||
663 | * | ||
664 | * The rationale for doing the above is that if need_resched is set | ||
665 | * and we have yet to reschedule, we are either in an atomic location | ||
666 | * (where we do not need to check for scheduling) or we are inside | ||
667 | * the scheduler and do not want to resched. | ||
668 | */ | ||
669 | static inline int ftrace_preempt_disable(void) | ||
670 | { | ||
671 | int resched; | ||
672 | |||
673 | resched = need_resched(); | ||
674 | preempt_disable_notrace(); | ||
675 | |||
676 | return resched; | ||
677 | } | ||
678 | |||
679 | /** | ||
680 | * ftrace_preempt_enable - enable preemption scheduler safe | ||
681 | * @resched: the return value from ftrace_preempt_disable | ||
682 | * | ||
683 | * This is a scheduler safe way to enable preemption and not miss | ||
684 | * any preemption checks. The disabled saved the state of preemption. | ||
685 | * If resched is set, then we are either inside an atomic or | ||
686 | * are inside the scheduler (we would have already scheduled | ||
687 | * otherwise). In this case, we do not want to call normal | ||
688 | * preempt_enable, but preempt_enable_no_resched instead. | ||
689 | */ | ||
690 | static inline void ftrace_preempt_enable(int resched) | ||
691 | { | ||
692 | if (resched) | ||
693 | preempt_enable_no_resched_notrace(); | ||
694 | else | ||
695 | preempt_enable_notrace(); | ||
696 | } | ||
697 | |||
698 | #ifdef CONFIG_BRANCH_TRACER | 616 | #ifdef CONFIG_BRANCH_TRACER |
699 | extern int enable_branch_tracing(struct trace_array *tr); | 617 | extern int enable_branch_tracing(struct trace_array *tr); |
700 | extern void disable_branch_tracing(void); | 618 | extern void disable_branch_tracing(void); |
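Note: the helper pair removed above existed so a tracing hook could disable preemption without forcing a reschedule from inside the scheduler; the rest of this series simply uses the plain notrace primitives instead (see the trace_clock.c and trace_functions.c hunks below). A minimal before/after sketch in a hypothetical callback — the function name is illustrative, only the two primitives come from the patch:

	static void my_trace_hook(unsigned long ip, unsigned long parent_ip)
	{
		/* Old pattern, now removed:
		 *   int resched = ftrace_preempt_disable();
		 *   ... record the event ...
		 *   ftrace_preempt_enable(resched);
		 */

		/* New pattern used throughout this patch: */
		preempt_disable_notrace();
		/* ... record the event ... */
		preempt_enable_notrace();
	}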
@@ -785,6 +703,8 @@ struct filter_pred { | |||
785 | int pop_n; | 703 | int pop_n; |
786 | }; | 704 | }; |
787 | 705 | ||
706 | extern struct list_head ftrace_common_fields; | ||
707 | |||
788 | extern enum regex_type | 708 | extern enum regex_type |
789 | filter_parse_regex(char *buff, int len, char **search, int *not); | 709 | filter_parse_regex(char *buff, int len, char **search, int *not); |
790 | extern void print_event_filter(struct ftrace_event_call *call, | 710 | extern void print_event_filter(struct ftrace_event_call *call, |
@@ -814,6 +734,8 @@ filter_check_discard(struct ftrace_event_call *call, void *rec, | |||
814 | return 0; | 734 | return 0; |
815 | } | 735 | } |
816 | 736 | ||
737 | extern void trace_event_enable_cmd_record(bool enable); | ||
738 | |||
817 | extern struct mutex event_mutex; | 739 | extern struct mutex event_mutex; |
818 | extern struct list_head ftrace_events; | 740 | extern struct list_head ftrace_events; |
819 | 741 | ||
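For reference, the IF_ASSIGN() chain trimmed above is what backs trace_assign_type(); a sketch of how a print handler typically uses it (the handler name is illustrative; struct trace_branch and its func field appear in the trace_entries.h hunk further down):

	static enum print_line_t my_branch_print_line(struct trace_iterator *iter)
	{
		struct trace_entry *entry = iter->ent;
		struct trace_branch *field;

		/* Picks the IF_ASSIGN() arm whose struct type matches; a type
		 * not in the list leaves a call to the deliberately undefined
		 * __ftrace_bad_type(), so misuse fails at build time. */
		trace_assign_type(field, entry);

		if (!trace_seq_printf(&iter->seq, "branch hit: %s\n", field->func))
			return TRACE_TYPE_PARTIAL_LINE;
		return TRACE_TYPE_HANDLED;
	}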
diff --git a/kernel/trace/trace_boot.c b/kernel/trace/trace_boot.c deleted file mode 100644 index c21d5f3956ad..000000000000 --- a/kernel/trace/trace_boot.c +++ /dev/null | |||
@@ -1,185 +0,0 @@ | |||
1 | /* | ||
2 | * ring buffer based initcalls tracer | ||
3 | * | ||
4 | * Copyright (C) 2008 Frederic Weisbecker <fweisbec@gmail.com> | ||
5 | * | ||
6 | */ | ||
7 | |||
8 | #include <linux/init.h> | ||
9 | #include <linux/debugfs.h> | ||
10 | #include <linux/ftrace.h> | ||
11 | #include <linux/kallsyms.h> | ||
12 | #include <linux/time.h> | ||
13 | |||
14 | #include "trace.h" | ||
15 | #include "trace_output.h" | ||
16 | |||
17 | static struct trace_array *boot_trace; | ||
18 | static bool pre_initcalls_finished; | ||
19 | |||
20 | /* Tells the boot tracer that the pre_smp_initcalls are finished. | ||
21 | * So we are ready. | ||
22 | * It doesn't enable sched events tracing however. | ||
23 | * You have to call enable_boot_trace to do so. | ||
24 | */ | ||
25 | void start_boot_trace(void) | ||
26 | { | ||
27 | pre_initcalls_finished = true; | ||
28 | } | ||
29 | |||
30 | void enable_boot_trace(void) | ||
31 | { | ||
32 | if (boot_trace && pre_initcalls_finished) | ||
33 | tracing_start_sched_switch_record(); | ||
34 | } | ||
35 | |||
36 | void disable_boot_trace(void) | ||
37 | { | ||
38 | if (boot_trace && pre_initcalls_finished) | ||
39 | tracing_stop_sched_switch_record(); | ||
40 | } | ||
41 | |||
42 | static int boot_trace_init(struct trace_array *tr) | ||
43 | { | ||
44 | boot_trace = tr; | ||
45 | |||
46 | if (!tr) | ||
47 | return 0; | ||
48 | |||
49 | tracing_reset_online_cpus(tr); | ||
50 | |||
51 | tracing_sched_switch_assign_trace(tr); | ||
52 | return 0; | ||
53 | } | ||
54 | |||
55 | static enum print_line_t | ||
56 | initcall_call_print_line(struct trace_iterator *iter) | ||
57 | { | ||
58 | struct trace_entry *entry = iter->ent; | ||
59 | struct trace_seq *s = &iter->seq; | ||
60 | struct trace_boot_call *field; | ||
61 | struct boot_trace_call *call; | ||
62 | u64 ts; | ||
63 | unsigned long nsec_rem; | ||
64 | int ret; | ||
65 | |||
66 | trace_assign_type(field, entry); | ||
67 | call = &field->boot_call; | ||
68 | ts = iter->ts; | ||
69 | nsec_rem = do_div(ts, NSEC_PER_SEC); | ||
70 | |||
71 | ret = trace_seq_printf(s, "[%5ld.%09ld] calling %s @ %i\n", | ||
72 | (unsigned long)ts, nsec_rem, call->func, call->caller); | ||
73 | |||
74 | if (!ret) | ||
75 | return TRACE_TYPE_PARTIAL_LINE; | ||
76 | else | ||
77 | return TRACE_TYPE_HANDLED; | ||
78 | } | ||
79 | |||
80 | static enum print_line_t | ||
81 | initcall_ret_print_line(struct trace_iterator *iter) | ||
82 | { | ||
83 | struct trace_entry *entry = iter->ent; | ||
84 | struct trace_seq *s = &iter->seq; | ||
85 | struct trace_boot_ret *field; | ||
86 | struct boot_trace_ret *init_ret; | ||
87 | u64 ts; | ||
88 | unsigned long nsec_rem; | ||
89 | int ret; | ||
90 | |||
91 | trace_assign_type(field, entry); | ||
92 | init_ret = &field->boot_ret; | ||
93 | ts = iter->ts; | ||
94 | nsec_rem = do_div(ts, NSEC_PER_SEC); | ||
95 | |||
96 | ret = trace_seq_printf(s, "[%5ld.%09ld] initcall %s " | ||
97 | "returned %d after %llu msecs\n", | ||
98 | (unsigned long) ts, | ||
99 | nsec_rem, | ||
100 | init_ret->func, init_ret->result, init_ret->duration); | ||
101 | |||
102 | if (!ret) | ||
103 | return TRACE_TYPE_PARTIAL_LINE; | ||
104 | else | ||
105 | return TRACE_TYPE_HANDLED; | ||
106 | } | ||
107 | |||
108 | static enum print_line_t initcall_print_line(struct trace_iterator *iter) | ||
109 | { | ||
110 | struct trace_entry *entry = iter->ent; | ||
111 | |||
112 | switch (entry->type) { | ||
113 | case TRACE_BOOT_CALL: | ||
114 | return initcall_call_print_line(iter); | ||
115 | case TRACE_BOOT_RET: | ||
116 | return initcall_ret_print_line(iter); | ||
117 | default: | ||
118 | return TRACE_TYPE_UNHANDLED; | ||
119 | } | ||
120 | } | ||
121 | |||
122 | struct tracer boot_tracer __read_mostly = | ||
123 | { | ||
124 | .name = "initcall", | ||
125 | .init = boot_trace_init, | ||
126 | .reset = tracing_reset_online_cpus, | ||
127 | .print_line = initcall_print_line, | ||
128 | }; | ||
129 | |||
130 | void trace_boot_call(struct boot_trace_call *bt, initcall_t fn) | ||
131 | { | ||
132 | struct ftrace_event_call *call = &event_boot_call; | ||
133 | struct ring_buffer_event *event; | ||
134 | struct ring_buffer *buffer; | ||
135 | struct trace_boot_call *entry; | ||
136 | struct trace_array *tr = boot_trace; | ||
137 | |||
138 | if (!tr || !pre_initcalls_finished) | ||
139 | return; | ||
140 | |||
141 | /* Get its name now since this function could | ||
142 | * disappear because it is in the .init section. | ||
143 | */ | ||
144 | sprint_symbol(bt->func, (unsigned long)fn); | ||
145 | preempt_disable(); | ||
146 | |||
147 | buffer = tr->buffer; | ||
148 | event = trace_buffer_lock_reserve(buffer, TRACE_BOOT_CALL, | ||
149 | sizeof(*entry), 0, 0); | ||
150 | if (!event) | ||
151 | goto out; | ||
152 | entry = ring_buffer_event_data(event); | ||
153 | entry->boot_call = *bt; | ||
154 | if (!filter_check_discard(call, entry, buffer, event)) | ||
155 | trace_buffer_unlock_commit(buffer, event, 0, 0); | ||
156 | out: | ||
157 | preempt_enable(); | ||
158 | } | ||
159 | |||
160 | void trace_boot_ret(struct boot_trace_ret *bt, initcall_t fn) | ||
161 | { | ||
162 | struct ftrace_event_call *call = &event_boot_ret; | ||
163 | struct ring_buffer_event *event; | ||
164 | struct ring_buffer *buffer; | ||
165 | struct trace_boot_ret *entry; | ||
166 | struct trace_array *tr = boot_trace; | ||
167 | |||
168 | if (!tr || !pre_initcalls_finished) | ||
169 | return; | ||
170 | |||
171 | sprint_symbol(bt->func, (unsigned long)fn); | ||
172 | preempt_disable(); | ||
173 | |||
174 | buffer = tr->buffer; | ||
175 | event = trace_buffer_lock_reserve(buffer, TRACE_BOOT_RET, | ||
176 | sizeof(*entry), 0, 0); | ||
177 | if (!event) | ||
178 | goto out; | ||
179 | entry = ring_buffer_event_data(event); | ||
180 | entry->boot_ret = *bt; | ||
181 | if (!filter_check_discard(call, entry, buffer, event)) | ||
182 | trace_buffer_unlock_commit(buffer, event, 0, 0); | ||
183 | out: | ||
184 | preempt_enable(); | ||
185 | } | ||
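Aside from the boot-specific details, the two functions deleted above follow the reserve/fill/commit sequence shared by every tracer that writes the ring buffer. A condensed sketch of that sequence with a hypothetical event (struct my_entry, TRACE_MY_EVENT and event_my_event are illustrative; the calls are the ones used in the code above):

	static void my_trace_event(struct trace_array *tr, unsigned long value,
				   unsigned long flags, int pc)
	{
		struct ftrace_event_call *call = &event_my_event;	/* illustrative */
		struct ring_buffer *buffer = tr->buffer;
		struct ring_buffer_event *event;
		struct my_entry *entry;					/* illustrative */

		/* Reserve space in the ring buffer for one entry. */
		event = trace_buffer_lock_reserve(buffer, TRACE_MY_EVENT,
						  sizeof(*entry), flags, pc);
		if (!event)
			return;
		entry = ring_buffer_event_data(event);
		entry->value = value;

		/* Commit, unless an attached filter discards the record. */
		if (!filter_check_discard(call, entry, buffer, event))
			trace_buffer_unlock_commit(buffer, event, flags, pc);
	}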
diff --git a/kernel/trace/trace_clock.c b/kernel/trace/trace_clock.c index 9d589d8dcd1a..52fda6c04ac3 100644 --- a/kernel/trace/trace_clock.c +++ b/kernel/trace/trace_clock.c | |||
@@ -32,16 +32,15 @@ | |||
32 | u64 notrace trace_clock_local(void) | 32 | u64 notrace trace_clock_local(void) |
33 | { | 33 | { |
34 | u64 clock; | 34 | u64 clock; |
35 | int resched; | ||
36 | 35 | ||
37 | /* | 36 | /* |
38 | * sched_clock() is an architecture implemented, fast, scalable, | 37 | * sched_clock() is an architecture implemented, fast, scalable, |
39 | * lockless clock. It is not guaranteed to be coherent across | 38 | * lockless clock. It is not guaranteed to be coherent across |
40 | * CPUs, nor across CPU idle events. | 39 | * CPUs, nor across CPU idle events. |
41 | */ | 40 | */ |
42 | resched = ftrace_preempt_disable(); | 41 | preempt_disable_notrace(); |
43 | clock = sched_clock(); | 42 | clock = sched_clock(); |
44 | ftrace_preempt_enable(resched); | 43 | preempt_enable_notrace(); |
45 | 44 | ||
46 | return clock; | 45 | return clock; |
47 | } | 46 | } |
diff --git a/kernel/trace/trace_entries.h b/kernel/trace/trace_entries.h index dc008c1240da..e3dfecaf13e6 100644 --- a/kernel/trace/trace_entries.h +++ b/kernel/trace/trace_entries.h | |||
@@ -151,23 +151,6 @@ FTRACE_ENTRY_DUP(wakeup, ctx_switch_entry, | |||
151 | ); | 151 | ); |
152 | 152 | ||
153 | /* | 153 | /* |
154 | * Special (free-form) trace entry: | ||
155 | */ | ||
156 | FTRACE_ENTRY(special, special_entry, | ||
157 | |||
158 | TRACE_SPECIAL, | ||
159 | |||
160 | F_STRUCT( | ||
161 | __field( unsigned long, arg1 ) | ||
162 | __field( unsigned long, arg2 ) | ||
163 | __field( unsigned long, arg3 ) | ||
164 | ), | ||
165 | |||
166 | F_printk("(%08lx) (%08lx) (%08lx)", | ||
167 | __entry->arg1, __entry->arg2, __entry->arg3) | ||
168 | ); | ||
169 | |||
170 | /* | ||
171 | * Stack-trace entry: | 154 | * Stack-trace entry: |
172 | */ | 155 | */ |
173 | 156 | ||
@@ -271,33 +254,6 @@ FTRACE_ENTRY(mmiotrace_map, trace_mmiotrace_map, | |||
271 | __entry->map_id, __entry->opcode) | 254 | __entry->map_id, __entry->opcode) |
272 | ); | 255 | ); |
273 | 256 | ||
274 | FTRACE_ENTRY(boot_call, trace_boot_call, | ||
275 | |||
276 | TRACE_BOOT_CALL, | ||
277 | |||
278 | F_STRUCT( | ||
279 | __field_struct( struct boot_trace_call, boot_call ) | ||
280 | __field_desc( pid_t, boot_call, caller ) | ||
281 | __array_desc( char, boot_call, func, KSYM_SYMBOL_LEN) | ||
282 | ), | ||
283 | |||
284 | F_printk("%d %s", __entry->caller, __entry->func) | ||
285 | ); | ||
286 | |||
287 | FTRACE_ENTRY(boot_ret, trace_boot_ret, | ||
288 | |||
289 | TRACE_BOOT_RET, | ||
290 | |||
291 | F_STRUCT( | ||
292 | __field_struct( struct boot_trace_ret, boot_ret ) | ||
293 | __array_desc( char, boot_ret, func, KSYM_SYMBOL_LEN) | ||
294 | __field_desc( int, boot_ret, result ) | ||
295 | __field_desc( unsigned long, boot_ret, duration ) | ||
296 | ), | ||
297 | |||
298 | F_printk("%s %d %lx", | ||
299 | __entry->func, __entry->result, __entry->duration) | ||
300 | ); | ||
301 | 257 | ||
302 | #define TRACE_FUNC_SIZE 30 | 258 | #define TRACE_FUNC_SIZE 30 |
303 | #define TRACE_FILE_SIZE 20 | 259 | #define TRACE_FILE_SIZE 20 |
@@ -318,53 +274,3 @@ FTRACE_ENTRY(branch, trace_branch, | |||
318 | __entry->func, __entry->file, __entry->correct) | 274 | __entry->func, __entry->file, __entry->correct) |
319 | ); | 275 | ); |
320 | 276 | ||
321 | FTRACE_ENTRY(kmem_alloc, kmemtrace_alloc_entry, | ||
322 | |||
323 | TRACE_KMEM_ALLOC, | ||
324 | |||
325 | F_STRUCT( | ||
326 | __field( enum kmemtrace_type_id, type_id ) | ||
327 | __field( unsigned long, call_site ) | ||
328 | __field( const void *, ptr ) | ||
329 | __field( size_t, bytes_req ) | ||
330 | __field( size_t, bytes_alloc ) | ||
331 | __field( gfp_t, gfp_flags ) | ||
332 | __field( int, node ) | ||
333 | ), | ||
334 | |||
335 | F_printk("type:%u call_site:%lx ptr:%p req:%zi alloc:%zi" | ||
336 | " flags:%x node:%d", | ||
337 | __entry->type_id, __entry->call_site, __entry->ptr, | ||
338 | __entry->bytes_req, __entry->bytes_alloc, | ||
339 | __entry->gfp_flags, __entry->node) | ||
340 | ); | ||
341 | |||
342 | FTRACE_ENTRY(kmem_free, kmemtrace_free_entry, | ||
343 | |||
344 | TRACE_KMEM_FREE, | ||
345 | |||
346 | F_STRUCT( | ||
347 | __field( enum kmemtrace_type_id, type_id ) | ||
348 | __field( unsigned long, call_site ) | ||
349 | __field( const void *, ptr ) | ||
350 | ), | ||
351 | |||
352 | F_printk("type:%u call_site:%lx ptr:%p", | ||
353 | __entry->type_id, __entry->call_site, __entry->ptr) | ||
354 | ); | ||
355 | |||
356 | FTRACE_ENTRY(ksym_trace, ksym_trace_entry, | ||
357 | |||
358 | TRACE_KSYM, | ||
359 | |||
360 | F_STRUCT( | ||
361 | __field( unsigned long, ip ) | ||
362 | __field( unsigned char, type ) | ||
363 | __array( char , cmd, TASK_COMM_LEN ) | ||
364 | __field( unsigned long, addr ) | ||
365 | ), | ||
366 | |||
367 | F_printk("ip: %pF type: %d ksym_name: %pS cmd: %s", | ||
368 | (void *)__entry->ip, (unsigned int)__entry->type, | ||
369 | (void *)__entry->addr, __entry->cmd) | ||
370 | ); | ||
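As a reminder of what these FTRACE_ENTRY() blocks generate: with trace.h redefining __field() to emit structure members, each definition becomes a plain struct headed by the common trace_entry. Roughly, for the removed kmem_free entry above (a sketch of the expansion, not the exact preprocessor output):

	struct kmemtrace_free_entry {
		struct trace_entry	ent;	/* common header */
		enum kmemtrace_type_id	type_id;
		unsigned long		call_site;
		const void		*ptr;
	};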
diff --git a/kernel/trace/trace_event_perf.c b/kernel/trace/trace_event_perf.c index 8a2b73f7c068..000e6e85b445 100644 --- a/kernel/trace/trace_event_perf.c +++ b/kernel/trace/trace_event_perf.c | |||
@@ -9,8 +9,6 @@ | |||
9 | #include <linux/kprobes.h> | 9 | #include <linux/kprobes.h> |
10 | #include "trace.h" | 10 | #include "trace.h" |
11 | 11 | ||
12 | EXPORT_SYMBOL_GPL(perf_arch_fetch_caller_regs); | ||
13 | |||
14 | static char *perf_trace_buf[4]; | 12 | static char *perf_trace_buf[4]; |
15 | 13 | ||
16 | /* | 14 | /* |
@@ -56,13 +54,7 @@ static int perf_trace_event_init(struct ftrace_event_call *tp_event, | |||
56 | } | 54 | } |
57 | } | 55 | } |
58 | 56 | ||
59 | if (tp_event->class->reg) | 57 | ret = tp_event->class->reg(tp_event, TRACE_REG_PERF_REGISTER); |
60 | ret = tp_event->class->reg(tp_event, TRACE_REG_PERF_REGISTER); | ||
61 | else | ||
62 | ret = tracepoint_probe_register(tp_event->name, | ||
63 | tp_event->class->perf_probe, | ||
64 | tp_event); | ||
65 | |||
66 | if (ret) | 58 | if (ret) |
67 | goto fail; | 59 | goto fail; |
68 | 60 | ||
@@ -96,9 +88,7 @@ int perf_trace_init(struct perf_event *p_event) | |||
96 | mutex_lock(&event_mutex); | 88 | mutex_lock(&event_mutex); |
97 | list_for_each_entry(tp_event, &ftrace_events, list) { | 89 | list_for_each_entry(tp_event, &ftrace_events, list) { |
98 | if (tp_event->event.type == event_id && | 90 | if (tp_event->event.type == event_id && |
99 | tp_event->class && | 91 | tp_event->class && tp_event->class->reg && |
100 | (tp_event->class->perf_probe || | ||
101 | tp_event->class->reg) && | ||
102 | try_module_get(tp_event->mod)) { | 92 | try_module_get(tp_event->mod)) { |
103 | ret = perf_trace_event_init(tp_event, p_event); | 93 | ret = perf_trace_event_init(tp_event, p_event); |
104 | break; | 94 | break; |
@@ -138,18 +128,13 @@ void perf_trace_destroy(struct perf_event *p_event) | |||
138 | if (--tp_event->perf_refcount > 0) | 128 | if (--tp_event->perf_refcount > 0) |
139 | goto out; | 129 | goto out; |
140 | 130 | ||
141 | if (tp_event->class->reg) | 131 | tp_event->class->reg(tp_event, TRACE_REG_PERF_UNREGISTER); |
142 | tp_event->class->reg(tp_event, TRACE_REG_PERF_UNREGISTER); | ||
143 | else | ||
144 | tracepoint_probe_unregister(tp_event->name, | ||
145 | tp_event->class->perf_probe, | ||
146 | tp_event); | ||
147 | 132 | ||
148 | /* | 133 | /* |
149 | * Ensure our callback won't be called anymore. See | 134 | * Ensure our callback won't be called anymore. The buffers |
150 | * tracepoint_probe_unregister() and __DO_TRACE(). | 135 | * will be freed after that. |
151 | */ | 136 | */ |
152 | synchronize_sched(); | 137 | tracepoint_synchronize_unregister(); |
153 | 138 | ||
154 | free_percpu(tp_event->perf_events); | 139 | free_percpu(tp_event->perf_events); |
155 | tp_event->perf_events = NULL; | 140 | tp_event->perf_events = NULL; |
diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c index 53cffc0b0801..09b4fa6e4d3b 100644 --- a/kernel/trace/trace_events.c +++ b/kernel/trace/trace_events.c | |||
@@ -28,6 +28,7 @@ | |||
28 | DEFINE_MUTEX(event_mutex); | 28 | DEFINE_MUTEX(event_mutex); |
29 | 29 | ||
30 | LIST_HEAD(ftrace_events); | 30 | LIST_HEAD(ftrace_events); |
31 | LIST_HEAD(ftrace_common_fields); | ||
31 | 32 | ||
32 | struct list_head * | 33 | struct list_head * |
33 | trace_get_fields(struct ftrace_event_call *event_call) | 34 | trace_get_fields(struct ftrace_event_call *event_call) |
@@ -37,15 +38,11 @@ trace_get_fields(struct ftrace_event_call *event_call) | |||
37 | return event_call->class->get_fields(event_call); | 38 | return event_call->class->get_fields(event_call); |
38 | } | 39 | } |
39 | 40 | ||
40 | int trace_define_field(struct ftrace_event_call *call, const char *type, | 41 | static int __trace_define_field(struct list_head *head, const char *type, |
41 | const char *name, int offset, int size, int is_signed, | 42 | const char *name, int offset, int size, |
42 | int filter_type) | 43 | int is_signed, int filter_type) |
43 | { | 44 | { |
44 | struct ftrace_event_field *field; | 45 | struct ftrace_event_field *field; |
45 | struct list_head *head; | ||
46 | |||
47 | if (WARN_ON(!call->class)) | ||
48 | return 0; | ||
49 | 46 | ||
50 | field = kzalloc(sizeof(*field), GFP_KERNEL); | 47 | field = kzalloc(sizeof(*field), GFP_KERNEL); |
51 | if (!field) | 48 | if (!field) |
@@ -68,7 +65,6 @@ int trace_define_field(struct ftrace_event_call *call, const char *type, | |||
68 | field->size = size; | 65 | field->size = size; |
69 | field->is_signed = is_signed; | 66 | field->is_signed = is_signed; |
70 | 67 | ||
71 | head = trace_get_fields(call); | ||
72 | list_add(&field->link, head); | 68 | list_add(&field->link, head); |
73 | 69 | ||
74 | return 0; | 70 | return 0; |
@@ -80,17 +76,32 @@ err: | |||
80 | 76 | ||
81 | return -ENOMEM; | 77 | return -ENOMEM; |
82 | } | 78 | } |
79 | |||
80 | int trace_define_field(struct ftrace_event_call *call, const char *type, | ||
81 | const char *name, int offset, int size, int is_signed, | ||
82 | int filter_type) | ||
83 | { | ||
84 | struct list_head *head; | ||
85 | |||
86 | if (WARN_ON(!call->class)) | ||
87 | return 0; | ||
88 | |||
89 | head = trace_get_fields(call); | ||
90 | return __trace_define_field(head, type, name, offset, size, | ||
91 | is_signed, filter_type); | ||
92 | } | ||
83 | EXPORT_SYMBOL_GPL(trace_define_field); | 93 | EXPORT_SYMBOL_GPL(trace_define_field); |
84 | 94 | ||
85 | #define __common_field(type, item) \ | 95 | #define __common_field(type, item) \ |
86 | ret = trace_define_field(call, #type, "common_" #item, \ | 96 | ret = __trace_define_field(&ftrace_common_fields, #type, \ |
87 | offsetof(typeof(ent), item), \ | 97 | "common_" #item, \ |
88 | sizeof(ent.item), \ | 98 | offsetof(typeof(ent), item), \ |
89 | is_signed_type(type), FILTER_OTHER); \ | 99 | sizeof(ent.item), \ |
100 | is_signed_type(type), FILTER_OTHER); \ | ||
90 | if (ret) \ | 101 | if (ret) \ |
91 | return ret; | 102 | return ret; |
92 | 103 | ||
93 | static int trace_define_common_fields(struct ftrace_event_call *call) | 104 | static int trace_define_common_fields(void) |
94 | { | 105 | { |
95 | int ret; | 106 | int ret; |
96 | struct trace_entry ent; | 107 | struct trace_entry ent; |
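trace_define_field() keeps its exported signature; the new __trace_define_field() merely lets the same code also populate the shared ftrace_common_fields list. For reference, a sketch of how a define_fields callback registers an event-specific field (the record type and field name are illustrative):

	static int my_event_define_fields(struct ftrace_event_call *call)
	{
		struct my_event_entry field;	/* illustrative record layout */

		return trace_define_field(call, "unsigned long", "addr",
					  offsetof(typeof(field), addr),
					  sizeof(field.addr),
					  is_signed_type(unsigned long),
					  FILTER_OTHER);
	}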
@@ -130,6 +141,55 @@ int trace_event_raw_init(struct ftrace_event_call *call) | |||
130 | } | 141 | } |
131 | EXPORT_SYMBOL_GPL(trace_event_raw_init); | 142 | EXPORT_SYMBOL_GPL(trace_event_raw_init); |
132 | 143 | ||
144 | int ftrace_event_reg(struct ftrace_event_call *call, enum trace_reg type) | ||
145 | { | ||
146 | switch (type) { | ||
147 | case TRACE_REG_REGISTER: | ||
148 | return tracepoint_probe_register(call->name, | ||
149 | call->class->probe, | ||
150 | call); | ||
151 | case TRACE_REG_UNREGISTER: | ||
152 | tracepoint_probe_unregister(call->name, | ||
153 | call->class->probe, | ||
154 | call); | ||
155 | return 0; | ||
156 | |||
157 | #ifdef CONFIG_PERF_EVENTS | ||
158 | case TRACE_REG_PERF_REGISTER: | ||
159 | return tracepoint_probe_register(call->name, | ||
160 | call->class->perf_probe, | ||
161 | call); | ||
162 | case TRACE_REG_PERF_UNREGISTER: | ||
163 | tracepoint_probe_unregister(call->name, | ||
164 | call->class->perf_probe, | ||
165 | call); | ||
166 | return 0; | ||
167 | #endif | ||
168 | } | ||
169 | return 0; | ||
170 | } | ||
171 | EXPORT_SYMBOL_GPL(ftrace_event_reg); | ||
172 | |||
173 | void trace_event_enable_cmd_record(bool enable) | ||
174 | { | ||
175 | struct ftrace_event_call *call; | ||
176 | |||
177 | mutex_lock(&event_mutex); | ||
178 | list_for_each_entry(call, &ftrace_events, list) { | ||
179 | if (!(call->flags & TRACE_EVENT_FL_ENABLED)) | ||
180 | continue; | ||
181 | |||
182 | if (enable) { | ||
183 | tracing_start_cmdline_record(); | ||
184 | call->flags |= TRACE_EVENT_FL_RECORDED_CMD; | ||
185 | } else { | ||
186 | tracing_stop_cmdline_record(); | ||
187 | call->flags &= ~TRACE_EVENT_FL_RECORDED_CMD; | ||
188 | } | ||
189 | } | ||
190 | mutex_unlock(&event_mutex); | ||
191 | } | ||
192 | |||
133 | static int ftrace_event_enable_disable(struct ftrace_event_call *call, | 193 | static int ftrace_event_enable_disable(struct ftrace_event_call *call, |
134 | int enable) | 194 | int enable) |
135 | { | 195 | { |
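With ftrace_event_reg() now providing the default (un)registration switch for both the ftrace and the perf side, an event class only has to point its ->reg callback at it; events with special registration needs can keep supplying their own. A sketch of the default wiring (the class and probe names are illustrative):

	static struct ftrace_event_class my_event_class = {
		.system		= "my_subsys",		/* illustrative */
		.probe		= my_event_probe,	/* illustrative ftrace probe */
		.perf_probe	= my_event_perf_probe,	/* illustrative perf probe */
		.reg		= ftrace_event_reg,	/* default switch defined above */
	};

trace_event_enable_cmd_record() above walks the enabled events and starts or stops comm recording per event, presumably driven by the new TRACE_ITER_RECORD_CMD option; it pairs with the TRACE_ITER_RECORD_CMD check in the ftrace_event_enable_disable() hunk that follows.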
@@ -139,24 +199,20 @@ static int ftrace_event_enable_disable(struct ftrace_event_call *call, | |||
139 | case 0: | 199 | case 0: |
140 | if (call->flags & TRACE_EVENT_FL_ENABLED) { | 200 | if (call->flags & TRACE_EVENT_FL_ENABLED) { |
141 | call->flags &= ~TRACE_EVENT_FL_ENABLED; | 201 | call->flags &= ~TRACE_EVENT_FL_ENABLED; |
142 | tracing_stop_cmdline_record(); | 202 | if (call->flags & TRACE_EVENT_FL_RECORDED_CMD) { |
143 | if (call->class->reg) | 203 | tracing_stop_cmdline_record(); |
144 | call->class->reg(call, TRACE_REG_UNREGISTER); | 204 | call->flags &= ~TRACE_EVENT_FL_RECORDED_CMD; |
145 | else | 205 | } |
146 | tracepoint_probe_unregister(call->name, | 206 | call->class->reg(call, TRACE_REG_UNREGISTER); |
147 | call->class->probe, | ||
148 | call); | ||
149 | } | 207 | } |
150 | break; | 208 | break; |
151 | case 1: | 209 | case 1: |
152 | if (!(call->flags & TRACE_EVENT_FL_ENABLED)) { | 210 | if (!(call->flags & TRACE_EVENT_FL_ENABLED)) { |
153 | tracing_start_cmdline_record(); | 211 | if (trace_flags & TRACE_ITER_RECORD_CMD) { |
154 | if (call->class->reg) | 212 | tracing_start_cmdline_record(); |
155 | ret = call->class->reg(call, TRACE_REG_REGISTER); | 213 | call->flags |= TRACE_EVENT_FL_RECORDED_CMD; |
156 | else | 214 | } |
157 | ret = tracepoint_probe_register(call->name, | 215 | ret = call->class->reg(call, TRACE_REG_REGISTER); |
158 | call->class->probe, | ||
159 | call); | ||
160 | if (ret) { | 216 | if (ret) { |
161 | tracing_stop_cmdline_record(); | 217 | tracing_stop_cmdline_record(); |
162 | pr_info("event trace: Could not enable event " | 218 | pr_info("event trace: Could not enable event " |
@@ -194,8 +250,7 @@ static int __ftrace_set_clr_event(const char *match, const char *sub, | |||
194 | mutex_lock(&event_mutex); | 250 | mutex_lock(&event_mutex); |
195 | list_for_each_entry(call, &ftrace_events, list) { | 251 | list_for_each_entry(call, &ftrace_events, list) { |
196 | 252 | ||
197 | if (!call->name || !call->class || | 253 | if (!call->name || !call->class || !call->class->reg) |
198 | (!call->class->probe && !call->class->reg)) | ||
199 | continue; | 254 | continue; |
200 | 255 | ||
201 | if (match && | 256 | if (match && |
@@ -321,7 +376,7 @@ t_next(struct seq_file *m, void *v, loff_t *pos) | |||
321 | * The ftrace subsystem is for showing formats only. | 376 | * The ftrace subsystem is for showing formats only. |
322 | * They can not be enabled or disabled via the event files. | 377 | * They can not be enabled or disabled via the event files. |
323 | */ | 378 | */ |
324 | if (call->class && (call->class->probe || call->class->reg)) | 379 | if (call->class && call->class->reg) |
325 | return call; | 380 | return call; |
326 | } | 381 | } |
327 | 382 | ||
@@ -474,8 +529,7 @@ system_enable_read(struct file *filp, char __user *ubuf, size_t cnt, | |||
474 | 529 | ||
475 | mutex_lock(&event_mutex); | 530 | mutex_lock(&event_mutex); |
476 | list_for_each_entry(call, &ftrace_events, list) { | 531 | list_for_each_entry(call, &ftrace_events, list) { |
477 | if (!call->name || !call->class || | 532 | if (!call->name || !call->class || !call->class->reg) |
478 | (!call->class->probe && !call->class->reg)) | ||
479 | continue; | 533 | continue; |
480 | 534 | ||
481 | if (system && strcmp(call->class->system, system) != 0) | 535 | if (system && strcmp(call->class->system, system) != 0) |
@@ -544,32 +598,10 @@ out: | |||
544 | return ret; | 598 | return ret; |
545 | } | 599 | } |
546 | 600 | ||
547 | static ssize_t | 601 | static void print_event_fields(struct trace_seq *s, struct list_head *head) |
548 | event_format_read(struct file *filp, char __user *ubuf, size_t cnt, | ||
549 | loff_t *ppos) | ||
550 | { | 602 | { |
551 | struct ftrace_event_call *call = filp->private_data; | ||
552 | struct ftrace_event_field *field; | 603 | struct ftrace_event_field *field; |
553 | struct list_head *head; | ||
554 | struct trace_seq *s; | ||
555 | int common_field_count = 5; | ||
556 | char *buf; | ||
557 | int r = 0; | ||
558 | |||
559 | if (*ppos) | ||
560 | return 0; | ||
561 | |||
562 | s = kmalloc(sizeof(*s), GFP_KERNEL); | ||
563 | if (!s) | ||
564 | return -ENOMEM; | ||
565 | |||
566 | trace_seq_init(s); | ||
567 | |||
568 | trace_seq_printf(s, "name: %s\n", call->name); | ||
569 | trace_seq_printf(s, "ID: %d\n", call->event.type); | ||
570 | trace_seq_printf(s, "format:\n"); | ||
571 | 604 | ||
572 | head = trace_get_fields(call); | ||
573 | list_for_each_entry_reverse(field, head, link) { | 605 | list_for_each_entry_reverse(field, head, link) { |
574 | /* | 606 | /* |
575 | * Smartly shows the array type(except dynamic array). | 607 | * Smartly shows the array type(except dynamic array). |
@@ -584,29 +616,54 @@ event_format_read(struct file *filp, char __user *ubuf, size_t cnt, | |||
584 | array_descriptor = NULL; | 616 | array_descriptor = NULL; |
585 | 617 | ||
586 | if (!array_descriptor) { | 618 | if (!array_descriptor) { |
587 | r = trace_seq_printf(s, "\tfield:%s %s;\toffset:%u;" | 619 | trace_seq_printf(s, "\tfield:%s %s;\toffset:%u;" |
588 | "\tsize:%u;\tsigned:%d;\n", | 620 | "\tsize:%u;\tsigned:%d;\n", |
589 | field->type, field->name, field->offset, | 621 | field->type, field->name, field->offset, |
590 | field->size, !!field->is_signed); | 622 | field->size, !!field->is_signed); |
591 | } else { | 623 | } else { |
592 | r = trace_seq_printf(s, "\tfield:%.*s %s%s;\toffset:%u;" | 624 | trace_seq_printf(s, "\tfield:%.*s %s%s;\toffset:%u;" |
593 | "\tsize:%u;\tsigned:%d;\n", | 625 | "\tsize:%u;\tsigned:%d;\n", |
594 | (int)(array_descriptor - field->type), | 626 | (int)(array_descriptor - field->type), |
595 | field->type, field->name, | 627 | field->type, field->name, |
596 | array_descriptor, field->offset, | 628 | array_descriptor, field->offset, |
597 | field->size, !!field->is_signed); | 629 | field->size, !!field->is_signed); |
598 | } | 630 | } |
631 | } | ||
632 | } | ||
599 | 633 | ||
600 | if (--common_field_count == 0) | 634 | static ssize_t |
601 | r = trace_seq_printf(s, "\n"); | 635 | event_format_read(struct file *filp, char __user *ubuf, size_t cnt, |
636 | loff_t *ppos) | ||
637 | { | ||
638 | struct ftrace_event_call *call = filp->private_data; | ||
639 | struct list_head *head; | ||
640 | struct trace_seq *s; | ||
641 | char *buf; | ||
642 | int r; | ||
602 | 643 | ||
603 | if (!r) | 644 | if (*ppos) |
604 | break; | 645 | return 0; |
605 | } | 646 | |
647 | s = kmalloc(sizeof(*s), GFP_KERNEL); | ||
648 | if (!s) | ||
649 | return -ENOMEM; | ||
650 | |||
651 | trace_seq_init(s); | ||
652 | |||
653 | trace_seq_printf(s, "name: %s\n", call->name); | ||
654 | trace_seq_printf(s, "ID: %d\n", call->event.type); | ||
655 | trace_seq_printf(s, "format:\n"); | ||
656 | |||
657 | /* print common fields */ | ||
658 | print_event_fields(s, &ftrace_common_fields); | ||
606 | 659 | ||
607 | if (r) | 660 | trace_seq_putc(s, '\n'); |
608 | r = trace_seq_printf(s, "\nprint fmt: %s\n", | 661 | |
609 | call->print_fmt); | 662 | /* print event specific fields */ |
663 | head = trace_get_fields(call); | ||
664 | print_event_fields(s, head); | ||
665 | |||
666 | r = trace_seq_printf(s, "\nprint fmt: %s\n", call->print_fmt); | ||
610 | 667 | ||
611 | if (!r) { | 668 | if (!r) { |
612 | /* | 669 | /* |
@@ -963,35 +1020,31 @@ event_create_dir(struct ftrace_event_call *call, struct dentry *d_events, | |||
963 | return -1; | 1020 | return -1; |
964 | } | 1021 | } |
965 | 1022 | ||
966 | if (call->class->probe || call->class->reg) | 1023 | if (call->class->reg) |
967 | trace_create_file("enable", 0644, call->dir, call, | 1024 | trace_create_file("enable", 0644, call->dir, call, |
968 | enable); | 1025 | enable); |
969 | 1026 | ||
970 | #ifdef CONFIG_PERF_EVENTS | 1027 | #ifdef CONFIG_PERF_EVENTS |
971 | if (call->event.type && (call->class->perf_probe || call->class->reg)) | 1028 | if (call->event.type && call->class->reg) |
972 | trace_create_file("id", 0444, call->dir, call, | 1029 | trace_create_file("id", 0444, call->dir, call, |
973 | id); | 1030 | id); |
974 | #endif | 1031 | #endif |
975 | 1032 | ||
976 | if (call->class->define_fields) { | 1033 | /* |
977 | /* | 1034 | * Other events may have the same class. Only update |
978 | * Other events may have the same class. Only update | 1035 | * the fields if they are not already defined. |
979 | * the fields if they are not already defined. | 1036 | */ |
980 | */ | 1037 | head = trace_get_fields(call); |
981 | head = trace_get_fields(call); | 1038 | if (list_empty(head)) { |
982 | if (list_empty(head)) { | 1039 | ret = call->class->define_fields(call); |
983 | ret = trace_define_common_fields(call); | 1040 | if (ret < 0) { |
984 | if (!ret) | 1041 | pr_warning("Could not initialize trace point" |
985 | ret = call->class->define_fields(call); | 1042 | " events/%s\n", call->name); |
986 | if (ret < 0) { | 1043 | return ret; |
987 | pr_warning("Could not initialize trace point" | ||
988 | " events/%s\n", call->name); | ||
989 | return ret; | ||
990 | } | ||
991 | } | 1044 | } |
992 | trace_create_file("filter", 0644, call->dir, call, | ||
993 | filter); | ||
994 | } | 1045 | } |
1046 | trace_create_file("filter", 0644, call->dir, call, | ||
1047 | filter); | ||
995 | 1048 | ||
996 | trace_create_file("format", 0444, call->dir, call, | 1049 | trace_create_file("format", 0444, call->dir, call, |
997 | format); | 1050 | format); |
@@ -999,11 +1052,17 @@ event_create_dir(struct ftrace_event_call *call, struct dentry *d_events, | |||
999 | return 0; | 1052 | return 0; |
1000 | } | 1053 | } |
1001 | 1054 | ||
1002 | static int __trace_add_event_call(struct ftrace_event_call *call) | 1055 | static int |
1056 | __trace_add_event_call(struct ftrace_event_call *call, struct module *mod, | ||
1057 | const struct file_operations *id, | ||
1058 | const struct file_operations *enable, | ||
1059 | const struct file_operations *filter, | ||
1060 | const struct file_operations *format) | ||
1003 | { | 1061 | { |
1004 | struct dentry *d_events; | 1062 | struct dentry *d_events; |
1005 | int ret; | 1063 | int ret; |
1006 | 1064 | ||
1065 | /* The linker may leave blanks */ | ||
1007 | if (!call->name) | 1066 | if (!call->name) |
1008 | return -EINVAL; | 1067 | return -EINVAL; |
1009 | 1068 | ||
@@ -1011,8 +1070,8 @@ static int __trace_add_event_call(struct ftrace_event_call *call) | |||
1011 | ret = call->class->raw_init(call); | 1070 | ret = call->class->raw_init(call); |
1012 | if (ret < 0) { | 1071 | if (ret < 0) { |
1013 | if (ret != -ENOSYS) | 1072 | if (ret != -ENOSYS) |
1014 | pr_warning("Could not initialize trace " | 1073 | pr_warning("Could not initialize trace events/%s\n", |
1015 | "events/%s\n", call->name); | 1074 | call->name); |
1016 | return ret; | 1075 | return ret; |
1017 | } | 1076 | } |
1018 | } | 1077 | } |
@@ -1021,11 +1080,10 @@ static int __trace_add_event_call(struct ftrace_event_call *call) | |||
1021 | if (!d_events) | 1080 | if (!d_events) |
1022 | return -ENOENT; | 1081 | return -ENOENT; |
1023 | 1082 | ||
1024 | ret = event_create_dir(call, d_events, &ftrace_event_id_fops, | 1083 | ret = event_create_dir(call, d_events, id, enable, filter, format); |
1025 | &ftrace_enable_fops, &ftrace_event_filter_fops, | ||
1026 | &ftrace_event_format_fops); | ||
1027 | if (!ret) | 1084 | if (!ret) |
1028 | list_add(&call->list, &ftrace_events); | 1085 | list_add(&call->list, &ftrace_events); |
1086 | call->mod = mod; | ||
1029 | 1087 | ||
1030 | return ret; | 1088 | return ret; |
1031 | } | 1089 | } |
@@ -1035,7 +1093,10 @@ int trace_add_event_call(struct ftrace_event_call *call) | |||
1035 | { | 1093 | { |
1036 | int ret; | 1094 | int ret; |
1037 | mutex_lock(&event_mutex); | 1095 | mutex_lock(&event_mutex); |
1038 | ret = __trace_add_event_call(call); | 1096 | ret = __trace_add_event_call(call, NULL, &ftrace_event_id_fops, |
1097 | &ftrace_enable_fops, | ||
1098 | &ftrace_event_filter_fops, | ||
1099 | &ftrace_event_format_fops); | ||
1039 | mutex_unlock(&event_mutex); | 1100 | mutex_unlock(&event_mutex); |
1040 | return ret; | 1101 | return ret; |
1041 | } | 1102 | } |
@@ -1152,8 +1213,6 @@ static void trace_module_add_events(struct module *mod) | |||
1152 | { | 1213 | { |
1153 | struct ftrace_module_file_ops *file_ops = NULL; | 1214 | struct ftrace_module_file_ops *file_ops = NULL; |
1154 | struct ftrace_event_call *call, *start, *end; | 1215 | struct ftrace_event_call *call, *start, *end; |
1155 | struct dentry *d_events; | ||
1156 | int ret; | ||
1157 | 1216 | ||
1158 | start = mod->trace_events; | 1217 | start = mod->trace_events; |
1159 | end = mod->trace_events + mod->num_trace_events; | 1218 | end = mod->trace_events + mod->num_trace_events; |
@@ -1161,38 +1220,14 @@ static void trace_module_add_events(struct module *mod) | |||
1161 | if (start == end) | 1220 | if (start == end) |
1162 | return; | 1221 | return; |
1163 | 1222 | ||
1164 | d_events = event_trace_events_dir(); | 1223 | file_ops = trace_create_file_ops(mod); |
1165 | if (!d_events) | 1224 | if (!file_ops) |
1166 | return; | 1225 | return; |
1167 | 1226 | ||
1168 | for_each_event(call, start, end) { | 1227 | for_each_event(call, start, end) { |
1169 | /* The linker may leave blanks */ | 1228 | __trace_add_event_call(call, mod, |
1170 | if (!call->name) | ||
1171 | continue; | ||
1172 | if (call->class->raw_init) { | ||
1173 | ret = call->class->raw_init(call); | ||
1174 | if (ret < 0) { | ||
1175 | if (ret != -ENOSYS) | ||
1176 | pr_warning("Could not initialize trace " | ||
1177 | "point events/%s\n", call->name); | ||
1178 | continue; | ||
1179 | } | ||
1180 | } | ||
1181 | /* | ||
1182 | * This module has events, create file ops for this module | ||
1183 | * if not already done. | ||
1184 | */ | ||
1185 | if (!file_ops) { | ||
1186 | file_ops = trace_create_file_ops(mod); | ||
1187 | if (!file_ops) | ||
1188 | return; | ||
1189 | } | ||
1190 | call->mod = mod; | ||
1191 | ret = event_create_dir(call, d_events, | ||
1192 | &file_ops->id, &file_ops->enable, | 1229 | &file_ops->id, &file_ops->enable, |
1193 | &file_ops->filter, &file_ops->format); | 1230 | &file_ops->filter, &file_ops->format); |
1194 | if (!ret) | ||
1195 | list_add(&call->list, &ftrace_events); | ||
1196 | } | 1231 | } |
1197 | } | 1232 | } |
1198 | 1233 | ||
@@ -1319,25 +1354,14 @@ static __init int event_trace_init(void) | |||
1319 | trace_create_file("enable", 0644, d_events, | 1354 | trace_create_file("enable", 0644, d_events, |
1320 | NULL, &ftrace_system_enable_fops); | 1355 | NULL, &ftrace_system_enable_fops); |
1321 | 1356 | ||
1357 | if (trace_define_common_fields()) | ||
1358 | pr_warning("tracing: Failed to allocate common fields"); | ||
1359 | |||
1322 | for_each_event(call, __start_ftrace_events, __stop_ftrace_events) { | 1360 | for_each_event(call, __start_ftrace_events, __stop_ftrace_events) { |
1323 | /* The linker may leave blanks */ | 1361 | __trace_add_event_call(call, NULL, &ftrace_event_id_fops, |
1324 | if (!call->name) | ||
1325 | continue; | ||
1326 | if (call->class->raw_init) { | ||
1327 | ret = call->class->raw_init(call); | ||
1328 | if (ret < 0) { | ||
1329 | if (ret != -ENOSYS) | ||
1330 | pr_warning("Could not initialize trace " | ||
1331 | "point events/%s\n", call->name); | ||
1332 | continue; | ||
1333 | } | ||
1334 | } | ||
1335 | ret = event_create_dir(call, d_events, &ftrace_event_id_fops, | ||
1336 | &ftrace_enable_fops, | 1362 | &ftrace_enable_fops, |
1337 | &ftrace_event_filter_fops, | 1363 | &ftrace_event_filter_fops, |
1338 | &ftrace_event_format_fops); | 1364 | &ftrace_event_format_fops); |
1339 | if (!ret) | ||
1340 | list_add(&call->list, &ftrace_events); | ||
1341 | } | 1365 | } |
1342 | 1366 | ||
1343 | while (true) { | 1367 | while (true) { |
@@ -1524,12 +1548,11 @@ function_test_events_call(unsigned long ip, unsigned long parent_ip) | |||
1524 | struct ftrace_entry *entry; | 1548 | struct ftrace_entry *entry; |
1525 | unsigned long flags; | 1549 | unsigned long flags; |
1526 | long disabled; | 1550 | long disabled; |
1527 | int resched; | ||
1528 | int cpu; | 1551 | int cpu; |
1529 | int pc; | 1552 | int pc; |
1530 | 1553 | ||
1531 | pc = preempt_count(); | 1554 | pc = preempt_count(); |
1532 | resched = ftrace_preempt_disable(); | 1555 | preempt_disable_notrace(); |
1533 | cpu = raw_smp_processor_id(); | 1556 | cpu = raw_smp_processor_id(); |
1534 | disabled = atomic_inc_return(&per_cpu(ftrace_test_event_disable, cpu)); | 1557 | disabled = atomic_inc_return(&per_cpu(ftrace_test_event_disable, cpu)); |
1535 | 1558 | ||
@@ -1551,7 +1574,7 @@ function_test_events_call(unsigned long ip, unsigned long parent_ip) | |||
1551 | 1574 | ||
1552 | out: | 1575 | out: |
1553 | atomic_dec(&per_cpu(ftrace_test_event_disable, cpu)); | 1576 | atomic_dec(&per_cpu(ftrace_test_event_disable, cpu)); |
1554 | ftrace_preempt_enable(resched); | 1577 | preempt_enable_notrace(); |
1555 | } | 1578 | } |
1556 | 1579 | ||
1557 | static struct ftrace_ops trace_ops __initdata = | 1580 | static struct ftrace_ops trace_ops __initdata = |
diff --git a/kernel/trace/trace_events_filter.c b/kernel/trace/trace_events_filter.c index 57bb1bb32999..36d40104b17f 100644 --- a/kernel/trace/trace_events_filter.c +++ b/kernel/trace/trace_events_filter.c | |||
@@ -497,12 +497,10 @@ void print_subsystem_event_filter(struct event_subsystem *system, | |||
497 | } | 497 | } |
498 | 498 | ||
499 | static struct ftrace_event_field * | 499 | static struct ftrace_event_field * |
500 | find_event_field(struct ftrace_event_call *call, char *name) | 500 | __find_event_field(struct list_head *head, char *name) |
501 | { | 501 | { |
502 | struct ftrace_event_field *field; | 502 | struct ftrace_event_field *field; |
503 | struct list_head *head; | ||
504 | 503 | ||
505 | head = trace_get_fields(call); | ||
506 | list_for_each_entry(field, head, link) { | 504 | list_for_each_entry(field, head, link) { |
507 | if (!strcmp(field->name, name)) | 505 | if (!strcmp(field->name, name)) |
508 | return field; | 506 | return field; |
@@ -511,6 +509,20 @@ find_event_field(struct ftrace_event_call *call, char *name) | |||
511 | return NULL; | 509 | return NULL; |
512 | } | 510 | } |
513 | 511 | ||
512 | static struct ftrace_event_field * | ||
513 | find_event_field(struct ftrace_event_call *call, char *name) | ||
514 | { | ||
515 | struct ftrace_event_field *field; | ||
516 | struct list_head *head; | ||
517 | |||
518 | field = __find_event_field(&ftrace_common_fields, name); | ||
519 | if (field) | ||
520 | return field; | ||
521 | |||
522 | head = trace_get_fields(call); | ||
523 | return __find_event_field(head, name); | ||
524 | } | ||
525 | |||
514 | static void filter_free_pred(struct filter_pred *pred) | 526 | static void filter_free_pred(struct filter_pred *pred) |
515 | { | 527 | { |
516 | if (!pred) | 528 | if (!pred) |
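Because find_event_field() now consults ftrace_common_fields before the per-event list, filters written against the shared fields keep resolving for every event — for example, echo 'common_pid == 1234' into an event's filter file under the tracing debugfs directory (the path and pid value here are only illustrative).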
@@ -627,9 +639,6 @@ static int init_subsystem_preds(struct event_subsystem *system) | |||
627 | int err; | 639 | int err; |
628 | 640 | ||
629 | list_for_each_entry(call, &ftrace_events, list) { | 641 | list_for_each_entry(call, &ftrace_events, list) { |
630 | if (!call->class || !call->class->define_fields) | ||
631 | continue; | ||
632 | |||
633 | if (strcmp(call->class->system, system->name) != 0) | 642 | if (strcmp(call->class->system, system->name) != 0) |
634 | continue; | 643 | continue; |
635 | 644 | ||
@@ -646,9 +655,6 @@ static void filter_free_subsystem_preds(struct event_subsystem *system) | |||
646 | struct ftrace_event_call *call; | 655 | struct ftrace_event_call *call; |
647 | 656 | ||
648 | list_for_each_entry(call, &ftrace_events, list) { | 657 | list_for_each_entry(call, &ftrace_events, list) { |
649 | if (!call->class || !call->class->define_fields) | ||
650 | continue; | ||
651 | |||
652 | if (strcmp(call->class->system, system->name) != 0) | 658 | if (strcmp(call->class->system, system->name) != 0) |
653 | continue; | 659 | continue; |
654 | 660 | ||
@@ -1251,9 +1257,6 @@ static int replace_system_preds(struct event_subsystem *system, | |||
1251 | list_for_each_entry(call, &ftrace_events, list) { | 1257 | list_for_each_entry(call, &ftrace_events, list) { |
1252 | struct event_filter *filter = call->filter; | 1258 | struct event_filter *filter = call->filter; |
1253 | 1259 | ||
1254 | if (!call->class || !call->class->define_fields) | ||
1255 | continue; | ||
1256 | |||
1257 | if (strcmp(call->class->system, system->name) != 0) | 1260 | if (strcmp(call->class->system, system->name) != 0) |
1258 | continue; | 1261 | continue; |
1259 | 1262 | ||
diff --git a/kernel/trace/trace_export.c b/kernel/trace/trace_export.c index 8536e2a65969..4ba44deaac25 100644 --- a/kernel/trace/trace_export.c +++ b/kernel/trace/trace_export.c | |||
@@ -125,12 +125,6 @@ ftrace_define_fields_##name(struct ftrace_event_call *event_call) \ | |||
125 | 125 | ||
126 | #include "trace_entries.h" | 126 | #include "trace_entries.h" |
127 | 127 | ||
128 | static int ftrace_raw_init_event(struct ftrace_event_call *call) | ||
129 | { | ||
130 | INIT_LIST_HEAD(&call->class->fields); | ||
131 | return 0; | ||
132 | } | ||
133 | |||
134 | #undef __entry | 128 | #undef __entry |
135 | #define __entry REC | 129 | #define __entry REC |
136 | 130 | ||
@@ -158,7 +152,7 @@ static int ftrace_raw_init_event(struct ftrace_event_call *call) | |||
158 | struct ftrace_event_class event_class_ftrace_##call = { \ | 152 | struct ftrace_event_class event_class_ftrace_##call = { \ |
159 | .system = __stringify(TRACE_SYSTEM), \ | 153 | .system = __stringify(TRACE_SYSTEM), \ |
160 | .define_fields = ftrace_define_fields_##call, \ | 154 | .define_fields = ftrace_define_fields_##call, \ |
161 | .raw_init = ftrace_raw_init_event, \ | 155 | .fields = LIST_HEAD_INIT(event_class_ftrace_##call.fields),\ |
162 | }; \ | 156 | }; \ |
163 | \ | 157 | \ |
164 | struct ftrace_event_call __used \ | 158 | struct ftrace_event_call __used \ |
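Statically initializing the class's fields list removes the need for a runtime raw_init whose only job was to call INIT_LIST_HEAD(). The two styles side by side (the class name is illustrative; the second form is what the hunk above switches to):

	/* Before: the list head had to be set up at event-init time. */
	static int my_raw_init(struct ftrace_event_call *call)
	{
		INIT_LIST_HEAD(&call->class->fields);
		return 0;
	}

	/* After: the list head is valid from the moment the object exists. */
	static struct ftrace_event_class my_event_class = {
		.system	= "my_subsys",				/* illustrative */
		.fields	= LIST_HEAD_INIT(my_event_class.fields),
	};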
diff --git a/kernel/trace/trace_functions.c b/kernel/trace/trace_functions.c index b3f3776b0cd6..16aee4d44e8f 100644 --- a/kernel/trace/trace_functions.c +++ b/kernel/trace/trace_functions.c | |||
@@ -54,14 +54,14 @@ function_trace_call_preempt_only(unsigned long ip, unsigned long parent_ip) | |||
54 | struct trace_array_cpu *data; | 54 | struct trace_array_cpu *data; |
55 | unsigned long flags; | 55 | unsigned long flags; |
56 | long disabled; | 56 | long disabled; |
57 | int cpu, resched; | 57 | int cpu; |
58 | int pc; | 58 | int pc; |
59 | 59 | ||
60 | if (unlikely(!ftrace_function_enabled)) | 60 | if (unlikely(!ftrace_function_enabled)) |
61 | return; | 61 | return; |
62 | 62 | ||
63 | pc = preempt_count(); | 63 | pc = preempt_count(); |
64 | resched = ftrace_preempt_disable(); | 64 | preempt_disable_notrace(); |
65 | local_save_flags(flags); | 65 | local_save_flags(flags); |
66 | cpu = raw_smp_processor_id(); | 66 | cpu = raw_smp_processor_id(); |
67 | data = tr->data[cpu]; | 67 | data = tr->data[cpu]; |
@@ -71,7 +71,7 @@ function_trace_call_preempt_only(unsigned long ip, unsigned long parent_ip) | |||
71 | trace_function(tr, ip, parent_ip, flags, pc); | 71 | trace_function(tr, ip, parent_ip, flags, pc); |
72 | 72 | ||
73 | atomic_dec(&data->disabled); | 73 | atomic_dec(&data->disabled); |
74 | ftrace_preempt_enable(resched); | 74 | preempt_enable_notrace(); |
75 | } | 75 | } |
76 | 76 | ||
77 | static void | 77 | static void |
diff --git a/kernel/trace/trace_functions_graph.c b/kernel/trace/trace_functions_graph.c index 79f4bac99a94..6bff23625781 100644 --- a/kernel/trace/trace_functions_graph.c +++ b/kernel/trace/trace_functions_graph.c | |||
@@ -641,7 +641,8 @@ trace_print_graph_duration(unsigned long long duration, struct trace_seq *s) | |||
641 | 641 | ||
642 | /* Print nsecs (we don't want to exceed 7 numbers) */ | 642 | /* Print nsecs (we don't want to exceed 7 numbers) */ |
643 | if (len < 7) { | 643 | if (len < 7) { |
644 | snprintf(nsecs_str, 8 - len, "%03lu", nsecs_rem); | 644 | snprintf(nsecs_str, min(sizeof(nsecs_str), 8UL - len), "%03lu", |
645 | nsecs_rem); | ||
645 | ret = trace_seq_printf(s, ".%s", nsecs_str); | 646 | ret = trace_seq_printf(s, ".%s", nsecs_str); |
646 | if (!ret) | 647 | if (!ret) |
647 | return TRACE_TYPE_PARTIAL_LINE; | 648 | return TRACE_TYPE_PARTIAL_LINE; |
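Why the clamp: len counts the digits already printed for the integer part, and when it is small, 8 - len can exceed the size of the short nsecs_str buffer, so the old call could tell snprintf() the destination was larger than it really is. An illustration with assumed numbers (the buffer size is not shown in this hunk):

	/* Illustrative: if nsecs_str were char[5] and len == 2, the old size
	 * argument was 8 - 2 = 6, one byte more than the buffer holds;
	 * min(sizeof(nsecs_str), 8UL - len) caps it at the buffer size. */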
diff --git a/kernel/trace/trace_irqsoff.c b/kernel/trace/trace_irqsoff.c index 6fd486e0cef4..73a6b0601f2e 100644 --- a/kernel/trace/trace_irqsoff.c +++ b/kernel/trace/trace_irqsoff.c | |||
@@ -649,6 +649,7 @@ static struct tracer irqsoff_tracer __read_mostly = | |||
649 | #endif | 649 | #endif |
650 | .open = irqsoff_trace_open, | 650 | .open = irqsoff_trace_open, |
651 | .close = irqsoff_trace_close, | 651 | .close = irqsoff_trace_close, |
652 | .use_max_tr = 1, | ||
652 | }; | 653 | }; |
653 | # define register_irqsoff(trace) register_tracer(&trace) | 654 | # define register_irqsoff(trace) register_tracer(&trace) |
654 | #else | 655 | #else |
@@ -681,6 +682,7 @@ static struct tracer preemptoff_tracer __read_mostly = | |||
681 | #endif | 682 | #endif |
682 | .open = irqsoff_trace_open, | 683 | .open = irqsoff_trace_open, |
683 | .close = irqsoff_trace_close, | 684 | .close = irqsoff_trace_close, |
685 | .use_max_tr = 1, | ||
684 | }; | 686 | }; |
685 | # define register_preemptoff(trace) register_tracer(&trace) | 687 | # define register_preemptoff(trace) register_tracer(&trace) |
686 | #else | 688 | #else |
@@ -715,6 +717,7 @@ static struct tracer preemptirqsoff_tracer __read_mostly = | |||
715 | #endif | 717 | #endif |
716 | .open = irqsoff_trace_open, | 718 | .open = irqsoff_trace_open, |
717 | .close = irqsoff_trace_close, | 719 | .close = irqsoff_trace_close, |
720 | .use_max_tr = 1, | ||
718 | }; | 721 | }; |
719 | 722 | ||
720 | # define register_preemptirqsoff(trace) register_tracer(&trace) | 723 | # define register_preemptirqsoff(trace) register_tracer(&trace) |
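All three latency tracers now set the use_max_tr field added to struct tracer earlier in this patch; the name suggests it marks tracers that rely on the max-latency snapshot buffer so the core can treat them accordingly. A sketch of a tracer opting in (everything except the new field is illustrative):

	static struct tracer my_latency_tracer __read_mostly = {
		.name		= "my_latency",		/* illustrative */
		.init		= my_latency_init,	/* illustrative */
		.reset		= my_latency_reset,	/* illustrative */
		.use_max_tr	= 1,			/* new field from this patch */
	};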
diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c index f52b5f50299d..8b27c9849b42 100644 --- a/kernel/trace/trace_kprobe.c +++ b/kernel/trace/trace_kprobe.c | |||
@@ -30,6 +30,8 @@ | |||
30 | #include <linux/ptrace.h> | 30 | #include <linux/ptrace.h> |
31 | #include <linux/perf_event.h> | 31 | #include <linux/perf_event.h> |
32 | #include <linux/stringify.h> | 32 | #include <linux/stringify.h> |
33 | #include <linux/limits.h> | ||
34 | #include <linux/uaccess.h> | ||
33 | #include <asm/bitsperlong.h> | 35 | #include <asm/bitsperlong.h> |
34 | 36 | ||
35 | #include "trace.h" | 37 | #include "trace.h" |
@@ -38,6 +40,7 @@ | |||
38 | #define MAX_TRACE_ARGS 128 | 40 | #define MAX_TRACE_ARGS 128 |
39 | #define MAX_ARGSTR_LEN 63 | 41 | #define MAX_ARGSTR_LEN 63 |
40 | #define MAX_EVENT_NAME_LEN 64 | 42 | #define MAX_EVENT_NAME_LEN 64 |
43 | #define MAX_STRING_SIZE PATH_MAX | ||
41 | #define KPROBE_EVENT_SYSTEM "kprobes" | 44 | #define KPROBE_EVENT_SYSTEM "kprobes" |
42 | 45 | ||
43 | /* Reserved field names */ | 46 | /* Reserved field names */ |
@@ -58,14 +61,16 @@ const char *reserved_field_names[] = { | |||
58 | }; | 61 | }; |
59 | 62 | ||
60 | /* Printing function type */ | 63 | /* Printing function type */ |
61 | typedef int (*print_type_func_t)(struct trace_seq *, const char *, void *); | 64 | typedef int (*print_type_func_t)(struct trace_seq *, const char *, void *, |
65 | void *); | ||
62 | #define PRINT_TYPE_FUNC_NAME(type) print_type_##type | 66 | #define PRINT_TYPE_FUNC_NAME(type) print_type_##type |
63 | #define PRINT_TYPE_FMT_NAME(type) print_type_format_##type | 67 | #define PRINT_TYPE_FMT_NAME(type) print_type_format_##type |
64 | 68 | ||
65 | /* Printing in basic type function template */ | 69 | /* Printing in basic type function template */ |
66 | #define DEFINE_BASIC_PRINT_TYPE_FUNC(type, fmt, cast) \ | 70 | #define DEFINE_BASIC_PRINT_TYPE_FUNC(type, fmt, cast) \ |
67 | static __kprobes int PRINT_TYPE_FUNC_NAME(type)(struct trace_seq *s, \ | 71 | static __kprobes int PRINT_TYPE_FUNC_NAME(type)(struct trace_seq *s, \ |
68 | const char *name, void *data)\ | 72 | const char *name, \ |
73 | void *data, void *ent)\ | ||
69 | { \ | 74 | { \ |
70 | return trace_seq_printf(s, " %s=" fmt, name, (cast)*(type *)data);\ | 75 | return trace_seq_printf(s, " %s=" fmt, name, (cast)*(type *)data);\ |
71 | } \ | 76 | } \ |
@@ -80,6 +85,49 @@ DEFINE_BASIC_PRINT_TYPE_FUNC(s16, "%d", int) | |||
80 | DEFINE_BASIC_PRINT_TYPE_FUNC(s32, "%ld", long) | 85 | DEFINE_BASIC_PRINT_TYPE_FUNC(s32, "%ld", long) |
81 | DEFINE_BASIC_PRINT_TYPE_FUNC(s64, "%lld", long long) | 86 | DEFINE_BASIC_PRINT_TYPE_FUNC(s64, "%lld", long long) |
82 | 87 | ||
88 | /* data_rloc: data relative location, compatible with u32 */ | ||
89 | #define make_data_rloc(len, roffs) \ | ||
90 | (((u32)(len) << 16) | ((u32)(roffs) & 0xffff)) | ||
91 | #define get_rloc_len(dl) ((u32)(dl) >> 16) | ||
92 | #define get_rloc_offs(dl) ((u32)(dl) & 0xffff) | ||
93 | |||
94 | static inline void *get_rloc_data(u32 *dl) | ||
95 | { | ||
96 | return (u8 *)dl + get_rloc_offs(*dl); | ||
97 | } | ||
98 | |||
99 | /* For data_loc conversion */ | ||
100 | static inline void *get_loc_data(u32 *dl, void *ent) | ||
101 | { | ||
102 | return (u8 *)ent + get_rloc_offs(*dl); | ||
103 | } | ||
104 | |||
105 | /* | ||
106 | * Convert data_rloc to data_loc: | ||
107 | * data_rloc stores the offset from data_rloc itself, but data_loc | ||
108 | * stores the offset from event entry. | ||
109 | */ | ||
110 | #define convert_rloc_to_loc(dl, offs) ((u32)(dl) + (offs)) | ||
111 | |||
112 | /* For defining macros, define string/string_size types */ | ||
113 | typedef u32 string; | ||
114 | typedef u32 string_size; | ||
115 | |||
116 | /* Print type function for string type */ | ||
117 | static __kprobes int PRINT_TYPE_FUNC_NAME(string)(struct trace_seq *s, | ||
118 | const char *name, | ||
119 | void *data, void *ent) | ||
120 | { | ||
121 | int len = *(u32 *)data >> 16; | ||
122 | |||
123 | if (!len) | ||
124 | return trace_seq_printf(s, " %s=(fault)", name); | ||
125 | else | ||
126 | return trace_seq_printf(s, " %s=\"%s\"", name, | ||
127 | (const char *)get_loc_data(data, ent)); | ||
128 | } | ||
129 | static const char PRINT_TYPE_FMT_NAME(string)[] = "\\\"%s\\\""; | ||
130 | |||
83 | /* Data fetch function type */ | 131 | /* Data fetch function type */ |
84 | typedef void (*fetch_func_t)(struct pt_regs *, void *, void *); | 132 | typedef void (*fetch_func_t)(struct pt_regs *, void *, void *); |
85 | 133 | ||
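make_data_rloc() packs a 16-bit length into the high half of a u32 and a 16-bit offset into the low half; per the comment above, the offset is relative to the rloc word itself until convert_rloc_to_loc() rebases it onto the event entry (the data_loc convention). A small worked round trip (the values are arbitrary):

	u32 rloc = make_data_rloc(6, 40);	/* (6 << 16) | 40 == 0x00060028 */
	u32 len  = get_rloc_len(rloc);		/* 6:  length of the string      */
	u32 offs = get_rloc_offs(rloc);		/* 40: offset of its payload     */

	/* get_rloc_data(&rloc) == (u8 *)&rloc + 40, i.e. the payload stored
	 * relative to the rloc word; after convert_rloc_to_loc(rloc, base)
	 * the offset is relative to the event entry, so
	 * get_loc_data(dl, ent) == (u8 *)ent + offset. */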
@@ -94,32 +142,38 @@ static __kprobes void call_fetch(struct fetch_param *fprm, | |||
94 | return fprm->fn(regs, fprm->data, dest); | 142 | return fprm->fn(regs, fprm->data, dest); |
95 | } | 143 | } |
96 | 144 | ||
97 | #define FETCH_FUNC_NAME(kind, type) fetch_##kind##_##type | 145 | #define FETCH_FUNC_NAME(method, type) fetch_##method##_##type |
98 | /* | 146 | /* |
99 | * Define macro for basic types - we don't need to define s* types, because | 147 | * Define macro for basic types - we don't need to define s* types, because |
100 | * we have to care only about bitwidth at recording time. | 148 | * we have to care only about bitwidth at recording time. |
101 | */ | 149 | */ |
102 | #define DEFINE_BASIC_FETCH_FUNCS(kind) \ | 150 | #define DEFINE_BASIC_FETCH_FUNCS(method) \ |
103 | DEFINE_FETCH_##kind(u8) \ | 151 | DEFINE_FETCH_##method(u8) \ |
104 | DEFINE_FETCH_##kind(u16) \ | 152 | DEFINE_FETCH_##method(u16) \ |
105 | DEFINE_FETCH_##kind(u32) \ | 153 | DEFINE_FETCH_##method(u32) \ |
106 | DEFINE_FETCH_##kind(u64) | 154 | DEFINE_FETCH_##method(u64) |
107 | 155 | ||
108 | #define CHECK_BASIC_FETCH_FUNCS(kind, fn) \ | 156 | #define CHECK_FETCH_FUNCS(method, fn) \ |
109 | ((FETCH_FUNC_NAME(kind, u8) == fn) || \ | 157 | (((FETCH_FUNC_NAME(method, u8) == fn) || \ |
110 | (FETCH_FUNC_NAME(kind, u16) == fn) || \ | 158 | (FETCH_FUNC_NAME(method, u16) == fn) || \ |
111 | (FETCH_FUNC_NAME(kind, u32) == fn) || \ | 159 | (FETCH_FUNC_NAME(method, u32) == fn) || \ |
112 | (FETCH_FUNC_NAME(kind, u64) == fn)) | 160 | (FETCH_FUNC_NAME(method, u64) == fn) || \ |
161 | (FETCH_FUNC_NAME(method, string) == fn) || \ | ||
162 | (FETCH_FUNC_NAME(method, string_size) == fn)) \ | ||
163 | && (fn != NULL)) | ||
113 | 164 | ||
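The trailing (fn != NULL) term matters because several method/type combinations below are deliberately stubbed out as NULL; a short illustration of what the guard prevents:

	/* fetch_reg_string and friends are #defined to NULL, so an argument
	 * whose fetch function ended up NULL must never match a method:
	 *
	 *   CHECK_FETCH_FUNCS(reg, NULL)                              -> 0
	 *   CHECK_FETCH_FUNCS(deref, FETCH_FUNC_NAME(deref, string))  -> 1
	 */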
114 | /* Data fetch function templates */ | 165 | /* Data fetch function templates */ |
115 | #define DEFINE_FETCH_reg(type) \ | 166 | #define DEFINE_FETCH_reg(type) \ |
116 | static __kprobes void FETCH_FUNC_NAME(reg, type)(struct pt_regs *regs, \ | 167 | static __kprobes void FETCH_FUNC_NAME(reg, type)(struct pt_regs *regs, \ |
117 | void *offset, void *dest) \ | 168 | void *offset, void *dest) \ |
118 | { \ | 169 | { \ |
119 | *(type *)dest = (type)regs_get_register(regs, \ | 170 | *(type *)dest = (type)regs_get_register(regs, \ |
120 | (unsigned int)((unsigned long)offset)); \ | 171 | (unsigned int)((unsigned long)offset)); \ |
121 | } | 172 | } |
122 | DEFINE_BASIC_FETCH_FUNCS(reg) | 173 | DEFINE_BASIC_FETCH_FUNCS(reg) |
174 | /* No string on the register */ | ||
175 | #define fetch_reg_string NULL | ||
176 | #define fetch_reg_string_size NULL | ||
123 | 177 | ||
124 | #define DEFINE_FETCH_stack(type) \ | 178 | #define DEFINE_FETCH_stack(type) \ |
125 | static __kprobes void FETCH_FUNC_NAME(stack, type)(struct pt_regs *regs,\ | 179 | static __kprobes void FETCH_FUNC_NAME(stack, type)(struct pt_regs *regs,\ |
@@ -129,6 +183,9 @@ static __kprobes void FETCH_FUNC_NAME(stack, type)(struct pt_regs *regs,\ | |||
129 | (unsigned int)((unsigned long)offset)); \ | 183 | (unsigned int)((unsigned long)offset)); \ |
130 | } | 184 | } |
131 | DEFINE_BASIC_FETCH_FUNCS(stack) | 185 | DEFINE_BASIC_FETCH_FUNCS(stack) |
186 | /* No string on the stack entry */ | ||
187 | #define fetch_stack_string NULL | ||
188 | #define fetch_stack_string_size NULL | ||
132 | 189 | ||
133 | #define DEFINE_FETCH_retval(type) \ | 190 | #define DEFINE_FETCH_retval(type) \ |
134 | static __kprobes void FETCH_FUNC_NAME(retval, type)(struct pt_regs *regs,\ | 191 | static __kprobes void FETCH_FUNC_NAME(retval, type)(struct pt_regs *regs,\ |
@@ -137,6 +194,9 @@ static __kprobes void FETCH_FUNC_NAME(retval, type)(struct pt_regs *regs,\ | |||
137 | *(type *)dest = (type)regs_return_value(regs); \ | 194 | *(type *)dest = (type)regs_return_value(regs); \ |
138 | } | 195 | } |
139 | DEFINE_BASIC_FETCH_FUNCS(retval) | 196 | DEFINE_BASIC_FETCH_FUNCS(retval) |
197 | /* No string on the retval */ | ||
198 | #define fetch_retval_string NULL | ||
199 | #define fetch_retval_string_size NULL | ||
140 | 200 | ||
141 | #define DEFINE_FETCH_memory(type) \ | 201 | #define DEFINE_FETCH_memory(type) \ |
142 | static __kprobes void FETCH_FUNC_NAME(memory, type)(struct pt_regs *regs,\ | 202 | static __kprobes void FETCH_FUNC_NAME(memory, type)(struct pt_regs *regs,\ |
@@ -149,6 +209,62 @@ static __kprobes void FETCH_FUNC_NAME(memory, type)(struct pt_regs *regs,\ | |||
149 | *(type *)dest = retval; \ | 209 | *(type *)dest = retval; \ |
150 | } | 210 | } |
151 | DEFINE_BASIC_FETCH_FUNCS(memory) | 211 | DEFINE_BASIC_FETCH_FUNCS(memory) |
212 | /* | ||
213 | * Fetch a null-terminated string. Caller MUST set *(u32 *)dest with max | ||
214 | * length and relative data location. | ||
215 | */ | ||
216 | static __kprobes void FETCH_FUNC_NAME(memory, string)(struct pt_regs *regs, | ||
217 | void *addr, void *dest) | ||
218 | { | ||
219 | long ret; | ||
220 | int maxlen = get_rloc_len(*(u32 *)dest); | ||
221 | u8 *dst = get_rloc_data(dest); | ||
222 | u8 *src = addr; | ||
223 | mm_segment_t old_fs = get_fs(); | ||
224 | if (!maxlen) | ||
225 | return; | ||
226 | /* | ||
227 | * Try to get string again, since the string can be changed while | ||
228 | * probing. | ||
229 | */ | ||
230 | set_fs(KERNEL_DS); | ||
231 | pagefault_disable(); | ||
232 | do | ||
233 | ret = __copy_from_user_inatomic(dst++, src++, 1); | ||
234 | while (dst[-1] && ret == 0 && src - (u8 *)addr < maxlen); | ||
235 | dst[-1] = '\0'; | ||
236 | pagefault_enable(); | ||
237 | set_fs(old_fs); | ||
238 | |||
239 | if (ret < 0) { /* Failed to fetch string */ | ||
240 | ((u8 *)get_rloc_data(dest))[0] = '\0'; | ||
241 | *(u32 *)dest = make_data_rloc(0, get_rloc_offs(*(u32 *)dest)); | ||
242 | } else | ||
243 | *(u32 *)dest = make_data_rloc(src - (u8 *)addr, | ||
244 | get_rloc_offs(*(u32 *)dest)); | ||
245 | } | ||
246 | /* Return the length of string -- including the null terminating byte */ | ||
247 | static __kprobes void FETCH_FUNC_NAME(memory, string_size)(struct pt_regs *regs, | ||
248 | void *addr, void *dest) | ||
249 | { | ||
250 | int ret, len = 0; | ||
251 | u8 c; | ||
252 | mm_segment_t old_fs = get_fs(); | ||
253 | |||
254 | set_fs(KERNEL_DS); | ||
255 | pagefault_disable(); | ||
256 | do { | ||
257 | ret = __copy_from_user_inatomic(&c, (u8 *)addr + len, 1); | ||
258 | len++; | ||
259 | } while (c && ret == 0 && len < MAX_STRING_SIZE); | ||
260 | pagefault_enable(); | ||
261 | set_fs(old_fs); | ||
262 | |||
263 | if (ret < 0) /* Failed to check the length */ | ||
264 | *(u32 *)dest = 0; | ||
265 | else | ||
266 | *(u32 *)dest = len; | ||
267 | } | ||
152 | 268 | ||
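A minimal sketch of the caller contract stated above, assuming regs and uaddr come from the probe handler (names and sizes are illustrative):

	u8 buf[64];              /* pretend this is the event's data area      */
	u32 *dl = (u32 *)buf;    /* the data_rloc word sits at offset 0        */

	*dl = make_data_rloc(32, sizeof(u32));  /* copy at most 32 bytes, just
	                                           after the rloc word         */
	FETCH_FUNC_NAME(memory, string)(regs, (void *)uaddr, dl);
	/* success: *dl now encodes the copied length (including '\0') at the
	 * same offset; fault: the length becomes 0 and buf[4] holds '\0'.     */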
153 | /* Memory fetching by symbol */ | 269 | /* Memory fetching by symbol */ |
154 | struct symbol_cache { | 270 | struct symbol_cache { |
@@ -203,6 +319,8 @@ static __kprobes void FETCH_FUNC_NAME(symbol, type)(struct pt_regs *regs,\ | |||
203 | *(type *)dest = 0; \ | 319 | *(type *)dest = 0; \ |
204 | } | 320 | } |
205 | DEFINE_BASIC_FETCH_FUNCS(symbol) | 321 | DEFINE_BASIC_FETCH_FUNCS(symbol) |
322 | DEFINE_FETCH_symbol(string) | ||
323 | DEFINE_FETCH_symbol(string_size) | ||
206 | 324 | ||
207 | /* Dereference memory access function */ | 325 | /* Dereference memory access function */ |
208 | struct deref_fetch_param { | 326 | struct deref_fetch_param { |
@@ -224,12 +342,14 @@ static __kprobes void FETCH_FUNC_NAME(deref, type)(struct pt_regs *regs,\ | |||
224 | *(type *)dest = 0; \ | 342 | *(type *)dest = 0; \ |
225 | } | 343 | } |
226 | DEFINE_BASIC_FETCH_FUNCS(deref) | 344 | DEFINE_BASIC_FETCH_FUNCS(deref) |
345 | DEFINE_FETCH_deref(string) | ||
346 | DEFINE_FETCH_deref(string_size) | ||
227 | 347 | ||
228 | static __kprobes void free_deref_fetch_param(struct deref_fetch_param *data) | 348 | static __kprobes void free_deref_fetch_param(struct deref_fetch_param *data) |
229 | { | 349 | { |
230 | if (CHECK_BASIC_FETCH_FUNCS(deref, data->orig.fn)) | 350 | if (CHECK_FETCH_FUNCS(deref, data->orig.fn)) |
231 | free_deref_fetch_param(data->orig.data); | 351 | free_deref_fetch_param(data->orig.data); |
232 | else if (CHECK_BASIC_FETCH_FUNCS(symbol, data->orig.fn)) | 352 | else if (CHECK_FETCH_FUNCS(symbol, data->orig.fn)) |
233 | free_symbol_cache(data->orig.data); | 353 | free_symbol_cache(data->orig.data); |
234 | kfree(data); | 354 | kfree(data); |
235 | } | 355 | } |
@@ -240,23 +360,43 @@ static __kprobes void free_deref_fetch_param(struct deref_fetch_param *data) | |||
240 | #define DEFAULT_FETCH_TYPE _DEFAULT_FETCH_TYPE(BITS_PER_LONG) | 360 | #define DEFAULT_FETCH_TYPE _DEFAULT_FETCH_TYPE(BITS_PER_LONG) |
241 | #define DEFAULT_FETCH_TYPE_STR __stringify(DEFAULT_FETCH_TYPE) | 361 | #define DEFAULT_FETCH_TYPE_STR __stringify(DEFAULT_FETCH_TYPE) |
242 | 362 | ||
243 | #define ASSIGN_FETCH_FUNC(kind, type) \ | 363 | /* Fetch types */ |
244 | .kind = FETCH_FUNC_NAME(kind, type) | 364 | enum { |
245 | 365 | FETCH_MTD_reg = 0, | |
246 | #define ASSIGN_FETCH_TYPE(ptype, ftype, sign) \ | 366 | FETCH_MTD_stack, |
247 | {.name = #ptype, \ | 367 | FETCH_MTD_retval, |
248 | .size = sizeof(ftype), \ | 368 | FETCH_MTD_memory, |
249 | .is_signed = sign, \ | 369 | FETCH_MTD_symbol, |
250 | .print = PRINT_TYPE_FUNC_NAME(ptype), \ | 370 | FETCH_MTD_deref, |
251 | .fmt = PRINT_TYPE_FMT_NAME(ptype), \ | 371 | FETCH_MTD_END, |
252 | ASSIGN_FETCH_FUNC(reg, ftype), \ | 372 | }; |
253 | ASSIGN_FETCH_FUNC(stack, ftype), \ | 373 | |
254 | ASSIGN_FETCH_FUNC(retval, ftype), \ | 374 | #define ASSIGN_FETCH_FUNC(method, type) \ |
255 | ASSIGN_FETCH_FUNC(memory, ftype), \ | 375 | [FETCH_MTD_##method] = FETCH_FUNC_NAME(method, type) |
256 | ASSIGN_FETCH_FUNC(symbol, ftype), \ | 376 | |
257 | ASSIGN_FETCH_FUNC(deref, ftype), \ | 377 | #define __ASSIGN_FETCH_TYPE(_name, ptype, ftype, _size, sign, _fmttype) \ |
378 | {.name = _name, \ | ||
379 | .size = _size, \ | ||
380 | .is_signed = sign, \ | ||
381 | .print = PRINT_TYPE_FUNC_NAME(ptype), \ | ||
382 | .fmt = PRINT_TYPE_FMT_NAME(ptype), \ | ||
383 | .fmttype = _fmttype, \ | ||
384 | .fetch = { \ | ||
385 | ASSIGN_FETCH_FUNC(reg, ftype), \ | ||
386 | ASSIGN_FETCH_FUNC(stack, ftype), \ | ||
387 | ASSIGN_FETCH_FUNC(retval, ftype), \ | ||
388 | ASSIGN_FETCH_FUNC(memory, ftype), \ | ||
389 | ASSIGN_FETCH_FUNC(symbol, ftype), \ | ||
390 | ASSIGN_FETCH_FUNC(deref, ftype), \ | ||
391 | } \ | ||
258 | } | 392 | } |
259 | 393 | ||
394 | #define ASSIGN_FETCH_TYPE(ptype, ftype, sign) \ | ||
395 | __ASSIGN_FETCH_TYPE(#ptype, ptype, ftype, sizeof(ftype), sign, #ptype) | ||
396 | |||
397 | #define FETCH_TYPE_STRING 0 | ||
398 | #define FETCH_TYPE_STRSIZE 1 | ||
399 | |||
260 | /* Fetch type information table */ | 400 | /* Fetch type information table */ |
261 | static const struct fetch_type { | 401 | static const struct fetch_type { |
262 | const char *name; /* Name of type */ | 402 | const char *name; /* Name of type */ |
@@ -264,14 +404,16 @@ static const struct fetch_type { | |||
264 | int is_signed; /* Signed flag */ | 404 | int is_signed; /* Signed flag */ |
265 | print_type_func_t print; /* Print functions */ | 405 | print_type_func_t print; /* Print functions */ |
266 | const char *fmt; /* Format string */ | 406 | const char *fmt; /* Format string */ |
407 | const char *fmttype; /* Name in format file */ | ||
267 | /* Fetch functions */ | 408 | /* Fetch functions */ |
268 | fetch_func_t reg; | 409 | fetch_func_t fetch[FETCH_MTD_END]; |
269 | fetch_func_t stack; | ||
270 | fetch_func_t retval; | ||
271 | fetch_func_t memory; | ||
272 | fetch_func_t symbol; | ||
273 | fetch_func_t deref; | ||
274 | } fetch_type_table[] = { | 410 | } fetch_type_table[] = { |
411 | /* Special types */ | ||
412 | [FETCH_TYPE_STRING] = __ASSIGN_FETCH_TYPE("string", string, string, | ||
413 | sizeof(u32), 1, "__data_loc char[]"), | ||
414 | [FETCH_TYPE_STRSIZE] = __ASSIGN_FETCH_TYPE("string_size", u32, | ||
415 | string_size, sizeof(u32), 0, "u32"), | ||
416 | /* Basic types */ | ||
275 | ASSIGN_FETCH_TYPE(u8, u8, 0), | 417 | ASSIGN_FETCH_TYPE(u8, u8, 0), |
276 | ASSIGN_FETCH_TYPE(u16, u16, 0), | 418 | ASSIGN_FETCH_TYPE(u16, u16, 0), |
277 | ASSIGN_FETCH_TYPE(u32, u32, 0), | 419 | ASSIGN_FETCH_TYPE(u32, u32, 0), |
@@ -302,12 +444,28 @@ static __kprobes void fetch_stack_address(struct pt_regs *regs, | |||
302 | *(unsigned long *)dest = kernel_stack_pointer(regs); | 444 | *(unsigned long *)dest = kernel_stack_pointer(regs); |
303 | } | 445 | } |
304 | 446 | ||
447 | static fetch_func_t get_fetch_size_function(const struct fetch_type *type, | ||
448 | fetch_func_t orig_fn) | ||
449 | { | ||
450 | int i; | ||
451 | |||
452 | if (type != &fetch_type_table[FETCH_TYPE_STRING]) | ||
453 | return NULL; /* Only string type needs size function */ | ||
454 | for (i = 0; i < FETCH_MTD_END; i++) | ||
455 | if (type->fetch[i] == orig_fn) | ||
456 | return fetch_type_table[FETCH_TYPE_STRSIZE].fetch[i]; | ||
457 | |||
458 | WARN_ON(1); /* This should not happen */ | ||
459 | return NULL; | ||
460 | } | ||
461 | |||
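How the string/size pairing above plays out for an argument parsed as a memory fetch, as a short sketch:

	const struct fetch_type *t = &fetch_type_table[FETCH_TYPE_STRING];
	fetch_func_t fn  = t->fetch[FETCH_MTD_memory];      /* fetch_memory_string      */
	fetch_func_t len = get_fetch_size_function(t, fn);  /* fetch_memory_string_size */
	/* any non-string type, or an fn not found in the table, yields NULL */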
305 | /** | 462 | /** |
306 | * Kprobe event core functions | 463 | * Kprobe event core functions |
307 | */ | 464 | */ |
308 | 465 | ||
309 | struct probe_arg { | 466 | struct probe_arg { |
310 | struct fetch_param fetch; | 467 | struct fetch_param fetch; |
468 | struct fetch_param fetch_size; | ||
311 | unsigned int offset; /* Offset from argument entry */ | 469 | unsigned int offset; /* Offset from argument entry */ |
312 | const char *name; /* Name of this argument */ | 470 | const char *name; /* Name of this argument */ |
313 | const char *comm; /* Command of this argument */ | 471 | const char *comm; /* Command of this argument */ |
@@ -429,9 +587,9 @@ error: | |||
429 | 587 | ||
430 | static void free_probe_arg(struct probe_arg *arg) | 588 | static void free_probe_arg(struct probe_arg *arg) |
431 | { | 589 | { |
432 | if (CHECK_BASIC_FETCH_FUNCS(deref, arg->fetch.fn)) | 590 | if (CHECK_FETCH_FUNCS(deref, arg->fetch.fn)) |
433 | free_deref_fetch_param(arg->fetch.data); | 591 | free_deref_fetch_param(arg->fetch.data); |
434 | else if (CHECK_BASIC_FETCH_FUNCS(symbol, arg->fetch.fn)) | 592 | else if (CHECK_FETCH_FUNCS(symbol, arg->fetch.fn)) |
435 | free_symbol_cache(arg->fetch.data); | 593 | free_symbol_cache(arg->fetch.data); |
436 | kfree(arg->name); | 594 | kfree(arg->name); |
437 | kfree(arg->comm); | 595 | kfree(arg->comm); |
@@ -548,7 +706,7 @@ static int parse_probe_vars(char *arg, const struct fetch_type *t, | |||
548 | 706 | ||
549 | if (strcmp(arg, "retval") == 0) { | 707 | if (strcmp(arg, "retval") == 0) { |
550 | if (is_return) | 708 | if (is_return) |
551 | f->fn = t->retval; | 709 | f->fn = t->fetch[FETCH_MTD_retval]; |
552 | else | 710 | else |
553 | ret = -EINVAL; | 711 | ret = -EINVAL; |
554 | } else if (strncmp(arg, "stack", 5) == 0) { | 712 | } else if (strncmp(arg, "stack", 5) == 0) { |
@@ -562,7 +720,7 @@ static int parse_probe_vars(char *arg, const struct fetch_type *t, | |||
562 | if (ret || param > PARAM_MAX_STACK) | 720 | if (ret || param > PARAM_MAX_STACK) |
563 | ret = -EINVAL; | 721 | ret = -EINVAL; |
564 | else { | 722 | else { |
565 | f->fn = t->stack; | 723 | f->fn = t->fetch[FETCH_MTD_stack]; |
566 | f->data = (void *)param; | 724 | f->data = (void *)param; |
567 | } | 725 | } |
568 | } else | 726 | } else |
@@ -588,7 +746,7 @@ static int __parse_probe_arg(char *arg, const struct fetch_type *t, | |||
588 | case '%': /* named register */ | 746 | case '%': /* named register */ |
589 | ret = regs_query_register_offset(arg + 1); | 747 | ret = regs_query_register_offset(arg + 1); |
590 | if (ret >= 0) { | 748 | if (ret >= 0) { |
591 | f->fn = t->reg; | 749 | f->fn = t->fetch[FETCH_MTD_reg]; |
592 | f->data = (void *)(unsigned long)ret; | 750 | f->data = (void *)(unsigned long)ret; |
593 | ret = 0; | 751 | ret = 0; |
594 | } | 752 | } |
@@ -598,7 +756,7 @@ static int __parse_probe_arg(char *arg, const struct fetch_type *t, | |||
598 | ret = strict_strtoul(arg + 1, 0, ¶m); | 756 | ret = strict_strtoul(arg + 1, 0, ¶m); |
599 | if (ret) | 757 | if (ret) |
600 | break; | 758 | break; |
601 | f->fn = t->memory; | 759 | f->fn = t->fetch[FETCH_MTD_memory]; |
602 | f->data = (void *)param; | 760 | f->data = (void *)param; |
603 | } else { | 761 | } else { |
604 | ret = split_symbol_offset(arg + 1, &offset); | 762 | ret = split_symbol_offset(arg + 1, &offset); |
@@ -606,7 +764,7 @@ static int __parse_probe_arg(char *arg, const struct fetch_type *t, | |||
606 | break; | 764 | break; |
607 | f->data = alloc_symbol_cache(arg + 1, offset); | 765 | f->data = alloc_symbol_cache(arg + 1, offset); |
608 | if (f->data) | 766 | if (f->data) |
609 | f->fn = t->symbol; | 767 | f->fn = t->fetch[FETCH_MTD_symbol]; |
610 | } | 768 | } |
611 | break; | 769 | break; |
612 | case '+': /* deref memory */ | 770 | case '+': /* deref memory */ |
@@ -636,14 +794,17 @@ static int __parse_probe_arg(char *arg, const struct fetch_type *t, | |||
636 | if (ret) | 794 | if (ret) |
637 | kfree(dprm); | 795 | kfree(dprm); |
638 | else { | 796 | else { |
639 | f->fn = t->deref; | 797 | f->fn = t->fetch[FETCH_MTD_deref]; |
640 | f->data = (void *)dprm; | 798 | f->data = (void *)dprm; |
641 | } | 799 | } |
642 | } | 800 | } |
643 | break; | 801 | break; |
644 | } | 802 | } |
645 | if (!ret && !f->fn) { /* Parsed, but no fetch method found */ | 803 |
804 | pr_info("%s type has no corresponding fetch method.\n", | ||
805 | t->name); | ||
646 | ret = -EINVAL; | 806 | ret = -EINVAL; |
807 | } | ||
647 | return ret; | 808 | return ret; |
648 | } | 809 | } |
649 | 810 | ||
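For context, the branches above map the kprobe_events argument syntax onto fetch methods roughly as follows (a hedged summary; Documentation/trace/kprobetrace.txt is the authoritative list):

	/*   %ax, %si, ...       named register    -> t->fetch[FETCH_MTD_reg]
	 *   @0xdeadbeef         absolute address  -> t->fetch[FETCH_MTD_memory]
	 *   @symbol+offs        symbol + offset   -> t->fetch[FETCH_MTD_symbol]
	 *   +8(%sp), -4(@sym)   dereference       -> t->fetch[FETCH_MTD_deref]
	 *   $retval             return value      -> t->fetch[FETCH_MTD_retval] (return probes only)
	 *   $stackN             N-th stack entry  -> t->fetch[FETCH_MTD_stack]
	 */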
@@ -652,6 +813,7 @@ static int parse_probe_arg(char *arg, struct trace_probe *tp, | |||
652 | struct probe_arg *parg, int is_return) | 813 | struct probe_arg *parg, int is_return) |
653 | { | 814 | { |
654 | const char *t; | 815 | const char *t; |
816 | int ret; | ||
655 | 817 | ||
656 | if (strlen(arg) > MAX_ARGSTR_LEN) { | 818 | if (strlen(arg) > MAX_ARGSTR_LEN) { |
657 | pr_info("Argument is too long.: %s\n", arg); | 819 | pr_info("Argument is too long.: %s\n", arg); |
@@ -674,7 +836,13 @@ static int parse_probe_arg(char *arg, struct trace_probe *tp, | |||
674 | } | 836 | } |
675 | parg->offset = tp->size; | 837 | parg->offset = tp->size; |
676 | tp->size += parg->type->size; | 838 | tp->size += parg->type->size; |
677 | return __parse_probe_arg(arg, parg->type, &parg->fetch, is_return); | 839 | ret = __parse_probe_arg(arg, parg->type, &parg->fetch, is_return); |
840 | if (ret >= 0) { | ||
841 | parg->fetch_size.fn = get_fetch_size_function(parg->type, | ||
842 | parg->fetch.fn); | ||
843 | parg->fetch_size.data = parg->fetch.data; | ||
844 | } | ||
845 | return ret; | ||
678 | } | 846 | } |
679 | 847 | ||
680 | /* Return 1 if name is reserved or already used by another argument */ | 848 | /* Return 1 if name is reserved or already used by another argument */ |
@@ -757,14 +925,17 @@ static int create_trace_probe(int argc, char **argv) | |||
757 | pr_info("Delete command needs an event name.\n"); | 925 | pr_info("Delete command needs an event name.\n"); |
758 | return -EINVAL; | 926 | return -EINVAL; |
759 | } | 927 | } |
928 | mutex_lock(&probe_lock); | ||
760 | tp = find_probe_event(event, group); | 929 | tp = find_probe_event(event, group); |
761 | if (!tp) { | 930 | if (!tp) { |
931 | mutex_unlock(&probe_lock); | ||
762 | pr_info("Event %s/%s doesn't exist.\n", group, event); | 932 | pr_info("Event %s/%s doesn't exist.\n", group, event); |
763 | return -ENOENT; | 933 | return -ENOENT; |
764 | } | 934 | } |
765 | /* delete an event */ | 935 | /* delete an event */ |
766 | unregister_trace_probe(tp); | 936 | unregister_trace_probe(tp); |
767 | free_trace_probe(tp); | 937 | free_trace_probe(tp); |
938 | mutex_unlock(&probe_lock); | ||
768 | return 0; | 939 | return 0; |
769 | } | 940 | } |
770 | 941 | ||
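The widened lock scope keeps lookup and teardown of the probe in one critical section; a sketch of the resulting ordering:

	/* mutex_lock(&probe_lock);
	 *   tp = find_probe_event(event, group);   lookup in probe_list
	 *   unregister_trace_probe(tp);            unlink the probe
	 *   free_trace_probe(tp);                  release it
	 * mutex_unlock(&probe_lock);
	 *
	 * Without the lock held across all three steps, a concurrent writer
	 * could remove and free the same probe between lookup and unregister.
	 */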
@@ -1043,6 +1214,54 @@ static const struct file_operations kprobe_profile_ops = { | |||
1043 | .release = seq_release, | 1214 | .release = seq_release, |
1044 | }; | 1215 | }; |
1045 | 1216 | ||
1217 | /* Sum up total data length for dynamic arrays (strings) */ | ||
1218 | static __kprobes int __get_data_size(struct trace_probe *tp, | ||
1219 | struct pt_regs *regs) | ||
1220 | { | ||
1221 | int i, ret = 0; | ||
1222 | u32 len; | ||
1223 | |||
1224 | for (i = 0; i < tp->nr_args; i++) | ||
1225 | if (unlikely(tp->args[i].fetch_size.fn)) { | ||
1226 | call_fetch(&tp->args[i].fetch_size, regs, &len); | ||
1227 | ret += len; | ||
1228 | } | ||
1229 | |||
1230 | return ret; | ||
1231 | } | ||
1232 | |||
1233 | /* Store the value of each argument */ | ||
1234 | static __kprobes void store_trace_args(int ent_size, struct trace_probe *tp, | ||
1235 | struct pt_regs *regs, | ||
1236 | u8 *data, int maxlen) | ||
1237 | { | ||
1238 | int i; | ||
1239 | u32 end = tp->size; | ||
1240 | u32 *dl; /* Data (relative) location */ | ||
1241 | |||
1242 | for (i = 0; i < tp->nr_args; i++) { | ||
1243 | if (unlikely(tp->args[i].fetch_size.fn)) { | ||
1244 | /* | ||
1245 | * First, we set the relative location and | ||
1246 | * maximum data length to *dl | ||
1247 | */ | ||
1248 | dl = (u32 *)(data + tp->args[i].offset); | ||
1249 | *dl = make_data_rloc(maxlen, end - tp->args[i].offset); | ||
1250 | /* Then try to fetch string or dynamic array data */ | ||
1251 | call_fetch(&tp->args[i].fetch, regs, dl); | ||
1252 | /* Reduce maximum length */ | ||
1253 | end += get_rloc_len(*dl); | ||
1254 | maxlen -= get_rloc_len(*dl); | ||
1255 | /* Trick here, convert data_rloc to data_loc */ | ||
1256 | *dl = convert_rloc_to_loc(*dl, | ||
1257 | ent_size + tp->args[i].offset); | ||
1258 | } else | ||
1259 | /* Just fetching data normally */ | ||
1260 | call_fetch(&tp->args[i].fetch, regs, | ||
1261 | data + tp->args[i].offset); | ||
1262 | } | ||
1263 | } | ||
1264 | |||
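A sketch of the record layout store_trace_args() produces for one fixed-size argument followed by one string argument (offsets illustrative):

	/*  +-------------------------+----------+---------------+--------------+
	 *  | kprobe_trace_entry_head | arg0 u32 | arg1 data_loc | string bytes |
	 *  +-------------------------+----------+---------------+--------------+
	 *  |<-- sizeof(*entry) ----->|<-- tp->size ------------>|<-- dsize --->|
	 *
	 * arg1's u32 starts out as make_data_rloc(maxlen, offs) so the string
	 * fetcher knows where to copy, and is then rewritten with
	 * convert_rloc_to_loc() so its offset counts from the entry head,
	 * matching the "__data_loc char[]" field type exported to user space.
	 */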
1046 | /* Kprobe handler */ | 1265 | /* Kprobe handler */ |
1047 | static __kprobes void kprobe_trace_func(struct kprobe *kp, struct pt_regs *regs) | 1266 | static __kprobes void kprobe_trace_func(struct kprobe *kp, struct pt_regs *regs) |
1048 | { | 1267 | { |
@@ -1050,8 +1269,7 @@ static __kprobes void kprobe_trace_func(struct kprobe *kp, struct pt_regs *regs) | |||
1050 | struct kprobe_trace_entry_head *entry; | 1269 | struct kprobe_trace_entry_head *entry; |
1051 | struct ring_buffer_event *event; | 1270 | struct ring_buffer_event *event; |
1052 | struct ring_buffer *buffer; | 1271 | struct ring_buffer *buffer; |
1053 | u8 *data; | 1272 | int size, dsize, pc; |
1054 | int size, i, pc; | ||
1055 | unsigned long irq_flags; | 1273 | unsigned long irq_flags; |
1056 | struct ftrace_event_call *call = &tp->call; | 1274 | struct ftrace_event_call *call = &tp->call; |
1057 | 1275 | ||
@@ -1060,7 +1278,8 @@ static __kprobes void kprobe_trace_func(struct kprobe *kp, struct pt_regs *regs) | |||
1060 | local_save_flags(irq_flags); | 1278 | local_save_flags(irq_flags); |
1061 | pc = preempt_count(); | 1279 | pc = preempt_count(); |
1062 | 1280 | ||
1063 | size = sizeof(*entry) + tp->size; | 1281 | dsize = __get_data_size(tp, regs); |
1282 | size = sizeof(*entry) + tp->size + dsize; | ||
1064 | 1283 | ||
1065 | event = trace_current_buffer_lock_reserve(&buffer, call->event.type, | 1284 | event = trace_current_buffer_lock_reserve(&buffer, call->event.type, |
1066 | size, irq_flags, pc); | 1285 | size, irq_flags, pc); |
@@ -1069,9 +1288,7 @@ static __kprobes void kprobe_trace_func(struct kprobe *kp, struct pt_regs *regs) | |||
1069 | 1288 | ||
1070 | entry = ring_buffer_event_data(event); | 1289 | entry = ring_buffer_event_data(event); |
1071 | entry->ip = (unsigned long)kp->addr; | 1290 | entry->ip = (unsigned long)kp->addr; |
1072 | data = (u8 *)&entry[1]; | 1291 | store_trace_args(sizeof(*entry), tp, regs, (u8 *)&entry[1], dsize); |
1073 | for (i = 0; i < tp->nr_args; i++) | ||
1074 | call_fetch(&tp->args[i].fetch, regs, data + tp->args[i].offset); | ||
1075 | 1292 | ||
1076 | if (!filter_current_check_discard(buffer, call, entry, event)) | 1293 | if (!filter_current_check_discard(buffer, call, entry, event)) |
1077 | trace_nowake_buffer_unlock_commit(buffer, event, irq_flags, pc); | 1294 | trace_nowake_buffer_unlock_commit(buffer, event, irq_flags, pc); |
@@ -1085,15 +1302,15 @@ static __kprobes void kretprobe_trace_func(struct kretprobe_instance *ri, | |||
1085 | struct kretprobe_trace_entry_head *entry; | 1302 | struct kretprobe_trace_entry_head *entry; |
1086 | struct ring_buffer_event *event; | 1303 | struct ring_buffer_event *event; |
1087 | struct ring_buffer *buffer; | 1304 | struct ring_buffer *buffer; |
1088 | u8 *data; | 1305 | int size, pc, dsize; |
1089 | int size, i, pc; | ||
1090 | unsigned long irq_flags; | 1306 | unsigned long irq_flags; |
1091 | struct ftrace_event_call *call = &tp->call; | 1307 | struct ftrace_event_call *call = &tp->call; |
1092 | 1308 | ||
1093 | local_save_flags(irq_flags); | 1309 | local_save_flags(irq_flags); |
1094 | pc = preempt_count(); | 1310 | pc = preempt_count(); |
1095 | 1311 | ||
1096 | size = sizeof(*entry) + tp->size; | 1312 | dsize = __get_data_size(tp, regs); |
1313 | size = sizeof(*entry) + tp->size + dsize; | ||
1097 | 1314 | ||
1098 | event = trace_current_buffer_lock_reserve(&buffer, call->event.type, | 1315 | event = trace_current_buffer_lock_reserve(&buffer, call->event.type, |
1099 | size, irq_flags, pc); | 1316 | size, irq_flags, pc); |
@@ -1103,9 +1320,7 @@ static __kprobes void kretprobe_trace_func(struct kretprobe_instance *ri, | |||
1103 | entry = ring_buffer_event_data(event); | 1320 | entry = ring_buffer_event_data(event); |
1104 | entry->func = (unsigned long)tp->rp.kp.addr; | 1321 | entry->func = (unsigned long)tp->rp.kp.addr; |
1105 | entry->ret_ip = (unsigned long)ri->ret_addr; | 1322 | entry->ret_ip = (unsigned long)ri->ret_addr; |
1106 | data = (u8 *)&entry[1]; | 1323 | store_trace_args(sizeof(*entry), tp, regs, (u8 *)&entry[1], dsize); |
1107 | for (i = 0; i < tp->nr_args; i++) | ||
1108 | call_fetch(&tp->args[i].fetch, regs, data + tp->args[i].offset); | ||
1109 | 1324 | ||
1110 | if (!filter_current_check_discard(buffer, call, entry, event)) | 1325 | if (!filter_current_check_discard(buffer, call, entry, event)) |
1111 | trace_nowake_buffer_unlock_commit(buffer, event, irq_flags, pc); | 1326 | trace_nowake_buffer_unlock_commit(buffer, event, irq_flags, pc); |
@@ -1137,7 +1352,7 @@ print_kprobe_event(struct trace_iterator *iter, int flags, | |||
1137 | data = (u8 *)&field[1]; | 1352 | data = (u8 *)&field[1]; |
1138 | for (i = 0; i < tp->nr_args; i++) | 1353 | for (i = 0; i < tp->nr_args; i++) |
1139 | if (!tp->args[i].type->print(s, tp->args[i].name, | 1354 | if (!tp->args[i].type->print(s, tp->args[i].name, |
1140 | data + tp->args[i].offset)) | 1355 | data + tp->args[i].offset, field)) |
1141 | goto partial; | 1356 | goto partial; |
1142 | 1357 | ||
1143 | if (!trace_seq_puts(s, "\n")) | 1358 | if (!trace_seq_puts(s, "\n")) |
@@ -1179,7 +1394,7 @@ print_kretprobe_event(struct trace_iterator *iter, int flags, | |||
1179 | data = (u8 *)&field[1]; | 1394 | data = (u8 *)&field[1]; |
1180 | for (i = 0; i < tp->nr_args; i++) | 1395 | for (i = 0; i < tp->nr_args; i++) |
1181 | if (!tp->args[i].type->print(s, tp->args[i].name, | 1396 | if (!tp->args[i].type->print(s, tp->args[i].name, |
1182 | data + tp->args[i].offset)) | 1397 | data + tp->args[i].offset, field)) |
1183 | goto partial; | 1398 | goto partial; |
1184 | 1399 | ||
1185 | if (!trace_seq_puts(s, "\n")) | 1400 | if (!trace_seq_puts(s, "\n")) |
@@ -1214,11 +1429,6 @@ static void probe_event_disable(struct ftrace_event_call *call) | |||
1214 | } | 1429 | } |
1215 | } | 1430 | } |
1216 | 1431 | ||
1217 | static int probe_event_raw_init(struct ftrace_event_call *event_call) | ||
1218 | { | ||
1219 | return 0; | ||
1220 | } | ||
1221 | |||
1222 | #undef DEFINE_FIELD | 1432 | #undef DEFINE_FIELD |
1223 | #define DEFINE_FIELD(type, item, name, is_signed) \ | 1433 | #define DEFINE_FIELD(type, item, name, is_signed) \ |
1224 | do { \ | 1434 | do { \ |
@@ -1239,7 +1449,7 @@ static int kprobe_event_define_fields(struct ftrace_event_call *event_call) | |||
1239 | DEFINE_FIELD(unsigned long, ip, FIELD_STRING_IP, 0); | 1449 | DEFINE_FIELD(unsigned long, ip, FIELD_STRING_IP, 0); |
1240 | /* Set argument names as fields */ | 1450 | /* Set argument names as fields */ |
1241 | for (i = 0; i < tp->nr_args; i++) { | 1451 | for (i = 0; i < tp->nr_args; i++) { |
1242 | ret = trace_define_field(event_call, tp->args[i].type->name, | 1452 | ret = trace_define_field(event_call, tp->args[i].type->fmttype, |
1243 | tp->args[i].name, | 1453 | tp->args[i].name, |
1244 | sizeof(field) + tp->args[i].offset, | 1454 | sizeof(field) + tp->args[i].offset, |
1245 | tp->args[i].type->size, | 1455 | tp->args[i].type->size, |
@@ -1261,7 +1471,7 @@ static int kretprobe_event_define_fields(struct ftrace_event_call *event_call) | |||
1261 | DEFINE_FIELD(unsigned long, ret_ip, FIELD_STRING_RETIP, 0); | 1471 | DEFINE_FIELD(unsigned long, ret_ip, FIELD_STRING_RETIP, 0); |
1262 | /* Set argument names as fields */ | 1472 | /* Set argument names as fields */ |
1263 | for (i = 0; i < tp->nr_args; i++) { | 1473 | for (i = 0; i < tp->nr_args; i++) { |
1264 | ret = trace_define_field(event_call, tp->args[i].type->name, | 1474 | ret = trace_define_field(event_call, tp->args[i].type->fmttype, |
1265 | tp->args[i].name, | 1475 | tp->args[i].name, |
1266 | sizeof(field) + tp->args[i].offset, | 1476 | sizeof(field) + tp->args[i].offset, |
1267 | tp->args[i].type->size, | 1477 | tp->args[i].type->size, |
@@ -1301,8 +1511,13 @@ static int __set_print_fmt(struct trace_probe *tp, char *buf, int len) | |||
1301 | pos += snprintf(buf + pos, LEN_OR_ZERO, "\", %s", arg); | 1511 | pos += snprintf(buf + pos, LEN_OR_ZERO, "\", %s", arg); |
1302 | 1512 | ||
1303 | for (i = 0; i < tp->nr_args; i++) { | 1513 | for (i = 0; i < tp->nr_args; i++) { |
1304 | pos += snprintf(buf + pos, LEN_OR_ZERO, ", REC->%s", | 1514 | if (strcmp(tp->args[i].type->name, "string") == 0) |
1305 | tp->args[i].name); | 1515 | pos += snprintf(buf + pos, LEN_OR_ZERO, |
1516 | ", __get_str(%s)", | ||
1517 | tp->args[i].name); | ||
1518 | else | ||
1519 | pos += snprintf(buf + pos, LEN_OR_ZERO, ", REC->%s", | ||
1520 | tp->args[i].name); | ||
1306 | } | 1521 | } |
1307 | 1522 | ||
1308 | #undef LEN_OR_ZERO | 1523 | #undef LEN_OR_ZERO |
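With the branch above, a probe that declares a string argument ends up with a print fmt that goes through __get_str(); a hedged example of the generated format (field names and conversion specifiers are illustrative):

	/* print fmt: "(%lx) fd=%x path=\"%s\"",
	 *            REC->__probe_ip, REC->fd, __get_str(path)
	 */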
@@ -1339,11 +1554,11 @@ static __kprobes void kprobe_perf_func(struct kprobe *kp, | |||
1339 | struct ftrace_event_call *call = &tp->call; | 1554 | struct ftrace_event_call *call = &tp->call; |
1340 | struct kprobe_trace_entry_head *entry; | 1555 | struct kprobe_trace_entry_head *entry; |
1341 | struct hlist_head *head; | 1556 | struct hlist_head *head; |
1342 | u8 *data; | 1557 | int size, __size, dsize; |
1343 | int size, __size, i; | ||
1344 | int rctx; | 1558 | int rctx; |
1345 | 1559 | ||
1346 | __size = sizeof(*entry) + tp->size; | 1560 | dsize = __get_data_size(tp, regs); |
1561 | __size = sizeof(*entry) + tp->size + dsize; | ||
1347 | size = ALIGN(__size + sizeof(u32), sizeof(u64)); | 1562 | size = ALIGN(__size + sizeof(u32), sizeof(u64)); |
1348 | size -= sizeof(u32); | 1563 | size -= sizeof(u32); |
1349 | if (WARN_ONCE(size > PERF_MAX_TRACE_SIZE, | 1564 | if (WARN_ONCE(size > PERF_MAX_TRACE_SIZE, |
@@ -1355,9 +1570,8 @@ static __kprobes void kprobe_perf_func(struct kprobe *kp, | |||
1355 | return; | 1570 | return; |
1356 | 1571 | ||
1357 | entry->ip = (unsigned long)kp->addr; | 1572 | entry->ip = (unsigned long)kp->addr; |
1358 | data = (u8 *)&entry[1]; | 1573 | memset(&entry[1], 0, dsize); |
1359 | for (i = 0; i < tp->nr_args; i++) | 1574 | store_trace_args(sizeof(*entry), tp, regs, (u8 *)&entry[1], dsize); |
1360 | call_fetch(&tp->args[i].fetch, regs, data + tp->args[i].offset); | ||
1361 | 1575 | ||
1362 | head = this_cpu_ptr(call->perf_events); | 1576 | head = this_cpu_ptr(call->perf_events); |
1363 | perf_trace_buf_submit(entry, size, rctx, entry->ip, 1, regs, head); | 1577 | perf_trace_buf_submit(entry, size, rctx, entry->ip, 1, regs, head); |
@@ -1371,11 +1585,11 @@ static __kprobes void kretprobe_perf_func(struct kretprobe_instance *ri, | |||
1371 | struct ftrace_event_call *call = &tp->call; | 1585 | struct ftrace_event_call *call = &tp->call; |
1372 | struct kretprobe_trace_entry_head *entry; | 1586 | struct kretprobe_trace_entry_head *entry; |
1373 | struct hlist_head *head; | 1587 | struct hlist_head *head; |
1374 | u8 *data; | 1588 | int size, __size, dsize; |
1375 | int size, __size, i; | ||
1376 | int rctx; | 1589 | int rctx; |
1377 | 1590 | ||
1378 | __size = sizeof(*entry) + tp->size; | 1591 | dsize = __get_data_size(tp, regs); |
1592 | __size = sizeof(*entry) + tp->size + dsize; | ||
1379 | size = ALIGN(__size + sizeof(u32), sizeof(u64)); | 1593 | size = ALIGN(__size + sizeof(u32), sizeof(u64)); |
1380 | size -= sizeof(u32); | 1594 | size -= sizeof(u32); |
1381 | if (WARN_ONCE(size > PERF_MAX_TRACE_SIZE, | 1595 | if (WARN_ONCE(size > PERF_MAX_TRACE_SIZE, |
@@ -1388,9 +1602,7 @@ static __kprobes void kretprobe_perf_func(struct kretprobe_instance *ri, | |||
1388 | 1602 | ||
1389 | entry->func = (unsigned long)tp->rp.kp.addr; | 1603 | entry->func = (unsigned long)tp->rp.kp.addr; |
1390 | entry->ret_ip = (unsigned long)ri->ret_addr; | 1604 | entry->ret_ip = (unsigned long)ri->ret_addr; |
1391 | data = (u8 *)&entry[1]; | 1605 | store_trace_args(sizeof(*entry), tp, regs, (u8 *)&entry[1], dsize); |
1392 | for (i = 0; i < tp->nr_args; i++) | ||
1393 | call_fetch(&tp->args[i].fetch, regs, data + tp->args[i].offset); | ||
1394 | 1606 | ||
1395 | head = this_cpu_ptr(call->perf_events); | 1607 | head = this_cpu_ptr(call->perf_events); |
1396 | perf_trace_buf_submit(entry, size, rctx, entry->ret_ip, 1, regs, head); | 1608 | perf_trace_buf_submit(entry, size, rctx, entry->ret_ip, 1, regs, head); |
@@ -1486,15 +1698,12 @@ static int register_probe_event(struct trace_probe *tp) | |||
1486 | int ret; | 1698 | int ret; |
1487 | 1699 | ||
1488 | /* Initialize ftrace_event_call */ | 1700 | /* Initialize ftrace_event_call */ |
1701 | INIT_LIST_HEAD(&call->class->fields); | ||
1489 | if (probe_is_return(tp)) { | 1702 | if (probe_is_return(tp)) { |
1490 | INIT_LIST_HEAD(&call->class->fields); | ||
1491 | call->event.funcs = &kretprobe_funcs; | 1703 | call->event.funcs = &kretprobe_funcs; |
1492 | call->class->raw_init = probe_event_raw_init; | ||
1493 | call->class->define_fields = kretprobe_event_define_fields; | 1704 | call->class->define_fields = kretprobe_event_define_fields; |
1494 | } else { | 1705 | } else { |
1495 | INIT_LIST_HEAD(&call->class->fields); | ||
1496 | call->event.funcs = &kprobe_funcs; | 1706 | call->event.funcs = &kprobe_funcs; |
1497 | call->class->raw_init = probe_event_raw_init; | ||
1498 | call->class->define_fields = kprobe_event_define_fields; | 1707 | call->class->define_fields = kprobe_event_define_fields; |
1499 | } | 1708 | } |
1500 | if (set_print_fmt(tp) < 0) | 1709 | if (set_print_fmt(tp) < 0) |
diff --git a/kernel/trace/trace_ksym.c b/kernel/trace/trace_ksym.c deleted file mode 100644 index 8eaf00749b65..000000000000 --- a/kernel/trace/trace_ksym.c +++ /dev/null | |||
@@ -1,508 +0,0 @@ | |||
1 | /* | ||
2 | * trace_ksym.c - Kernel Symbol Tracer | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or modify | ||
5 | * it under the terms of the GNU General Public License as published by | ||
6 | * the Free Software Foundation; either version 2 of the License, or | ||
7 | * (at your option) any later version. | ||
8 | * | ||
9 | * This program is distributed in the hope that it will be useful, | ||
10 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
11 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
12 | * GNU General Public License for more details. | ||
13 | * | ||
14 | * You should have received a copy of the GNU General Public License | ||
15 | * along with this program; if not, write to the Free Software | ||
16 | * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. | ||
17 | * | ||
18 | * Copyright (C) IBM Corporation, 2009 | ||
19 | */ | ||
20 | |||
21 | #include <linux/kallsyms.h> | ||
22 | #include <linux/uaccess.h> | ||
23 | #include <linux/debugfs.h> | ||
24 | #include <linux/ftrace.h> | ||
25 | #include <linux/module.h> | ||
26 | #include <linux/slab.h> | ||
27 | #include <linux/fs.h> | ||
28 | |||
29 | #include "trace_output.h" | ||
30 | #include "trace.h" | ||
31 | |||
32 | #include <linux/hw_breakpoint.h> | ||
33 | #include <asm/hw_breakpoint.h> | ||
34 | |||
35 | #include <asm/atomic.h> | ||
36 | |||
37 | #define KSYM_TRACER_OP_LEN 3 /* rw- */ | ||
38 | |||
39 | struct trace_ksym { | ||
40 | struct perf_event **ksym_hbp; | ||
41 | struct perf_event_attr attr; | ||
42 | #ifdef CONFIG_PROFILE_KSYM_TRACER | ||
43 | atomic64_t counter; | ||
44 | #endif | ||
45 | struct hlist_node ksym_hlist; | ||
46 | }; | ||
47 | |||
48 | static struct trace_array *ksym_trace_array; | ||
49 | |||
50 | static unsigned int ksym_tracing_enabled; | ||
51 | |||
52 | static HLIST_HEAD(ksym_filter_head); | ||
53 | |||
54 | static DEFINE_MUTEX(ksym_tracer_mutex); | ||
55 | |||
56 | #ifdef CONFIG_PROFILE_KSYM_TRACER | ||
57 | |||
58 | #define MAX_UL_INT 0xffffffff | ||
59 | |||
60 | void ksym_collect_stats(unsigned long hbp_hit_addr) | ||
61 | { | ||
62 | struct hlist_node *node; | ||
63 | struct trace_ksym *entry; | ||
64 | |||
65 | rcu_read_lock(); | ||
66 | hlist_for_each_entry_rcu(entry, node, &ksym_filter_head, ksym_hlist) { | ||
67 | if (entry->attr.bp_addr == hbp_hit_addr) { | ||
68 | atomic64_inc(&entry->counter); | ||
69 | break; | ||
70 | } | ||
71 | } | ||
72 | rcu_read_unlock(); | ||
73 | } | ||
74 | #endif /* CONFIG_PROFILE_KSYM_TRACER */ | ||
75 | |||
76 | void ksym_hbp_handler(struct perf_event *hbp, int nmi, | ||
77 | struct perf_sample_data *data, | ||
78 | struct pt_regs *regs) | ||
79 | { | ||
80 | struct ring_buffer_event *event; | ||
81 | struct ksym_trace_entry *entry; | ||
82 | struct ring_buffer *buffer; | ||
83 | int pc; | ||
84 | |||
85 | if (!ksym_tracing_enabled) | ||
86 | return; | ||
87 | |||
88 | buffer = ksym_trace_array->buffer; | ||
89 | |||
90 | pc = preempt_count(); | ||
91 | |||
92 | event = trace_buffer_lock_reserve(buffer, TRACE_KSYM, | ||
93 | sizeof(*entry), 0, pc); | ||
94 | if (!event) | ||
95 | return; | ||
96 | |||
97 | entry = ring_buffer_event_data(event); | ||
98 | entry->ip = instruction_pointer(regs); | ||
99 | entry->type = hw_breakpoint_type(hbp); | ||
100 | entry->addr = hw_breakpoint_addr(hbp); | ||
101 | strlcpy(entry->cmd, current->comm, TASK_COMM_LEN); | ||
102 | |||
103 | #ifdef CONFIG_PROFILE_KSYM_TRACER | ||
104 | ksym_collect_stats(hw_breakpoint_addr(hbp)); | ||
105 | #endif /* CONFIG_PROFILE_KSYM_TRACER */ | ||
106 | |||
107 | trace_buffer_unlock_commit(buffer, event, 0, pc); | ||
108 | } | ||
109 | |||
110 | /* Valid access types are represented as | ||
111 | * | ||
112 | * rw- : Set Read/Write Access Breakpoint | ||
113 | * -w- : Set Write Access Breakpoint | ||
114 | * --- : Clear Breakpoints | ||
115 | * --x : Set Execution Break points (Not available yet) | ||
116 | * | ||
117 | */ | ||
118 | static int ksym_trace_get_access_type(char *str) | ||
119 | { | ||
120 | int access = 0; | ||
121 | |||
122 | if (str[0] == 'r') | ||
123 | access |= HW_BREAKPOINT_R; | ||
124 | |||
125 | if (str[1] == 'w') | ||
126 | access |= HW_BREAKPOINT_W; | ||
127 | |||
128 | if (str[2] == 'x') | ||
129 | access |= HW_BREAKPOINT_X; | ||
130 | |||
131 | switch (access) { | ||
132 | case HW_BREAKPOINT_R: | ||
133 | case HW_BREAKPOINT_W: | ||
134 | case HW_BREAKPOINT_W | HW_BREAKPOINT_R: | ||
135 | return access; | ||
136 | default: | ||
137 | return -EINVAL; | ||
138 | } | ||
139 | } | ||
140 | |||
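For reference, the access strings decoded here correspond to what used to be written into the (now removed) ksym_trace_filter file; a hedged usage sketch with an illustrative symbol:

	/*   echo 'pid_max:rw-' > /sys/kernel/debug/tracing/ksym_trace_filter   read/write breakpoint
	 *   echo 'pid_max:-w-' > /sys/kernel/debug/tracing/ksym_trace_filter   write-only breakpoint
	 *   echo '*:---'       > /sys/kernel/debug/tracing/ksym_trace_filter   clear all breakpoints
	 *
	 * ksym_trace_get_access_type() only accepts r--, -w- and rw-; execute
	 * breakpoints (--x) and anything else are rejected with -EINVAL.
	 */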
141 | /* | ||
142 | * There can be several possible malformed requests and we attempt to capture | ||
143 | * all of them. We enumerate some of the rules | ||
144 | * 1. We will not allow kernel symbols with ':' since it is used as a delimiter. | ||
145 | * i.e. multiple ':' symbols disallowed. Possible uses are of the form | ||
146 | * <module>:<ksym_name>:<op>. | ||
147 | * 2. No delimiter symbol ':' in the input string | ||
148 | * 3. Spurious operator symbols or symbols not in their respective positions | ||
149 | * 4. <ksym_name>:--- i.e. clear breakpoint request when ksym_name not in file | ||
150 | * 5. Kernel symbol not a part of /proc/kallsyms | ||
151 | * 6. Duplicate requests | ||
152 | */ | ||
153 | static int parse_ksym_trace_str(char *input_string, char **ksymname, | ||
154 | unsigned long *addr) | ||
155 | { | ||
156 | int ret; | ||
157 | |||
158 | *ksymname = strsep(&input_string, ":"); | ||
159 | *addr = kallsyms_lookup_name(*ksymname); | ||
160 | |||
161 | /* Check for malformed request: (2), (1) and (5) */ | ||
162 | if ((!input_string) || | ||
163 | (strlen(input_string) != KSYM_TRACER_OP_LEN) || | ||
164 | (*addr == 0)) | ||
165 | return -EINVAL;; | ||
166 | |||
167 | ret = ksym_trace_get_access_type(input_string); | ||
168 | |||
169 | return ret; | ||
170 | } | ||
171 | |||
172 | int process_new_ksym_entry(char *ksymname, int op, unsigned long addr) | ||
173 | { | ||
174 | struct trace_ksym *entry; | ||
175 | int ret = -ENOMEM; | ||
176 | |||
177 | entry = kzalloc(sizeof(struct trace_ksym), GFP_KERNEL); | ||
178 | if (!entry) | ||
179 | return -ENOMEM; | ||
180 | |||
181 | hw_breakpoint_init(&entry->attr); | ||
182 | |||
183 | entry->attr.bp_type = op; | ||
184 | entry->attr.bp_addr = addr; | ||
185 | entry->attr.bp_len = HW_BREAKPOINT_LEN_4; | ||
186 | |||
187 | entry->ksym_hbp = register_wide_hw_breakpoint(&entry->attr, | ||
188 | ksym_hbp_handler); | ||
189 | |||
190 | if (IS_ERR(entry->ksym_hbp)) { | ||
191 | ret = PTR_ERR(entry->ksym_hbp); | ||
192 | if (ret == -ENOSPC) { | ||
193 | printk(KERN_ERR "ksym_tracer: Maximum limit reached." | ||
194 | " No new requests for tracing can be accepted now.\n"); | ||
195 | } else { | ||
196 | printk(KERN_INFO "ksym_tracer request failed. Try again" | ||
197 | " later!!\n"); | ||
198 | } | ||
199 | goto err; | ||
200 | } | ||
201 | |||
202 | hlist_add_head_rcu(&(entry->ksym_hlist), &ksym_filter_head); | ||
203 | |||
204 | return 0; | ||
205 | |||
206 | err: | ||
207 | kfree(entry); | ||
208 | |||
209 | return ret; | ||
210 | } | ||
211 | |||
212 | static ssize_t ksym_trace_filter_read(struct file *filp, char __user *ubuf, | ||
213 | size_t count, loff_t *ppos) | ||
214 | { | ||
215 | struct trace_ksym *entry; | ||
216 | struct hlist_node *node; | ||
217 | struct trace_seq *s; | ||
218 | ssize_t cnt = 0; | ||
219 | int ret; | ||
220 | |||
221 | s = kmalloc(sizeof(*s), GFP_KERNEL); | ||
222 | if (!s) | ||
223 | return -ENOMEM; | ||
224 | trace_seq_init(s); | ||
225 | |||
226 | mutex_lock(&ksym_tracer_mutex); | ||
227 | |||
228 | hlist_for_each_entry(entry, node, &ksym_filter_head, ksym_hlist) { | ||
229 | ret = trace_seq_printf(s, "%pS:", | ||
230 | (void *)(unsigned long)entry->attr.bp_addr); | ||
231 | if (entry->attr.bp_type == HW_BREAKPOINT_R) | ||
232 | ret = trace_seq_puts(s, "r--\n"); | ||
233 | else if (entry->attr.bp_type == HW_BREAKPOINT_W) | ||
234 | ret = trace_seq_puts(s, "-w-\n"); | ||
235 | else if (entry->attr.bp_type == (HW_BREAKPOINT_W | HW_BREAKPOINT_R)) | ||
236 | ret = trace_seq_puts(s, "rw-\n"); | ||
237 | WARN_ON_ONCE(!ret); | ||
238 | } | ||
239 | |||
240 | cnt = simple_read_from_buffer(ubuf, count, ppos, s->buffer, s->len); | ||
241 | |||
242 | mutex_unlock(&ksym_tracer_mutex); | ||
243 | |||
244 | kfree(s); | ||
245 | |||
246 | return cnt; | ||
247 | } | ||
248 | |||
249 | static void __ksym_trace_reset(void) | ||
250 | { | ||
251 | struct trace_ksym *entry; | ||
252 | struct hlist_node *node, *node1; | ||
253 | |||
254 | mutex_lock(&ksym_tracer_mutex); | ||
255 | hlist_for_each_entry_safe(entry, node, node1, &ksym_filter_head, | ||
256 | ksym_hlist) { | ||
257 | unregister_wide_hw_breakpoint(entry->ksym_hbp); | ||
258 | hlist_del_rcu(&(entry->ksym_hlist)); | ||
259 | synchronize_rcu(); | ||
260 | kfree(entry); | ||
261 | } | ||
262 | mutex_unlock(&ksym_tracer_mutex); | ||
263 | } | ||
264 | |||
265 | static ssize_t ksym_trace_filter_write(struct file *file, | ||
266 | const char __user *buffer, | ||
267 | size_t count, loff_t *ppos) | ||
268 | { | ||
269 | struct trace_ksym *entry; | ||
270 | struct hlist_node *node; | ||
271 | char *buf, *input_string, *ksymname = NULL; | ||
272 | unsigned long ksym_addr = 0; | ||
273 | int ret, op, changed = 0; | ||
274 | |||
275 | buf = kzalloc(count + 1, GFP_KERNEL); | ||
276 | if (!buf) | ||
277 | return -ENOMEM; | ||
278 | |||
279 | ret = -EFAULT; | ||
280 | if (copy_from_user(buf, buffer, count)) | ||
281 | goto out; | ||
282 | |||
283 | buf[count] = '\0'; | ||
284 | input_string = strstrip(buf); | ||
285 | |||
286 | /* | ||
287 | * Clear all breakpoints if: | ||
288 | * 1: echo > ksym_trace_filter | ||
289 | * 2: echo 0 > ksym_trace_filter | ||
290 | * 3: echo "*:---" > ksym_trace_filter | ||
291 | */ | ||
292 | if (!input_string[0] || !strcmp(input_string, "0") || | ||
293 | !strcmp(input_string, "*:---")) { | ||
294 | __ksym_trace_reset(); | ||
295 | ret = 0; | ||
296 | goto out; | ||
297 | } | ||
298 | |||
299 | ret = op = parse_ksym_trace_str(input_string, &ksymname, &ksym_addr); | ||
300 | if (ret < 0) | ||
301 | goto out; | ||
302 | |||
303 | mutex_lock(&ksym_tracer_mutex); | ||
304 | |||
305 | ret = -EINVAL; | ||
306 | hlist_for_each_entry(entry, node, &ksym_filter_head, ksym_hlist) { | ||
307 | if (entry->attr.bp_addr == ksym_addr) { | ||
308 | /* Check for malformed request: (6) */ | ||
309 | if (entry->attr.bp_type != op) | ||
310 | changed = 1; | ||
311 | else | ||
312 | goto out_unlock; | ||
313 | break; | ||
314 | } | ||
315 | } | ||
316 | if (changed) { | ||
317 | unregister_wide_hw_breakpoint(entry->ksym_hbp); | ||
318 | entry->attr.bp_type = op; | ||
319 | ret = 0; | ||
320 | if (op > 0) { | ||
321 | entry->ksym_hbp = | ||
322 | register_wide_hw_breakpoint(&entry->attr, | ||
323 | ksym_hbp_handler); | ||
324 | if (IS_ERR(entry->ksym_hbp)) | ||
325 | ret = PTR_ERR(entry->ksym_hbp); | ||
326 | else | ||
327 | goto out_unlock; | ||
328 | } | ||
329 | /* Error or "symbol:---" case: drop it */ | ||
330 | hlist_del_rcu(&(entry->ksym_hlist)); | ||
331 | synchronize_rcu(); | ||
332 | kfree(entry); | ||
333 | goto out_unlock; | ||
334 | } else { | ||
335 | /* Check for malformed request: (4) */ | ||
336 | if (op) | ||
337 | ret = process_new_ksym_entry(ksymname, op, ksym_addr); | ||
338 | } | ||
339 | out_unlock: | ||
340 | mutex_unlock(&ksym_tracer_mutex); | ||
341 | out: | ||
342 | kfree(buf); | ||
343 | return !ret ? count : ret; | ||
344 | } | ||
345 | |||
346 | static const struct file_operations ksym_tracing_fops = { | ||
347 | .open = tracing_open_generic, | ||
348 | .read = ksym_trace_filter_read, | ||
349 | .write = ksym_trace_filter_write, | ||
350 | }; | ||
351 | |||
352 | static void ksym_trace_reset(struct trace_array *tr) | ||
353 | { | ||
354 | ksym_tracing_enabled = 0; | ||
355 | __ksym_trace_reset(); | ||
356 | } | ||
357 | |||
358 | static int ksym_trace_init(struct trace_array *tr) | ||
359 | { | ||
360 | int cpu, ret = 0; | ||
361 | |||
362 | for_each_online_cpu(cpu) | ||
363 | tracing_reset(tr, cpu); | ||
364 | ksym_tracing_enabled = 1; | ||
365 | ksym_trace_array = tr; | ||
366 | |||
367 | return ret; | ||
368 | } | ||
369 | |||
370 | static void ksym_trace_print_header(struct seq_file *m) | ||
371 | { | ||
372 | seq_puts(m, | ||
373 | "# TASK-PID CPU# Symbol " | ||
374 | "Type Function\n"); | ||
375 | seq_puts(m, | ||
376 | "# | | | " | ||
377 | " | |\n"); | ||
378 | } | ||
379 | |||
380 | static enum print_line_t ksym_trace_output(struct trace_iterator *iter) | ||
381 | { | ||
382 | struct trace_entry *entry = iter->ent; | ||
383 | struct trace_seq *s = &iter->seq; | ||
384 | struct ksym_trace_entry *field; | ||
385 | char str[KSYM_SYMBOL_LEN]; | ||
386 | int ret; | ||
387 | |||
388 | if (entry->type != TRACE_KSYM) | ||
389 | return TRACE_TYPE_UNHANDLED; | ||
390 | |||
391 | trace_assign_type(field, entry); | ||
392 | |||
393 | ret = trace_seq_printf(s, "%11s-%-5d [%03d] %pS", field->cmd, | ||
394 | entry->pid, iter->cpu, (char *)field->addr); | ||
395 | if (!ret) | ||
396 | return TRACE_TYPE_PARTIAL_LINE; | ||
397 | |||
398 | switch (field->type) { | ||
399 | case HW_BREAKPOINT_R: | ||
400 | ret = trace_seq_printf(s, " R "); | ||
401 | break; | ||
402 | case HW_BREAKPOINT_W: | ||
403 | ret = trace_seq_printf(s, " W "); | ||
404 | break; | ||
405 | case HW_BREAKPOINT_R | HW_BREAKPOINT_W: | ||
406 | ret = trace_seq_printf(s, " RW "); | ||
407 | break; | ||
408 | default: | ||
409 | return TRACE_TYPE_PARTIAL_LINE; | ||
410 | } | ||
411 | |||
412 | if (!ret) | ||
413 | return TRACE_TYPE_PARTIAL_LINE; | ||
414 | |||
415 | sprint_symbol(str, field->ip); | ||
416 | ret = trace_seq_printf(s, "%s\n", str); | ||
417 | if (!ret) | ||
418 | return TRACE_TYPE_PARTIAL_LINE; | ||
419 | |||
420 | return TRACE_TYPE_HANDLED; | ||
421 | } | ||
422 | |||
423 | struct tracer ksym_tracer __read_mostly = | ||
424 | { | ||
425 | .name = "ksym_tracer", | ||
426 | .init = ksym_trace_init, | ||
427 | .reset = ksym_trace_reset, | ||
428 | #ifdef CONFIG_FTRACE_SELFTEST | ||
429 | .selftest = trace_selftest_startup_ksym, | ||
430 | #endif | ||
431 | .print_header = ksym_trace_print_header, | ||
432 | .print_line = ksym_trace_output | ||
433 | }; | ||
434 | |||
435 | #ifdef CONFIG_PROFILE_KSYM_TRACER | ||
436 | static int ksym_profile_show(struct seq_file *m, void *v) | ||
437 | { | ||
438 | struct hlist_node *node; | ||
439 | struct trace_ksym *entry; | ||
440 | int access_type = 0; | ||
441 | char fn_name[KSYM_NAME_LEN]; | ||
442 | |||
443 | seq_puts(m, " Access Type "); | ||
444 | seq_puts(m, " Symbol Counter\n"); | ||
445 | seq_puts(m, " ----------- "); | ||
446 | seq_puts(m, " ------ -------\n"); | ||
447 | |||
448 | rcu_read_lock(); | ||
449 | hlist_for_each_entry_rcu(entry, node, &ksym_filter_head, ksym_hlist) { | ||
450 | |||
451 | access_type = entry->attr.bp_type; | ||
452 | |||
453 | switch (access_type) { | ||
454 | case HW_BREAKPOINT_R: | ||
455 | seq_puts(m, " R "); | ||
456 | break; | ||
457 | case HW_BREAKPOINT_W: | ||
458 | seq_puts(m, " W "); | ||
459 | break; | ||
460 | case HW_BREAKPOINT_R | HW_BREAKPOINT_W: | ||
461 | seq_puts(m, " RW "); | ||
462 | break; | ||
463 | default: | ||
464 | seq_puts(m, " NA "); | ||
465 | } | ||
466 | |||
467 | if (lookup_symbol_name(entry->attr.bp_addr, fn_name) >= 0) | ||
468 | seq_printf(m, " %-36s", fn_name); | ||
469 | else | ||
470 | seq_printf(m, " %-36s", "<NA>"); | ||
471 | seq_printf(m, " %15llu\n", | ||
472 | (unsigned long long)atomic64_read(&entry->counter)); | ||
473 | } | ||
474 | rcu_read_unlock(); | ||
475 | |||
476 | return 0; | ||
477 | } | ||
478 | |||
479 | static int ksym_profile_open(struct inode *node, struct file *file) | ||
480 | { | ||
481 | return single_open(file, ksym_profile_show, NULL); | ||
482 | } | ||
483 | |||
484 | static const struct file_operations ksym_profile_fops = { | ||
485 | .open = ksym_profile_open, | ||
486 | .read = seq_read, | ||
487 | .llseek = seq_lseek, | ||
488 | .release = single_release, | ||
489 | }; | ||
490 | #endif /* CONFIG_PROFILE_KSYM_TRACER */ | ||
491 | |||
492 | __init static int init_ksym_trace(void) | ||
493 | { | ||
494 | struct dentry *d_tracer; | ||
495 | |||
496 | d_tracer = tracing_init_dentry(); | ||
497 | |||
498 | trace_create_file("ksym_trace_filter", 0644, d_tracer, | ||
499 | NULL, &ksym_tracing_fops); | ||
500 | |||
501 | #ifdef CONFIG_PROFILE_KSYM_TRACER | ||
502 | trace_create_file("ksym_profile", 0444, d_tracer, | ||
503 | NULL, &ksym_profile_fops); | ||
504 | #endif | ||
505 | |||
506 | return register_tracer(&ksym_tracer); | ||
507 | } | ||
508 | device_initcall(init_ksym_trace); | ||
diff --git a/kernel/trace/trace_output.c b/kernel/trace/trace_output.c index 57c1b4596470..02272baa2206 100644 --- a/kernel/trace/trace_output.c +++ b/kernel/trace/trace_output.c | |||
@@ -16,9 +16,6 @@ | |||
16 | 16 | ||
17 | DECLARE_RWSEM(trace_event_mutex); | 17 | DECLARE_RWSEM(trace_event_mutex); |
18 | 18 | ||
19 | DEFINE_PER_CPU(struct trace_seq, ftrace_event_seq); | ||
20 | EXPORT_PER_CPU_SYMBOL(ftrace_event_seq); | ||
21 | |||
22 | static struct hlist_head event_hash[EVENT_HASHSIZE] __read_mostly; | 19 | static struct hlist_head event_hash[EVENT_HASHSIZE] __read_mostly; |
23 | 20 | ||
24 | static int next_event_type = __TRACE_LAST_TYPE + 1; | 21 | static int next_event_type = __TRACE_LAST_TYPE + 1; |
@@ -1069,65 +1066,6 @@ static struct trace_event trace_wake_event = { | |||
1069 | .funcs = &trace_wake_funcs, | 1066 | .funcs = &trace_wake_funcs, |
1070 | }; | 1067 | }; |
1071 | 1068 | ||
1072 | /* TRACE_SPECIAL */ | ||
1073 | static enum print_line_t trace_special_print(struct trace_iterator *iter, | ||
1074 | int flags, struct trace_event *event) | ||
1075 | { | ||
1076 | struct special_entry *field; | ||
1077 | |||
1078 | trace_assign_type(field, iter->ent); | ||
1079 | |||
1080 | if (!trace_seq_printf(&iter->seq, "# %ld %ld %ld\n", | ||
1081 | field->arg1, | ||
1082 | field->arg2, | ||
1083 | field->arg3)) | ||
1084 | return TRACE_TYPE_PARTIAL_LINE; | ||
1085 | |||
1086 | return TRACE_TYPE_HANDLED; | ||
1087 | } | ||
1088 | |||
1089 | static enum print_line_t trace_special_hex(struct trace_iterator *iter, | ||
1090 | int flags, struct trace_event *event) | ||
1091 | { | ||
1092 | struct special_entry *field; | ||
1093 | struct trace_seq *s = &iter->seq; | ||
1094 | |||
1095 | trace_assign_type(field, iter->ent); | ||
1096 | |||
1097 | SEQ_PUT_HEX_FIELD_RET(s, field->arg1); | ||
1098 | SEQ_PUT_HEX_FIELD_RET(s, field->arg2); | ||
1099 | SEQ_PUT_HEX_FIELD_RET(s, field->arg3); | ||
1100 | |||
1101 | return TRACE_TYPE_HANDLED; | ||
1102 | } | ||
1103 | |||
1104 | static enum print_line_t trace_special_bin(struct trace_iterator *iter, | ||
1105 | int flags, struct trace_event *event) | ||
1106 | { | ||
1107 | struct special_entry *field; | ||
1108 | struct trace_seq *s = &iter->seq; | ||
1109 | |||
1110 | trace_assign_type(field, iter->ent); | ||
1111 | |||
1112 | SEQ_PUT_FIELD_RET(s, field->arg1); | ||
1113 | SEQ_PUT_FIELD_RET(s, field->arg2); | ||
1114 | SEQ_PUT_FIELD_RET(s, field->arg3); | ||
1115 | |||
1116 | return TRACE_TYPE_HANDLED; | ||
1117 | } | ||
1118 | |||
1119 | static struct trace_event_functions trace_special_funcs = { | ||
1120 | .trace = trace_special_print, | ||
1121 | .raw = trace_special_print, | ||
1122 | .hex = trace_special_hex, | ||
1123 | .binary = trace_special_bin, | ||
1124 | }; | ||
1125 | |||
1126 | static struct trace_event trace_special_event = { | ||
1127 | .type = TRACE_SPECIAL, | ||
1128 | .funcs = &trace_special_funcs, | ||
1129 | }; | ||
1130 | |||
1131 | /* TRACE_STACK */ | 1069 | /* TRACE_STACK */ |
1132 | 1070 | ||
1133 | static enum print_line_t trace_stack_print(struct trace_iterator *iter, | 1071 | static enum print_line_t trace_stack_print(struct trace_iterator *iter, |
@@ -1161,9 +1099,6 @@ static enum print_line_t trace_stack_print(struct trace_iterator *iter, | |||
1161 | 1099 | ||
1162 | static struct trace_event_functions trace_stack_funcs = { | 1100 | static struct trace_event_functions trace_stack_funcs = { |
1163 | .trace = trace_stack_print, | 1101 | .trace = trace_stack_print, |
1164 | .raw = trace_special_print, | ||
1165 | .hex = trace_special_hex, | ||
1166 | .binary = trace_special_bin, | ||
1167 | }; | 1102 | }; |
1168 | 1103 | ||
1169 | static struct trace_event trace_stack_event = { | 1104 | static struct trace_event trace_stack_event = { |
@@ -1194,9 +1129,6 @@ static enum print_line_t trace_user_stack_print(struct trace_iterator *iter, | |||
1194 | 1129 | ||
1195 | static struct trace_event_functions trace_user_stack_funcs = { | 1130 | static struct trace_event_functions trace_user_stack_funcs = { |
1196 | .trace = trace_user_stack_print, | 1131 | .trace = trace_user_stack_print, |
1197 | .raw = trace_special_print, | ||
1198 | .hex = trace_special_hex, | ||
1199 | .binary = trace_special_bin, | ||
1200 | }; | 1132 | }; |
1201 | 1133 | ||
1202 | static struct trace_event trace_user_stack_event = { | 1134 | static struct trace_event trace_user_stack_event = { |
@@ -1314,7 +1246,6 @@ static struct trace_event *events[] __initdata = { | |||
1314 | &trace_fn_event, | 1246 | &trace_fn_event, |
1315 | &trace_ctx_event, | 1247 | &trace_ctx_event, |
1316 | &trace_wake_event, | 1248 | &trace_wake_event, |
1317 | &trace_special_event, | ||
1318 | &trace_stack_event, | 1249 | &trace_stack_event, |
1319 | &trace_user_stack_event, | 1250 | &trace_user_stack_event, |
1320 | &trace_bprint_event, | 1251 | &trace_bprint_event, |
diff --git a/kernel/trace/trace_sched_wakeup.c b/kernel/trace/trace_sched_wakeup.c index 0e73bc2ef8c5..4086eae6e81b 100644 --- a/kernel/trace/trace_sched_wakeup.c +++ b/kernel/trace/trace_sched_wakeup.c | |||
@@ -46,7 +46,6 @@ wakeup_tracer_call(unsigned long ip, unsigned long parent_ip) | |||
46 | struct trace_array_cpu *data; | 46 | struct trace_array_cpu *data; |
47 | unsigned long flags; | 47 | unsigned long flags; |
48 | long disabled; | 48 | long disabled; |
49 | int resched; | ||
50 | int cpu; | 49 | int cpu; |
51 | int pc; | 50 | int pc; |
52 | 51 | ||
@@ -54,7 +53,7 @@ wakeup_tracer_call(unsigned long ip, unsigned long parent_ip) | |||
54 | return; | 53 | return; |
55 | 54 | ||
56 | pc = preempt_count(); | 55 | pc = preempt_count(); |
57 | resched = ftrace_preempt_disable(); | 56 | preempt_disable_notrace(); |
58 | 57 | ||
59 | cpu = raw_smp_processor_id(); | 58 | cpu = raw_smp_processor_id(); |
60 | if (cpu != wakeup_current_cpu) | 59 | if (cpu != wakeup_current_cpu) |
@@ -74,7 +73,7 @@ wakeup_tracer_call(unsigned long ip, unsigned long parent_ip) | |||
74 | out: | 73 | out: |
75 | atomic_dec(&data->disabled); | 74 | atomic_dec(&data->disabled); |
76 | out_enable: | 75 | out_enable: |
77 | ftrace_preempt_enable(resched); | 76 | preempt_enable_notrace(); |
78 | } | 77 | } |
79 | 78 | ||
80 | static struct ftrace_ops trace_ops __read_mostly = | 79 | static struct ftrace_ops trace_ops __read_mostly = |
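
The wakeup_tracer_call() hunks swap the old ftrace_preempt_disable()/ftrace_preempt_enable(resched) helpers for plain preempt_disable_notrace()/preempt_enable_notrace(), which is why the local resched variable disappears; trace_stack.c further down gets the same conversion. A condensed, illustrative sketch of the resulting callback shape (names are placeholders, not the exact code):

/* Illustrative function-tracer callback using the _notrace pair; the per-cpu
 * "disabled" guard mirrors the pattern in the hunk above. */
static struct trace_array *example_tr;	/* assumed, set at tracer init */

static void example_trace_call(unsigned long ip, unsigned long parent_ip)
{
	struct trace_array_cpu *data;
	unsigned long flags;
	long disabled;
	int cpu, pc;

	pc = preempt_count();
	preempt_disable_notrace();		/* _notrace: never re-enter the tracer */

	cpu = raw_smp_processor_id();
	data = example_tr->data[cpu];
	disabled = atomic_inc_return(&data->disabled);
	if (likely(disabled == 1)) {
		local_irq_save(flags);
		trace_function(example_tr, ip, parent_ip, flags, pc);
		local_irq_restore(flags);
	}

	atomic_dec(&data->disabled);
	preempt_enable_notrace();		/* preemption (and any pending resched) resumes */
}
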
@@ -383,6 +382,7 @@ static struct tracer wakeup_tracer __read_mostly = | |||
383 | #ifdef CONFIG_FTRACE_SELFTEST | 382 | #ifdef CONFIG_FTRACE_SELFTEST |
384 | .selftest = trace_selftest_startup_wakeup, | 383 | .selftest = trace_selftest_startup_wakeup, |
385 | #endif | 384 | #endif |
385 | .use_max_tr = 1, | ||
386 | }; | 386 | }; |
387 | 387 | ||
388 | static struct tracer wakeup_rt_tracer __read_mostly = | 388 | static struct tracer wakeup_rt_tracer __read_mostly = |
@@ -397,6 +397,7 @@ static struct tracer wakeup_rt_tracer __read_mostly = | |||
397 | #ifdef CONFIG_FTRACE_SELFTEST | 397 | #ifdef CONFIG_FTRACE_SELFTEST |
398 | .selftest = trace_selftest_startup_wakeup, | 398 | .selftest = trace_selftest_startup_wakeup, |
399 | #endif | 399 | #endif |
400 | .use_max_tr = 1, | ||
400 | }; | 401 | }; |
401 | 402 | ||
402 | __init static int init_wakeup_tracer(void) | 403 | __init static int init_wakeup_tracer(void) |
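
The last two trace_sched_wakeup.c hunks tag both wakeup tracers with .use_max_tr. The flag appears to mark tracers that snapshot into the separate max-latency buffer, letting the core keep that second ring buffer sized up only while such a tracer is active; the hunks themselves just set the flag. A purely hypothetical declaration showing where it sits in struct tracer:

/* Hypothetical tracer definition; every name here is made up, only the
 * .print_max and .use_max_tr fields of struct tracer are the point. */
static struct tracer example_latency_tracer __read_mostly =
{
	.name		= "example_latency",
	.init		= example_tracer_init,	/* assumed callbacks */
	.reset		= example_tracer_reset,
	.print_max	= 1,		/* report the max-latency snapshot */
	.use_max_tr	= 1,		/* this tracer needs the max buffer allocated */
};
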
diff --git a/kernel/trace/trace_selftest.c b/kernel/trace/trace_selftest.c index 250e7f9bd2f0..155a415b3209 100644 --- a/kernel/trace/trace_selftest.c +++ b/kernel/trace/trace_selftest.c | |||
@@ -13,11 +13,9 @@ static inline int trace_valid_entry(struct trace_entry *entry) | |||
13 | case TRACE_WAKE: | 13 | case TRACE_WAKE: |
14 | case TRACE_STACK: | 14 | case TRACE_STACK: |
15 | case TRACE_PRINT: | 15 | case TRACE_PRINT: |
16 | case TRACE_SPECIAL: | ||
17 | case TRACE_BRANCH: | 16 | case TRACE_BRANCH: |
18 | case TRACE_GRAPH_ENT: | 17 | case TRACE_GRAPH_ENT: |
19 | case TRACE_GRAPH_RET: | 18 | case TRACE_GRAPH_RET: |
20 | case TRACE_KSYM: | ||
21 | return 1; | 19 | return 1; |
22 | } | 20 | } |
23 | return 0; | 21 | return 0; |
@@ -691,38 +689,6 @@ trace_selftest_startup_sched_switch(struct tracer *trace, struct trace_array *tr | |||
691 | } | 689 | } |
692 | #endif /* CONFIG_CONTEXT_SWITCH_TRACER */ | 690 | #endif /* CONFIG_CONTEXT_SWITCH_TRACER */ |
693 | 691 | ||
694 | #ifdef CONFIG_SYSPROF_TRACER | ||
695 | int | ||
696 | trace_selftest_startup_sysprof(struct tracer *trace, struct trace_array *tr) | ||
697 | { | ||
698 | unsigned long count; | ||
699 | int ret; | ||
700 | |||
701 | /* start the tracing */ | ||
702 | ret = tracer_init(trace, tr); | ||
703 | if (ret) { | ||
704 | warn_failed_init_tracer(trace, ret); | ||
705 | return ret; | ||
706 | } | ||
707 | |||
708 | /* Sleep for a 1/10 of a second */ | ||
709 | msleep(100); | ||
710 | /* stop the tracing. */ | ||
711 | tracing_stop(); | ||
712 | /* check the trace buffer */ | ||
713 | ret = trace_test_buffer(tr, &count); | ||
714 | trace->reset(tr); | ||
715 | tracing_start(); | ||
716 | |||
717 | if (!ret && !count) { | ||
718 | printk(KERN_CONT ".. no entries found .."); | ||
719 | ret = -1; | ||
720 | } | ||
721 | |||
722 | return ret; | ||
723 | } | ||
724 | #endif /* CONFIG_SYSPROF_TRACER */ | ||
725 | |||
726 | #ifdef CONFIG_BRANCH_TRACER | 692 | #ifdef CONFIG_BRANCH_TRACER |
727 | int | 693 | int |
728 | trace_selftest_startup_branch(struct tracer *trace, struct trace_array *tr) | 694 | trace_selftest_startup_branch(struct tracer *trace, struct trace_array *tr) |
@@ -755,56 +721,3 @@ trace_selftest_startup_branch(struct tracer *trace, struct trace_array *tr) | |||
755 | } | 721 | } |
756 | #endif /* CONFIG_BRANCH_TRACER */ | 722 | #endif /* CONFIG_BRANCH_TRACER */ |
757 | 723 | ||
758 | #ifdef CONFIG_KSYM_TRACER | ||
759 | static int ksym_selftest_dummy; | ||
760 | |||
761 | int | ||
762 | trace_selftest_startup_ksym(struct tracer *trace, struct trace_array *tr) | ||
763 | { | ||
764 | unsigned long count; | ||
765 | int ret; | ||
766 | |||
767 | /* start the tracing */ | ||
768 | ret = tracer_init(trace, tr); | ||
769 | if (ret) { | ||
770 | warn_failed_init_tracer(trace, ret); | ||
771 | return ret; | ||
772 | } | ||
773 | |||
774 | ksym_selftest_dummy = 0; | ||
775 | /* Register the read-write tracing request */ | ||
776 | |||
777 | ret = process_new_ksym_entry("ksym_selftest_dummy", | ||
778 | HW_BREAKPOINT_R | HW_BREAKPOINT_W, | ||
779 | (unsigned long)(&ksym_selftest_dummy)); | ||
780 | |||
781 | if (ret < 0) { | ||
782 | printk(KERN_CONT "ksym_trace read-write startup test failed\n"); | ||
783 | goto ret_path; | ||
784 | } | ||
785 | /* Perform a read and a write operation over the dummy variable to | ||
786 | * trigger the tracer | ||
787 | */ | ||
788 | if (ksym_selftest_dummy == 0) | ||
789 | ksym_selftest_dummy++; | ||
790 | |||
791 | /* stop the tracing. */ | ||
792 | tracing_stop(); | ||
793 | /* check the trace buffer */ | ||
794 | ret = trace_test_buffer(tr, &count); | ||
795 | trace->reset(tr); | ||
796 | tracing_start(); | ||
797 | |||
798 | /* read & write operations - one each is performed on the dummy variable | ||
799 | * triggering two entries in the trace buffer | ||
800 | */ | ||
801 | if (!ret && count != 2) { | ||
802 | printk(KERN_CONT "Ksym tracer startup test failed"); | ||
803 | ret = -1; | ||
804 | } | ||
805 | |||
806 | ret_path: | ||
807 | return ret; | ||
808 | } | ||
809 | #endif /* CONFIG_KSYM_TRACER */ | ||
810 | |||
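
Both selftests deleted here (sysprof and ksym) followed the same startup-test template still used by the remaining tests in this file: init the tracer, provoke some activity, stop tracing, count the buffered entries, then reset and restart. Condensed into a generic sketch (only the activity step varies between tests):

/* Generic shape of a tracer startup selftest in this file; the body between
 * init and tracing_stop() is whatever provokes the tracer under test. */
static int example_selftest(struct tracer *trace, struct trace_array *tr)
{
	unsigned long count;
	int ret;

	ret = tracer_init(trace, tr);		/* start the tracer */
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		return ret;
	}

	msleep(100);				/* or touch a watched variable, etc. */

	tracing_stop();				/* freeze the buffer */
	ret = trace_test_buffer(tr, &count);	/* validate and count entries */
	trace->reset(tr);
	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}
	return ret;
}
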
diff --git a/kernel/trace/trace_stack.c b/kernel/trace/trace_stack.c index f4bc9b27de5f..056468eae7cf 100644 --- a/kernel/trace/trace_stack.c +++ b/kernel/trace/trace_stack.c | |||
@@ -110,12 +110,12 @@ static inline void check_stack(void) | |||
110 | static void | 110 | static void |
111 | stack_trace_call(unsigned long ip, unsigned long parent_ip) | 111 | stack_trace_call(unsigned long ip, unsigned long parent_ip) |
112 | { | 112 | { |
113 | int cpu, resched; | 113 | int cpu; |
114 | 114 | ||
115 | if (unlikely(!ftrace_enabled || stack_trace_disabled)) | 115 | if (unlikely(!ftrace_enabled || stack_trace_disabled)) |
116 | return; | 116 | return; |
117 | 117 | ||
118 | resched = ftrace_preempt_disable(); | 118 | preempt_disable_notrace(); |
119 | 119 | ||
120 | cpu = raw_smp_processor_id(); | 120 | cpu = raw_smp_processor_id(); |
121 | /* no atomic needed, we only modify this variable by this cpu */ | 121 | /* no atomic needed, we only modify this variable by this cpu */ |
@@ -127,7 +127,7 @@ stack_trace_call(unsigned long ip, unsigned long parent_ip) | |||
127 | out: | 127 | out: |
128 | per_cpu(trace_active, cpu)--; | 128 | per_cpu(trace_active, cpu)--; |
129 | /* prevent recursion in schedule */ | 129 | /* prevent recursion in schedule */ |
130 | ftrace_preempt_enable(resched); | 130 | preempt_enable_notrace(); |
131 | } | 131 | } |
132 | 132 | ||
133 | static struct ftrace_ops trace_ops __read_mostly = | 133 | static struct ftrace_ops trace_ops __read_mostly = |
diff --git a/kernel/trace/trace_syscalls.c b/kernel/trace/trace_syscalls.c index 34e35804304b..bac752f0cfb5 100644 --- a/kernel/trace/trace_syscalls.c +++ b/kernel/trace/trace_syscalls.c | |||
@@ -23,6 +23,9 @@ static int syscall_exit_register(struct ftrace_event_call *event, | |||
23 | static int syscall_enter_define_fields(struct ftrace_event_call *call); | 23 | static int syscall_enter_define_fields(struct ftrace_event_call *call); |
24 | static int syscall_exit_define_fields(struct ftrace_event_call *call); | 24 | static int syscall_exit_define_fields(struct ftrace_event_call *call); |
25 | 25 | ||
26 | /* All syscall exit events have the same fields */ | ||
27 | static LIST_HEAD(syscall_exit_fields); | ||
28 | |||
26 | static struct list_head * | 29 | static struct list_head * |
27 | syscall_get_enter_fields(struct ftrace_event_call *call) | 30 | syscall_get_enter_fields(struct ftrace_event_call *call) |
28 | { | 31 | { |
@@ -34,9 +37,7 @@ syscall_get_enter_fields(struct ftrace_event_call *call) | |||
34 | static struct list_head * | 37 | static struct list_head * |
35 | syscall_get_exit_fields(struct ftrace_event_call *call) | 38 | syscall_get_exit_fields(struct ftrace_event_call *call) |
36 | { | 39 | { |
37 | struct syscall_metadata *entry = call->data; | 40 | return &syscall_exit_fields; |
38 | |||
39 | return &entry->exit_fields; | ||
40 | } | 41 | } |
41 | 42 | ||
42 | struct trace_event_functions enter_syscall_print_funcs = { | 43 | struct trace_event_functions enter_syscall_print_funcs = { |
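
The trace_syscalls.c change relies on every syscall-exit event recording the same two values, the syscall number and the return value, so one shared field list can describe all of them instead of a per-event exit_fields list hanging off syscall_metadata. Assuming the syscall_trace_exit layout this tree uses, the record being described is roughly:

/* Assumed layout of a syscall-exit record; identical for every syscall,
 * which is what makes a single shared field list sufficient. */
struct syscall_trace_exit {
	struct trace_entry	ent;	/* common tracing header */
	int			nr;	/* syscall number */
	long			ret;	/* return value */
};
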
diff --git a/kernel/trace/trace_sysprof.c b/kernel/trace/trace_sysprof.c deleted file mode 100644 index a7974a552ca9..000000000000 --- a/kernel/trace/trace_sysprof.c +++ /dev/null | |||
@@ -1,329 +0,0 @@ | |||
1 | /* | ||
2 | * trace stack traces | ||
3 | * | ||
4 | * Copyright (C) 2004-2008, Soeren Sandmann | ||
5 | * Copyright (C) 2007 Steven Rostedt <srostedt@redhat.com> | ||
6 | * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com> | ||
7 | */ | ||
8 | #include <linux/kallsyms.h> | ||
9 | #include <linux/debugfs.h> | ||
10 | #include <linux/hrtimer.h> | ||
11 | #include <linux/uaccess.h> | ||
12 | #include <linux/ftrace.h> | ||
13 | #include <linux/module.h> | ||
14 | #include <linux/irq.h> | ||
15 | #include <linux/fs.h> | ||
16 | |||
17 | #include <asm/stacktrace.h> | ||
18 | |||
19 | #include "trace.h" | ||
20 | |||
21 | static struct trace_array *sysprof_trace; | ||
22 | static int __read_mostly tracer_enabled; | ||
23 | |||
24 | /* | ||
25 | * 1 msec sample interval by default: | ||
26 | */ | ||
27 | static unsigned long sample_period = 1000000; | ||
28 | static const unsigned int sample_max_depth = 512; | ||
29 | |||
30 | static DEFINE_MUTEX(sample_timer_lock); | ||
31 | /* | ||
32 | * Per CPU hrtimers that do the profiling: | ||
33 | */ | ||
34 | static DEFINE_PER_CPU(struct hrtimer, stack_trace_hrtimer); | ||
35 | |||
36 | struct stack_frame { | ||
37 | const void __user *next_fp; | ||
38 | unsigned long return_address; | ||
39 | }; | ||
40 | |||
41 | static int copy_stack_frame(const void __user *fp, struct stack_frame *frame) | ||
42 | { | ||
43 | int ret; | ||
44 | |||
45 | if (!access_ok(VERIFY_READ, fp, sizeof(*frame))) | ||
46 | return 0; | ||
47 | |||
48 | ret = 1; | ||
49 | pagefault_disable(); | ||
50 | if (__copy_from_user_inatomic(frame, fp, sizeof(*frame))) | ||
51 | ret = 0; | ||
52 | pagefault_enable(); | ||
53 | |||
54 | return ret; | ||
55 | } | ||
56 | |||
57 | struct backtrace_info { | ||
58 | struct trace_array_cpu *data; | ||
59 | struct trace_array *tr; | ||
60 | int pos; | ||
61 | }; | ||
62 | |||
63 | static void | ||
64 | backtrace_warning_symbol(void *data, char *msg, unsigned long symbol) | ||
65 | { | ||
66 | /* Ignore warnings */ | ||
67 | } | ||
68 | |||
69 | static void backtrace_warning(void *data, char *msg) | ||
70 | { | ||
71 | /* Ignore warnings */ | ||
72 | } | ||
73 | |||
74 | static int backtrace_stack(void *data, char *name) | ||
75 | { | ||
76 | /* Don't bother with IRQ stacks for now */ | ||
77 | return -1; | ||
78 | } | ||
79 | |||
80 | static void backtrace_address(void *data, unsigned long addr, int reliable) | ||
81 | { | ||
82 | struct backtrace_info *info = data; | ||
83 | |||
84 | if (info->pos < sample_max_depth && reliable) { | ||
85 | __trace_special(info->tr, info->data, 1, addr, 0); | ||
86 | |||
87 | info->pos++; | ||
88 | } | ||
89 | } | ||
90 | |||
91 | static const struct stacktrace_ops backtrace_ops = { | ||
92 | .warning = backtrace_warning, | ||
93 | .warning_symbol = backtrace_warning_symbol, | ||
94 | .stack = backtrace_stack, | ||
95 | .address = backtrace_address, | ||
96 | .walk_stack = print_context_stack, | ||
97 | }; | ||
98 | |||
99 | static int | ||
100 | trace_kernel(struct pt_regs *regs, struct trace_array *tr, | ||
101 | struct trace_array_cpu *data) | ||
102 | { | ||
103 | struct backtrace_info info; | ||
104 | unsigned long bp; | ||
105 | char *stack; | ||
106 | |||
107 | info.tr = tr; | ||
108 | info.data = data; | ||
109 | info.pos = 1; | ||
110 | |||
111 | __trace_special(info.tr, info.data, 1, regs->ip, 0); | ||
112 | |||
113 | stack = ((char *)regs + sizeof(struct pt_regs)); | ||
114 | #ifdef CONFIG_FRAME_POINTER | ||
115 | bp = regs->bp; | ||
116 | #else | ||
117 | bp = 0; | ||
118 | #endif | ||
119 | |||
120 | dump_trace(NULL, regs, (void *)stack, bp, &backtrace_ops, &info); | ||
121 | |||
122 | return info.pos; | ||
123 | } | ||
124 | |||
125 | static void timer_notify(struct pt_regs *regs, int cpu) | ||
126 | { | ||
127 | struct trace_array_cpu *data; | ||
128 | struct stack_frame frame; | ||
129 | struct trace_array *tr; | ||
130 | const void __user *fp; | ||
131 | int is_user; | ||
132 | int i; | ||
133 | |||
134 | if (!regs) | ||
135 | return; | ||
136 | |||
137 | tr = sysprof_trace; | ||
138 | data = tr->data[cpu]; | ||
139 | is_user = user_mode(regs); | ||
140 | |||
141 | if (!current || current->pid == 0) | ||
142 | return; | ||
143 | |||
144 | if (is_user && current->state != TASK_RUNNING) | ||
145 | return; | ||
146 | |||
147 | __trace_special(tr, data, 0, 0, current->pid); | ||
148 | |||
149 | if (!is_user) | ||
150 | i = trace_kernel(regs, tr, data); | ||
151 | else | ||
152 | i = 0; | ||
153 | |||
154 | /* | ||
155 | * Trace user stack if we are not a kernel thread | ||
156 | */ | ||
157 | if (current->mm && i < sample_max_depth) { | ||
158 | regs = (struct pt_regs *)current->thread.sp0 - 1; | ||
159 | |||
160 | fp = (void __user *)regs->bp; | ||
161 | |||
162 | __trace_special(tr, data, 2, regs->ip, 0); | ||
163 | |||
164 | while (i < sample_max_depth) { | ||
165 | frame.next_fp = NULL; | ||
166 | frame.return_address = 0; | ||
167 | if (!copy_stack_frame(fp, &frame)) | ||
168 | break; | ||
169 | if ((unsigned long)fp < regs->sp) | ||
170 | break; | ||
171 | |||
172 | __trace_special(tr, data, 2, frame.return_address, | ||
173 | (unsigned long)fp); | ||
174 | fp = frame.next_fp; | ||
175 | |||
176 | i++; | ||
177 | } | ||
178 | |||
179 | } | ||
180 | |||
181 | /* | ||
182 | * Special trace entry if we overflow the max depth: | ||
183 | */ | ||
184 | if (i == sample_max_depth) | ||
185 | __trace_special(tr, data, -1, -1, -1); | ||
186 | |||
187 | __trace_special(tr, data, 3, current->pid, i); | ||
188 | } | ||
189 | |||
190 | static enum hrtimer_restart stack_trace_timer_fn(struct hrtimer *hrtimer) | ||
191 | { | ||
192 | /* trace here */ | ||
193 | timer_notify(get_irq_regs(), smp_processor_id()); | ||
194 | |||
195 | hrtimer_forward_now(hrtimer, ns_to_ktime(sample_period)); | ||
196 | |||
197 | return HRTIMER_RESTART; | ||
198 | } | ||
199 | |||
200 | static void start_stack_timer(void *unused) | ||
201 | { | ||
202 | struct hrtimer *hrtimer = &__get_cpu_var(stack_trace_hrtimer); | ||
203 | |||
204 | hrtimer_init(hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); | ||
205 | hrtimer->function = stack_trace_timer_fn; | ||
206 | |||
207 | hrtimer_start(hrtimer, ns_to_ktime(sample_period), | ||
208 | HRTIMER_MODE_REL_PINNED); | ||
209 | } | ||
210 | |||
211 | static void start_stack_timers(void) | ||
212 | { | ||
213 | on_each_cpu(start_stack_timer, NULL, 1); | ||
214 | } | ||
215 | |||
216 | static void stop_stack_timer(int cpu) | ||
217 | { | ||
218 | struct hrtimer *hrtimer = &per_cpu(stack_trace_hrtimer, cpu); | ||
219 | |||
220 | hrtimer_cancel(hrtimer); | ||
221 | } | ||
222 | |||
223 | static void stop_stack_timers(void) | ||
224 | { | ||
225 | int cpu; | ||
226 | |||
227 | for_each_online_cpu(cpu) | ||
228 | stop_stack_timer(cpu); | ||
229 | } | ||
230 | |||
231 | static void stop_stack_trace(struct trace_array *tr) | ||
232 | { | ||
233 | mutex_lock(&sample_timer_lock); | ||
234 | stop_stack_timers(); | ||
235 | tracer_enabled = 0; | ||
236 | mutex_unlock(&sample_timer_lock); | ||
237 | } | ||
238 | |||
239 | static int stack_trace_init(struct trace_array *tr) | ||
240 | { | ||
241 | sysprof_trace = tr; | ||
242 | |||
243 | tracing_start_cmdline_record(); | ||
244 | |||
245 | mutex_lock(&sample_timer_lock); | ||
246 | start_stack_timers(); | ||
247 | tracer_enabled = 1; | ||
248 | mutex_unlock(&sample_timer_lock); | ||
249 | return 0; | ||
250 | } | ||
251 | |||
252 | static void stack_trace_reset(struct trace_array *tr) | ||
253 | { | ||
254 | tracing_stop_cmdline_record(); | ||
255 | stop_stack_trace(tr); | ||
256 | } | ||
257 | |||
258 | static struct tracer stack_trace __read_mostly = | ||
259 | { | ||
260 | .name = "sysprof", | ||
261 | .init = stack_trace_init, | ||
262 | .reset = stack_trace_reset, | ||
263 | #ifdef CONFIG_FTRACE_SELFTEST | ||
264 | .selftest = trace_selftest_startup_sysprof, | ||
265 | #endif | ||
266 | }; | ||
267 | |||
268 | __init static int init_stack_trace(void) | ||
269 | { | ||
270 | return register_tracer(&stack_trace); | ||
271 | } | ||
272 | device_initcall(init_stack_trace); | ||
273 | |||
274 | #define MAX_LONG_DIGITS 22 | ||
275 | |||
276 | static ssize_t | ||
277 | sysprof_sample_read(struct file *filp, char __user *ubuf, | ||
278 | size_t cnt, loff_t *ppos) | ||
279 | { | ||
280 | char buf[MAX_LONG_DIGITS]; | ||
281 | int r; | ||
282 | |||
283 | r = sprintf(buf, "%ld\n", nsecs_to_usecs(sample_period)); | ||
284 | |||
285 | return simple_read_from_buffer(ubuf, cnt, ppos, buf, r); | ||
286 | } | ||
287 | |||
288 | static ssize_t | ||
289 | sysprof_sample_write(struct file *filp, const char __user *ubuf, | ||
290 | size_t cnt, loff_t *ppos) | ||
291 | { | ||
292 | char buf[MAX_LONG_DIGITS]; | ||
293 | unsigned long val; | ||
294 | |||
295 | if (cnt > MAX_LONG_DIGITS-1) | ||
296 | cnt = MAX_LONG_DIGITS-1; | ||
297 | |||
298 | if (copy_from_user(&buf, ubuf, cnt)) | ||
299 | return -EFAULT; | ||
300 | |||
301 | buf[cnt] = 0; | ||
302 | |||
303 | val = simple_strtoul(buf, NULL, 10); | ||
304 | /* | ||
305 | * Enforce a minimum sample period of 100 usecs: | ||
306 | */ | ||
307 | if (val < 100) | ||
308 | val = 100; | ||
309 | |||
310 | mutex_lock(&sample_timer_lock); | ||
311 | stop_stack_timers(); | ||
312 | sample_period = val * 1000; | ||
313 | start_stack_timers(); | ||
314 | mutex_unlock(&sample_timer_lock); | ||
315 | |||
316 | return cnt; | ||
317 | } | ||
318 | |||
319 | static const struct file_operations sysprof_sample_fops = { | ||
320 | .read = sysprof_sample_read, | ||
321 | .write = sysprof_sample_write, | ||
322 | }; | ||
323 | |||
324 | void init_tracer_sysprof_debugfs(struct dentry *d_tracer) | ||
325 | { | ||
326 | |||
327 | trace_create_file("sysprof_sample_period", 0644, | ||
328 | d_tracer, NULL, &sysprof_sample_fops); | ||
329 | } | ||
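
Finally, the deleted sysprof tracer sampled on pinned per-cpu hrtimers and exposed its period through a debugfs file in microseconds, storing nanoseconds internally with a 100 usec floor. A small worked example of that conversion, condensed from the write handler above:

/* Worked example of the deleted period handling: the debugfs file took a
 * microsecond value, clamped it to >= 100, and stored nanoseconds. */
static unsigned long sysprof_period_ns(unsigned long usecs)
{
	if (usecs < 100)		/* enforce the 100 usec floor */
		usecs = 100;
	return usecs * 1000;		/* e.g. 250 -> 250000 ns (~4000 samples/sec/cpu) */
}
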