Diffstat (limited to 'kernel')

 kernel/Makefile               |   1 +
 kernel/hw_breakpoint.c        | 378 +
 kernel/trace/Kconfig          |  21 +
 kernel/trace/Makefile         |   1 +
 kernel/trace/trace.h          |  18 +
 kernel/trace/trace_ksym.c     | 550 +
 kernel/trace/trace_selftest.c |  53 +
 7 files changed, 1022 insertions(+), 0 deletions(-)
diff --git a/kernel/Makefile b/kernel/Makefile
index 2093a691f1c2..52508612a08f 100644
--- a/kernel/Makefile
+++ b/kernel/Makefile
@@ -99,6 +99,7 @@ obj-$(CONFIG_X86_DS) += trace/
 obj-$(CONFIG_RING_BUFFER) += trace/
 obj-$(CONFIG_SMP) += sched_cpupri.o
 obj-$(CONFIG_SLOW_WORK) += slow-work.o
+obj-$(CONFIG_HAVE_HW_BREAKPOINT) += hw_breakpoint.o
 obj-$(CONFIG_PERF_COUNTERS) += perf_counter.o
 
 ifneq ($(CONFIG_SCHED_OMIT_FRAME_POINTER),y)
diff --git a/kernel/hw_breakpoint.c b/kernel/hw_breakpoint.c
new file mode 100644
index 000000000000..c1f64e65a9f3
--- /dev/null
+++ b/kernel/hw_breakpoint.c
@@ -0,0 +1,378 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * Copyright (C) 2007 Alan Stern
+ * Copyright (C) IBM Corporation, 2009
+ */
+
+/*
+ * HW_breakpoint: a unified kernel/user-space hardware breakpoint facility,
+ * using the CPU's debug registers.
+ * This file contains the arch-independent routines.
+ */
+
+#include <linux/irqflags.h>
+#include <linux/kallsyms.h>
+#include <linux/notifier.h>
+#include <linux/kprobes.h>
+#include <linux/kdebug.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/percpu.h>
+#include <linux/sched.h>
+#include <linux/init.h>
+#include <linux/smp.h>
+
+#include <asm/hw_breakpoint.h>
+#include <asm/processor.h>
+
+#ifdef CONFIG_X86
+#include <asm/debugreg.h>
+#endif
+/*
+ * Spinlock that protects all (un)register operations over kernel/user-space
+ * breakpoint requests
+ */
+static DEFINE_SPINLOCK(hw_breakpoint_lock);
+
+/* Array of kernel-space breakpoint structures */
+struct hw_breakpoint *hbp_kernel[HBP_NUM];
+
+/*
+ * Per-processor copy of hbp_kernel[]. Used only while hbp_kernel[] is being
+ * modified but we still need the older copy to handle any hbp exceptions. It
+ * is brought back in sync with hbp_kernel[] once the update is done via IPIs.
+ */
+DEFINE_PER_CPU(struct hw_breakpoint*, this_hbp_kernel[HBP_NUM]);
+
+/*
+ * Kernel breakpoints grow downwards, starting from HBP_NUM.
+ * 'hbp_kernel_pos' denotes the lowest numbered breakpoint register occupied
+ * by a kernel-space request. We initialise it here rather than in an __init
+ * routine because load_debug_registers(), which uses this variable, can be
+ * called very early during CPU initialisation.
+ */
+unsigned int hbp_kernel_pos = HBP_NUM;
+
+/*
+ * An array containing the refcount of threads using a given bkpt register.
+ * Accesses are synchronised by acquiring hw_breakpoint_lock.
+ */
+unsigned int hbp_user_refcount[HBP_NUM];
+
+/*
+ * Load the debug registers during startup of a CPU.
+ */
+void load_debug_registers(void)
+{
+	unsigned long flags;
+	struct task_struct *tsk = current;
+
+	spin_lock_bh(&hw_breakpoint_lock);
+
+	/* Prevent IPIs for new kernel breakpoint updates */
+	local_irq_save(flags);
+	arch_update_kernel_hw_breakpoint(NULL);
+	local_irq_restore(flags);
+
+	if (test_tsk_thread_flag(tsk, TIF_DEBUG))
+		arch_install_thread_hw_breakpoint(tsk);
+
+	spin_unlock_bh(&hw_breakpoint_lock);
+}
+
+/*
+ * Erase all the hardware breakpoint info associated with a thread.
+ *
+ * If tsk != current then tsk must not be usable (for example, a
+ * child being cleaned up from a failed fork).
+ */
+void flush_thread_hw_breakpoint(struct task_struct *tsk)
+{
+	int i;
+	struct thread_struct *thread = &(tsk->thread);
+
+	spin_lock_bh(&hw_breakpoint_lock);
+
+	/* The thread no longer has any breakpoints associated with it */
+	clear_tsk_thread_flag(tsk, TIF_DEBUG);
+	for (i = 0; i < HBP_NUM; i++) {
+		if (thread->hbp[i]) {
+			hbp_user_refcount[i]--;
+			kfree(thread->hbp[i]);
+			thread->hbp[i] = NULL;
+		}
+	}
+
+	arch_flush_thread_hw_breakpoint(tsk);
+
+	/* Actually uninstall the breakpoints if necessary */
+	if (tsk == current)
+		arch_uninstall_thread_hw_breakpoint();
+	spin_unlock_bh(&hw_breakpoint_lock);
+}
+
+/*
+ * Copy the hardware breakpoint info from a thread to its cloned child.
+ */
+int copy_thread_hw_breakpoint(struct task_struct *tsk,
+		struct task_struct *child, unsigned long clone_flags)
+{
+	/*
+	 * We will assume that breakpoint settings are not inherited
+	 * and the child starts out with no debug registers set.
+	 * But what about CLONE_PTRACE?
+	 */
+	clear_tsk_thread_flag(child, TIF_DEBUG);
+
+	/* We will call flush routine since the debugregs are not inherited */
+	arch_flush_thread_hw_breakpoint(child);
+
+	return 0;
+}
+
+static int __register_user_hw_breakpoint(int pos, struct task_struct *tsk,
+					struct hw_breakpoint *bp)
+{
+	struct thread_struct *thread = &(tsk->thread);
+	int rc;
+
+	/* Do not overcommit. Fail if kernel has used the hbp registers */
+	if (pos >= hbp_kernel_pos)
+		return -ENOSPC;
+
+	rc = arch_validate_hwbkpt_settings(bp, tsk);
+	if (rc)
+		return rc;
+
+	thread->hbp[pos] = bp;
+	hbp_user_refcount[pos]++;
+
+	arch_update_user_hw_breakpoint(pos, tsk);
+	/*
+	 * Does it need to be installed right now?
+	 * Otherwise it will get installed the next time tsk runs
+	 */
+	if (tsk == current)
+		arch_install_thread_hw_breakpoint(tsk);
+
+	return rc;
+}
+
+/*
+ * Modify the address of an hbp register already in use by the task.
+ * Do not invoke this in lieu of a __unregister_user_hw_breakpoint().
+ */
+static int __modify_user_hw_breakpoint(int pos, struct task_struct *tsk,
+					struct hw_breakpoint *bp)
+{
+	struct thread_struct *thread = &(tsk->thread);
+
+	if ((pos >= hbp_kernel_pos) || (arch_validate_hwbkpt_settings(bp, tsk)))
+		return -EINVAL;
+
+	if (thread->hbp[pos] == NULL)
+		return -EINVAL;
+
+	thread->hbp[pos] = bp;
+	/*
+	 * 'pos' must be that of an hbp register already used by 'tsk',
+	 * otherwise arch_modify_user_hw_breakpoint() will fail
+	 */
+	arch_update_user_hw_breakpoint(pos, tsk);
+
+	if (tsk == current)
+		arch_install_thread_hw_breakpoint(tsk);
+
+	return 0;
+}
+
+static void __unregister_user_hw_breakpoint(int pos, struct task_struct *tsk)
+{
+	hbp_user_refcount[pos]--;
+	tsk->thread.hbp[pos] = NULL;
+
+	arch_update_user_hw_breakpoint(pos, tsk);
+
+	if (tsk == current)
+		arch_install_thread_hw_breakpoint(tsk);
+}
+
+/**
+ * register_user_hw_breakpoint - register a hardware breakpoint for user space
+ * @tsk: pointer to 'task_struct' of the process to which the address belongs
+ * @bp: the breakpoint structure to register
+ *
+ * @bp.info->name or @bp.info->address, @bp.info->len, @bp.info->type and
+ * @bp->triggered must be set properly before invocation
+ *
+ */
+int register_user_hw_breakpoint(struct task_struct *tsk,
+					struct hw_breakpoint *bp)
+{
+	struct thread_struct *thread = &(tsk->thread);
+	int i, rc = -ENOSPC;
+
+	spin_lock_bh(&hw_breakpoint_lock);
+
+	for (i = 0; i < hbp_kernel_pos; i++) {
+		if (!thread->hbp[i]) {
+			rc = __register_user_hw_breakpoint(i, tsk, bp);
+			break;
+		}
+	}
+	if (!rc)
+		set_tsk_thread_flag(tsk, TIF_DEBUG);
+
+	spin_unlock_bh(&hw_breakpoint_lock);
+	return rc;
+}
+EXPORT_SYMBOL_GPL(register_user_hw_breakpoint);
+
+/**
+ * modify_user_hw_breakpoint - modify a user-space hardware breakpoint
+ * @tsk: pointer to 'task_struct' of the process to which the address belongs
+ * @bp: the breakpoint structure to modify
+ *
+ */
+int modify_user_hw_breakpoint(struct task_struct *tsk, struct hw_breakpoint *bp)
+{
+	struct thread_struct *thread = &(tsk->thread);
+	int i, ret = -ENOENT;
+
+	spin_lock_bh(&hw_breakpoint_lock);
+	for (i = 0; i < hbp_kernel_pos; i++) {
+		if (bp == thread->hbp[i]) {
+			ret = __modify_user_hw_breakpoint(i, tsk, bp);
+			break;
+		}
+	}
+	spin_unlock_bh(&hw_breakpoint_lock);
+	return ret;
+}
+EXPORT_SYMBOL_GPL(modify_user_hw_breakpoint);
+
+/**
+ * unregister_user_hw_breakpoint - unregister a user-space hardware breakpoint
+ * @tsk: pointer to 'task_struct' of the process to which the address belongs
+ * @bp: the breakpoint structure to unregister
+ *
+ */
+void unregister_user_hw_breakpoint(struct task_struct *tsk,
+						struct hw_breakpoint *bp)
+{
+	struct thread_struct *thread = &(tsk->thread);
+	int i, pos = -1, hbp_counter = 0;
+
+	spin_lock_bh(&hw_breakpoint_lock);
+	for (i = 0; i < hbp_kernel_pos; i++) {
+		if (thread->hbp[i])
+			hbp_counter++;
+		if (bp == thread->hbp[i])
+			pos = i;
+	}
+	if (pos >= 0) {
+		__unregister_user_hw_breakpoint(pos, tsk);
+		hbp_counter--;
+	}
+	if (!hbp_counter)
+		clear_tsk_thread_flag(tsk, TIF_DEBUG);
+
+	spin_unlock_bh(&hw_breakpoint_lock);
+}
+EXPORT_SYMBOL_GPL(unregister_user_hw_breakpoint);
+
+/**
+ * register_kernel_hw_breakpoint - register a hardware breakpoint for kernel space
+ * @bp: the breakpoint structure to register
+ *
+ * @bp.info->name or @bp.info->address, @bp.info->len, @bp.info->type and
+ * @bp->triggered must be set properly before invocation
+ *
+ */
+int register_kernel_hw_breakpoint(struct hw_breakpoint *bp)
+{
+	int rc;
+
+	rc = arch_validate_hwbkpt_settings(bp, NULL);
+	if (rc)
+		return rc;
+
+	spin_lock_bh(&hw_breakpoint_lock);
+
+	rc = -ENOSPC;
+	/* Check if we are over-committing */
+	if ((hbp_kernel_pos > 0) && (!hbp_user_refcount[hbp_kernel_pos-1])) {
+		hbp_kernel_pos--;
+		hbp_kernel[hbp_kernel_pos] = bp;
+		on_each_cpu(arch_update_kernel_hw_breakpoint, NULL, 1);
+		rc = 0;
+	}
+
+	spin_unlock_bh(&hw_breakpoint_lock);
+	return rc;
+}
+EXPORT_SYMBOL_GPL(register_kernel_hw_breakpoint);
+
+/**
+ * unregister_kernel_hw_breakpoint - unregister a HW breakpoint for kernel space
+ * @bp: the breakpoint structure to unregister
+ *
+ * Uninstalls and unregisters @bp.
+ */
+void unregister_kernel_hw_breakpoint(struct hw_breakpoint *bp)
+{
+	int i, j;
+
+	spin_lock_bh(&hw_breakpoint_lock);
+
+	/* Find the 'bp' in our list of breakpoints for kernel */
+	for (i = hbp_kernel_pos; i < HBP_NUM; i++)
+		if (bp == hbp_kernel[i])
+			break;
+
+	/* Check if we did not find a match for 'bp'. If so return early */
+	if (i == HBP_NUM) {
+		spin_unlock_bh(&hw_breakpoint_lock);
+		return;
+	}
+
+	/*
+	 * Shift the remaining breakpoints up by one slot to close the
+	 * hole left by the unregistration
+	 */
+	for (j = i; j > hbp_kernel_pos; j--)
+		hbp_kernel[j] = hbp_kernel[j-1];
+
+	hbp_kernel[hbp_kernel_pos] = NULL;
+	on_each_cpu(arch_update_kernel_hw_breakpoint, NULL, 1);
+	hbp_kernel_pos++;
+
+	spin_unlock_bh(&hw_breakpoint_lock);
+}
+EXPORT_SYMBOL_GPL(unregister_kernel_hw_breakpoint);
+
+static struct notifier_block hw_breakpoint_exceptions_nb = {
+	.notifier_call = hw_breakpoint_exceptions_notify,
+	/* we need to be notified first */
+	.priority = 0x7fffffff
+};
+
+static int __init init_hw_breakpoint(void)
+{
+	return register_die_notifier(&hw_breakpoint_exceptions_nb);
+}
+
+core_initcall(init_hw_breakpoint);
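
A note for orientation, not part of the commit: the kernel-space half of the API above is what the ksym tracer added later in this series builds on. Below is a minimal sketch of a caller, assuming only what this file and trace_ksym.c show about struct hw_breakpoint (the info.name/info.address/info.type/info.len fields and the triggered callback); the my_wp names and the pid_max symbol are made up for the example.

#include <linux/kallsyms.h>
#include <linux/kernel.h>
#include <linux/ptrace.h>
#include <linux/init.h>

#include <asm/hw_breakpoint.h>

/* Hypothetical example watchpoint; not part of this patch */
static struct hw_breakpoint my_wp;

/* Runs from the debug-exception notifier whenever the watched symbol is written */
static void my_wp_handler(struct hw_breakpoint *bp, struct pt_regs *regs)
{
	pr_info("write to %s from 0x%lx\n", bp->info.name,
		instruction_pointer(regs));
}

static int __init my_wp_init(void)
{
	my_wp.info.name    = "pid_max";	/* any symbol from /proc/kallsyms */
	my_wp.info.address = kallsyms_lookup_name("pid_max");
	my_wp.info.type    = HW_BREAKPOINT_WRITE;
#ifdef CONFIG_X86
	my_wp.info.len     = HW_BREAKPOINT_LEN_4;
#endif
	my_wp.triggered    = (void *)my_wp_handler;	/* same cast as trace_ksym.c uses */

	/* Consumes one debug register; fails with -ENOSPC when none is free */
	return register_kernel_hw_breakpoint(&my_wp);
}
device_initcall(my_wp_init);

The breakpoint is released again with unregister_kernel_hw_breakpoint(&my_wp), which also triggers the IPI-based debug-register update shown above.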
diff --git a/kernel/trace/Kconfig b/kernel/trace/Kconfig
index 1ea0d1234f4a..5efeb4229ea0 100644
--- a/kernel/trace/Kconfig
+++ b/kernel/trace/Kconfig
@@ -329,6 +329,27 @@ config POWER_TRACER
 	  power management decisions, specifically the C-state and P-state
 	  behavior.
 
+config KSYM_TRACER
+	bool "Trace read and write access on kernel memory locations"
+	depends on HAVE_HW_BREAKPOINT
+	select TRACING
+	help
+	  This tracer helps find read and write operations on any given
+	  kernel symbol, i.e. any symbol listed in /proc/kallsyms.
+
+config PROFILE_KSYM_TRACER
+	bool "Profile all kernel memory accesses on 'watched' variables"
+	depends on KSYM_TRACER
+	help
+	  This tracer profiles kernel accesses on variables watched through
+	  the ksym tracer ftrace plugin. Depending upon the hardware, all
+	  read and write operations on the watched kernel variables can be
+	  monitored.
+
+	  The results will be displayed in:
+	  /debugfs/tracing/profile_ksym
+
+	  Say N if unsure.
 
 config STACK_TRACER
 	bool "Trace max stack"
diff --git a/kernel/trace/Makefile b/kernel/trace/Makefile
index 844164dca90a..ce3b1cd02732 100644
--- a/kernel/trace/Makefile
+++ b/kernel/trace/Makefile
@@ -54,5 +54,6 @@ obj-$(CONFIG_EVENT_TRACING) += trace_export.o
 obj-$(CONFIG_FTRACE_SYSCALLS) += trace_syscalls.o
 obj-$(CONFIG_EVENT_PROFILE) += trace_event_profile.o
 obj-$(CONFIG_EVENT_TRACING) += trace_events_filter.o
+obj-$(CONFIG_KSYM_TRACER) += trace_ksym.o
 
 libftrace-y := ftrace.o
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
index fa1dccb579d5..ea7e0bcbd539 100644
--- a/kernel/trace/trace.h
+++ b/kernel/trace/trace.h
@@ -15,6 +15,10 @@
 #include <linux/trace_seq.h>
 #include <linux/ftrace_event.h>
 
+#ifdef CONFIG_KSYM_TRACER
+#include <asm/hw_breakpoint.h>
+#endif
+
 enum trace_type {
 	__TRACE_FIRST_TYPE = 0,
 
@@ -38,6 +42,7 @@ enum trace_type {
 	TRACE_KMEM_FREE,
 	TRACE_POWER,
 	TRACE_BLK,
+	TRACE_KSYM,
 
 	__TRACE_LAST_TYPE,
 };
@@ -205,6 +210,16 @@ struct syscall_trace_exit {
 	unsigned long		ret;
 };
 
+#define KSYM_SELFTEST_ENTRY	"ksym_selftest_dummy"
+extern int process_new_ksym_entry(char *ksymname, int op, unsigned long addr);
+
+struct ksym_trace_entry {
+	struct trace_entry	ent;
+	unsigned long		ip;
+	unsigned char		type;
+	char			ksym_name[KSYM_NAME_LEN];
+	char			cmd[TASK_COMM_LEN];
+};
 
 /*
  * trace_flag_type is an enumeration that holds different
@@ -315,6 +330,7 @@ extern void __ftrace_bad_type(void);
 			  TRACE_KMEM_ALLOC);	\
 		IF_ASSIGN(var, ent, struct kmemtrace_free_entry,	\
 			  TRACE_KMEM_FREE);	\
+		IF_ASSIGN(var, ent, struct ksym_trace_entry, TRACE_KSYM);\
 		__ftrace_bad_type();		\
 	} while (0)
 
@@ -558,6 +574,8 @@ extern int trace_selftest_startup_branch(struct tracer *trace,
 					 struct trace_array *tr);
 extern int trace_selftest_startup_hw_branches(struct tracer *trace,
 					      struct trace_array *tr);
+extern int trace_selftest_startup_ksym(struct tracer *trace,
+				       struct trace_array *tr);
 #endif /* CONFIG_FTRACE_STARTUP_TEST */
 
 extern void *head_page(struct trace_array_cpu *data);
diff --git a/kernel/trace/trace_ksym.c b/kernel/trace/trace_ksym.c
new file mode 100644
index 000000000000..2fde875ead4c
--- /dev/null
+++ b/kernel/trace/trace_ksym.c
@@ -0,0 +1,550 @@
+/*
+ * trace_ksym.c - Kernel Symbol Tracer
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * Copyright (C) IBM Corporation, 2009
+ */
+
+#include <linux/kallsyms.h>
+#include <linux/uaccess.h>
+#include <linux/debugfs.h>
+#include <linux/ftrace.h>
+#include <linux/module.h>
+#include <linux/fs.h>
+
+#include "trace_output.h"
+#include "trace_stat.h"
+#include "trace.h"
+
+/* For now, restrict the number of symbols traced simultaneously to the
+ * number of available hardware breakpoint registers.
+ */
+#define KSYM_TRACER_MAX HBP_NUM
+
+#define KSYM_TRACER_OP_LEN 3 /* rw- */
+
+struct trace_ksym {
+	struct hw_breakpoint	*ksym_hbp;
+	unsigned long		ksym_addr;
+#ifdef CONFIG_PROFILE_KSYM_TRACER
+	unsigned long		counter;
+#endif
+	struct hlist_node	ksym_hlist;
+};
+
+static struct trace_array *ksym_trace_array;
+
+static unsigned int ksym_filter_entry_count;
+static unsigned int ksym_tracing_enabled;
+
+static HLIST_HEAD(ksym_filter_head);
+
+static DEFINE_MUTEX(ksym_tracer_mutex);
+
+#ifdef CONFIG_PROFILE_KSYM_TRACER
+
+#define MAX_UL_INT 0xffffffff
+
+void ksym_collect_stats(unsigned long hbp_hit_addr)
+{
+	struct hlist_node *node;
+	struct trace_ksym *entry;
+
+	rcu_read_lock();
+	hlist_for_each_entry_rcu(entry, node, &ksym_filter_head, ksym_hlist) {
+		if ((entry->ksym_addr == hbp_hit_addr) &&
+		    (entry->counter <= MAX_UL_INT)) {
+			entry->counter++;
+			break;
+		}
+	}
+	rcu_read_unlock();
+}
+#endif /* CONFIG_PROFILE_KSYM_TRACER */
+
+void ksym_hbp_handler(struct hw_breakpoint *hbp, struct pt_regs *regs)
+{
+	struct ring_buffer_event *event;
+	struct trace_array *tr;
+	struct ksym_trace_entry *entry;
+	int pc;
+
+	if (!ksym_tracing_enabled)
+		return;
+
+	tr = ksym_trace_array;
+	pc = preempt_count();
+
+	event = trace_buffer_lock_reserve(tr, TRACE_KSYM,
+					  sizeof(*entry), 0, pc);
+	if (!event)
+		return;
+
+	entry = ring_buffer_event_data(event);
+	entry->ip = instruction_pointer(regs);
+	entry->type = hbp->info.type;
+	strlcpy(entry->ksym_name, hbp->info.name, KSYM_NAME_LEN); /* ksym_name[] is KSYM_NAME_LEN bytes */
+	strlcpy(entry->cmd, current->comm, TASK_COMM_LEN);
+
+#ifdef CONFIG_PROFILE_KSYM_TRACER
+	ksym_collect_stats(hbp->info.address);
+#endif /* CONFIG_PROFILE_KSYM_TRACER */
+
+	trace_buffer_unlock_commit(tr, event, 0, pc);
+}
+
+/* Valid access types are represented as
+ *
+ * rw- : Set Read/Write Access Breakpoint
+ * -w- : Set Write Access Breakpoint
+ * --- : Clear Breakpoints
+ * --x : Set Execution Breakpoint (not available yet)
+ */
+static int ksym_trace_get_access_type(char *str)
+{
+	int access = 0;
+
+	if (str[0] == 'r')
+		access += 4;
+	else if (str[0] != '-')
+		return -EINVAL;
+
+	if (str[1] == 'w')
+		access += 2;
+	else if (str[1] != '-')
+		return -EINVAL;
+
+	if (str[2] != '-')
+		return -EINVAL;
+
+	switch (access) {
+	case 6:
+		access = HW_BREAKPOINT_RW;
+		break;
+	case 4:
+		access = -EINVAL;
+		break;
+	case 2:
+		access = HW_BREAKPOINT_WRITE;
+		break;
+	}
+
+	return access;
+}
+
+/*
+ * There are several possible malformed requests and we attempt to catch
+ * all of them. Some of the rules:
+ * 1. Kernel symbol names containing ':' are not allowed, since ':' is used
+ *    as the delimiter; i.e. multiple ':' characters are disallowed, so forms
+ *    such as <module>:<ksym_name>:<op> are rejected.
+ * 2. The delimiter ':' must be present in the input string.
+ * 3. No spurious operator characters, or characters out of position.
+ * 4. <ksym_name>:--- i.e. a clear request when ksym_name is not in the file.
+ * 5. The kernel symbol must be a part of /proc/kallsyms.
+ * 6. No duplicate requests.
+ */
+static int parse_ksym_trace_str(char *input_string, char **ksymname,
+				unsigned long *addr)
+{
+	int ret;
+
+	*ksymname = strsep(&input_string, ":");
+	*addr = kallsyms_lookup_name(*ksymname);
+
+	/* Check for malformed request: (2), (1) and (5) */
+	if ((!input_string) ||
+	    (strlen(input_string) != KSYM_TRACER_OP_LEN) ||
+	    (*addr == 0))
+		return -EINVAL;
+
+	ret = ksym_trace_get_access_type(input_string);
+
+	return ret;
+}
+
+int process_new_ksym_entry(char *ksymname, int op, unsigned long addr)
+{
+	struct trace_ksym *entry;
+	int ret = -ENOMEM;
+
+	if (ksym_filter_entry_count >= KSYM_TRACER_MAX) {
+		printk(KERN_ERR "ksym_tracer: Maximum limit:(%d) reached. No"
+		" new requests for tracing can be accepted now.\n",
+			KSYM_TRACER_MAX);
+		return -ENOSPC;
+	}
+
+	entry = kzalloc(sizeof(struct trace_ksym), GFP_KERNEL);
+	if (!entry)
+		return -ENOMEM;
+
+	entry->ksym_hbp = kzalloc(sizeof(struct hw_breakpoint), GFP_KERNEL);
+	if (!entry->ksym_hbp)
+		goto err;
+
+	entry->ksym_hbp->info.name = kstrdup(ksymname, GFP_KERNEL);
+	if (!entry->ksym_hbp->info.name)
+		goto err;
+
+	entry->ksym_hbp->info.type = op;
+	entry->ksym_addr = entry->ksym_hbp->info.address = addr;
+#ifdef CONFIG_X86
+	entry->ksym_hbp->info.len = HW_BREAKPOINT_LEN_4;
+#endif
+	entry->ksym_hbp->triggered = (void *)ksym_hbp_handler;
+
+	ret = register_kernel_hw_breakpoint(entry->ksym_hbp);
+	if (ret < 0) {
+		printk(KERN_INFO "ksym_tracer request failed. Try again"
+					" later!!\n");
+		ret = -EAGAIN;
+		goto err;
+	}
+	hlist_add_head_rcu(&(entry->ksym_hlist), &ksym_filter_head);
+	ksym_filter_entry_count++;
+	return 0;
+err:
+	if (entry->ksym_hbp)
+		kfree(entry->ksym_hbp->info.name);
+	kfree(entry->ksym_hbp);
+	kfree(entry);
+	return ret;
+}
+
+static ssize_t ksym_trace_filter_read(struct file *filp, char __user *ubuf,
+				      size_t count, loff_t *ppos)
+{
+	struct trace_ksym *entry;
+	struct hlist_node *node;
+	struct trace_seq *s;
+	ssize_t cnt = 0;
+	int ret;
+
+	s = kmalloc(sizeof(*s), GFP_KERNEL);
+	if (!s)
+		return -ENOMEM;
+	trace_seq_init(s);
+
+	mutex_lock(&ksym_tracer_mutex);
+
+	hlist_for_each_entry(entry, node, &ksym_filter_head, ksym_hlist) {
+		ret = trace_seq_printf(s, "%s:", entry->ksym_hbp->info.name);
+		if (entry->ksym_hbp->info.type == HW_BREAKPOINT_WRITE)
+			ret = trace_seq_puts(s, "-w-\n");
+		else if (entry->ksym_hbp->info.type == HW_BREAKPOINT_RW)
+			ret = trace_seq_puts(s, "rw-\n");
+		WARN_ON_ONCE(!ret);
+	}
+
+	cnt = simple_read_from_buffer(ubuf, count, ppos, s->buffer, s->len);
+
+	mutex_unlock(&ksym_tracer_mutex);
+
+	kfree(s);
+
+	return cnt;
+}
+
+static void __ksym_trace_reset(void)
+{
+	struct trace_ksym *entry;
+	struct hlist_node *node, *node1;
+
+	mutex_lock(&ksym_tracer_mutex);
+	hlist_for_each_entry_safe(entry, node, node1, &ksym_filter_head,
+				  ksym_hlist) {
+		unregister_kernel_hw_breakpoint(entry->ksym_hbp);
+		ksym_filter_entry_count--;
+		hlist_del_rcu(&(entry->ksym_hlist));
+		synchronize_rcu();
+		kfree(entry->ksym_hbp->info.name);
+		kfree(entry->ksym_hbp);
+		kfree(entry);
+	}
+	mutex_unlock(&ksym_tracer_mutex);
+}
+
+static ssize_t ksym_trace_filter_write(struct file *file,
+				       const char __user *buffer,
+				       size_t count, loff_t *ppos)
+{
+	struct trace_ksym *entry;
+	struct hlist_node *node;
+	char *input_string, *ksymname = NULL;
+	unsigned long ksym_addr = 0;
+	int ret, op, changed = 0;
+
+	input_string = kzalloc(count + 1, GFP_KERNEL);
+	if (!input_string)
+		return -ENOMEM;
+
+	if (copy_from_user(input_string, buffer, count)) {
+		kfree(input_string);
+		return -EFAULT;
+	}
+	input_string[count] = '\0';
+
+	strstrip(input_string);
+
+	/*
+	 * Clear all breakpoints if:
+	 * 1: echo > ksym_trace_filter
+	 * 2: echo 0 > ksym_trace_filter
+	 * 3: echo "*:---" > ksym_trace_filter
+	 */
+	if (!input_string[0] || !strcmp(input_string, "0") ||
+	    !strcmp(input_string, "*:---")) {
+		__ksym_trace_reset();
+		kfree(input_string);
+		return count;
+	}
+
+	ret = op = parse_ksym_trace_str(input_string, &ksymname, &ksym_addr);
+	if (ret < 0) {
+		kfree(input_string);
+		return ret;
+	}
+
+	mutex_lock(&ksym_tracer_mutex);
+
+	ret = -EINVAL;
+	hlist_for_each_entry(entry, node, &ksym_filter_head, ksym_hlist) {
+		if (entry->ksym_addr == ksym_addr) {
+			/* Check for malformed request: (6) */
+			if (entry->ksym_hbp->info.type != op)
+				changed = 1;
+			else
+				goto out;
+			break;
+		}
+	}
+	if (changed) {
+		unregister_kernel_hw_breakpoint(entry->ksym_hbp);
+		entry->ksym_hbp->info.type = op;
+		if (op > 0) {
+			ret = register_kernel_hw_breakpoint(entry->ksym_hbp);
+			if (ret == 0)
+				goto out;
+		}
+		ksym_filter_entry_count--;
+		hlist_del_rcu(&(entry->ksym_hlist));
+		synchronize_rcu();
+		kfree(entry->ksym_hbp->info.name);
+		kfree(entry->ksym_hbp);
+		kfree(entry);
+		ret = 0;
+		goto out;
+	} else {
+		/* Check for malformed request: (4) */
+		if (op == 0)
+			goto out;
+		ret = process_new_ksym_entry(ksymname, op, ksym_addr);
+	}
+out:
+	mutex_unlock(&ksym_tracer_mutex);
+
+	kfree(input_string);
+
+	if (!ret)
+		ret = count;
+	return ret;
+}
+
+static const struct file_operations ksym_tracing_fops = {
+	.open		= tracing_open_generic,
+	.read		= ksym_trace_filter_read,
+	.write		= ksym_trace_filter_write,
+};
+
+static void ksym_trace_reset(struct trace_array *tr)
+{
+	ksym_tracing_enabled = 0;
+	__ksym_trace_reset();
+}
+
+static int ksym_trace_init(struct trace_array *tr)
+{
+	int cpu, ret = 0;
+
+	for_each_online_cpu(cpu)
+		tracing_reset(tr, cpu);
+	ksym_tracing_enabled = 1;
+	ksym_trace_array = tr;
+
+	return ret;
+}
+
+static void ksym_trace_print_header(struct seq_file *m)
+{
+	seq_puts(m,
+		 "#       TASK-PID   CPU#   Symbol       "
+		 "Type    Function\n");
+	seq_puts(m,
+		 "#          |         |      |            "
+		 " |         |\n");
+}
+
+static enum print_line_t ksym_trace_output(struct trace_iterator *iter)
+{
+	struct trace_entry *entry = iter->ent;
+	struct trace_seq *s = &iter->seq;
+	struct ksym_trace_entry *field;
+	char str[KSYM_SYMBOL_LEN];
+	int ret;
+
+	if (entry->type != TRACE_KSYM)
+		return TRACE_TYPE_UNHANDLED;
+
+	trace_assign_type(field, entry);
+
+	ret = trace_seq_printf(s, "%11s-%-5d [%03d] %-30s ", field->cmd,
+			       entry->pid, iter->cpu, field->ksym_name);
+	if (!ret)
+		return TRACE_TYPE_PARTIAL_LINE;
+
+	switch (field->type) {
+	case HW_BREAKPOINT_WRITE:
+		ret = trace_seq_printf(s, " W ");
+		break;
+	case HW_BREAKPOINT_RW:
+		ret = trace_seq_printf(s, " RW ");
+		break;
+	default:
+		return TRACE_TYPE_PARTIAL_LINE;
+	}
+
+	if (!ret)
+		return TRACE_TYPE_PARTIAL_LINE;
+
+	sprint_symbol(str, field->ip);
+	ret = trace_seq_printf(s, "%s\n", str);
+	if (!ret)
+		return TRACE_TYPE_PARTIAL_LINE;
+
+	return TRACE_TYPE_HANDLED;
+}
+
+struct tracer ksym_tracer __read_mostly =
+{
+	.name		= "ksym_tracer",
+	.init		= ksym_trace_init,
+	.reset		= ksym_trace_reset,
+#ifdef CONFIG_FTRACE_SELFTEST
+	.selftest	= trace_selftest_startup_ksym,
+#endif
+	.print_header	= ksym_trace_print_header,
+	.print_line	= ksym_trace_output
+};
+
+__init static int init_ksym_trace(void)
+{
+	struct dentry *d_tracer;
+	struct dentry *entry;
+
+	d_tracer = tracing_init_dentry();
+	ksym_filter_entry_count = 0;
+
+	entry = debugfs_create_file("ksym_trace_filter", 0644, d_tracer,
+				    NULL, &ksym_tracing_fops);
+	if (!entry)
+		pr_warning("Could not create debugfs "
+			   "'ksym_trace_filter' file\n");
+
+	return register_tracer(&ksym_tracer);
+}
+device_initcall(init_ksym_trace);
+
+
+#ifdef CONFIG_PROFILE_KSYM_TRACER
+static int ksym_tracer_stat_headers(struct seq_file *m)
+{
+	seq_puts(m, " Access Type ");
+	seq_puts(m, " Symbol                    Counter\n");
+	seq_puts(m, " ----------- ");
+	seq_puts(m, " ------                    -------\n");
+	return 0;
+}
+
+static int ksym_tracer_stat_show(struct seq_file *m, void *v)
+{
+	struct hlist_node *stat = v;
+	struct trace_ksym *entry;
+	int access_type = 0;
+	char fn_name[KSYM_NAME_LEN];
+
+	entry = hlist_entry(stat, struct trace_ksym, ksym_hlist);
+
+	if (entry->ksym_hbp)
+		access_type = entry->ksym_hbp->info.type;
+
+	switch (access_type) {
+	case HW_BREAKPOINT_WRITE:
+		seq_puts(m, " W ");
+		break;
+	case HW_BREAKPOINT_RW:
+		seq_puts(m, " RW ");
+		break;
+	default:
+		seq_puts(m, " NA ");
+	}
+
+	if (lookup_symbol_name(entry->ksym_addr, fn_name) >= 0)
+		seq_printf(m, " %-36s", fn_name);
+	else
+		seq_printf(m, " %-36s", "<NA>");
+	seq_printf(m, " %15lu\n", entry->counter);
+
+	return 0;
+}
+
+static void *ksym_tracer_stat_start(struct tracer_stat *trace)
+{
+	return ksym_filter_head.first;
+}
+
+static void *
+ksym_tracer_stat_next(void *v, int idx)
+{
+	struct hlist_node *stat = v;
+
+	return stat->next;
+}
+
+static struct tracer_stat ksym_tracer_stats = {
+	.name = "ksym_tracer",
+	.stat_start = ksym_tracer_stat_start,
+	.stat_next = ksym_tracer_stat_next,
+	.stat_headers = ksym_tracer_stat_headers,
+	.stat_show = ksym_tracer_stat_show
+};
+
+__init static int ksym_tracer_stat_init(void)
+{
+	int ret;
+
+	ret = register_stat_tracer(&ksym_tracer_stats);
+	if (ret) {
+		printk(KERN_WARNING "Warning: could not register "
+				    "ksym tracer stats\n");
+		return 1;
+	}
+
+	return 0;
+}
+fs_initcall(ksym_tracer_stat_init);
+#endif /* CONFIG_PROFILE_KSYM_TRACER */
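
Usage note, not part of the diff: with KSYM_TRACER enabled, the interface above is driven through the tracing debugfs directory; the Kconfig help above assumes it is reachable as /debugfs/tracing, adjust the path to wherever debugfs is mounted. A rough session, using pid_max as an arbitrary example symbol and the rw-/-w-/--- operators parsed by ksym_trace_filter_write():

# echo ksym_tracer > /debugfs/tracing/current_tracer        (select the tracer)
# echo "pid_max:rw-" > /debugfs/tracing/ksym_trace_filter   (watch reads and writes)
# cat /debugfs/tracing/ksym_trace_filter                    (list the active watchpoints)
# cat /debugfs/tracing/trace                                (see the recorded hits)
# echo 0 > /debugfs/tracing/ksym_trace_filter               (clear all watchpoints)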
diff --git a/kernel/trace/trace_selftest.c b/kernel/trace/trace_selftest.c
index d2cdbabb4ead..7179c12e4f0f 100644
--- a/kernel/trace/trace_selftest.c
+++ b/kernel/trace/trace_selftest.c
@@ -17,6 +17,7 @@ static inline int trace_valid_entry(struct trace_entry *entry)
 	case TRACE_GRAPH_ENT:
 	case TRACE_GRAPH_RET:
 	case TRACE_HW_BRANCHES:
+	case TRACE_KSYM:
 		return 1;
 	}
 	return 0;
@@ -808,3 +809,55 @@ trace_selftest_startup_hw_branches(struct tracer *trace,
 	return ret;
 }
 #endif /* CONFIG_HW_BRANCH_TRACER */
+
+#ifdef CONFIG_KSYM_TRACER
+static int ksym_selftest_dummy;
+
+int
+trace_selftest_startup_ksym(struct tracer *trace, struct trace_array *tr)
+{
+	unsigned long count;
+	int ret;
+
+	/* start the tracing */
+	ret = tracer_init(trace, tr);
+	if (ret) {
+		warn_failed_init_tracer(trace, ret);
+		return ret;
+	}
+
+	ksym_selftest_dummy = 0;
+	/* Register the read-write tracing request */
+	ret = process_new_ksym_entry(KSYM_SELFTEST_ENTRY, HW_BREAKPOINT_RW,
+					(unsigned long)(&ksym_selftest_dummy));
+
+	if (ret < 0) {
+		printk(KERN_CONT "ksym_trace read-write startup test failed\n");
+		goto ret_path;
+	}
+	/* Perform a read and a write operation over the dummy variable to
+	 * trigger the tracer
+	 */
+	if (ksym_selftest_dummy == 0)
+		ksym_selftest_dummy++;
+
+	/* stop the tracing. */
+	tracing_stop();
+	/* check the trace buffer */
+	ret = trace_test_buffer(tr, &count);
+	trace->reset(tr);
+	tracing_start();
+
+	/* One read and one write were performed on the dummy variable,
+	 * triggering two entries in the trace buffer
+	 */
+	if (!ret && count != 2) {
+		printk(KERN_CONT "Ksym tracer startup test failed");
+		ret = -1;
+	}
+
+ret_path:
+	return ret;
+}
+#endif /* CONFIG_KSYM_TRACER */
+