diff options
Diffstat (limited to 'kernel')
-rw-r--r-- | kernel/Makefile | 1 | ||||
-rw-r--r-- | kernel/hw_breakpoint.c | 378 | ||||
-rw-r--r-- | kernel/trace/Kconfig | 21 | ||||
-rw-r--r-- | kernel/trace/Makefile | 1 | ||||
-rw-r--r-- | kernel/trace/trace.h | 23 | ||||
-rw-r--r-- | kernel/trace/trace_ksym.c | 525 | ||||
-rw-r--r-- | kernel/trace/trace_selftest.c | 53 |
7 files changed, 1002 insertions, 0 deletions
diff --git a/kernel/Makefile b/kernel/Makefile index 9df4501cb921..f88decb1b445 100644 --- a/kernel/Makefile +++ b/kernel/Makefile | |||
@@ -97,6 +97,7 @@ obj-$(CONFIG_TRACING) += trace/ | |||
97 | obj-$(CONFIG_X86_DS) += trace/ | 97 | obj-$(CONFIG_X86_DS) += trace/ |
98 | obj-$(CONFIG_SMP) += sched_cpupri.o | 98 | obj-$(CONFIG_SMP) += sched_cpupri.o |
99 | obj-$(CONFIG_SLOW_WORK) += slow-work.o | 99 | obj-$(CONFIG_SLOW_WORK) += slow-work.o |
100 | obj-$(CONFIG_HAVE_HW_BREAKPOINT) += hw_breakpoint.o | ||
100 | obj-$(CONFIG_PERF_COUNTERS) += perf_counter.o | 101 | obj-$(CONFIG_PERF_COUNTERS) += perf_counter.o |
101 | 102 | ||
102 | ifneq ($(CONFIG_SCHED_OMIT_FRAME_POINTER),y) | 103 | ifneq ($(CONFIG_SCHED_OMIT_FRAME_POINTER),y) |
diff --git a/kernel/hw_breakpoint.c b/kernel/hw_breakpoint.c new file mode 100644 index 000000000000..c1f64e65a9f3 --- /dev/null +++ b/kernel/hw_breakpoint.c | |||
@@ -0,0 +1,378 @@ | |||
1 | /* | ||
2 | * This program is free software; you can redistribute it and/or modify | ||
3 | * it under the terms of the GNU General Public License as published by | ||
4 | * the Free Software Foundation; either version 2 of the License, or | ||
5 | * (at your option) any later version. | ||
6 | * | ||
7 | * This program is distributed in the hope that it will be useful, | ||
8 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
9 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
10 | * GNU General Public License for more details. | ||
11 | * | ||
12 | * You should have received a copy of the GNU General Public License | ||
13 | * along with this program; if not, write to the Free Software | ||
14 | * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. | ||
15 | * | ||
16 | * Copyright (C) 2007 Alan Stern | ||
17 | * Copyright (C) IBM Corporation, 2009 | ||
18 | */ | ||
19 | |||
20 | /* | ||
21 | * HW_breakpoint: a unified kernel/user-space hardware breakpoint facility, | ||
22 | * using the CPU's debug registers. | ||
23 | * This file contains the arch-independent routines. | ||
24 | */ | ||
25 | |||
26 | #include <linux/irqflags.h> | ||
27 | #include <linux/kallsyms.h> | ||
28 | #include <linux/notifier.h> | ||
29 | #include <linux/kprobes.h> | ||
30 | #include <linux/kdebug.h> | ||
31 | #include <linux/kernel.h> | ||
32 | #include <linux/module.h> | ||
33 | #include <linux/percpu.h> | ||
34 | #include <linux/sched.h> | ||
35 | #include <linux/init.h> | ||
36 | #include <linux/smp.h> | ||
37 | |||
38 | #include <asm/hw_breakpoint.h> | ||
39 | #include <asm/processor.h> | ||
40 | |||
41 | #ifdef CONFIG_X86 | ||
42 | #include <asm/debugreg.h> | ||
43 | #endif | ||
44 | /* | ||
45 | * Spinlock that protects all (un)register operations over kernel/user-space | ||
46 | * breakpoint requests | ||
47 | */ | ||
48 | static DEFINE_SPINLOCK(hw_breakpoint_lock); | ||
49 | |||
50 | /* Array of kernel-space breakpoint structures */ | ||
51 | struct hw_breakpoint *hbp_kernel[HBP_NUM]; | ||
52 | |||
53 | /* | ||
54 | * Per-processor copy of hbp_kernel[]. Used only when hbp_kernel is being | ||
55 | * modified but we need the older copy to handle any hbp exceptions. It will | ||
56 | * sync with hbp_kernel[] value after the update is done through IPIs. | ||
57 | */ | ||
58 | DEFINE_PER_CPU(struct hw_breakpoint*, this_hbp_kernel[HBP_NUM]); | ||
59 | |||
60 | /* | ||
61 | * Kernel breakpoints grow downwards, starting from HBP_NUM | ||
62 | * 'hbp_kernel_pos' denotes lowest numbered breakpoint register occupied for | ||
63 | * kernel-space request. We will initialise it here and not in an __init | ||
64 | * routine because load_debug_registers(), which uses this variable can be | ||
65 | * called very early during CPU initialisation. | ||
66 | */ | ||
67 | unsigned int hbp_kernel_pos = HBP_NUM; | ||
68 | |||
69 | /* | ||
70 | * An array containing refcount of threads using a given bkpt register | ||
71 | * Accesses are synchronised by acquiring hw_breakpoint_lock | ||
72 | */ | ||
73 | unsigned int hbp_user_refcount[HBP_NUM]; | ||
74 | |||
75 | /* | ||
76 | * Load the debug registers during startup of a CPU. | ||
77 | */ | ||
78 | void load_debug_registers(void) | ||
79 | { | ||
80 | unsigned long flags; | ||
81 | struct task_struct *tsk = current; | ||
82 | |||
83 | spin_lock_bh(&hw_breakpoint_lock); | ||
84 | |||
85 | /* Prevent IPIs for new kernel breakpoint updates */ | ||
86 | local_irq_save(flags); | ||
87 | arch_update_kernel_hw_breakpoint(NULL); | ||
88 | local_irq_restore(flags); | ||
89 | |||
90 | if (test_tsk_thread_flag(tsk, TIF_DEBUG)) | ||
91 | arch_install_thread_hw_breakpoint(tsk); | ||
92 | |||
93 | spin_unlock_bh(&hw_breakpoint_lock); | ||
94 | } | ||
95 | |||
96 | /* | ||
97 | * Erase all the hardware breakpoint info associated with a thread. | ||
98 | * | ||
99 | * If tsk != current then tsk must not be usable (for example, a | ||
100 | * child being cleaned up from a failed fork). | ||
101 | */ | ||
102 | void flush_thread_hw_breakpoint(struct task_struct *tsk) | ||
103 | { | ||
104 | int i; | ||
105 | struct thread_struct *thread = &(tsk->thread); | ||
106 | |||
107 | spin_lock_bh(&hw_breakpoint_lock); | ||
108 | |||
109 | /* The thread no longer has any breakpoints associated with it */ | ||
110 | clear_tsk_thread_flag(tsk, TIF_DEBUG); | ||
111 | for (i = 0; i < HBP_NUM; i++) { | ||
112 | if (thread->hbp[i]) { | ||
113 | hbp_user_refcount[i]--; | ||
114 | kfree(thread->hbp[i]); | ||
115 | thread->hbp[i] = NULL; | ||
116 | } | ||
117 | } | ||
118 | |||
119 | arch_flush_thread_hw_breakpoint(tsk); | ||
120 | |||
121 | /* Actually uninstall the breakpoints if necessary */ | ||
122 | if (tsk == current) | ||
123 | arch_uninstall_thread_hw_breakpoint(); | ||
124 | spin_unlock_bh(&hw_breakpoint_lock); | ||
125 | } | ||
126 | |||
127 | /* | ||
128 | * Copy the hardware breakpoint info from a thread to its cloned child. | ||
129 | */ | ||
130 | int copy_thread_hw_breakpoint(struct task_struct *tsk, | ||
131 | struct task_struct *child, unsigned long clone_flags) | ||
132 | { | ||
133 | /* | ||
134 | * We will assume that breakpoint settings are not inherited | ||
135 | * and the child starts out with no debug registers set. | ||
136 | * But what about CLONE_PTRACE? | ||
137 | */ | ||
138 | clear_tsk_thread_flag(child, TIF_DEBUG); | ||
139 | |||
140 | /* We will call flush routine since the debugregs are not inherited */ | ||
141 | arch_flush_thread_hw_breakpoint(child); | ||
142 | |||
143 | return 0; | ||
144 | } | ||
145 | |||
146 | static int __register_user_hw_breakpoint(int pos, struct task_struct *tsk, | ||
147 | struct hw_breakpoint *bp) | ||
148 | { | ||
149 | struct thread_struct *thread = &(tsk->thread); | ||
150 | int rc; | ||
151 | |||
152 | /* Do not overcommit. Fail if kernel has used the hbp registers */ | ||
153 | if (pos >= hbp_kernel_pos) | ||
154 | return -ENOSPC; | ||
155 | |||
156 | rc = arch_validate_hwbkpt_settings(bp, tsk); | ||
157 | if (rc) | ||
158 | return rc; | ||
159 | |||
160 | thread->hbp[pos] = bp; | ||
161 | hbp_user_refcount[pos]++; | ||
162 | |||
163 | arch_update_user_hw_breakpoint(pos, tsk); | ||
164 | /* | ||
165 | * Does it need to be installed right now? | ||
166 | * Otherwise it will get installed the next time tsk runs | ||
167 | */ | ||
168 | if (tsk == current) | ||
169 | arch_install_thread_hw_breakpoint(tsk); | ||
170 | |||
171 | return rc; | ||
172 | } | ||
173 | |||
174 | /* | ||
175 | * Modify the address of a hbp register already in use by the task | ||
176 | * Do not invoke this in lieu of a __unregister_user_hw_breakpoint() | ||
177 | */ | ||
178 | static int __modify_user_hw_breakpoint(int pos, struct task_struct *tsk, | ||
179 | struct hw_breakpoint *bp) | ||
180 | { | ||
181 | struct thread_struct *thread = &(tsk->thread); | ||
182 | |||
183 | if ((pos >= hbp_kernel_pos) || (arch_validate_hwbkpt_settings(bp, tsk))) | ||
184 | return -EINVAL; | ||
185 | |||
186 | if (thread->hbp[pos] == NULL) | ||
187 | return -EINVAL; | ||
188 | |||
189 | thread->hbp[pos] = bp; | ||
190 | /* | ||
191 | * 'pos' must be that of a hbp register already used by 'tsk' | ||
192 | * Otherwise arch_modify_user_hw_breakpoint() will fail | ||
193 | */ | ||
194 | arch_update_user_hw_breakpoint(pos, tsk); | ||
195 | |||
196 | if (tsk == current) | ||
197 | arch_install_thread_hw_breakpoint(tsk); | ||
198 | |||
199 | return 0; | ||
200 | } | ||
201 | |||
202 | static void __unregister_user_hw_breakpoint(int pos, struct task_struct *tsk) | ||
203 | { | ||
204 | hbp_user_refcount[pos]--; | ||
205 | tsk->thread.hbp[pos] = NULL; | ||
206 | |||
207 | arch_update_user_hw_breakpoint(pos, tsk); | ||
208 | |||
209 | if (tsk == current) | ||
210 | arch_install_thread_hw_breakpoint(tsk); | ||
211 | } | ||
212 | |||
213 | /** | ||
214 | * register_user_hw_breakpoint - register a hardware breakpoint for user space | ||
215 | * @tsk: pointer to 'task_struct' of the process to which the address belongs | ||
216 | * @bp: the breakpoint structure to register | ||
217 | * | ||
218 | * @bp.info->name or @bp.info->address, @bp.info->len, @bp.info->type and | ||
219 | * @bp->triggered must be set properly before invocation | ||
220 | * | ||
221 | */ | ||
222 | int register_user_hw_breakpoint(struct task_struct *tsk, | ||
223 | struct hw_breakpoint *bp) | ||
224 | { | ||
225 | struct thread_struct *thread = &(tsk->thread); | ||
226 | int i, rc = -ENOSPC; | ||
227 | |||
228 | spin_lock_bh(&hw_breakpoint_lock); | ||
229 | |||
230 | for (i = 0; i < hbp_kernel_pos; i++) { | ||
231 | if (!thread->hbp[i]) { | ||
232 | rc = __register_user_hw_breakpoint(i, tsk, bp); | ||
233 | break; | ||
234 | } | ||
235 | } | ||
236 | if (!rc) | ||
237 | set_tsk_thread_flag(tsk, TIF_DEBUG); | ||
238 | |||
239 | spin_unlock_bh(&hw_breakpoint_lock); | ||
240 | return rc; | ||
241 | } | ||
242 | EXPORT_SYMBOL_GPL(register_user_hw_breakpoint); | ||
243 | |||
244 | /** | ||
245 | * modify_user_hw_breakpoint - modify a user-space hardware breakpoint | ||
246 | * @tsk: pointer to 'task_struct' of the process to which the address belongs | ||
247 | * @bp: the breakpoint structure to unregister | ||
248 | * | ||
249 | */ | ||
250 | int modify_user_hw_breakpoint(struct task_struct *tsk, struct hw_breakpoint *bp) | ||
251 | { | ||
252 | struct thread_struct *thread = &(tsk->thread); | ||
253 | int i, ret = -ENOENT; | ||
254 | |||
255 | spin_lock_bh(&hw_breakpoint_lock); | ||
256 | for (i = 0; i < hbp_kernel_pos; i++) { | ||
257 | if (bp == thread->hbp[i]) { | ||
258 | ret = __modify_user_hw_breakpoint(i, tsk, bp); | ||
259 | break; | ||
260 | } | ||
261 | } | ||
262 | spin_unlock_bh(&hw_breakpoint_lock); | ||
263 | return ret; | ||
264 | } | ||
265 | EXPORT_SYMBOL_GPL(modify_user_hw_breakpoint); | ||
266 | |||
267 | /** | ||
268 | * unregister_user_hw_breakpoint - unregister a user-space hardware breakpoint | ||
269 | * @tsk: pointer to 'task_struct' of the process to which the address belongs | ||
270 | * @bp: the breakpoint structure to unregister | ||
271 | * | ||
272 | */ | ||
273 | void unregister_user_hw_breakpoint(struct task_struct *tsk, | ||
274 | struct hw_breakpoint *bp) | ||
275 | { | ||
276 | struct thread_struct *thread = &(tsk->thread); | ||
277 | int i, pos = -1, hbp_counter = 0; | ||
278 | |||
279 | spin_lock_bh(&hw_breakpoint_lock); | ||
280 | for (i = 0; i < hbp_kernel_pos; i++) { | ||
281 | if (thread->hbp[i]) | ||
282 | hbp_counter++; | ||
283 | if (bp == thread->hbp[i]) | ||
284 | pos = i; | ||
285 | } | ||
286 | if (pos >= 0) { | ||
287 | __unregister_user_hw_breakpoint(pos, tsk); | ||
288 | hbp_counter--; | ||
289 | } | ||
290 | if (!hbp_counter) | ||
291 | clear_tsk_thread_flag(tsk, TIF_DEBUG); | ||
292 | |||
293 | spin_unlock_bh(&hw_breakpoint_lock); | ||
294 | } | ||
295 | EXPORT_SYMBOL_GPL(unregister_user_hw_breakpoint); | ||
296 | |||
297 | /** | ||
298 | * register_kernel_hw_breakpoint - register a hardware breakpoint for kernel space | ||
299 | * @bp: the breakpoint structure to register | ||
300 | * | ||
301 | * @bp.info->name or @bp.info->address, @bp.info->len, @bp.info->type and | ||
302 | * @bp->triggered must be set properly before invocation | ||
303 | * | ||
304 | */ | ||
305 | int register_kernel_hw_breakpoint(struct hw_breakpoint *bp) | ||
306 | { | ||
307 | int rc; | ||
308 | |||
309 | rc = arch_validate_hwbkpt_settings(bp, NULL); | ||
310 | if (rc) | ||
311 | return rc; | ||
312 | |||
313 | spin_lock_bh(&hw_breakpoint_lock); | ||
314 | |||
315 | rc = -ENOSPC; | ||
316 | /* Check if we are over-committing */ | ||
317 | if ((hbp_kernel_pos > 0) && (!hbp_user_refcount[hbp_kernel_pos-1])) { | ||
318 | hbp_kernel_pos--; | ||
319 | hbp_kernel[hbp_kernel_pos] = bp; | ||
320 | on_each_cpu(arch_update_kernel_hw_breakpoint, NULL, 1); | ||
321 | rc = 0; | ||
322 | } | ||
323 | |||
324 | spin_unlock_bh(&hw_breakpoint_lock); | ||
325 | return rc; | ||
326 | } | ||
327 | EXPORT_SYMBOL_GPL(register_kernel_hw_breakpoint); | ||
328 | |||
329 | /** | ||
330 | * unregister_kernel_hw_breakpoint - unregister a HW breakpoint for kernel space | ||
331 | * @bp: the breakpoint structure to unregister | ||
332 | * | ||
333 | * Uninstalls and unregisters @bp. | ||
334 | */ | ||
335 | void unregister_kernel_hw_breakpoint(struct hw_breakpoint *bp) | ||
336 | { | ||
337 | int i, j; | ||
338 | |||
339 | spin_lock_bh(&hw_breakpoint_lock); | ||
340 | |||
341 | /* Find the 'bp' in our list of breakpoints for kernel */ | ||
342 | for (i = hbp_kernel_pos; i < HBP_NUM; i++) | ||
343 | if (bp == hbp_kernel[i]) | ||
344 | break; | ||
345 | |||
346 | /* Check if we did not find a match for 'bp'. If so return early */ | ||
347 | if (i == HBP_NUM) { | ||
348 | spin_unlock_bh(&hw_breakpoint_lock); | ||
349 | return; | ||
350 | } | ||
351 | |||
352 | /* | ||
353 | * We'll shift the breakpoints one-level above to compact if | ||
354 | * unregistration creates a hole | ||
355 | */ | ||
356 | for (j = i; j > hbp_kernel_pos; j--) | ||
357 | hbp_kernel[j] = hbp_kernel[j-1]; | ||
358 | |||
359 | hbp_kernel[hbp_kernel_pos] = NULL; | ||
360 | on_each_cpu(arch_update_kernel_hw_breakpoint, NULL, 1); | ||
361 | hbp_kernel_pos++; | ||
362 | |||
363 | spin_unlock_bh(&hw_breakpoint_lock); | ||
364 | } | ||
365 | EXPORT_SYMBOL_GPL(unregister_kernel_hw_breakpoint); | ||
366 | |||
367 | static struct notifier_block hw_breakpoint_exceptions_nb = { | ||
368 | .notifier_call = hw_breakpoint_exceptions_notify, | ||
369 | /* we need to be notified first */ | ||
370 | .priority = 0x7fffffff | ||
371 | }; | ||
372 | |||
373 | static int __init init_hw_breakpoint(void) | ||
374 | { | ||
375 | return register_die_notifier(&hw_breakpoint_exceptions_nb); | ||
376 | } | ||
377 | |||
378 | core_initcall(init_hw_breakpoint); | ||
diff --git a/kernel/trace/Kconfig b/kernel/trace/Kconfig index 61071fecc82e..ae048a2dbbe8 100644 --- a/kernel/trace/Kconfig +++ b/kernel/trace/Kconfig | |||
@@ -314,6 +314,27 @@ config POWER_TRACER | |||
314 | power management decisions, specifically the C-state and P-state | 314 | power management decisions, specifically the C-state and P-state |
315 | behavior. | 315 | behavior. |
316 | 316 | ||
317 | config KSYM_TRACER | ||
318 | bool "Trace read and write access on kernel memory locations" | ||
319 | depends on HAVE_HW_BREAKPOINT | ||
320 | select TRACING | ||
321 | help | ||
322 | This tracer helps find read and write operations on any given kernel | ||
323 | symbol listed in /proc/kallsyms. | ||
324 | |||
325 | config PROFILE_KSYM_TRACER | ||
326 | bool "Profile all kernel memory accesses on 'watched' variables" | ||
327 | depends on KSYM_TRACER | ||
328 | help | ||
329 | This tracer profiles kernel accesses on variables watched through the | ||
330 | ksym tracer ftrace plugin. Depending upon the hardware, all read | ||
331 | and write operations on kernel variables can be monitored for | ||
332 | accesses. | ||
333 | |||
334 | The results will be displayed in: | ||
335 | /debugfs/tracing/profile_ksym | ||
336 | |||
337 | Say N if unsure. | ||
317 | 338 | ||
318 | config STACK_TRACER | 339 | config STACK_TRACER |
319 | bool "Trace max stack" | 340 | bool "Trace max stack" |
diff --git a/kernel/trace/Makefile b/kernel/trace/Makefile index 844164dca90a..ce3b1cd02732 100644 --- a/kernel/trace/Makefile +++ b/kernel/trace/Makefile | |||
@@ -54,5 +54,6 @@ obj-$(CONFIG_EVENT_TRACING) += trace_export.o | |||
54 | obj-$(CONFIG_FTRACE_SYSCALLS) += trace_syscalls.o | 54 | obj-$(CONFIG_FTRACE_SYSCALLS) += trace_syscalls.o |
55 | obj-$(CONFIG_EVENT_PROFILE) += trace_event_profile.o | 55 | obj-$(CONFIG_EVENT_PROFILE) += trace_event_profile.o |
56 | obj-$(CONFIG_EVENT_TRACING) += trace_events_filter.o | 56 | obj-$(CONFIG_EVENT_TRACING) += trace_events_filter.o |
57 | obj-$(CONFIG_KSYM_TRACER) += trace_ksym.o | ||
57 | 58 | ||
58 | libftrace-y := ftrace.o | 59 | libftrace-y := ftrace.o |
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h index 6e735d4771f8..7d5cc37b8fca 100644 --- a/kernel/trace/trace.h +++ b/kernel/trace/trace.h | |||
@@ -15,6 +15,10 @@ | |||
15 | #include <linux/trace_seq.h> | 15 | #include <linux/trace_seq.h> |
16 | #include <linux/ftrace_event.h> | 16 | #include <linux/ftrace_event.h> |
17 | 17 | ||
18 | #ifdef CONFIG_KSYM_TRACER | ||
19 | #include <asm/hw_breakpoint.h> | ||
20 | #endif | ||
21 | |||
18 | enum trace_type { | 22 | enum trace_type { |
19 | __TRACE_FIRST_TYPE = 0, | 23 | __TRACE_FIRST_TYPE = 0, |
20 | 24 | ||
@@ -40,6 +44,7 @@ enum trace_type { | |||
40 | TRACE_KMEM_FREE, | 44 | TRACE_KMEM_FREE, |
41 | TRACE_POWER, | 45 | TRACE_POWER, |
42 | TRACE_BLK, | 46 | TRACE_BLK, |
47 | TRACE_KSYM, | ||
43 | 48 | ||
44 | __TRACE_LAST_TYPE, | 49 | __TRACE_LAST_TYPE, |
45 | }; | 50 | }; |
@@ -207,6 +212,21 @@ struct syscall_trace_exit { | |||
207 | unsigned long ret; | 212 | unsigned long ret; |
208 | }; | 213 | }; |
209 | 214 | ||
215 | #define KSYM_SELFTEST_ENTRY "ksym_selftest_dummy" | ||
216 | extern int process_new_ksym_entry(char *ksymname, int op, unsigned long addr); | ||
217 | |||
218 | struct trace_ksym { | ||
219 | struct trace_entry ent; | ||
220 | struct hw_breakpoint *ksym_hbp; | ||
221 | unsigned long ksym_addr; | ||
222 | unsigned long ip; | ||
223 | #ifdef CONFIG_PROFILE_KSYM_TRACER | ||
224 | unsigned long counter; | ||
225 | #endif | ||
226 | struct hlist_node ksym_hlist; | ||
227 | char ksym_name[KSYM_NAME_LEN]; | ||
228 | char p_name[TASK_COMM_LEN]; | ||
229 | }; | ||
210 | 230 | ||
211 | /* | 231 | /* |
212 | * trace_flag_type is an enumeration that holds different | 232 | * trace_flag_type is an enumeration that holds different |
@@ -323,6 +343,7 @@ extern void __ftrace_bad_type(void); | |||
323 | TRACE_SYSCALL_ENTER); \ | 343 | TRACE_SYSCALL_ENTER); \ |
324 | IF_ASSIGN(var, ent, struct syscall_trace_exit, \ | 344 | IF_ASSIGN(var, ent, struct syscall_trace_exit, \ |
325 | TRACE_SYSCALL_EXIT); \ | 345 | TRACE_SYSCALL_EXIT); \ |
346 | IF_ASSIGN(var, ent, struct trace_ksym, TRACE_KSYM); \ | ||
326 | __ftrace_bad_type(); \ | 347 | __ftrace_bad_type(); \ |
327 | } while (0) | 348 | } while (0) |
328 | 349 | ||
@@ -540,6 +561,8 @@ extern int trace_selftest_startup_branch(struct tracer *trace, | |||
540 | struct trace_array *tr); | 561 | struct trace_array *tr); |
541 | extern int trace_selftest_startup_hw_branches(struct tracer *trace, | 562 | extern int trace_selftest_startup_hw_branches(struct tracer *trace, |
542 | struct trace_array *tr); | 563 | struct trace_array *tr); |
564 | extern int trace_selftest_startup_ksym(struct tracer *trace, | ||
565 | struct trace_array *tr); | ||
543 | #endif /* CONFIG_FTRACE_STARTUP_TEST */ | 566 | #endif /* CONFIG_FTRACE_STARTUP_TEST */ |
544 | 567 | ||
545 | extern void *head_page(struct trace_array_cpu *data); | 568 | extern void *head_page(struct trace_array_cpu *data); |
diff --git a/kernel/trace/trace_ksym.c b/kernel/trace/trace_ksym.c new file mode 100644 index 000000000000..eef97e7c8db7 --- /dev/null +++ b/kernel/trace/trace_ksym.c | |||
@@ -0,0 +1,525 @@ | |||
1 | /* | ||
2 | * trace_ksym.c - Kernel Symbol Tracer | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or modify | ||
5 | * it under the terms of the GNU General Public License as published by | ||
6 | * the Free Software Foundation; either version 2 of the License, or | ||
7 | * (at your option) any later version. | ||
8 | * | ||
9 | * This program is distributed in the hope that it will be useful, | ||
10 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
11 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
12 | * GNU General Public License for more details. | ||
13 | * | ||
14 | * You should have received a copy of the GNU General Public License | ||
15 | * along with this program; if not, write to the Free Software | ||
16 | * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. | ||
17 | * | ||
18 | * Copyright (C) IBM Corporation, 2009 | ||
19 | */ | ||
20 | |||
21 | #include <linux/kallsyms.h> | ||
22 | #include <linux/uaccess.h> | ||
23 | #include <linux/debugfs.h> | ||
24 | #include <linux/ftrace.h> | ||
25 | #include <linux/module.h> | ||
26 | #include <linux/fs.h> | ||
27 | |||
28 | #include "trace_output.h" | ||
29 | #include "trace_stat.h" | ||
30 | #include "trace.h" | ||
31 | |||
32 | /* For now, let us restrict the no. of symbols traced simultaneously to number | ||
33 | * of available hardware breakpoint registers. | ||
34 | */ | ||
35 | #define KSYM_TRACER_MAX HBP_NUM | ||
36 | |||
37 | #define KSYM_TRACER_OP_LEN 3 /* rw- */ | ||
38 | #define KSYM_FILTER_ENTRY_LEN (KSYM_NAME_LEN + KSYM_TRACER_OP_LEN + 1) | ||
39 | |||
40 | static struct trace_array *ksym_trace_array; | ||
41 | |||
42 | static unsigned int ksym_filter_entry_count; | ||
43 | static unsigned int ksym_tracing_enabled; | ||
44 | |||
45 | static HLIST_HEAD(ksym_filter_head); | ||
46 | |||
47 | static DEFINE_MUTEX(ksym_tracer_mutex); | ||
48 | |||
49 | #ifdef CONFIG_PROFILE_KSYM_TRACER | ||
50 | |||
51 | #define MAX_UL_INT 0xffffffff | ||
52 | |||
53 | void ksym_collect_stats(unsigned long hbp_hit_addr) | ||
54 | { | ||
55 | struct hlist_node *node; | ||
56 | struct trace_ksym *entry; | ||
57 | |||
58 | rcu_read_lock(); | ||
59 | hlist_for_each_entry_rcu(entry, node, &ksym_filter_head, ksym_hlist) { | ||
60 | if ((entry->ksym_addr == hbp_hit_addr) && | ||
61 | (entry->counter <= MAX_UL_INT)) { | ||
62 | entry->counter++; | ||
63 | break; | ||
64 | } | ||
65 | } | ||
66 | rcu_read_unlock(); | ||
67 | } | ||
68 | #endif /* CONFIG_PROFILE_KSYM_TRACER */ | ||
69 | |||
70 | void ksym_hbp_handler(struct hw_breakpoint *hbp, struct pt_regs *regs) | ||
71 | { | ||
72 | struct ring_buffer_event *event; | ||
73 | struct trace_array *tr; | ||
74 | struct trace_ksym *entry; | ||
75 | int pc; | ||
76 | |||
77 | if (!ksym_tracing_enabled) | ||
78 | return; | ||
79 | |||
80 | tr = ksym_trace_array; | ||
81 | pc = preempt_count(); | ||
82 | |||
83 | event = trace_buffer_lock_reserve(tr, TRACE_KSYM, | ||
84 | sizeof(*entry), 0, pc); | ||
85 | if (!event) | ||
86 | return; | ||
87 | |||
88 | entry = ring_buffer_event_data(event); | ||
89 | strlcpy(entry->ksym_name, hbp->info.name, KSYM_SYMBOL_LEN); | ||
90 | entry->ksym_hbp = hbp; | ||
91 | entry->ip = instruction_pointer(regs); | ||
92 | strlcpy(entry->p_name, current->comm, TASK_COMM_LEN); | ||
93 | #ifdef CONFIG_PROFILE_KSYM_TRACER | ||
94 | ksym_collect_stats(hbp->info.address); | ||
95 | #endif /* CONFIG_PROFILE_KSYM_TRACER */ | ||
96 | |||
97 | trace_buffer_unlock_commit(tr, event, 0, pc); | ||
98 | } | ||
99 | |||
100 | /* Valid access types are represented as | ||
101 | * | ||
102 | * rw- : Set Read/Write Access Breakpoint | ||
103 | * -w- : Set Write Access Breakpoint | ||
104 | * --- : Clear Breakpoints | ||
105 | * --x : Set Execution Break points (Not available yet) | ||
106 | * | ||
107 | */ | ||
108 | static int ksym_trace_get_access_type(char *access_str) | ||
109 | { | ||
110 | int pos, access = 0; | ||
111 | |||
112 | for (pos = 0; pos < KSYM_TRACER_OP_LEN; pos++) { | ||
113 | switch (access_str[pos]) { | ||
114 | case 'r': | ||
115 | access += (pos == 0) ? 4 : -1; | ||
116 | break; | ||
117 | case 'w': | ||
118 | access += (pos == 1) ? 2 : -1; | ||
119 | break; | ||
120 | case '-': | ||
121 | break; | ||
122 | default: | ||
123 | return -EINVAL; | ||
124 | } | ||
125 | } | ||
126 | |||
127 | switch (access) { | ||
128 | case 6: | ||
129 | access = HW_BREAKPOINT_RW; | ||
130 | break; | ||
131 | case 2: | ||
132 | access = HW_BREAKPOINT_WRITE; | ||
133 | break; | ||
134 | case 0: | ||
135 | access = 0; | ||
136 | } | ||
137 | |||
138 | return access; | ||
139 | } | ||
140 | |||
141 | /* | ||
142 | * There can be several possible malformed requests and we attempt to capture | ||
143 | * all of them. We enumerate some of the rules | ||
144 | * 1. We will not allow kernel symbols with ':' since it is used as a delimiter. | ||
145 | * i.e. multiple ':' symbols disallowed. Possible uses are of the form | ||
146 | * <module>:<ksym_name>:<op>. | ||
147 | * 2. No delimiter symbol ':' in the input string | ||
148 | * 3. Spurious operator symbols or symbols not in their respective positions | ||
149 | * 4. <ksym_name>:--- i.e. clear breakpoint request when ksym_name not in file | ||
150 | * 5. Kernel symbol not a part of /proc/kallsyms | ||
151 | * 6. Duplicate requests | ||
152 | */ | ||
153 | static int parse_ksym_trace_str(char *input_string, char **ksymname, | ||
154 | unsigned long *addr) | ||
155 | { | ||
156 | char *delimiter = ":"; | ||
157 | int ret; | ||
158 | |||
159 | ret = -EINVAL; | ||
160 | *ksymname = strsep(&input_string, delimiter); | ||
161 | *addr = kallsyms_lookup_name(*ksymname); | ||
162 | |||
163 | /* Check for malformed request: (2), (1) and (5) */ | ||
164 | if ((!input_string) || | ||
165 | (strlen(input_string) != (KSYM_TRACER_OP_LEN + 1)) || | ||
166 | (*addr == 0)) | ||
167 | goto return_code; | ||
168 | ret = ksym_trace_get_access_type(input_string); | ||
169 | |||
170 | return_code: | ||
171 | return ret; | ||
172 | } | ||
173 | |||
174 | int process_new_ksym_entry(char *ksymname, int op, unsigned long addr) | ||
175 | { | ||
176 | struct trace_ksym *entry; | ||
177 | int ret; | ||
178 | |||
179 | if (ksym_filter_entry_count >= KSYM_TRACER_MAX) { | ||
180 | printk(KERN_ERR "ksym_tracer: Maximum limit:(%d) reached. No" | ||
181 | " new requests for tracing can be accepted now.\n", | ||
182 | KSYM_TRACER_MAX); | ||
183 | return -ENOSPC; | ||
184 | } | ||
185 | |||
186 | entry = kzalloc(sizeof(struct trace_ksym), GFP_KERNEL); | ||
187 | if (!entry) | ||
188 | return -ENOMEM; | ||
189 | |||
190 | entry->ksym_hbp = kzalloc(sizeof(struct hw_breakpoint), GFP_KERNEL); | ||
191 | if (!entry->ksym_hbp) { | ||
192 | kfree(entry); | ||
193 | return -ENOMEM; | ||
194 | } | ||
195 | |||
196 | entry->ksym_hbp->info.name = ksymname; | ||
197 | entry->ksym_hbp->info.type = op; | ||
198 | entry->ksym_addr = entry->ksym_hbp->info.address = addr; | ||
199 | #ifdef CONFIG_X86 | ||
200 | entry->ksym_hbp->info.len = HW_BREAKPOINT_LEN_4; | ||
201 | #endif | ||
202 | entry->ksym_hbp->triggered = (void *)ksym_hbp_handler; | ||
203 | |||
204 | ret = register_kernel_hw_breakpoint(entry->ksym_hbp); | ||
205 | if (ret < 0) { | ||
206 | printk(KERN_INFO "ksym_tracer request failed. Try again" | ||
207 | " later!!\n"); | ||
208 | kfree(entry->ksym_hbp); | ||
209 | kfree(entry); | ||
210 | return -EAGAIN; | ||
211 | } | ||
212 | hlist_add_head_rcu(&(entry->ksym_hlist), &ksym_filter_head); | ||
213 | ksym_filter_entry_count++; | ||
214 | |||
215 | return 0; | ||
216 | } | ||
217 | |||
218 | static ssize_t ksym_trace_filter_read(struct file *filp, char __user *ubuf, | ||
219 | size_t count, loff_t *ppos) | ||
220 | { | ||
221 | struct trace_ksym *entry; | ||
222 | struct hlist_node *node; | ||
223 | char buf[KSYM_FILTER_ENTRY_LEN * KSYM_TRACER_MAX]; | ||
224 | ssize_t ret, cnt = 0; | ||
225 | |||
226 | mutex_lock(&ksym_tracer_mutex); | ||
227 | |||
228 | hlist_for_each_entry(entry, node, &ksym_filter_head, ksym_hlist) { | ||
229 | cnt += snprintf(&buf[cnt], KSYM_FILTER_ENTRY_LEN - cnt, "%s:", | ||
230 | entry->ksym_hbp->info.name); | ||
231 | if (entry->ksym_hbp->info.type == HW_BREAKPOINT_WRITE) | ||
232 | cnt += snprintf(&buf[cnt], KSYM_FILTER_ENTRY_LEN - cnt, | ||
233 | "-w-\n"); | ||
234 | else if (entry->ksym_hbp->info.type == HW_BREAKPOINT_RW) | ||
235 | cnt += snprintf(&buf[cnt], KSYM_FILTER_ENTRY_LEN - cnt, | ||
236 | "rw-\n"); | ||
237 | } | ||
238 | ret = simple_read_from_buffer(ubuf, count, ppos, buf, strlen(buf)); | ||
239 | mutex_unlock(&ksym_tracer_mutex); | ||
240 | |||
241 | return ret; | ||
242 | } | ||
243 | |||
244 | static ssize_t ksym_trace_filter_write(struct file *file, | ||
245 | const char __user *buffer, | ||
246 | size_t count, loff_t *ppos) | ||
247 | { | ||
248 | struct trace_ksym *entry; | ||
249 | struct hlist_node *node; | ||
250 | char *input_string, *ksymname = NULL; | ||
251 | unsigned long ksym_addr = 0; | ||
252 | int ret, op, changed = 0; | ||
253 | |||
254 | /* Ignore echo "" > ksym_trace_filter */ | ||
255 | if (count == 0) | ||
256 | return 0; | ||
257 | |||
258 | input_string = kzalloc(count, GFP_KERNEL); | ||
259 | if (!input_string) | ||
260 | return -ENOMEM; | ||
261 | |||
262 | if (copy_from_user(input_string, buffer, count)) { | ||
263 | kfree(input_string); | ||
264 | return -EFAULT; | ||
265 | } | ||
266 | |||
267 | ret = op = parse_ksym_trace_str(input_string, &ksymname, &ksym_addr); | ||
268 | if (ret < 0) { | ||
269 | kfree(input_string); | ||
270 | return ret; | ||
271 | } | ||
272 | |||
273 | mutex_lock(&ksym_tracer_mutex); | ||
274 | |||
275 | ret = -EINVAL; | ||
276 | hlist_for_each_entry(entry, node, &ksym_filter_head, ksym_hlist) { | ||
277 | if (entry->ksym_addr == ksym_addr) { | ||
278 | /* Check for malformed request: (6) */ | ||
279 | if (entry->ksym_hbp->info.type != op) | ||
280 | changed = 1; | ||
281 | else | ||
282 | goto err_ret; | ||
283 | break; | ||
284 | } | ||
285 | } | ||
286 | if (changed) { | ||
287 | unregister_kernel_hw_breakpoint(entry->ksym_hbp); | ||
288 | entry->ksym_hbp->info.type = op; | ||
289 | if (op > 0) { | ||
290 | ret = register_kernel_hw_breakpoint(entry->ksym_hbp); | ||
291 | if (ret == 0) { | ||
292 | ret = count; | ||
293 | goto unlock_ret_path; | ||
294 | } | ||
295 | } | ||
296 | ksym_filter_entry_count--; | ||
297 | hlist_del_rcu(&(entry->ksym_hlist)); | ||
298 | synchronize_rcu(); | ||
299 | kfree(entry->ksym_hbp); | ||
300 | kfree(entry); | ||
301 | ret = count; | ||
302 | goto err_ret; | ||
303 | } else { | ||
304 | /* Check for malformed request: (4) */ | ||
305 | if (op == 0) | ||
306 | goto err_ret; | ||
307 | ret = process_new_ksym_entry(ksymname, op, ksym_addr); | ||
308 | if (ret) | ||
309 | goto err_ret; | ||
310 | } | ||
311 | ret = count; | ||
312 | goto unlock_ret_path; | ||
313 | |||
314 | err_ret: | ||
315 | kfree(input_string); | ||
316 | |||
317 | unlock_ret_path: | ||
318 | mutex_unlock(&ksym_tracer_mutex); | ||
319 | return ret; | ||
320 | } | ||
321 | |||
322 | static const struct file_operations ksym_tracing_fops = { | ||
323 | .open = tracing_open_generic, | ||
324 | .read = ksym_trace_filter_read, | ||
325 | .write = ksym_trace_filter_write, | ||
326 | }; | ||
327 | |||
/*
 * Tracer ->reset callback: disable ksym tracing and tear down every
 * registered breakpoint filter entry under ksym_tracer_mutex.
 */
static void ksym_trace_reset(struct trace_array *tr)
{
	struct trace_ksym *entry;
	struct hlist_node *node, *node1;

	ksym_tracing_enabled = 0;

	mutex_lock(&ksym_tracer_mutex);
	/* _safe variant: each iteration unlinks and frees the current entry */
	hlist_for_each_entry_safe(entry, node, node1, &ksym_filter_head,
				  ksym_hlist) {
		unregister_kernel_hw_breakpoint(entry->ksym_hbp);
		ksym_filter_entry_count--;
		hlist_del_rcu(&(entry->ksym_hlist));
		/* wait out lockless list readers before freeing the entry */
		synchronize_rcu();
		/* Free the 'input_string' only if reset
		 * after startup self-test
		 */
#ifdef CONFIG_FTRACE_SELFTEST
		if (strncmp(entry->ksym_hbp->info.name, KSYM_SELFTEST_ENTRY,
			    strlen(KSYM_SELFTEST_ENTRY)) != 0)
#endif /* CONFIG_FTRACE_SELFTEST*/
			kfree(entry->ksym_hbp->info.name);
		kfree(entry->ksym_hbp);
		kfree(entry);
	}
	mutex_unlock(&ksym_tracer_mutex);
}
355 | |||
356 | static int ksym_trace_init(struct trace_array *tr) | ||
357 | { | ||
358 | int cpu, ret = 0; | ||
359 | |||
360 | for_each_online_cpu(cpu) | ||
361 | tracing_reset(tr, cpu); | ||
362 | ksym_tracing_enabled = 1; | ||
363 | ksym_trace_array = tr; | ||
364 | |||
365 | return ret; | ||
366 | } | ||
367 | |||
/*
 * Tracer ->print_header callback: emit the column banner for trace output.
 * NOTE(review): the column spacing inside these literals may have been
 * collapsed by the diff viewer this was copied from — verify alignment
 * against the rendered trace output.
 */
static void ksym_trace_print_header(struct seq_file *m)
{

	seq_puts(m,
		 "# TASK-PID CPU# Symbol Type "
		 "Function \n");
	seq_puts(m,
		 "# | | | | "
		 "| \n");
}
378 | |||
/*
 * Tracer ->print_line callback: format one TRACE_KSYM entry as
 * "task pid cpu watched-symbol access-type faulting-function".
 * Returns TRACE_TYPE_UNHANDLED for foreign entries, PARTIAL_LINE when the
 * seq buffer fills up, HANDLED on success.
 */
static enum print_line_t ksym_trace_output(struct trace_iterator *iter)
{
	struct trace_entry *entry = iter->ent;
	struct trace_seq *s = &iter->seq;
	struct trace_ksym *field;
	char str[KSYM_SYMBOL_LEN];
	int ret;

	/* not ours — let another output handler claim it */
	if (entry->type != TRACE_KSYM)
		return TRACE_TYPE_UNHANDLED;

	trace_assign_type(field, entry);

	ret = trace_seq_printf(s, "%-15s %-5d %-3d %-20s ", field->p_name,
				entry->pid, iter->cpu, field->ksym_name);
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	/* translate the breakpoint access type to a short mnemonic */
	switch (field->ksym_hbp->info.type) {
	case HW_BREAKPOINT_WRITE:
		ret = trace_seq_printf(s, " W ");
		break;
	case HW_BREAKPOINT_RW:
		ret = trace_seq_printf(s, " RW ");
		break;
	default:
		/* unexpected access type: abandon the line */
		return TRACE_TYPE_PARTIAL_LINE;
	}

	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	/* resolve the instruction pointer that touched the watched symbol */
	sprint_symbol(str, field->ip);
	ret = trace_seq_printf(s, "%-20s\n", str);
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	return TRACE_TYPE_HANDLED;
}
418 | |||
/*
 * The ksym tracer: reports hardware-breakpoint hits on watched kernel
 * symbols through the ftrace framework.
 */
struct tracer ksym_tracer __read_mostly =
{
	.name		= "ksym_tracer",
	.init		= ksym_trace_init,
	.reset		= ksym_trace_reset,
#ifdef CONFIG_FTRACE_SELFTEST
	.selftest	= trace_selftest_startup_ksym,
#endif
	.print_header	= ksym_trace_print_header,
	.print_line	= ksym_trace_output
};
430 | |||
431 | __init static int init_ksym_trace(void) | ||
432 | { | ||
433 | struct dentry *d_tracer; | ||
434 | struct dentry *entry; | ||
435 | |||
436 | d_tracer = tracing_init_dentry(); | ||
437 | ksym_filter_entry_count = 0; | ||
438 | |||
439 | entry = debugfs_create_file("ksym_trace_filter", 0644, d_tracer, | ||
440 | NULL, &ksym_tracing_fops); | ||
441 | if (!entry) | ||
442 | pr_warning("Could not create debugfs " | ||
443 | "'ksym_trace_filter' file\n"); | ||
444 | |||
445 | return register_tracer(&ksym_tracer); | ||
446 | } | ||
447 | device_initcall(init_ksym_trace); | ||
448 | |||
449 | |||
450 | #ifdef CONFIG_PROFILE_KSYM_TRACER | ||
/* ->stat_headers: column banner for the "ksym_tracer" stat file */
static int ksym_tracer_stat_headers(struct seq_file *m)
{
	seq_printf(m, " Access type ");
	seq_printf(m, " Symbol Counter \n");
	return 0;
}
457 | |||
/*
 * ->stat_show: print one stat row — access-type mnemonic, symbol name
 * (resolved from the watched address), and the hit counter.
 */
static int ksym_tracer_stat_show(struct seq_file *m, void *v)
{
	struct hlist_node *stat = v;
	struct trace_ksym *entry;
	int access_type = 0;
	char fn_name[KSYM_NAME_LEN];

	entry = hlist_entry(stat, struct trace_ksym, ksym_hlist);

	/* ksym_hbp may be NULL; 0 falls through to the "NA" case below */
	if (entry->ksym_hbp)
		access_type = entry->ksym_hbp->info.type;

	switch (access_type) {
	case HW_BREAKPOINT_WRITE:
		seq_printf(m, " W ");
		break;
	case HW_BREAKPOINT_RW:
		seq_printf(m, " RW ");
		break;
	default:
		seq_printf(m, " NA ");
	}

	/* map the watched address back to a symbol name if possible */
	if (lookup_symbol_name(entry->ksym_addr, fn_name) >= 0)
		seq_printf(m, " %s ", fn_name);
	else
		seq_printf(m, " <NA> ");

	seq_printf(m, "%15lu\n", entry->counter);
	return 0;
}
489 | |||
490 | static void *ksym_tracer_stat_start(struct tracer_stat *trace) | ||
491 | { | ||
492 | return &(ksym_filter_head.first); | ||
493 | } | ||
494 | |||
495 | static void * | ||
496 | ksym_tracer_stat_next(void *v, int idx) | ||
497 | { | ||
498 | struct hlist_node *stat = v; | ||
499 | |||
500 | return stat->next; | ||
501 | } | ||
502 | |||
/* Hooks the per-symbol hit counters into the generic tracer stat API */
static struct tracer_stat ksym_tracer_stats = {
	.name = "ksym_tracer",
	.stat_start = ksym_tracer_stat_start,
	.stat_next = ksym_tracer_stat_next,
	.stat_headers = ksym_tracer_stat_headers,
	.stat_show = ksym_tracer_stat_show
};
510 | |||
511 | __init static int ksym_tracer_stat_init(void) | ||
512 | { | ||
513 | int ret; | ||
514 | |||
515 | ret = register_stat_tracer(&ksym_tracer_stats); | ||
516 | if (ret) { | ||
517 | printk(KERN_WARNING "Warning: could not register " | ||
518 | "ksym tracer stats\n"); | ||
519 | return 1; | ||
520 | } | ||
521 | |||
522 | return 0; | ||
523 | } | ||
524 | fs_initcall(ksym_tracer_stat_init); | ||
525 | #endif /* CONFIG_PROFILE_KSYM_TRACER */ | ||
diff --git a/kernel/trace/trace_selftest.c b/kernel/trace/trace_selftest.c index 00dd6485bdd7..71f2edb0fd84 100644 --- a/kernel/trace/trace_selftest.c +++ b/kernel/trace/trace_selftest.c | |||
@@ -17,6 +17,7 @@ static inline int trace_valid_entry(struct trace_entry *entry) | |||
17 | case TRACE_GRAPH_ENT: | 17 | case TRACE_GRAPH_ENT: |
18 | case TRACE_GRAPH_RET: | 18 | case TRACE_GRAPH_RET: |
19 | case TRACE_HW_BRANCHES: | 19 | case TRACE_HW_BRANCHES: |
20 | case TRACE_KSYM: | ||
20 | return 1; | 21 | return 1; |
21 | } | 22 | } |
22 | return 0; | 23 | return 0; |
@@ -807,3 +808,55 @@ trace_selftest_startup_hw_branches(struct tracer *trace, | |||
807 | return ret; | 808 | return ret; |
808 | } | 809 | } |
809 | #endif /* CONFIG_HW_BRANCH_TRACER */ | 810 | #endif /* CONFIG_HW_BRANCH_TRACER */ |
811 | |||
812 | #ifdef CONFIG_KSYM_TRACER | ||
813 | static int ksym_selftest_dummy; | ||
814 | |||
815 | int | ||
816 | trace_selftest_startup_ksym(struct tracer *trace, struct trace_array *tr) | ||
817 | { | ||
818 | unsigned long count; | ||
819 | int ret; | ||
820 | |||
821 | /* start the tracing */ | ||
822 | ret = tracer_init(trace, tr); | ||
823 | if (ret) { | ||
824 | warn_failed_init_tracer(trace, ret); | ||
825 | return ret; | ||
826 | } | ||
827 | |||
828 | ksym_selftest_dummy = 0; | ||
829 | /* Register the read-write tracing request */ | ||
830 | ret = process_new_ksym_entry(KSYM_SELFTEST_ENTRY, HW_BREAKPOINT_RW, | ||
831 | (unsigned long)(&ksym_selftest_dummy)); | ||
832 | |||
833 | if (ret < 0) { | ||
834 | printk(KERN_CONT "ksym_trace read-write startup test failed\n"); | ||
835 | goto ret_path; | ||
836 | } | ||
837 | /* Perform a read and a write operation over the dummy variable to | ||
838 | * trigger the tracer | ||
839 | */ | ||
840 | if (ksym_selftest_dummy == 0) | ||
841 | ksym_selftest_dummy++; | ||
842 | |||
843 | /* stop the tracing. */ | ||
844 | tracing_stop(); | ||
845 | /* check the trace buffer */ | ||
846 | ret = trace_test_buffer(tr, &count); | ||
847 | trace->reset(tr); | ||
848 | tracing_start(); | ||
849 | |||
850 | /* read & write operations - one each is performed on the dummy variable | ||
851 | * triggering two entries in the trace buffer | ||
852 | */ | ||
853 | if (!ret && count != 2) { | ||
854 | printk(KERN_CONT "Ksym tracer startup test failed"); | ||
855 | ret = -1; | ||
856 | } | ||
857 | |||
858 | ret_path: | ||
859 | return ret; | ||
860 | } | ||
861 | #endif /* CONFIG_KSYM_TRACER */ | ||
862 | |||