Diffstat (limited to 'kernel/trace')
-rw-r--r--  kernel/trace/Kconfig           21
-rw-r--r--  kernel/trace/Makefile           1
-rw-r--r--  kernel/trace/trace.h           11
-rw-r--r--  kernel/trace/trace_entries.h   16
-rw-r--r--  kernel/trace/trace_ksym.c     551
-rw-r--r--  kernel/trace/trace_selftest.c  53
6 files changed, 653 insertions(+), 0 deletions(-)
diff --git a/kernel/trace/Kconfig b/kernel/trace/Kconfig
index b416512ad17f..06c3d5be6759 100644
--- a/kernel/trace/Kconfig
+++ b/kernel/trace/Kconfig
@@ -339,6 +339,27 @@ config POWER_TRACER
 	  power management decisions, specifically the C-state and P-state
 	  behavior.
 
+config KSYM_TRACER
+	bool "Trace read and write access on kernel memory locations"
+	depends on HAVE_HW_BREAKPOINT
+	select TRACING
+	help
+	  This tracer helps find read and write operations on any given kernel
+	  symbol i.e. /proc/kallsyms.
+
+config PROFILE_KSYM_TRACER
+	bool "Profile all kernel memory accesses on 'watched' variables"
+	depends on KSYM_TRACER
+	help
+	  This tracer profiles kernel accesses on variables watched through the
+	  ksym tracer ftrace plugin. Depending upon the hardware, all read
+	  and write operations on kernel variables can be monitored for
+	  accesses.
+
+	  The results will be displayed in:
+	  /debugfs/tracing/profile_ksym
+
+	  Say N if unsure.
 
 config STACK_TRACER
 	bool "Trace max stack"
diff --git a/kernel/trace/Makefile b/kernel/trace/Makefile
index 26f03ac07c2b..0f84c52e58fe 100644
--- a/kernel/trace/Makefile
+++ b/kernel/trace/Makefile
@@ -53,6 +53,7 @@ obj-$(CONFIG_EVENT_TRACING) += trace_export.o
 obj-$(CONFIG_FTRACE_SYSCALLS) += trace_syscalls.o
 obj-$(CONFIG_EVENT_PROFILE) += trace_event_profile.o
 obj-$(CONFIG_EVENT_TRACING) += trace_events_filter.o
+obj-$(CONFIG_KSYM_TRACER) += trace_ksym.o
 obj-$(CONFIG_EVENT_TRACING) += power-traces.o
 
 libftrace-y := ftrace.o
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
index 4959ada9e0bb..91c3d0e9a5a1 100644
--- a/kernel/trace/trace.h
+++ b/kernel/trace/trace.h
@@ -15,6 +15,10 @@
 #include <linux/trace_seq.h>
 #include <linux/ftrace_event.h>
 
+#ifdef CONFIG_KSYM_TRACER
+#include <asm/hw_breakpoint.h>
+#endif
+
 enum trace_type {
 	__TRACE_FIRST_TYPE = 0,
 
@@ -37,6 +41,7 @@ enum trace_type {
 	TRACE_KMEM_ALLOC,
 	TRACE_KMEM_FREE,
 	TRACE_BLK,
+	TRACE_KSYM,
 
 	__TRACE_LAST_TYPE,
 };
@@ -209,6 +214,7 @@ extern void __ftrace_bad_type(void);
 			  TRACE_KMEM_ALLOC);	\
 		IF_ASSIGN(var, ent, struct kmemtrace_free_entry,	\
 			  TRACE_KMEM_FREE);	\
+		IF_ASSIGN(var, ent, struct ksym_trace_entry, TRACE_KSYM);\
 		__ftrace_bad_type();	\
 	} while (0)
 
@@ -364,6 +370,9 @@ int register_tracer(struct tracer *type);
 void unregister_tracer(struct tracer *type);
 int is_tracing_stopped(void);
 
+#define KSYM_SELFTEST_ENTRY	"ksym_selftest_dummy"
+extern int process_new_ksym_entry(char *ksymname, int op, unsigned long addr);
+
 extern unsigned long nsecs_to_usecs(unsigned long nsecs);
 
 #ifdef CONFIG_TRACER_MAX_TRACE
@@ -438,6 +447,8 @@ extern int trace_selftest_startup_branch(struct tracer *trace,
 					 struct trace_array *tr);
 extern int trace_selftest_startup_hw_branches(struct tracer *trace,
 					      struct trace_array *tr);
+extern int trace_selftest_startup_ksym(struct tracer *trace,
+				       struct trace_array *tr);
 #endif /* CONFIG_FTRACE_STARTUP_TEST */
 
 extern void *head_page(struct trace_array_cpu *data);
diff --git a/kernel/trace/trace_entries.h b/kernel/trace/trace_entries.h
index ead3d724599d..e19747d4f860 100644
--- a/kernel/trace/trace_entries.h
+++ b/kernel/trace/trace_entries.h
@@ -364,3 +364,19 @@ FTRACE_ENTRY(kmem_free, kmemtrace_free_entry,
 	F_printk("type:%u call_site:%lx ptr:%p",
 		 __entry->type_id, __entry->call_site, __entry->ptr)
 );
+
+FTRACE_ENTRY(ksym_trace, ksym_trace_entry,
+
+	TRACE_KSYM,
+
+	F_STRUCT(
+		__field(	unsigned long,	ip			  )
+		__field(	unsigned char,	type			  )
+		__array(	char	     ,	ksym_name, KSYM_NAME_LEN )
+		__array(	char	     ,	cmd,	   TASK_COMM_LEN  )
+	),
+
+	F_printk("ip: %pF type: %d ksym_name: %s cmd: %s",
+		 (void *)__entry->ip, (unsigned int)__entry->type,
+		 __entry->ksym_name, __entry->cmd)
+);
diff --git a/kernel/trace/trace_ksym.c b/kernel/trace/trace_ksym.c
new file mode 100644
index 000000000000..6d5609c67378
--- /dev/null
+++ b/kernel/trace/trace_ksym.c
@@ -0,0 +1,551 @@
+/*
+ * trace_ksym.c - Kernel Symbol Tracer
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * Copyright (C) IBM Corporation, 2009
+ */
+
+#include <linux/kallsyms.h>
+#include <linux/uaccess.h>
+#include <linux/debugfs.h>
+#include <linux/ftrace.h>
+#include <linux/module.h>
+#include <linux/fs.h>
+
+#include "trace_output.h"
+#include "trace_stat.h"
+#include "trace.h"
+
+/* For now, let us restrict the no. of symbols traced simultaneously to number
+ * of available hardware breakpoint registers.
+ */
+#define KSYM_TRACER_MAX HBP_NUM
+
+#define KSYM_TRACER_OP_LEN 3 /* rw- */
+
+struct trace_ksym {
+	struct hw_breakpoint	*ksym_hbp;
+	unsigned long		ksym_addr;
+#ifdef CONFIG_PROFILE_KSYM_TRACER
+	unsigned long		counter;
+#endif
+	struct hlist_node	ksym_hlist;
+};
+
+static struct trace_array *ksym_trace_array;
+
+static unsigned int ksym_filter_entry_count;
+static unsigned int ksym_tracing_enabled;
+
+static HLIST_HEAD(ksym_filter_head);
+
+static DEFINE_MUTEX(ksym_tracer_mutex);
+
+#ifdef CONFIG_PROFILE_KSYM_TRACER
+
+#define MAX_UL_INT 0xffffffff
+
+void ksym_collect_stats(unsigned long hbp_hit_addr)
+{
+	struct hlist_node *node;
+	struct trace_ksym *entry;
+
+	rcu_read_lock();
+	hlist_for_each_entry_rcu(entry, node, &ksym_filter_head, ksym_hlist) {
+		if ((entry->ksym_addr == hbp_hit_addr) &&
+		    (entry->counter <= MAX_UL_INT)) {
+			entry->counter++;
+			break;
+		}
+	}
+	rcu_read_unlock();
+}
+#endif /* CONFIG_PROFILE_KSYM_TRACER */
+
+void ksym_hbp_handler(struct hw_breakpoint *hbp, struct pt_regs *regs)
+{
+	struct ring_buffer_event *event;
+	struct ksym_trace_entry *entry;
+	struct ring_buffer *buffer;
+	int pc;
+
+	if (!ksym_tracing_enabled)
+		return;
+
+	buffer = ksym_trace_array->buffer;
+
+	pc = preempt_count();
+
+	event = trace_buffer_lock_reserve(buffer, TRACE_KSYM,
+					  sizeof(*entry), 0, pc);
+	if (!event)
+		return;
+
+	entry		= ring_buffer_event_data(event);
+	entry->ip	= instruction_pointer(regs);
+	entry->type	= hbp->info.type;
+	strlcpy(entry->ksym_name, hbp->info.name, KSYM_NAME_LEN);
+	strlcpy(entry->cmd, current->comm, TASK_COMM_LEN);
+
+#ifdef CONFIG_PROFILE_KSYM_TRACER
+	ksym_collect_stats(hbp->info.address);
+#endif /* CONFIG_PROFILE_KSYM_TRACER */
+
+	trace_buffer_unlock_commit(buffer, event, 0, pc);
+}
+
+/* Valid access types are represented as
+ *
+ * rw- : Set Read/Write Access Breakpoint
+ * -w- : Set Write Access Breakpoint
+ * --- : Clear Breakpoints
+ * --x : Set Execution Break points (Not available yet)
+ *
+ */
+static int ksym_trace_get_access_type(char *str)
+{
+	int access = 0;
+
+	if (str[0] == 'r')
+		access += 4;
+	else if (str[0] != '-')
+		return -EINVAL;
+
+	if (str[1] == 'w')
+		access += 2;
+	else if (str[1] != '-')
+		return -EINVAL;
+
+	if (str[2] != '-')
+		return -EINVAL;
+
+	switch (access) {
+	case 6:
+		access = HW_BREAKPOINT_RW;
+		break;
+	case 4:
+		access = -EINVAL;
+		break;
+	case 2:
+		access = HW_BREAKPOINT_WRITE;
+		break;
+	}
+
+	return access;
+}
+
+/*
+ * There can be several possible malformed requests and we attempt to capture
+ * all of them. We enumerate some of the rules
+ * 1. We will not allow kernel symbols with ':' since it is used as a delimiter.
+ *    i.e. multiple ':' symbols disallowed. Possible uses are of the form
+ *    <module>:<ksym_name>:<op>.
+ * 2. No delimiter symbol ':' in the input string
+ * 3. Spurious operator symbols or symbols not in their respective positions
+ * 4. <ksym_name>:--- i.e. clear breakpoint request when ksym_name not in file
+ * 5. Kernel symbol not a part of /proc/kallsyms
+ * 6. Duplicate requests
+ */
+static int parse_ksym_trace_str(char *input_string, char **ksymname,
+				unsigned long *addr)
+{
+	int ret;
+
+	*ksymname = strsep(&input_string, ":");
+	*addr = kallsyms_lookup_name(*ksymname);
+
+	/* Check for malformed request: (2), (1) and (5) */
+	if ((!input_string) ||
+	    (strlen(input_string) != KSYM_TRACER_OP_LEN) ||
+	    (*addr == 0))
+		return -EINVAL;
+
+	ret = ksym_trace_get_access_type(input_string);
+
+	return ret;
+}
+
+int process_new_ksym_entry(char *ksymname, int op, unsigned long addr)
+{
+	struct trace_ksym *entry;
+	int ret = -ENOMEM;
+
+	if (ksym_filter_entry_count >= KSYM_TRACER_MAX) {
+		printk(KERN_ERR "ksym_tracer: Maximum limit:(%d) reached. No"
+		" new requests for tracing can be accepted now.\n",
+			KSYM_TRACER_MAX);
+		return -ENOSPC;
+	}
+
+	entry = kzalloc(sizeof(struct trace_ksym), GFP_KERNEL);
+	if (!entry)
+		return -ENOMEM;
+
+	entry->ksym_hbp = kzalloc(sizeof(struct hw_breakpoint), GFP_KERNEL);
+	if (!entry->ksym_hbp)
+		goto err;
+
+	entry->ksym_hbp->info.name = kstrdup(ksymname, GFP_KERNEL);
+	if (!entry->ksym_hbp->info.name)
+		goto err;
+
+	entry->ksym_hbp->info.type = op;
+	entry->ksym_addr = entry->ksym_hbp->info.address = addr;
+#ifdef CONFIG_X86
+	entry->ksym_hbp->info.len = HW_BREAKPOINT_LEN_4;
+#endif
+	entry->ksym_hbp->triggered = (void *)ksym_hbp_handler;
+
+	ret = register_kernel_hw_breakpoint(entry->ksym_hbp);
+	if (ret < 0) {
+		printk(KERN_INFO "ksym_tracer request failed. Try again"
+					" later!!\n");
+		ret = -EAGAIN;
+		goto err;
+	}
+	hlist_add_head_rcu(&(entry->ksym_hlist), &ksym_filter_head);
+	ksym_filter_entry_count++;
+	return 0;
+err:
+	if (entry->ksym_hbp)
+		kfree(entry->ksym_hbp->info.name);
+	kfree(entry->ksym_hbp);
+	kfree(entry);
+	return ret;
+}
+
+static ssize_t ksym_trace_filter_read(struct file *filp, char __user *ubuf,
+				      size_t count, loff_t *ppos)
+{
+	struct trace_ksym *entry;
+	struct hlist_node *node;
+	struct trace_seq *s;
+	ssize_t cnt = 0;
+	int ret;
+
+	s = kmalloc(sizeof(*s), GFP_KERNEL);
+	if (!s)
+		return -ENOMEM;
+	trace_seq_init(s);
+
+	mutex_lock(&ksym_tracer_mutex);
+
+	hlist_for_each_entry(entry, node, &ksym_filter_head, ksym_hlist) {
+		ret = trace_seq_printf(s, "%s:", entry->ksym_hbp->info.name);
+		if (entry->ksym_hbp->info.type == HW_BREAKPOINT_WRITE)
+			ret = trace_seq_puts(s, "-w-\n");
+		else if (entry->ksym_hbp->info.type == HW_BREAKPOINT_RW)
+			ret = trace_seq_puts(s, "rw-\n");
+		WARN_ON_ONCE(!ret);
+	}
+
+	cnt = simple_read_from_buffer(ubuf, count, ppos, s->buffer, s->len);
+
+	mutex_unlock(&ksym_tracer_mutex);
+
+	kfree(s);
+
+	return cnt;
+}
+
+static void __ksym_trace_reset(void)
+{
+	struct trace_ksym *entry;
+	struct hlist_node *node, *node1;
+
+	mutex_lock(&ksym_tracer_mutex);
+	hlist_for_each_entry_safe(entry, node, node1, &ksym_filter_head,
+				  ksym_hlist) {
+		unregister_kernel_hw_breakpoint(entry->ksym_hbp);
+		ksym_filter_entry_count--;
+		hlist_del_rcu(&(entry->ksym_hlist));
+		synchronize_rcu();
+		kfree(entry->ksym_hbp->info.name);
+		kfree(entry->ksym_hbp);
+		kfree(entry);
+	}
+	mutex_unlock(&ksym_tracer_mutex);
+}
+
+static ssize_t ksym_trace_filter_write(struct file *file,
+				       const char __user *buffer,
+				       size_t count, loff_t *ppos)
+{
+	struct trace_ksym *entry;
+	struct hlist_node *node;
+	char *input_string, *ksymname = NULL;
+	unsigned long ksym_addr = 0;
+	int ret, op, changed = 0;
+
+	input_string = kzalloc(count + 1, GFP_KERNEL);
+	if (!input_string)
+		return -ENOMEM;
+
+	if (copy_from_user(input_string, buffer, count)) {
+		kfree(input_string);
+		return -EFAULT;
+	}
+	input_string[count] = '\0';
+
+	strstrip(input_string);
+
+	/*
+	 * Clear all breakpoints if:
+	 * 1: echo > ksym_trace_filter
+	 * 2: echo 0 > ksym_trace_filter
+	 * 3: echo "*:---" > ksym_trace_filter
+	 */
+	if (!input_string[0] || !strcmp(input_string, "0") ||
+	    !strcmp(input_string, "*:---")) {
+		__ksym_trace_reset();
+		kfree(input_string);
+		return count;
+	}
+
+	ret = op = parse_ksym_trace_str(input_string, &ksymname, &ksym_addr);
+	if (ret < 0) {
+		kfree(input_string);
+		return ret;
+	}
+
+	mutex_lock(&ksym_tracer_mutex);
+
+	ret = -EINVAL;
+	hlist_for_each_entry(entry, node, &ksym_filter_head, ksym_hlist) {
+		if (entry->ksym_addr == ksym_addr) {
+			/* Check for malformed request: (6) */
+			if (entry->ksym_hbp->info.type != op)
+				changed = 1;
+			else
+				goto out;
+			break;
+		}
+	}
+	if (changed) {
+		unregister_kernel_hw_breakpoint(entry->ksym_hbp);
+		entry->ksym_hbp->info.type = op;
+		if (op > 0) {
+			ret = register_kernel_hw_breakpoint(entry->ksym_hbp);
+			if (ret == 0)
+				goto out;
+		}
+		ksym_filter_entry_count--;
+		hlist_del_rcu(&(entry->ksym_hlist));
+		synchronize_rcu();
+		kfree(entry->ksym_hbp->info.name);
+		kfree(entry->ksym_hbp);
+		kfree(entry);
+		ret = 0;
+		goto out;
+	} else {
+		/* Check for malformed request: (4) */
+		if (op == 0)
+			goto out;
+		ret = process_new_ksym_entry(ksymname, op, ksym_addr);
+	}
+out:
+	mutex_unlock(&ksym_tracer_mutex);
+
+	kfree(input_string);
+
+	if (!ret)
+		ret = count;
+	return ret;
+}
+
+static const struct file_operations ksym_tracing_fops = {
+	.open		= tracing_open_generic,
+	.read		= ksym_trace_filter_read,
+	.write		= ksym_trace_filter_write,
+};
+
+static void ksym_trace_reset(struct trace_array *tr)
+{
+	ksym_tracing_enabled = 0;
+	__ksym_trace_reset();
+}
+
+static int ksym_trace_init(struct trace_array *tr)
+{
+	int cpu, ret = 0;
+
+	for_each_online_cpu(cpu)
+		tracing_reset(tr, cpu);
+	ksym_tracing_enabled = 1;
+	ksym_trace_array = tr;
+
+	return ret;
+}
+
+static void ksym_trace_print_header(struct seq_file *m)
+{
+	seq_puts(m,
+		 "#       TASK-PID      CPU#      Symbol           "
+		 "Type    Function\n");
+	seq_puts(m,
+		 "#          |           |          |              "
+		 "  |         |\n");
+}
+
+static enum print_line_t ksym_trace_output(struct trace_iterator *iter)
+{
+	struct trace_entry *entry = iter->ent;
+	struct trace_seq *s = &iter->seq;
+	struct ksym_trace_entry *field;
+	char str[KSYM_SYMBOL_LEN];
+	int ret;
+
+	if (entry->type != TRACE_KSYM)
+		return TRACE_TYPE_UNHANDLED;
+
+	trace_assign_type(field, entry);
+
+	ret = trace_seq_printf(s, "%11s-%-5d [%03d] %-30s ", field->cmd,
+				entry->pid, iter->cpu, field->ksym_name);
+	if (!ret)
+		return TRACE_TYPE_PARTIAL_LINE;
+
+	switch (field->type) {
+	case HW_BREAKPOINT_WRITE:
+		ret = trace_seq_printf(s, " W  ");
+		break;
+	case HW_BREAKPOINT_RW:
+		ret = trace_seq_printf(s, " RW ");
+		break;
+	default:
+		return TRACE_TYPE_PARTIAL_LINE;
+	}
+
+	if (!ret)
+		return TRACE_TYPE_PARTIAL_LINE;
+
+	sprint_symbol(str, field->ip);
+	ret = trace_seq_printf(s, "%s\n", str);
+	if (!ret)
+		return TRACE_TYPE_PARTIAL_LINE;
+
+	return TRACE_TYPE_HANDLED;
+}
+
+struct tracer ksym_tracer __read_mostly =
+{
+	.name		= "ksym_tracer",
+	.init		= ksym_trace_init,
+	.reset		= ksym_trace_reset,
+#ifdef CONFIG_FTRACE_SELFTEST
+	.selftest	= trace_selftest_startup_ksym,
+#endif
+	.print_header	= ksym_trace_print_header,
+	.print_line	= ksym_trace_output
+};
+
+__init static int init_ksym_trace(void)
+{
+	struct dentry *d_tracer;
+	struct dentry *entry;
+
+	d_tracer = tracing_init_dentry();
+	ksym_filter_entry_count = 0;
+
+	entry = debugfs_create_file("ksym_trace_filter", 0644, d_tracer,
+				    NULL, &ksym_tracing_fops);
+	if (!entry)
+		pr_warning("Could not create debugfs "
+			   "'ksym_trace_filter' file\n");
+
+	return register_tracer(&ksym_tracer);
+}
+device_initcall(init_ksym_trace);
+
+
+#ifdef CONFIG_PROFILE_KSYM_TRACER
+static int ksym_tracer_stat_headers(struct seq_file *m)
+{
+	seq_puts(m, "  Access Type ");
+	seq_puts(m, "  Symbol                                 Counter\n");
+	seq_puts(m, "  ----------- ");
+	seq_puts(m, "  ------                                 -------\n");
+	return 0;
+}
+
+static int ksym_tracer_stat_show(struct seq_file *m, void *v)
+{
+	struct hlist_node *stat = v;
+	struct trace_ksym *entry;
+	int access_type = 0;
+	char fn_name[KSYM_NAME_LEN];
+
+	entry = hlist_entry(stat, struct trace_ksym, ksym_hlist);
+
+	if (entry->ksym_hbp)
+		access_type = entry->ksym_hbp->info.type;
+
+	switch (access_type) {
+	case HW_BREAKPOINT_WRITE:
+		seq_puts(m, "  W           ");
+		break;
+	case HW_BREAKPOINT_RW:
+		seq_puts(m, "  RW          ");
+		break;
+	default:
+		seq_puts(m, "  NA          ");
+	}
+
+	if (lookup_symbol_name(entry->ksym_addr, fn_name) >= 0)
+		seq_printf(m, "  %-36s", fn_name);
+	else
+		seq_printf(m, "  %-36s", "<NA>");
+	seq_printf(m, " %15lu\n", entry->counter);
+
+	return 0;
+}
+
+static void *ksym_tracer_stat_start(struct tracer_stat *trace)
+{
+	return ksym_filter_head.first;
+}
+
+static void *
+ksym_tracer_stat_next(void *v, int idx)
+{
+	struct hlist_node *stat = v;
+
+	return stat->next;
+}
+
+static struct tracer_stat ksym_tracer_stats = {
+	.name = "ksym_tracer",
+	.stat_start = ksym_tracer_stat_start,
+	.stat_next = ksym_tracer_stat_next,
+	.stat_headers = ksym_tracer_stat_headers,
+	.stat_show = ksym_tracer_stat_show
+};
+
+__init static int ksym_tracer_stat_init(void)
+{
+	int ret;
+
+	ret = register_stat_tracer(&ksym_tracer_stats);
+	if (ret) {
+		printk(KERN_WARNING "Warning: could not register "
+		       "ksym tracer stats\n");
+		return 1;
+	}
+
+	return 0;
+}
+fs_initcall(ksym_tracer_stat_init);
+#endif /* CONFIG_PROFILE_KSYM_TRACER */
diff --git a/kernel/trace/trace_selftest.c b/kernel/trace/trace_selftest.c
index d2cdbabb4ead..7179c12e4f0f 100644
--- a/kernel/trace/trace_selftest.c
+++ b/kernel/trace/trace_selftest.c
@@ -17,6 +17,7 @@ static inline int trace_valid_entry(struct trace_entry *entry)
 	case TRACE_GRAPH_ENT:
 	case TRACE_GRAPH_RET:
 	case TRACE_HW_BRANCHES:
+	case TRACE_KSYM:
 		return 1;
 	}
 	return 0;
@@ -808,3 +809,55 @@ trace_selftest_startup_hw_branches(struct tracer *trace,
 	return ret;
 }
 #endif /* CONFIG_HW_BRANCH_TRACER */
+
+#ifdef CONFIG_KSYM_TRACER
+static int ksym_selftest_dummy;
+
+int
+trace_selftest_startup_ksym(struct tracer *trace, struct trace_array *tr)
+{
+	unsigned long count;
+	int ret;
+
+	/* start the tracing */
+	ret = tracer_init(trace, tr);
+	if (ret) {
+		warn_failed_init_tracer(trace, ret);
+		return ret;
+	}
+
+	ksym_selftest_dummy = 0;
+	/* Register the read-write tracing request */
+	ret = process_new_ksym_entry(KSYM_SELFTEST_ENTRY, HW_BREAKPOINT_RW,
+					(unsigned long)(&ksym_selftest_dummy));
+
+	if (ret < 0) {
+		printk(KERN_CONT "ksym_trace read-write startup test failed\n");
+		goto ret_path;
+	}
+	/* Perform a read and a write operation over the dummy variable to
+	 * trigger the tracer
+	 */
+	if (ksym_selftest_dummy == 0)
+		ksym_selftest_dummy++;
+
+	/* stop the tracing. */
+	tracing_stop();
+	/* check the trace buffer */
+	ret = trace_test_buffer(tr, &count);
+	trace->reset(tr);
+	tracing_start();
+
+	/* read & write operations - one each is performed on the dummy variable
+	 * triggering two entries in the trace buffer
+	 */
+	if (!ret && count != 2) {
+		printk(KERN_CONT "Ksym tracer startup test failed");
+		ret = -1;
+	}
+
+ret_path:
+	return ret;
+}
+#endif /* CONFIG_KSYM_TRACER */
+
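
As a rough illustration of how the interface added by this patch might be exercised from user space, the sketch below selects the ksym_tracer, arms a read/write breakpoint through the new ksym_trace_filter debugfs file, and dumps the trace buffer. It is only a sketch under stated assumptions: debugfs mounted at /sys/kernel/debug, CONFIG_KSYM_TRACER enabled, and "pid_max" used purely as an example symbol from /proc/kallsyms; the op strings (rw-, -w-, ---) and the clear commands follow ksym_trace_get_access_type() and ksym_trace_filter_write() above.

/*
 * Hedged usage sketch (not part of the patch itself).
 * Assumptions: debugfs is mounted at /sys/kernel/debug, CONFIG_KSYM_TRACER=y,
 * and "pid_max" is an arbitrary example symbol to watch.
 */
#include <stdio.h>

static int write_file(const char *path, const char *val)
{
	FILE *f = fopen(path, "w");

	if (!f) {
		perror(path);
		return -1;
	}
	fputs(val, f);
	fclose(f);
	return 0;
}

int main(void)
{
	const char *tracing = "/sys/kernel/debug/tracing";
	char path[256], line[512];
	FILE *f;

	/* Select the tracer registered by this patch. */
	snprintf(path, sizeof(path), "%s/current_tracer", tracing);
	if (write_file(path, "ksym_tracer"))
		return 1;

	/* "<symbol>:rw-" requests a read/write breakpoint; "-w-" is
	 * write-only, and "" / "0" / "*:---" clear all breakpoints. */
	snprintf(path, sizeof(path), "%s/ksym_trace_filter", tracing);
	if (write_file(path, "pid_max:rw-"))
		return 1;

	/* ...let the system run for a while, then read the trace buffer. */
	snprintf(path, sizeof(path), "%s/trace", tracing);
	f = fopen(path, "r");
	if (!f) {
		perror(path);
		return 1;
	}
	while (fgets(line, sizeof(line), f))
		fputs(line, stdout);
	fclose(f);

	/* Clear the breakpoint again. */
	snprintf(path, sizeof(path), "%s/ksym_trace_filter", tracing);
	write_file(path, "0");
	return 0;
}

With CONFIG_PROFILE_KSYM_TRACER also enabled, per-symbol hit counts should additionally be visible through the profile file mentioned in the Kconfig help text above.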