author     Mathieu Desnoyers <mathieu.desnoyers@efficios.com>    2014-04-08 17:26:21 -0400
committer  Steven Rostedt <rostedt@goodmis.org>                  2014-04-08 20:43:28 -0400
commit     de7b2973903c6cc50b31ee5682a69b2219b9919d (patch)
tree       a5b9d78102854b0073f5893cafb29920f8fb55e1
parent     68114e5eb862ad0a7a261b91497281b026102715 (diff)
tracepoint: Use struct pointer instead of name hash for reg/unreg tracepoints
Register/unregister tracepoint probes with a struct tracepoint pointer
rather than the tracepoint name. This change, which vastly simplifies
tracepoint.c, was proposed by Steven Rostedt. It also removes 8.8 kB
(mostly text) from the vmlinux size.

From this point on, tracers need to pass a struct tracepoint pointer to
probe register/unregister. A probe can now only be connected to a
tracepoint that exists. Moreover, tracers are responsible for
unregistering the probe before the module containing its associated
tracepoint is unloaded.

    text     data      bss      dec     hex filename
10443444  4282528 10391552 25117524 17f4354 vmlinux.orig
10434930  4282848 10391552 25109330 17f2352 vmlinux

Link: http://lkml.kernel.org/r/1396992381-23785-2-git-send-email-mathieu.desnoyers@efficios.com

CC: Ingo Molnar <mingo@kernel.org>
CC: Frederic Weisbecker <fweisbec@gmail.com>
CC: Andrew Morton <akpm@linux-foundation.org>
CC: Frank Ch. Eigler <fche@redhat.com>
CC: Johannes Berg <johannes.berg@intel.com>
Signed-off-by: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
[ SDR - fixed return val in void func in tracepoint_module_going() ]
Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
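For callers that use the probe API directly (rather than the generated
register_trace_<name>() wrappers), the conversion is mechanical: pass the
struct tracepoint emitted by DEFINE_TRACE() instead of the name string. A
minimal sketch, assuming a hypothetical tracepoint "my_event" with a single
int argument (all names below are illustrative, not part of this patch):

#include <linux/tracepoint.h>

/* Probe signature must match the tracepoint prototype, with the private
 * data pointer prepended (here: TP_PROTO(int value)). */
static void my_probe(void *data, int value)
{
        /* ... consume the event ... */
}

static int my_tracer_attach(void)
{
        /*
         * Before this patch:
         *      tracepoint_probe_register("my_event", (void *)my_probe, NULL);
         * After this patch, pass the tracepoint structure itself:
         */
        return tracepoint_probe_register(&__tracepoint_my_event,
                                         (void *)my_probe, NULL);
}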
-rw-r--r--  include/linux/ftrace_event.h          22
-rw-r--r--  include/linux/tracepoint.h            41
-rw-r--r--  include/trace/ftrace.h                 9
-rw-r--r--  kernel/trace/trace_events.c           55
-rw-r--r--  kernel/trace/trace_events_trigger.c    2
-rw-r--r--  kernel/trace/trace_kprobe.c           21
-rw-r--r--  kernel/trace/trace_output.c            2
-rw-r--r--  kernel/trace/trace_uprobe.c           20
-rw-r--r--  kernel/tracepoint.c                  511
9 files changed, 331 insertions, 352 deletions
diff --git a/include/linux/ftrace_event.h b/include/linux/ftrace_event.h
index cdc30111d2f8..d16da3e53bc7 100644
--- a/include/linux/ftrace_event.h
+++ b/include/linux/ftrace_event.h
@@ -7,6 +7,7 @@
7#include <linux/percpu.h> 7#include <linux/percpu.h>
8#include <linux/hardirq.h> 8#include <linux/hardirq.h>
9#include <linux/perf_event.h> 9#include <linux/perf_event.h>
10#include <linux/tracepoint.h>
10 11
11struct trace_array; 12struct trace_array;
12struct trace_buffer; 13struct trace_buffer;
@@ -232,6 +233,7 @@ enum {
232 TRACE_EVENT_FL_IGNORE_ENABLE_BIT, 233 TRACE_EVENT_FL_IGNORE_ENABLE_BIT,
233 TRACE_EVENT_FL_WAS_ENABLED_BIT, 234 TRACE_EVENT_FL_WAS_ENABLED_BIT,
234 TRACE_EVENT_FL_USE_CALL_FILTER_BIT, 235 TRACE_EVENT_FL_USE_CALL_FILTER_BIT,
236 TRACE_EVENT_FL_TRACEPOINT_BIT,
235}; 237};
236 238
237/* 239/*
@@ -244,6 +246,7 @@ enum {
244 * (used for module unloading, if a module event is enabled, 246 * (used for module unloading, if a module event is enabled,
245 * it is best to clear the buffers that used it). 247 * it is best to clear the buffers that used it).
246 * USE_CALL_FILTER - For ftrace internal events, don't use file filter 248 * USE_CALL_FILTER - For ftrace internal events, don't use file filter
249 * TRACEPOINT - Event is a tracepoint
247 */ 250 */
248enum { 251enum {
249 TRACE_EVENT_FL_FILTERED = (1 << TRACE_EVENT_FL_FILTERED_BIT), 252 TRACE_EVENT_FL_FILTERED = (1 << TRACE_EVENT_FL_FILTERED_BIT),
@@ -252,12 +255,17 @@ enum {
252 TRACE_EVENT_FL_IGNORE_ENABLE = (1 << TRACE_EVENT_FL_IGNORE_ENABLE_BIT), 255 TRACE_EVENT_FL_IGNORE_ENABLE = (1 << TRACE_EVENT_FL_IGNORE_ENABLE_BIT),
253 TRACE_EVENT_FL_WAS_ENABLED = (1 << TRACE_EVENT_FL_WAS_ENABLED_BIT), 256 TRACE_EVENT_FL_WAS_ENABLED = (1 << TRACE_EVENT_FL_WAS_ENABLED_BIT),
254 TRACE_EVENT_FL_USE_CALL_FILTER = (1 << TRACE_EVENT_FL_USE_CALL_FILTER_BIT), 257 TRACE_EVENT_FL_USE_CALL_FILTER = (1 << TRACE_EVENT_FL_USE_CALL_FILTER_BIT),
258 TRACE_EVENT_FL_TRACEPOINT = (1 << TRACE_EVENT_FL_TRACEPOINT_BIT),
255}; 259};
256 260
257struct ftrace_event_call { 261struct ftrace_event_call {
258 struct list_head list; 262 struct list_head list;
259 struct ftrace_event_class *class; 263 struct ftrace_event_class *class;
260 char *name; 264 union {
265 char *name;
266 /* Set TRACE_EVENT_FL_TRACEPOINT flag when using "tp" */
267 struct tracepoint *tp;
268 };
261 struct trace_event event; 269 struct trace_event event;
262 const char *print_fmt; 270 const char *print_fmt;
263 struct event_filter *filter; 271 struct event_filter *filter;
@@ -271,6 +279,7 @@ struct ftrace_event_call {
271 * bit 3: ftrace internal event (do not enable) 279 * bit 3: ftrace internal event (do not enable)
272 * bit 4: Event was enabled by module 280 * bit 4: Event was enabled by module
273 * bit 5: use call filter rather than file filter 281 * bit 5: use call filter rather than file filter
282 * bit 6: Event is a tracepoint
274 */ 283 */
275 int flags; /* static flags of different events */ 284 int flags; /* static flags of different events */
276 285
@@ -283,6 +292,15 @@ struct ftrace_event_call {
283#endif 292#endif
284}; 293};
285 294
295static inline const char *
296ftrace_event_name(struct ftrace_event_call *call)
297{
298 if (call->flags & TRACE_EVENT_FL_TRACEPOINT)
299 return call->tp ? call->tp->name : NULL;
300 else
301 return call->name;
302}
303
286struct trace_array; 304struct trace_array;
287struct ftrace_subsystem_dir; 305struct ftrace_subsystem_dir;
288 306
@@ -353,7 +371,7 @@ struct ftrace_event_file {
353#define __TRACE_EVENT_FLAGS(name, value) \ 371#define __TRACE_EVENT_FLAGS(name, value) \
354 static int __init trace_init_flags_##name(void) \ 372 static int __init trace_init_flags_##name(void) \
355 { \ 373 { \
356 event_##name.flags = value; \ 374 event_##name.flags |= value; \
357 return 0; \ 375 return 0; \
358 } \ 376 } \
359 early_initcall(trace_init_flags_##name); 377 early_initcall(trace_init_flags_##name);
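With the name/tp union above, call->name is only valid when
TRACE_EVENT_FL_TRACEPOINT is clear (kprobe/uprobe-based events keep using
the name member), while tracepoint-backed events store the struct
tracepoint pointer instead. Call sites are therefore converted to the new
ftrace_event_name() accessor; a minimal usage sketch (the reporting
function is hypothetical):

#include <linux/ftrace_event.h>
#include <linux/printk.h>

static void report_event(struct ftrace_event_call *call)
{
        /* Works for both layouts: returns call->tp->name when the
         * TRACEPOINT flag is set, call->name otherwise. */
        const char *name = ftrace_event_name(call);

        if (name)
                pr_info("event %s, flags 0x%x\n", name, call->flags);
}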
diff --git a/include/linux/tracepoint.h b/include/linux/tracepoint.h
index 812b2553dfd8..08150e265761 100644
--- a/include/linux/tracepoint.h
+++ b/include/linux/tracepoint.h
@@ -6,7 +6,7 @@
6 * 6 *
7 * See Documentation/trace/tracepoints.txt. 7 * See Documentation/trace/tracepoints.txt.
8 * 8 *
9 * (C) Copyright 2008 Mathieu Desnoyers <mathieu.desnoyers@polymtl.ca> 9 * Copyright (C) 2008-2014 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
10 * 10 *
11 * Heavily inspired from the Linux Kernel Markers. 11 * Heavily inspired from the Linux Kernel Markers.
12 * 12 *
@@ -21,6 +21,7 @@
21 21
22struct module; 22struct module;
23struct tracepoint; 23struct tracepoint;
24struct notifier_block;
24 25
25struct tracepoint_func { 26struct tracepoint_func {
26 void *func; 27 void *func;
@@ -35,18 +36,13 @@ struct tracepoint {
35 struct tracepoint_func __rcu *funcs; 36 struct tracepoint_func __rcu *funcs;
36}; 37};
37 38
38/*
39 * Connect a probe to a tracepoint.
40 * Internal API, should not be used directly.
41 */
42extern int tracepoint_probe_register(const char *name, void *probe, void *data);
43
44/*
45 * Disconnect a probe from a tracepoint.
46 * Internal API, should not be used directly.
47 */
48extern int 39extern int
49tracepoint_probe_unregister(const char *name, void *probe, void *data); 40tracepoint_probe_register(struct tracepoint *tp, void *probe, void *data);
41extern int
42tracepoint_probe_unregister(struct tracepoint *tp, void *probe, void *data);
43extern void
44for_each_kernel_tracepoint(void (*fct)(struct tracepoint *tp, void *priv),
45 void *priv);
50 46
51#ifdef CONFIG_MODULES 47#ifdef CONFIG_MODULES
52struct tp_module { 48struct tp_module {
@@ -54,12 +50,25 @@ struct tp_module {
54 unsigned int num_tracepoints; 50 unsigned int num_tracepoints;
55 struct tracepoint * const *tracepoints_ptrs; 51 struct tracepoint * const *tracepoints_ptrs;
56}; 52};
53
57bool trace_module_has_bad_taint(struct module *mod); 54bool trace_module_has_bad_taint(struct module *mod);
55extern int register_tracepoint_module_notifier(struct notifier_block *nb);
56extern int unregister_tracepoint_module_notifier(struct notifier_block *nb);
58#else 57#else
59static inline bool trace_module_has_bad_taint(struct module *mod) 58static inline bool trace_module_has_bad_taint(struct module *mod)
60{ 59{
61 return false; 60 return false;
62} 61}
62static inline
63int register_tracepoint_module_notifier(struct notifier_block *nb)
64{
65 return 0;
66}
67static inline
68int unregister_tracepoint_module_notifier(struct notifier_block *nb)
69{
70 return 0;
71}
63#endif /* CONFIG_MODULES */ 72#endif /* CONFIG_MODULES */
64 73
65/* 74/*
@@ -160,14 +169,14 @@ static inline void tracepoint_synchronize_unregister(void)
160 static inline int \ 169 static inline int \
161 register_trace_##name(void (*probe)(data_proto), void *data) \ 170 register_trace_##name(void (*probe)(data_proto), void *data) \
162 { \ 171 { \
163 return tracepoint_probe_register(#name, (void *)probe, \ 172 return tracepoint_probe_register(&__tracepoint_##name, \
164 data); \ 173 (void *)probe, data); \
165 } \ 174 } \
166 static inline int \ 175 static inline int \
167 unregister_trace_##name(void (*probe)(data_proto), void *data) \ 176 unregister_trace_##name(void (*probe)(data_proto), void *data) \
168 { \ 177 { \
169 return tracepoint_probe_unregister(#name, (void *)probe, \ 178 return tracepoint_probe_unregister(&__tracepoint_##name,\
170 data); \ 179 (void *)probe, data); \
171 } \ 180 } \
172 static inline void \ 181 static inline void \
173 check_trace_callback_type_##name(void (*cb)(data_proto)) \ 182 check_trace_callback_type_##name(void (*cb)(data_proto)) \
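For most users nothing changes at the call site: DECLARE_TRACE() still
generates register_trace_<name>()/unregister_trace_<name>() wrappers, which
now forward &__tracepoint_<name> instead of the "#name" string. Roughly,
for a hypothetical tracepoint foo with TP_PROTO(int arg), the wrappers
shown in the hunk above expand to (illustrative expansion only):

static inline int
register_trace_foo(void (*probe)(void *__data, int arg), void *data)
{
        return tracepoint_probe_register(&__tracepoint_foo,
                                         (void *)probe, data);
}

static inline int
unregister_trace_foo(void (*probe)(void *__data, int arg), void *data)
{
        return tracepoint_probe_unregister(&__tracepoint_foo,
                                           (void *)probe, data);
}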
diff --git a/include/trace/ftrace.h b/include/trace/ftrace.h
index 8765126b328c..9c44c11cd9bb 100644
--- a/include/trace/ftrace.h
+++ b/include/trace/ftrace.h
@@ -470,10 +470,11 @@ static inline notrace int ftrace_get_offsets_##call( \
470 * }; 470 * };
471 * 471 *
472 * static struct ftrace_event_call event_<call> = { 472 * static struct ftrace_event_call event_<call> = {
473 * .name = "<call>", 473 * .tp = &__tracepoint_<call>,
474 * .class = event_class_<template>, 474 * .class = event_class_<template>,
475 * .event = &ftrace_event_type_<call>, 475 * .event = &ftrace_event_type_<call>,
476 * .print_fmt = print_fmt_<call>, 476 * .print_fmt = print_fmt_<call>,
477 * .flags = TRACE_EVENT_FL_TRACEPOINT,
477 * }; 478 * };
478 * // its only safe to use pointers when doing linker tricks to 479 * // its only safe to use pointers when doing linker tricks to
479 * // create an array. 480 * // create an array.
@@ -605,10 +606,11 @@ static struct ftrace_event_class __used __refdata event_class_##call = { \
605#define DEFINE_EVENT(template, call, proto, args) \ 606#define DEFINE_EVENT(template, call, proto, args) \
606 \ 607 \
607static struct ftrace_event_call __used event_##call = { \ 608static struct ftrace_event_call __used event_##call = { \
608 .name = #call, \ 609 .tp = &__tracepoint_##call, \
609 .class = &event_class_##template, \ 610 .class = &event_class_##template, \
610 .event.funcs = &ftrace_event_type_funcs_##template, \ 611 .event.funcs = &ftrace_event_type_funcs_##template, \
611 .print_fmt = print_fmt_##template, \ 612 .print_fmt = print_fmt_##template, \
613 .flags = TRACE_EVENT_FL_TRACEPOINT, \
612}; \ 614}; \
613static struct ftrace_event_call __used \ 615static struct ftrace_event_call __used \
614__attribute__((section("_ftrace_events"))) *__event_##call = &event_##call 616__attribute__((section("_ftrace_events"))) *__event_##call = &event_##call
@@ -619,10 +621,11 @@ __attribute__((section("_ftrace_events"))) *__event_##call = &event_##call
619static const char print_fmt_##call[] = print; \ 621static const char print_fmt_##call[] = print; \
620 \ 622 \
621static struct ftrace_event_call __used event_##call = { \ 623static struct ftrace_event_call __used event_##call = { \
622 .name = #call, \ 624 .tp = &__tracepoint_##call, \
623 .class = &event_class_##template, \ 625 .class = &event_class_##template, \
624 .event.funcs = &ftrace_event_type_funcs_##call, \ 626 .event.funcs = &ftrace_event_type_funcs_##call, \
625 .print_fmt = print_fmt_##call, \ 627 .print_fmt = print_fmt_##call, \
628 .flags = TRACE_EVENT_FL_TRACEPOINT, \
626}; \ 629}; \
627static struct ftrace_event_call __used \ 630static struct ftrace_event_call __used \
628__attribute__((section("_ftrace_events"))) *__event_##call = &event_##call 631__attribute__((section("_ftrace_events"))) *__event_##call = &event_##call
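After this change each DEFINE_EVENT() instance ties its ftrace_event_call
to the tracepoint rather than duplicating the name, and tags itself with
TRACE_EVENT_FL_TRACEPOINT so ftrace_event_name() knows which union member
is live. For an event foo built from template bar, the macro now emits
roughly (simplified sketch of the expansion):

static struct ftrace_event_call __used event_foo = {
        .tp           = &__tracepoint_foo,
        .class        = &event_class_bar,
        .event.funcs  = &ftrace_event_type_funcs_bar,
        .print_fmt    = print_fmt_bar,
        .flags        = TRACE_EVENT_FL_TRACEPOINT,
};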
diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
index 83a4378dc5e0..3ddfd8f62c05 100644
--- a/kernel/trace/trace_events.c
+++ b/kernel/trace/trace_events.c
@@ -223,24 +223,25 @@ int ftrace_event_reg(struct ftrace_event_call *call,
223{ 223{
224 struct ftrace_event_file *file = data; 224 struct ftrace_event_file *file = data;
225 225
226 WARN_ON(!(call->flags & TRACE_EVENT_FL_TRACEPOINT));
226 switch (type) { 227 switch (type) {
227 case TRACE_REG_REGISTER: 228 case TRACE_REG_REGISTER:
228 return tracepoint_probe_register(call->name, 229 return tracepoint_probe_register(call->tp,
229 call->class->probe, 230 call->class->probe,
230 file); 231 file);
231 case TRACE_REG_UNREGISTER: 232 case TRACE_REG_UNREGISTER:
232 tracepoint_probe_unregister(call->name, 233 tracepoint_probe_unregister(call->tp,
233 call->class->probe, 234 call->class->probe,
234 file); 235 file);
235 return 0; 236 return 0;
236 237
237#ifdef CONFIG_PERF_EVENTS 238#ifdef CONFIG_PERF_EVENTS
238 case TRACE_REG_PERF_REGISTER: 239 case TRACE_REG_PERF_REGISTER:
239 return tracepoint_probe_register(call->name, 240 return tracepoint_probe_register(call->tp,
240 call->class->perf_probe, 241 call->class->perf_probe,
241 call); 242 call);
242 case TRACE_REG_PERF_UNREGISTER: 243 case TRACE_REG_PERF_UNREGISTER:
243 tracepoint_probe_unregister(call->name, 244 tracepoint_probe_unregister(call->tp,
244 call->class->perf_probe, 245 call->class->perf_probe,
245 call); 246 call);
246 return 0; 247 return 0;
@@ -352,7 +353,7 @@ static int __ftrace_event_enable_disable(struct ftrace_event_file *file,
352 if (ret) { 353 if (ret) {
353 tracing_stop_cmdline_record(); 354 tracing_stop_cmdline_record();
354 pr_info("event trace: Could not enable event " 355 pr_info("event trace: Could not enable event "
355 "%s\n", call->name); 356 "%s\n", ftrace_event_name(call));
356 break; 357 break;
357 } 358 }
358 set_bit(FTRACE_EVENT_FL_ENABLED_BIT, &file->flags); 359 set_bit(FTRACE_EVENT_FL_ENABLED_BIT, &file->flags);
@@ -481,27 +482,29 @@ __ftrace_set_clr_event_nolock(struct trace_array *tr, const char *match,
481{ 482{
482 struct ftrace_event_file *file; 483 struct ftrace_event_file *file;
483 struct ftrace_event_call *call; 484 struct ftrace_event_call *call;
485 const char *name;
484 int ret = -EINVAL; 486 int ret = -EINVAL;
485 487
486 list_for_each_entry(file, &tr->events, list) { 488 list_for_each_entry(file, &tr->events, list) {
487 489
488 call = file->event_call; 490 call = file->event_call;
491 name = ftrace_event_name(call);
489 492
490 if (!call->name || !call->class || !call->class->reg) 493 if (!name || !call->class || !call->class->reg)
491 continue; 494 continue;
492 495
493 if (call->flags & TRACE_EVENT_FL_IGNORE_ENABLE) 496 if (call->flags & TRACE_EVENT_FL_IGNORE_ENABLE)
494 continue; 497 continue;
495 498
496 if (match && 499 if (match &&
497 strcmp(match, call->name) != 0 && 500 strcmp(match, name) != 0 &&
498 strcmp(match, call->class->system) != 0) 501 strcmp(match, call->class->system) != 0)
499 continue; 502 continue;
500 503
501 if (sub && strcmp(sub, call->class->system) != 0) 504 if (sub && strcmp(sub, call->class->system) != 0)
502 continue; 505 continue;
503 506
504 if (event && strcmp(event, call->name) != 0) 507 if (event && strcmp(event, name) != 0)
505 continue; 508 continue;
506 509
507 ftrace_event_enable_disable(file, set); 510 ftrace_event_enable_disable(file, set);
@@ -699,7 +702,7 @@ static int t_show(struct seq_file *m, void *v)
699 702
700 if (strcmp(call->class->system, TRACE_SYSTEM) != 0) 703 if (strcmp(call->class->system, TRACE_SYSTEM) != 0)
701 seq_printf(m, "%s:", call->class->system); 704 seq_printf(m, "%s:", call->class->system);
702 seq_printf(m, "%s\n", call->name); 705 seq_printf(m, "%s\n", ftrace_event_name(call));
703 706
704 return 0; 707 return 0;
705} 708}
@@ -792,7 +795,7 @@ system_enable_read(struct file *filp, char __user *ubuf, size_t cnt,
792 mutex_lock(&event_mutex); 795 mutex_lock(&event_mutex);
793 list_for_each_entry(file, &tr->events, list) { 796 list_for_each_entry(file, &tr->events, list) {
794 call = file->event_call; 797 call = file->event_call;
795 if (!call->name || !call->class || !call->class->reg) 798 if (!ftrace_event_name(call) || !call->class || !call->class->reg)
796 continue; 799 continue;
797 800
798 if (system && strcmp(call->class->system, system->name) != 0) 801 if (system && strcmp(call->class->system, system->name) != 0)
@@ -907,7 +910,7 @@ static int f_show(struct seq_file *m, void *v)
907 910
908 switch ((unsigned long)v) { 911 switch ((unsigned long)v) {
909 case FORMAT_HEADER: 912 case FORMAT_HEADER:
910 seq_printf(m, "name: %s\n", call->name); 913 seq_printf(m, "name: %s\n", ftrace_event_name(call));
911 seq_printf(m, "ID: %d\n", call->event.type); 914 seq_printf(m, "ID: %d\n", call->event.type);
912 seq_printf(m, "format:\n"); 915 seq_printf(m, "format:\n");
913 return 0; 916 return 0;
@@ -1527,6 +1530,7 @@ event_create_dir(struct dentry *parent, struct ftrace_event_file *file)
1527 struct trace_array *tr = file->tr; 1530 struct trace_array *tr = file->tr;
1528 struct list_head *head; 1531 struct list_head *head;
1529 struct dentry *d_events; 1532 struct dentry *d_events;
1533 const char *name;
1530 int ret; 1534 int ret;
1531 1535
1532 /* 1536 /*
@@ -1540,10 +1544,11 @@ event_create_dir(struct dentry *parent, struct ftrace_event_file *file)
1540 } else 1544 } else
1541 d_events = parent; 1545 d_events = parent;
1542 1546
1543 file->dir = debugfs_create_dir(call->name, d_events); 1547 name = ftrace_event_name(call);
1548 file->dir = debugfs_create_dir(name, d_events);
1544 if (!file->dir) { 1549 if (!file->dir) {
1545 pr_warning("Could not create debugfs '%s' directory\n", 1550 pr_warning("Could not create debugfs '%s' directory\n",
1546 call->name); 1551 name);
1547 return -1; 1552 return -1;
1548 } 1553 }
1549 1554
@@ -1567,7 +1572,7 @@ event_create_dir(struct dentry *parent, struct ftrace_event_file *file)
1567 ret = call->class->define_fields(call); 1572 ret = call->class->define_fields(call);
1568 if (ret < 0) { 1573 if (ret < 0) {
1569 pr_warning("Could not initialize trace point" 1574 pr_warning("Could not initialize trace point"
1570 " events/%s\n", call->name); 1575 " events/%s\n", name);
1571 return -1; 1576 return -1;
1572 } 1577 }
1573 } 1578 }
@@ -1631,15 +1636,17 @@ static void event_remove(struct ftrace_event_call *call)
1631static int event_init(struct ftrace_event_call *call) 1636static int event_init(struct ftrace_event_call *call)
1632{ 1637{
1633 int ret = 0; 1638 int ret = 0;
1639 const char *name;
1634 1640
1635 if (WARN_ON(!call->name)) 1641 name = ftrace_event_name(call);
1642 if (WARN_ON(!name))
1636 return -EINVAL; 1643 return -EINVAL;
1637 1644
1638 if (call->class->raw_init) { 1645 if (call->class->raw_init) {
1639 ret = call->class->raw_init(call); 1646 ret = call->class->raw_init(call);
1640 if (ret < 0 && ret != -ENOSYS) 1647 if (ret < 0 && ret != -ENOSYS)
1641 pr_warn("Could not initialize trace events/%s\n", 1648 pr_warn("Could not initialize trace events/%s\n",
1642 call->name); 1649 name);
1643 } 1650 }
1644 1651
1645 return ret; 1652 return ret;
@@ -1885,7 +1892,7 @@ __trace_add_event_dirs(struct trace_array *tr)
1885 ret = __trace_add_new_event(call, tr); 1892 ret = __trace_add_new_event(call, tr);
1886 if (ret < 0) 1893 if (ret < 0)
1887 pr_warning("Could not create directory for event %s\n", 1894 pr_warning("Could not create directory for event %s\n",
1888 call->name); 1895 ftrace_event_name(call));
1889 } 1896 }
1890} 1897}
1891 1898
@@ -1894,18 +1901,20 @@ find_event_file(struct trace_array *tr, const char *system, const char *event)
1894{ 1901{
1895 struct ftrace_event_file *file; 1902 struct ftrace_event_file *file;
1896 struct ftrace_event_call *call; 1903 struct ftrace_event_call *call;
1904 const char *name;
1897 1905
1898 list_for_each_entry(file, &tr->events, list) { 1906 list_for_each_entry(file, &tr->events, list) {
1899 1907
1900 call = file->event_call; 1908 call = file->event_call;
1909 name = ftrace_event_name(call);
1901 1910
1902 if (!call->name || !call->class || !call->class->reg) 1911 if (!name || !call->class || !call->class->reg)
1903 continue; 1912 continue;
1904 1913
1905 if (call->flags & TRACE_EVENT_FL_IGNORE_ENABLE) 1914 if (call->flags & TRACE_EVENT_FL_IGNORE_ENABLE)
1906 continue; 1915 continue;
1907 1916
1908 if (strcmp(event, call->name) == 0 && 1917 if (strcmp(event, name) == 0 &&
1909 strcmp(system, call->class->system) == 0) 1918 strcmp(system, call->class->system) == 0)
1910 return file; 1919 return file;
1911 } 1920 }
@@ -1973,7 +1982,7 @@ event_enable_print(struct seq_file *m, unsigned long ip,
1973 seq_printf(m, "%s:%s:%s", 1982 seq_printf(m, "%s:%s:%s",
1974 data->enable ? ENABLE_EVENT_STR : DISABLE_EVENT_STR, 1983 data->enable ? ENABLE_EVENT_STR : DISABLE_EVENT_STR,
1975 data->file->event_call->class->system, 1984 data->file->event_call->class->system,
1976 data->file->event_call->name); 1985 ftrace_event_name(data->file->event_call));
1977 1986
1978 if (data->count == -1) 1987 if (data->count == -1)
1979 seq_printf(m, ":unlimited\n"); 1988 seq_printf(m, ":unlimited\n");
@@ -2193,7 +2202,7 @@ __trace_early_add_event_dirs(struct trace_array *tr)
2193 ret = event_create_dir(tr->event_dir, file); 2202 ret = event_create_dir(tr->event_dir, file);
2194 if (ret < 0) 2203 if (ret < 0)
2195 pr_warning("Could not create directory for event %s\n", 2204 pr_warning("Could not create directory for event %s\n",
2196 file->event_call->name); 2205 ftrace_event_name(file->event_call));
2197 } 2206 }
2198} 2207}
2199 2208
@@ -2217,7 +2226,7 @@ __trace_early_add_events(struct trace_array *tr)
2217 ret = __trace_early_add_new_event(call, tr); 2226 ret = __trace_early_add_new_event(call, tr);
2218 if (ret < 0) 2227 if (ret < 0)
2219 pr_warning("Could not create early event %s\n", 2228 pr_warning("Could not create early event %s\n",
2220 call->name); 2229 ftrace_event_name(call));
2221 } 2230 }
2222} 2231}
2223 2232
@@ -2549,7 +2558,7 @@ static __init void event_trace_self_tests(void)
2549 continue; 2558 continue;
2550#endif 2559#endif
2551 2560
2552 pr_info("Testing event %s: ", call->name); 2561 pr_info("Testing event %s: ", ftrace_event_name(call));
2553 2562
2554 /* 2563 /*
2555 * If an event is already enabled, someone is using 2564 * If an event is already enabled, someone is using
diff --git a/kernel/trace/trace_events_trigger.c b/kernel/trace/trace_events_trigger.c
index 8efbb69b04f0..925f537f07d1 100644
--- a/kernel/trace/trace_events_trigger.c
+++ b/kernel/trace/trace_events_trigger.c
@@ -1095,7 +1095,7 @@ event_enable_trigger_print(struct seq_file *m, struct event_trigger_ops *ops,
1095 seq_printf(m, "%s:%s:%s", 1095 seq_printf(m, "%s:%s:%s",
1096 enable_data->enable ? ENABLE_EVENT_STR : DISABLE_EVENT_STR, 1096 enable_data->enable ? ENABLE_EVENT_STR : DISABLE_EVENT_STR,
1097 enable_data->file->event_call->class->system, 1097 enable_data->file->event_call->class->system,
1098 enable_data->file->event_call->name); 1098 ftrace_event_name(enable_data->file->event_call));
1099 1099
1100 if (data->count == -1) 1100 if (data->count == -1)
1101 seq_puts(m, ":unlimited"); 1101 seq_puts(m, ":unlimited");
diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c
index d021d21dd150..903ae28962be 100644
--- a/kernel/trace/trace_kprobe.c
+++ b/kernel/trace/trace_kprobe.c
@@ -341,7 +341,7 @@ static struct trace_kprobe *find_trace_kprobe(const char *event,
341 struct trace_kprobe *tk; 341 struct trace_kprobe *tk;
342 342
343 list_for_each_entry(tk, &probe_list, list) 343 list_for_each_entry(tk, &probe_list, list)
344 if (strcmp(tk->tp.call.name, event) == 0 && 344 if (strcmp(ftrace_event_name(&tk->tp.call), event) == 0 &&
345 strcmp(tk->tp.call.class->system, group) == 0) 345 strcmp(tk->tp.call.class->system, group) == 0)
346 return tk; 346 return tk;
347 return NULL; 347 return NULL;
@@ -516,7 +516,8 @@ static int register_trace_kprobe(struct trace_kprobe *tk)
516 mutex_lock(&probe_lock); 516 mutex_lock(&probe_lock);
517 517
518 /* Delete old (same name) event if exist */ 518 /* Delete old (same name) event if exist */
519 old_tk = find_trace_kprobe(tk->tp.call.name, tk->tp.call.class->system); 519 old_tk = find_trace_kprobe(ftrace_event_name(&tk->tp.call),
520 tk->tp.call.class->system);
520 if (old_tk) { 521 if (old_tk) {
521 ret = unregister_trace_kprobe(old_tk); 522 ret = unregister_trace_kprobe(old_tk);
522 if (ret < 0) 523 if (ret < 0)
@@ -564,7 +565,8 @@ static int trace_kprobe_module_callback(struct notifier_block *nb,
564 if (ret) 565 if (ret)
565 pr_warning("Failed to re-register probe %s on" 566 pr_warning("Failed to re-register probe %s on"
566 "%s: %d\n", 567 "%s: %d\n",
567 tk->tp.call.name, mod->name, ret); 568 ftrace_event_name(&tk->tp.call),
569 mod->name, ret);
568 } 570 }
569 } 571 }
570 mutex_unlock(&probe_lock); 572 mutex_unlock(&probe_lock);
@@ -818,7 +820,8 @@ static int probes_seq_show(struct seq_file *m, void *v)
818 int i; 820 int i;
819 821
820 seq_printf(m, "%c", trace_kprobe_is_return(tk) ? 'r' : 'p'); 822 seq_printf(m, "%c", trace_kprobe_is_return(tk) ? 'r' : 'p');
821 seq_printf(m, ":%s/%s", tk->tp.call.class->system, tk->tp.call.name); 823 seq_printf(m, ":%s/%s", tk->tp.call.class->system,
824 ftrace_event_name(&tk->tp.call));
822 825
823 if (!tk->symbol) 826 if (!tk->symbol)
824 seq_printf(m, " 0x%p", tk->rp.kp.addr); 827 seq_printf(m, " 0x%p", tk->rp.kp.addr);
@@ -876,7 +879,8 @@ static int probes_profile_seq_show(struct seq_file *m, void *v)
876{ 879{
877 struct trace_kprobe *tk = v; 880 struct trace_kprobe *tk = v;
878 881
879 seq_printf(m, " %-44s %15lu %15lu\n", tk->tp.call.name, tk->nhit, 882 seq_printf(m, " %-44s %15lu %15lu\n",
883 ftrace_event_name(&tk->tp.call), tk->nhit,
880 tk->rp.kp.nmissed); 884 tk->rp.kp.nmissed);
881 885
882 return 0; 886 return 0;
@@ -1011,7 +1015,7 @@ print_kprobe_event(struct trace_iterator *iter, int flags,
1011 field = (struct kprobe_trace_entry_head *)iter->ent; 1015 field = (struct kprobe_trace_entry_head *)iter->ent;
1012 tp = container_of(event, struct trace_probe, call.event); 1016 tp = container_of(event, struct trace_probe, call.event);
1013 1017
1014 if (!trace_seq_printf(s, "%s: (", tp->call.name)) 1018 if (!trace_seq_printf(s, "%s: (", ftrace_event_name(&tp->call)))
1015 goto partial; 1019 goto partial;
1016 1020
1017 if (!seq_print_ip_sym(s, field->ip, flags | TRACE_ITER_SYM_OFFSET)) 1021 if (!seq_print_ip_sym(s, field->ip, flags | TRACE_ITER_SYM_OFFSET))
@@ -1047,7 +1051,7 @@ print_kretprobe_event(struct trace_iterator *iter, int flags,
1047 field = (struct kretprobe_trace_entry_head *)iter->ent; 1051 field = (struct kretprobe_trace_entry_head *)iter->ent;
1048 tp = container_of(event, struct trace_probe, call.event); 1052 tp = container_of(event, struct trace_probe, call.event);
1049 1053
1050 if (!trace_seq_printf(s, "%s: (", tp->call.name)) 1054 if (!trace_seq_printf(s, "%s: (", ftrace_event_name(&tp->call)))
1051 goto partial; 1055 goto partial;
1052 1056
1053 if (!seq_print_ip_sym(s, field->ret_ip, flags | TRACE_ITER_SYM_OFFSET)) 1057 if (!seq_print_ip_sym(s, field->ret_ip, flags | TRACE_ITER_SYM_OFFSET))
@@ -1286,7 +1290,8 @@ static int register_kprobe_event(struct trace_kprobe *tk)
1286 call->data = tk; 1290 call->data = tk;
1287 ret = trace_add_event_call(call); 1291 ret = trace_add_event_call(call);
1288 if (ret) { 1292 if (ret) {
1289 pr_info("Failed to register kprobe event: %s\n", call->name); 1293 pr_info("Failed to register kprobe event: %s\n",
1294 ftrace_event_name(call));
1290 kfree(call->print_fmt); 1295 kfree(call->print_fmt);
1291 unregister_ftrace_event(&call->event); 1296 unregister_ftrace_event(&call->event);
1292 } 1297 }
diff --git a/kernel/trace/trace_output.c b/kernel/trace/trace_output.c
index ca0e79e2abaa..a436de18aa99 100644
--- a/kernel/trace/trace_output.c
+++ b/kernel/trace/trace_output.c
@@ -431,7 +431,7 @@ int ftrace_raw_output_prep(struct trace_iterator *iter,
431 } 431 }
432 432
433 trace_seq_init(p); 433 trace_seq_init(p);
434 ret = trace_seq_printf(s, "%s: ", event->name); 434 ret = trace_seq_printf(s, "%s: ", ftrace_event_name(event));
435 if (!ret) 435 if (!ret)
436 return TRACE_TYPE_PARTIAL_LINE; 436 return TRACE_TYPE_PARTIAL_LINE;
437 437
diff --git a/kernel/trace/trace_uprobe.c b/kernel/trace/trace_uprobe.c
index e4473367e7a4..930e51462dc8 100644
--- a/kernel/trace/trace_uprobe.c
+++ b/kernel/trace/trace_uprobe.c
@@ -294,7 +294,7 @@ static struct trace_uprobe *find_probe_event(const char *event, const char *grou
294 struct trace_uprobe *tu; 294 struct trace_uprobe *tu;
295 295
296 list_for_each_entry(tu, &uprobe_list, list) 296 list_for_each_entry(tu, &uprobe_list, list)
297 if (strcmp(tu->tp.call.name, event) == 0 && 297 if (strcmp(ftrace_event_name(&tu->tp.call), event) == 0 &&
298 strcmp(tu->tp.call.class->system, group) == 0) 298 strcmp(tu->tp.call.class->system, group) == 0)
299 return tu; 299 return tu;
300 300
@@ -324,7 +324,8 @@ static int register_trace_uprobe(struct trace_uprobe *tu)
324 mutex_lock(&uprobe_lock); 324 mutex_lock(&uprobe_lock);
325 325
326 /* register as an event */ 326 /* register as an event */
327 old_tu = find_probe_event(tu->tp.call.name, tu->tp.call.class->system); 327 old_tu = find_probe_event(ftrace_event_name(&tu->tp.call),
328 tu->tp.call.class->system);
328 if (old_tu) { 329 if (old_tu) {
329 /* delete old event */ 330 /* delete old event */
330 ret = unregister_trace_uprobe(old_tu); 331 ret = unregister_trace_uprobe(old_tu);
@@ -599,7 +600,8 @@ static int probes_seq_show(struct seq_file *m, void *v)
599 char c = is_ret_probe(tu) ? 'r' : 'p'; 600 char c = is_ret_probe(tu) ? 'r' : 'p';
600 int i; 601 int i;
601 602
602 seq_printf(m, "%c:%s/%s", c, tu->tp.call.class->system, tu->tp.call.name); 603 seq_printf(m, "%c:%s/%s", c, tu->tp.call.class->system,
604 ftrace_event_name(&tu->tp.call));
603 seq_printf(m, " %s:0x%p", tu->filename, (void *)tu->offset); 605 seq_printf(m, " %s:0x%p", tu->filename, (void *)tu->offset);
604 606
605 for (i = 0; i < tu->tp.nr_args; i++) 607 for (i = 0; i < tu->tp.nr_args; i++)
@@ -649,7 +651,8 @@ static int probes_profile_seq_show(struct seq_file *m, void *v)
649{ 651{
650 struct trace_uprobe *tu = v; 652 struct trace_uprobe *tu = v;
651 653
652 seq_printf(m, " %s %-44s %15lu\n", tu->filename, tu->tp.call.name, tu->nhit); 654 seq_printf(m, " %s %-44s %15lu\n", tu->filename,
655 ftrace_event_name(&tu->tp.call), tu->nhit);
653 return 0; 656 return 0;
654} 657}
655 658
@@ -844,12 +847,14 @@ print_uprobe_event(struct trace_iterator *iter, int flags, struct trace_event *e
844 tu = container_of(event, struct trace_uprobe, tp.call.event); 847 tu = container_of(event, struct trace_uprobe, tp.call.event);
845 848
846 if (is_ret_probe(tu)) { 849 if (is_ret_probe(tu)) {
847 if (!trace_seq_printf(s, "%s: (0x%lx <- 0x%lx)", tu->tp.call.name, 850 if (!trace_seq_printf(s, "%s: (0x%lx <- 0x%lx)",
851 ftrace_event_name(&tu->tp.call),
848 entry->vaddr[1], entry->vaddr[0])) 852 entry->vaddr[1], entry->vaddr[0]))
849 goto partial; 853 goto partial;
850 data = DATAOF_TRACE_ENTRY(entry, true); 854 data = DATAOF_TRACE_ENTRY(entry, true);
851 } else { 855 } else {
852 if (!trace_seq_printf(s, "%s: (0x%lx)", tu->tp.call.name, 856 if (!trace_seq_printf(s, "%s: (0x%lx)",
857 ftrace_event_name(&tu->tp.call),
853 entry->vaddr[0])) 858 entry->vaddr[0]))
854 goto partial; 859 goto partial;
855 data = DATAOF_TRACE_ENTRY(entry, false); 860 data = DATAOF_TRACE_ENTRY(entry, false);
@@ -1275,7 +1280,8 @@ static int register_uprobe_event(struct trace_uprobe *tu)
1275 ret = trace_add_event_call(call); 1280 ret = trace_add_event_call(call);
1276 1281
1277 if (ret) { 1282 if (ret) {
1278 pr_info("Failed to register uprobe event: %s\n", call->name); 1283 pr_info("Failed to register uprobe event: %s\n",
1284 ftrace_event_name(call));
1279 kfree(call->print_fmt); 1285 kfree(call->print_fmt);
1280 unregister_ftrace_event(&call->event); 1286 unregister_ftrace_event(&call->event);
1281 } 1287 }
diff --git a/kernel/tracepoint.c b/kernel/tracepoint.c
index 50f8329c2042..01b3bd84daa1 100644
--- a/kernel/tracepoint.c
+++ b/kernel/tracepoint.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (C) 2008 Mathieu Desnoyers 2 * Copyright (C) 2008-2014 Mathieu Desnoyers
3 * 3 *
4 * This program is free software; you can redistribute it and/or modify 4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by 5 * it under the terms of the GNU General Public License as published by
@@ -33,39 +33,27 @@ extern struct tracepoint * const __stop___tracepoints_ptrs[];
33/* Set to 1 to enable tracepoint debug output */ 33/* Set to 1 to enable tracepoint debug output */
34static const int tracepoint_debug; 34static const int tracepoint_debug;
35 35
36#ifdef CONFIG_MODULES
36/* 37/*
37 * Tracepoints mutex protects the builtin and module tracepoints and the hash 38 * Tracepoint module list mutex protects the local module list.
38 * table, as well as the local module list.
39 */ 39 */
40static DEFINE_MUTEX(tracepoints_mutex); 40static DEFINE_MUTEX(tracepoint_module_list_mutex);
41 41
42#ifdef CONFIG_MODULES 42/* Local list of struct tp_module */
43/* Local list of struct module */
44static LIST_HEAD(tracepoint_module_list); 43static LIST_HEAD(tracepoint_module_list);
45#endif /* CONFIG_MODULES */ 44#endif /* CONFIG_MODULES */
46 45
47/* 46/*
48 * Tracepoint hash table, containing the active tracepoints. 47 * tracepoints_mutex protects the builtin and module tracepoints.
49 * Protected by tracepoints_mutex. 48 * tracepoints_mutex nests inside tracepoint_module_list_mutex.
50 */ 49 */
51#define TRACEPOINT_HASH_BITS 6 50static DEFINE_MUTEX(tracepoints_mutex);
52#define TRACEPOINT_TABLE_SIZE (1 << TRACEPOINT_HASH_BITS)
53static struct hlist_head tracepoint_table[TRACEPOINT_TABLE_SIZE];
54 51
55/* 52/*
56 * Note about RCU : 53 * Note about RCU :
57 * It is used to delay the free of multiple probes array until a quiescent 54 * It is used to delay the free of multiple probes array until a quiescent
58 * state is reached. 55 * state is reached.
59 * Tracepoint entries modifications are protected by the tracepoints_mutex.
60 */ 56 */
61struct tracepoint_entry {
62 struct hlist_node hlist;
63 struct tracepoint_func *funcs;
64 int refcount; /* Number of times armed. 0 if disarmed. */
65 int enabled; /* Tracepoint enabled */
66 char name[0];
67};
68
69struct tp_probes { 57struct tp_probes {
70 struct rcu_head rcu; 58 struct rcu_head rcu;
71 struct tracepoint_func probes[0]; 59 struct tracepoint_func probes[0];
@@ -92,34 +80,33 @@ static inline void release_probes(struct tracepoint_func *old)
92 } 80 }
93} 81}
94 82
95static void debug_print_probes(struct tracepoint_entry *entry) 83static void debug_print_probes(struct tracepoint_func *funcs)
96{ 84{
97 int i; 85 int i;
98 86
99 if (!tracepoint_debug || !entry->funcs) 87 if (!tracepoint_debug || !funcs)
100 return; 88 return;
101 89
102 for (i = 0; entry->funcs[i].func; i++) 90 for (i = 0; funcs[i].func; i++)
103 printk(KERN_DEBUG "Probe %d : %p\n", i, entry->funcs[i].func); 91 printk(KERN_DEBUG "Probe %d : %p\n", i, funcs[i].func);
104} 92}
105 93
106static struct tracepoint_func * 94static struct tracepoint_func *func_add(struct tracepoint_func **funcs,
107tracepoint_entry_add_probe(struct tracepoint_entry *entry, 95 struct tracepoint_func *tp_func)
108 void *probe, void *data)
109{ 96{
110 int nr_probes = 0; 97 int nr_probes = 0;
111 struct tracepoint_func *old, *new; 98 struct tracepoint_func *old, *new;
112 99
113 if (WARN_ON(!probe)) 100 if (WARN_ON(!tp_func->func))
114 return ERR_PTR(-EINVAL); 101 return ERR_PTR(-EINVAL);
115 102
116 debug_print_probes(entry); 103 debug_print_probes(*funcs);
117 old = entry->funcs; 104 old = *funcs;
118 if (old) { 105 if (old) {
119 /* (N -> N+1), (N != 0, 1) probes */ 106 /* (N -> N+1), (N != 0, 1) probes */
120 for (nr_probes = 0; old[nr_probes].func; nr_probes++) 107 for (nr_probes = 0; old[nr_probes].func; nr_probes++)
121 if (old[nr_probes].func == probe && 108 if (old[nr_probes].func == tp_func->func &&
122 old[nr_probes].data == data) 109 old[nr_probes].data == tp_func->data)
123 return ERR_PTR(-EEXIST); 110 return ERR_PTR(-EEXIST);
124 } 111 }
125 /* + 2 : one for new probe, one for NULL func */ 112 /* + 2 : one for new probe, one for NULL func */
@@ -128,33 +115,30 @@ tracepoint_entry_add_probe(struct tracepoint_entry *entry,
128 return ERR_PTR(-ENOMEM); 115 return ERR_PTR(-ENOMEM);
129 if (old) 116 if (old)
130 memcpy(new, old, nr_probes * sizeof(struct tracepoint_func)); 117 memcpy(new, old, nr_probes * sizeof(struct tracepoint_func));
131 new[nr_probes].func = probe; 118 new[nr_probes] = *tp_func;
132 new[nr_probes].data = data;
133 new[nr_probes + 1].func = NULL; 119 new[nr_probes + 1].func = NULL;
134 entry->refcount = nr_probes + 1; 120 *funcs = new;
135 entry->funcs = new; 121 debug_print_probes(*funcs);
136 debug_print_probes(entry);
137 return old; 122 return old;
138} 123}
139 124
140static void * 125static void *func_remove(struct tracepoint_func **funcs,
141tracepoint_entry_remove_probe(struct tracepoint_entry *entry, 126 struct tracepoint_func *tp_func)
142 void *probe, void *data)
143{ 127{
144 int nr_probes = 0, nr_del = 0, i; 128 int nr_probes = 0, nr_del = 0, i;
145 struct tracepoint_func *old, *new; 129 struct tracepoint_func *old, *new;
146 130
147 old = entry->funcs; 131 old = *funcs;
148 132
149 if (!old) 133 if (!old)
150 return ERR_PTR(-ENOENT); 134 return ERR_PTR(-ENOENT);
151 135
152 debug_print_probes(entry); 136 debug_print_probes(*funcs);
153 /* (N -> M), (N > 1, M >= 0) probes */ 137 /* (N -> M), (N > 1, M >= 0) probes */
154 if (probe) { 138 if (tp_func->func) {
155 for (nr_probes = 0; old[nr_probes].func; nr_probes++) { 139 for (nr_probes = 0; old[nr_probes].func; nr_probes++) {
156 if (old[nr_probes].func == probe && 140 if (old[nr_probes].func == tp_func->func &&
157 old[nr_probes].data == data) 141 old[nr_probes].data == tp_func->data)
158 nr_del++; 142 nr_del++;
159 } 143 }
160 } 144 }
@@ -165,9 +149,8 @@ tracepoint_entry_remove_probe(struct tracepoint_entry *entry,
165 */ 149 */
166 if (nr_probes - nr_del == 0) { 150 if (nr_probes - nr_del == 0) {
167 /* N -> 0, (N > 1) */ 151 /* N -> 0, (N > 1) */
168 entry->funcs = NULL; 152 *funcs = NULL;
169 entry->refcount = 0; 153 debug_print_probes(*funcs);
170 debug_print_probes(entry);
171 return old; 154 return old;
172 } else { 155 } else {
173 int j = 0; 156 int j = 0;
@@ -177,91 +160,34 @@ tracepoint_entry_remove_probe(struct tracepoint_entry *entry,
177 if (new == NULL) 160 if (new == NULL)
178 return ERR_PTR(-ENOMEM); 161 return ERR_PTR(-ENOMEM);
179 for (i = 0; old[i].func; i++) 162 for (i = 0; old[i].func; i++)
180 if (old[i].func != probe || old[i].data != data) 163 if (old[i].func != tp_func->func
164 || old[i].data != tp_func->data)
181 new[j++] = old[i]; 165 new[j++] = old[i];
182 new[nr_probes - nr_del].func = NULL; 166 new[nr_probes - nr_del].func = NULL;
183 entry->refcount = nr_probes - nr_del; 167 *funcs = new;
184 entry->funcs = new;
185 } 168 }
186 debug_print_probes(entry); 169 debug_print_probes(*funcs);
187 return old; 170 return old;
188} 171}
189 172
190/* 173/*
191 * Get tracepoint if the tracepoint is present in the tracepoint hash table. 174 * Add the probe function to a tracepoint.
192 * Must be called with tracepoints_mutex held.
193 * Returns NULL if not present.
194 */ 175 */
195static struct tracepoint_entry *get_tracepoint(const char *name) 176static int tracepoint_add_func(struct tracepoint *tp,
177 struct tracepoint_func *func)
196{ 178{
197 struct hlist_head *head; 179 struct tracepoint_func *old, *tp_funcs;
198 struct tracepoint_entry *e;
199 u32 hash = jhash(name, strlen(name), 0);
200
201 head = &tracepoint_table[hash & (TRACEPOINT_TABLE_SIZE - 1)];
202 hlist_for_each_entry(e, head, hlist) {
203 if (!strcmp(name, e->name))
204 return e;
205 }
206 return NULL;
207}
208 180
209/* 181 if (tp->regfunc && !static_key_enabled(&tp->key))
210 * Add the tracepoint to the tracepoint hash table. Must be called with 182 tp->regfunc();
211 * tracepoints_mutex held.
212 */
213static struct tracepoint_entry *add_tracepoint(const char *name)
214{
215 struct hlist_head *head;
216 struct tracepoint_entry *e;
217 size_t name_len = strlen(name) + 1;
218 u32 hash = jhash(name, name_len-1, 0);
219
220 head = &tracepoint_table[hash & (TRACEPOINT_TABLE_SIZE - 1)];
221 hlist_for_each_entry(e, head, hlist) {
222 if (!strcmp(name, e->name)) {
223 printk(KERN_NOTICE
224 "tracepoint %s busy\n", name);
225 return ERR_PTR(-EEXIST); /* Already there */
226 }
227 }
228 /*
229 * Using kmalloc here to allocate a variable length element. Could
230 * cause some memory fragmentation if overused.
231 */
232 e = kmalloc(sizeof(struct tracepoint_entry) + name_len, GFP_KERNEL);
233 if (!e)
234 return ERR_PTR(-ENOMEM);
235 memcpy(&e->name[0], name, name_len);
236 e->funcs = NULL;
237 e->refcount = 0;
238 e->enabled = 0;
239 hlist_add_head(&e->hlist, head);
240 return e;
241}
242 183
243/* 184 tp_funcs = tp->funcs;
244 * Remove the tracepoint from the tracepoint hash table. Must be called with 185 old = func_add(&tp_funcs, func);
245 * mutex_lock held. 186 if (IS_ERR(old)) {
246 */ 187 WARN_ON_ONCE(1);
247static inline void remove_tracepoint(struct tracepoint_entry *e) 188 return PTR_ERR(old);
248{ 189 }
249 hlist_del(&e->hlist); 190 release_probes(old);
250 kfree(e);
251}
252
253/*
254 * Sets the probe callback corresponding to one tracepoint.
255 */
256static void set_tracepoint(struct tracepoint_entry **entry,
257 struct tracepoint *elem, int active)
258{
259 WARN_ON(strcmp((*entry)->name, elem->name) != 0);
260
261 if (elem->regfunc && !static_key_enabled(&elem->key) && active)
262 elem->regfunc();
263 else if (elem->unregfunc && static_key_enabled(&elem->key) && !active)
264 elem->unregfunc();
265 191
266 /* 192 /*
267 * rcu_assign_pointer has a smp_wmb() which makes sure that the new 193 * rcu_assign_pointer has a smp_wmb() which makes sure that the new
@@ -270,199 +196,163 @@ static void set_tracepoint(struct tracepoint_entry **entry,
270 * include/linux/tracepoints.h. A matching smp_read_barrier_depends() 196 * include/linux/tracepoints.h. A matching smp_read_barrier_depends()
271 * is used. 197 * is used.
272 */ 198 */
273 rcu_assign_pointer(elem->funcs, (*entry)->funcs); 199 rcu_assign_pointer(tp->funcs, tp_funcs);
274 if (active && !static_key_enabled(&elem->key)) 200 if (!static_key_enabled(&tp->key))
275 static_key_slow_inc(&elem->key); 201 static_key_slow_inc(&tp->key);
276 else if (!active && static_key_enabled(&elem->key)) 202 return 0;
277 static_key_slow_dec(&elem->key);
278} 203}
279 204
280/* 205/*
281 * Disable a tracepoint and its probe callback. 206 * Remove a probe function from a tracepoint.
282 * Note: only waiting an RCU period after setting elem->call to the empty 207 * Note: only waiting an RCU period after setting elem->call to the empty
283 * function insures that the original callback is not used anymore. This insured 208 * function insures that the original callback is not used anymore. This insured
284 * by preempt_disable around the call site. 209 * by preempt_disable around the call site.
285 */ 210 */
286static void disable_tracepoint(struct tracepoint *elem) 211static int tracepoint_remove_func(struct tracepoint *tp,
212 struct tracepoint_func *func)
287{ 213{
288 if (elem->unregfunc && static_key_enabled(&elem->key)) 214 struct tracepoint_func *old, *tp_funcs;
289 elem->unregfunc();
290
291 if (static_key_enabled(&elem->key))
292 static_key_slow_dec(&elem->key);
293 rcu_assign_pointer(elem->funcs, NULL);
294}
295 215
296/** 216 tp_funcs = tp->funcs;
297 * tracepoint_update_probe_range - Update a probe range 217 old = func_remove(&tp_funcs, func);
298 * @begin: beginning of the range 218 if (IS_ERR(old)) {
299 * @end: end of the range 219 WARN_ON_ONCE(1);
300 * 220 return PTR_ERR(old);
301 * Updates the probe callback corresponding to a range of tracepoints.
302 * Called with tracepoints_mutex held.
303 */
304static void tracepoint_update_probe_range(struct tracepoint * const *begin,
305 struct tracepoint * const *end)
306{
307 struct tracepoint * const *iter;
308 struct tracepoint_entry *mark_entry;
309
310 if (!begin)
311 return;
312
313 for (iter = begin; iter < end; iter++) {
314 mark_entry = get_tracepoint((*iter)->name);
315 if (mark_entry) {
316 set_tracepoint(&mark_entry, *iter,
317 !!mark_entry->refcount);
318 mark_entry->enabled = !!mark_entry->refcount;
319 } else {
320 disable_tracepoint(*iter);
321 }
322 } 221 }
323} 222 release_probes(old);
324
325#ifdef CONFIG_MODULES
326void module_update_tracepoints(void)
327{
328 struct tp_module *tp_mod;
329
330 list_for_each_entry(tp_mod, &tracepoint_module_list, list)
331 tracepoint_update_probe_range(tp_mod->tracepoints_ptrs,
332 tp_mod->tracepoints_ptrs + tp_mod->num_tracepoints);
333}
334#else /* CONFIG_MODULES */
335void module_update_tracepoints(void)
336{
337}
338#endif /* CONFIG_MODULES */
339 223
224 if (!tp_funcs) {
225 /* Removed last function */
226 if (tp->unregfunc && static_key_enabled(&tp->key))
227 tp->unregfunc();
340 228
341/* 229 if (static_key_enabled(&tp->key))
342 * Update probes, removing the faulty probes. 230 static_key_slow_dec(&tp->key);
343 * Called with tracepoints_mutex held.
344 */
345static void tracepoint_update_probes(void)
346{
347 /* Core kernel tracepoints */
348 tracepoint_update_probe_range(__start___tracepoints_ptrs,
349 __stop___tracepoints_ptrs);
350 /* tracepoints in modules. */
351 module_update_tracepoints();
352}
353
354static struct tracepoint_func *
355tracepoint_add_probe(const char *name, void *probe, void *data)
356{
357 struct tracepoint_entry *entry;
358 struct tracepoint_func *old;
359
360 entry = get_tracepoint(name);
361 if (!entry) {
362 entry = add_tracepoint(name);
363 if (IS_ERR(entry))
364 return (struct tracepoint_func *)entry;
365 } 231 }
366 old = tracepoint_entry_add_probe(entry, probe, data); 232 rcu_assign_pointer(tp->funcs, tp_funcs);
367 if (IS_ERR(old) && !entry->refcount) 233 return 0;
368 remove_tracepoint(entry);
369 return old;
370} 234}
371 235
372/** 236/**
373 * tracepoint_probe_register - Connect a probe to a tracepoint 237 * tracepoint_probe_register - Connect a probe to a tracepoint
374 * @name: tracepoint name 238 * @tp: tracepoint
375 * @probe: probe handler 239 * @probe: probe handler
376 * @data: probe private data
377 *
378 * Returns:
379 * - 0 if the probe was successfully registered, and tracepoint
380 * callsites are currently loaded for that probe,
381 * - -ENODEV if the probe was successfully registered, but no tracepoint
382 * callsite is currently loaded for that probe,
383 * - other negative error value on error.
384 *
385 * When tracepoint_probe_register() returns either 0 or -ENODEV,
386 * parameters @name, @probe, and @data may be used by the tracepoint
387 * infrastructure until the probe is unregistered.
388 * 240 *
389 * The probe address must at least be aligned on the architecture pointer size. 241 * Returns 0 if ok, error value on error.
242 * Note: if @tp is within a module, the caller is responsible for
243 * unregistering the probe before the module is gone. This can be
244 * performed either with a tracepoint module going notifier, or from
245 * within module exit functions.
390 */ 246 */
391int tracepoint_probe_register(const char *name, void *probe, void *data) 247int tracepoint_probe_register(struct tracepoint *tp, void *probe, void *data)
392{ 248{
393 struct tracepoint_func *old; 249 struct tracepoint_func tp_func;
394 struct tracepoint_entry *entry; 250 int ret;
395 int ret = 0;
396 251
397 mutex_lock(&tracepoints_mutex); 252 mutex_lock(&tracepoints_mutex);
398 old = tracepoint_add_probe(name, probe, data); 253 tp_func.func = probe;
399 if (IS_ERR(old)) { 254 tp_func.data = data;
400 mutex_unlock(&tracepoints_mutex); 255 ret = tracepoint_add_func(tp, &tp_func);
401 return PTR_ERR(old);
402 }
403 tracepoint_update_probes(); /* may update entry */
404 entry = get_tracepoint(name);
405 /* Make sure the entry was enabled */
406 if (!entry || !entry->enabled)
407 ret = -ENODEV;
408 mutex_unlock(&tracepoints_mutex); 256 mutex_unlock(&tracepoints_mutex);
409 release_probes(old);
410 return ret; 257 return ret;
411} 258}
412EXPORT_SYMBOL_GPL(tracepoint_probe_register); 259EXPORT_SYMBOL_GPL(tracepoint_probe_register);
413 260
414static struct tracepoint_func *
415tracepoint_remove_probe(const char *name, void *probe, void *data)
416{
417 struct tracepoint_entry *entry;
418 struct tracepoint_func *old;
419
420 entry = get_tracepoint(name);
421 if (!entry)
422 return ERR_PTR(-ENOENT);
423 old = tracepoint_entry_remove_probe(entry, probe, data);
424 if (IS_ERR(old))
425 return old;
426 if (!entry->refcount)
427 remove_tracepoint(entry);
428 return old;
429}
430
431/** 261/**
432 * tracepoint_probe_unregister - Disconnect a probe from a tracepoint 262 * tracepoint_probe_unregister - Disconnect a probe from a tracepoint
433 * @name: tracepoint name 263 * @tp: tracepoint
434 * @probe: probe function pointer 264 * @probe: probe function pointer
435 * @data: probe private data
436 * 265 *
437 * We do not need to call a synchronize_sched to make sure the probes have 266 * Returns 0 if ok, error value on error.
438 * finished running before doing a module unload, because the module unload
439 * itself uses stop_machine(), which insures that every preempt disabled section
440 * have finished.
441 */ 267 */
442int tracepoint_probe_unregister(const char *name, void *probe, void *data) 268int tracepoint_probe_unregister(struct tracepoint *tp, void *probe, void *data)
443{ 269{
444 struct tracepoint_func *old; 270 struct tracepoint_func tp_func;
271 int ret;
445 272
446 mutex_lock(&tracepoints_mutex); 273 mutex_lock(&tracepoints_mutex);
447 old = tracepoint_remove_probe(name, probe, data); 274 tp_func.func = probe;
448 if (IS_ERR(old)) { 275 tp_func.data = data;
449 mutex_unlock(&tracepoints_mutex); 276 ret = tracepoint_remove_func(tp, &tp_func);
450 return PTR_ERR(old);
451 }
452 tracepoint_update_probes(); /* may update entry */
453 mutex_unlock(&tracepoints_mutex); 277 mutex_unlock(&tracepoints_mutex);
454 release_probes(old); 278 return ret;
455 return 0;
456} 279}
457EXPORT_SYMBOL_GPL(tracepoint_probe_unregister); 280EXPORT_SYMBOL_GPL(tracepoint_probe_unregister);
458 281
459
460#ifdef CONFIG_MODULES 282#ifdef CONFIG_MODULES
461bool trace_module_has_bad_taint(struct module *mod) 283bool trace_module_has_bad_taint(struct module *mod)
462{ 284{
463 return mod->taints & ~((1 << TAINT_OOT_MODULE) | (1 << TAINT_CRAP)); 285 return mod->taints & ~((1 << TAINT_OOT_MODULE) | (1 << TAINT_CRAP));
464} 286}
465 287
288static BLOCKING_NOTIFIER_HEAD(tracepoint_notify_list);
289
290/**
291 * register_tracepoint_notifier - register tracepoint coming/going notifier
292 * @nb: notifier block
293 *
294 * Notifiers registered with this function are called on module
295 * coming/going with the tracepoint_module_list_mutex held.
296 * The notifier block callback should expect a "struct tp_module" data
297 * pointer.
298 */
299int register_tracepoint_module_notifier(struct notifier_block *nb)
300{
301 struct tp_module *tp_mod;
302 int ret;
303
304 mutex_lock(&tracepoint_module_list_mutex);
305 ret = blocking_notifier_chain_register(&tracepoint_notify_list, nb);
306 if (ret)
307 goto end;
308 list_for_each_entry(tp_mod, &tracepoint_module_list, list)
309 (void) nb->notifier_call(nb, MODULE_STATE_COMING, tp_mod);
310end:
311 mutex_unlock(&tracepoint_module_list_mutex);
312 return ret;
313}
314EXPORT_SYMBOL_GPL(register_tracepoint_module_notifier);
315
316/**
317 * unregister_tracepoint_notifier - unregister tracepoint coming/going notifier
318 * @nb: notifier block
319 *
320 * The notifier block callback should expect a "struct tp_module" data
321 * pointer.
322 */
323int unregister_tracepoint_module_notifier(struct notifier_block *nb)
324{
325 struct tp_module *tp_mod;
326 int ret;
327
328 mutex_lock(&tracepoint_module_list_mutex);
329 ret = blocking_notifier_chain_unregister(&tracepoint_notify_list, nb);
330 if (ret)
331 goto end;
332 list_for_each_entry(tp_mod, &tracepoint_module_list, list)
333 (void) nb->notifier_call(nb, MODULE_STATE_GOING, tp_mod);
334end:
335 mutex_unlock(&tracepoint_module_list_mutex);
336 return ret;
337
338}
339EXPORT_SYMBOL_GPL(unregister_tracepoint_module_notifier);
340
341/*
342 * Ensure the tracer unregistered the module's probes before the module
343 * teardown is performed. Prevents leaks of probe and data pointers.
344 */
345static void tp_module_going_check_quiescent(struct tracepoint * const *begin,
346 struct tracepoint * const *end)
347{
348 struct tracepoint * const *iter;
349
350 if (!begin)
351 return;
352 for (iter = begin; iter < end; iter++)
353 WARN_ON_ONCE((*iter)->funcs);
354}
355
466static int tracepoint_module_coming(struct module *mod) 356static int tracepoint_module_coming(struct module *mod)
467{ 357{
468 struct tp_module *tp_mod; 358 struct tp_module *tp_mod;
@@ -478,7 +368,7 @@ static int tracepoint_module_coming(struct module *mod)
478 */ 368 */
479 if (trace_module_has_bad_taint(mod)) 369 if (trace_module_has_bad_taint(mod))
480 return 0; 370 return 0;
481 mutex_lock(&tracepoints_mutex); 371 mutex_lock(&tracepoint_module_list_mutex);
482 tp_mod = kmalloc(sizeof(struct tp_module), GFP_KERNEL); 372 tp_mod = kmalloc(sizeof(struct tp_module), GFP_KERNEL);
483 if (!tp_mod) { 373 if (!tp_mod) {
484 ret = -ENOMEM; 374 ret = -ENOMEM;
@@ -487,27 +377,33 @@ static int tracepoint_module_coming(struct module *mod)
487 tp_mod->num_tracepoints = mod->num_tracepoints; 377 tp_mod->num_tracepoints = mod->num_tracepoints;
488 tp_mod->tracepoints_ptrs = mod->tracepoints_ptrs; 378 tp_mod->tracepoints_ptrs = mod->tracepoints_ptrs;
489 list_add_tail(&tp_mod->list, &tracepoint_module_list); 379 list_add_tail(&tp_mod->list, &tracepoint_module_list);
490 tracepoint_update_probe_range(mod->tracepoints_ptrs, 380 blocking_notifier_call_chain(&tracepoint_notify_list,
491 mod->tracepoints_ptrs + mod->num_tracepoints); 381 MODULE_STATE_COMING, tp_mod);
492end: 382end:
493 mutex_unlock(&tracepoints_mutex); 383 mutex_unlock(&tracepoint_module_list_mutex);
494 return ret; 384 return ret;
495} 385}
496 386
497static int tracepoint_module_going(struct module *mod) 387static void tracepoint_module_going(struct module *mod)
498{ 388{
499 struct tp_module *pos; 389 struct tp_module *tp_mod;
500 390
501 if (!mod->num_tracepoints) 391 if (!mod->num_tracepoints)
502 return 0; 392 return;
503 393
504 mutex_lock(&tracepoints_mutex); 394 mutex_lock(&tracepoint_module_list_mutex);
505 tracepoint_update_probe_range(mod->tracepoints_ptrs, 395 list_for_each_entry(tp_mod, &tracepoint_module_list, list) {
506 mod->tracepoints_ptrs + mod->num_tracepoints); 396 if (tp_mod->tracepoints_ptrs == mod->tracepoints_ptrs) {
507 list_for_each_entry(pos, &tracepoint_module_list, list) { 397 blocking_notifier_call_chain(&tracepoint_notify_list,
508 if (pos->tracepoints_ptrs == mod->tracepoints_ptrs) { 398 MODULE_STATE_GOING, tp_mod);
509 list_del(&pos->list); 399 list_del(&tp_mod->list);
510 kfree(pos); 400 kfree(tp_mod);
401 /*
402 * Called the going notifier before checking for
403 * quiescence.
404 */
405 tp_module_going_check_quiescent(mod->tracepoints_ptrs,
406 mod->tracepoints_ptrs + mod->num_tracepoints);
511 break; 407 break;
512 } 408 }
513 } 409 }
@@ -517,12 +413,11 @@ static int tracepoint_module_going(struct module *mod)
517 * flag on "going", in case a module taints the kernel only after being 413 * flag on "going", in case a module taints the kernel only after being
518 * loaded. 414 * loaded.
519 */ 415 */
520 mutex_unlock(&tracepoints_mutex); 416 mutex_unlock(&tracepoint_module_list_mutex);
521 return 0;
522} 417}
523 418
524int tracepoint_module_notify(struct notifier_block *self, 419static int tracepoint_module_notify(struct notifier_block *self,
525 unsigned long val, void *data) 420 unsigned long val, void *data)
526{ 421{
527 struct module *mod = data; 422 struct module *mod = data;
528 int ret = 0; 423 int ret = 0;
@@ -534,24 +429,58 @@ int tracepoint_module_notify(struct notifier_block *self,
534 case MODULE_STATE_LIVE: 429 case MODULE_STATE_LIVE:
535 break; 430 break;
536 case MODULE_STATE_GOING: 431 case MODULE_STATE_GOING:
537 ret = tracepoint_module_going(mod); 432 tracepoint_module_going(mod);
433 break;
434 case MODULE_STATE_UNFORMED:
538 break; 435 break;
539 } 436 }
540 return ret; 437 return ret;
541} 438}
542 439
543struct notifier_block tracepoint_module_nb = { 440static struct notifier_block tracepoint_module_nb = {
544 .notifier_call = tracepoint_module_notify, 441 .notifier_call = tracepoint_module_notify,
545 .priority = 0, 442 .priority = 0,
546}; 443};
547 444
548static int init_tracepoints(void) 445static __init int init_tracepoints(void)
549{ 446{
550 return register_module_notifier(&tracepoint_module_nb); 447 int ret;
448
449 ret = register_module_notifier(&tracepoint_module_nb);
450 if (ret) {
451 pr_warning("Failed to register tracepoint module enter notifier\n");
452 }
453 return ret;
551} 454}
552__initcall(init_tracepoints); 455__initcall(init_tracepoints);
553#endif /* CONFIG_MODULES */ 456#endif /* CONFIG_MODULES */
554 457
458static void for_each_tracepoint_range(struct tracepoint * const *begin,
459 struct tracepoint * const *end,
460 void (*fct)(struct tracepoint *tp, void *priv),
461 void *priv)
462{
463 struct tracepoint * const *iter;
464
465 if (!begin)
466 return;
467 for (iter = begin; iter < end; iter++)
468 fct(*iter, priv);
469}
470
471/**
472 * for_each_kernel_tracepoint - iteration on all kernel tracepoints
473 * @fct: callback
474 * @priv: private data
475 */
476void for_each_kernel_tracepoint(void (*fct)(struct tracepoint *tp, void *priv),
477 void *priv)
478{
479 for_each_tracepoint_range(__start___tracepoints_ptrs,
480 __stop___tracepoints_ptrs, fct, priv);
481}
482EXPORT_SYMBOL_GPL(for_each_kernel_tracepoint);
483
555#ifdef CONFIG_HAVE_SYSCALL_TRACEPOINTS 484#ifdef CONFIG_HAVE_SYSCALL_TRACEPOINTS
556 485
557/* NB: reg/unreg are called while guarded with the tracepoints_mutex */ 486/* NB: reg/unreg are called while guarded with the tracepoints_mutex */
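Since the core no longer keeps a name-keyed hash of tracepoints, a tracer
that wants to resolve tracepoints by name now walks them itself with
for_each_kernel_tracepoint() and follows module tracepoints through the new
coming/going notifier. A minimal sketch under that assumption; my_match(),
my_tp and "my_event" are hypothetical names, not part of this patch:

#include <linux/tracepoint.h>
#include <linux/notifier.h>
#include <linux/module.h>
#include <linux/string.h>
#include <linux/init.h>

static struct tracepoint *my_tp;        /* the tracepoint we want, if found */

static void my_match(struct tracepoint *tp, void *priv)
{
        if (!strcmp(tp->name, (const char *)priv))
                my_tp = tp;
}

static int my_tp_module_notify(struct notifier_block *nb,
                               unsigned long action, void *data)
{
        struct tp_module *tp_mod = data;
        unsigned int i;

        switch (action) {
        case MODULE_STATE_COMING:
                /* The module's tracepoints are visible from now on. */
                for (i = 0; i < tp_mod->num_tracepoints; i++)
                        my_match(tp_mod->tracepoints_ptrs[i],
                                 (void *)"my_event");
                break;
        case MODULE_STATE_GOING:
                /* A real tracer must unregister its probes on this module's
                 * tracepoints here, before the module is freed; this sketch
                 * only forgets its cached pointer. */
                for (i = 0; i < tp_mod->num_tracepoints; i++)
                        if (tp_mod->tracepoints_ptrs[i] == my_tp)
                                my_tp = NULL;
                break;
        }
        return NOTIFY_OK;
}

static struct notifier_block my_tp_nb = {
        .notifier_call = my_tp_module_notify,
};

static int __init my_tracer_init(void)
{
        /* Built-in kernel tracepoints. */
        for_each_kernel_tracepoint(my_match, (void *)"my_event");
        /* Module tracepoints, as they come and go. */
        return register_tracepoint_module_notifier(&my_tp_nb);
}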