author		Steven Rostedt <srostedt@redhat.com>	2010-04-21 12:27:06 -0400
committer	Steven Rostedt <rostedt@goodmis.org>	2010-05-14 14:19:14 -0400
commit		2239291aeb0379fe47980b0e560e0eb9fd7e82ec (patch)
tree		e75bb60ec24e6fd7137f01db48e0ea7e5b1eddd4
parent		38516ab59fbc5b3bb278cf5e1fe2867c70cff32e (diff)
tracing: Remove per event trace registering
This patch removes the register functions of TRACE_EVENT() that enable and disable tracepoints. The registering of an event is now done directly in the trace_events.c file; tracepoint_probe_register() is called directly. The prototypes are no longer type checked, but this should not be an issue since the tracepoints are created automatically by the macros. If a prototype is incorrect in the TRACE_EVENT() macro, other macros will catch it.

The ftrace_event_class structure now holds the probes to be called by the callbacks. This removes the need for each event to carry a separate pointer for the probe.

To handle kprobes and syscalls, which register probes in a different manner, a "reg" field is added to the ftrace_event_class structure. If the "reg" field is assigned, it is called to enable and disable the probe for either ftrace or perf. To let the reg function know what is happening, a new enum (trace_reg) is created that describes the type of control requested.

With this rework, the 82 kernel events and 618 syscall events have their footprint dramatically lowered:

   text	   data	    bss	    dec	    hex	filename
4913961	1088356	 861512	6863829	 68bbd5	vmlinux.orig
4914025	1088868	 861512	6864405	 68be15	vmlinux.class
4918492	1084612	 861512	6864616	 68bee8	vmlinux.tracepoint
4900252	1057412	 861512	6819176	 680d68	vmlinux.regs

The size went from 6863829 to 6819176, a total saving of 44K. With tracepoints being continuously added, it is critical that the footprint stays minimal.

v5: Added #ifdef CONFIG_PERF_EVENTS around a reference to a perf-specific structure in trace_events.c.

v4: Fixed the trace self tests to check the probe, because regfunc no longer exists.

v3: Updated to handle void *data at the beginning of the probe parameters. Also added the tracepoint check_trace_callback_type_##call().

v2: Changed the callback probes to pass void * and typecast the value within the function.

Acked-by: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
Acked-by: Masami Hiramatsu <mhiramat@redhat.com>
Acked-by: Frederic Weisbecker <fweisbec@gmail.com>
Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
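To summarize the mechanism before the diff: event classes now carry the probe pointers and an optional registration hook, and the generic event code dispatches through them. Below is a minimal, self-contained sketch of that pattern using simplified stand-in structures and an illustrative helper name (event_enable); it is not the literal kernel code, which appears in full in the hunks that follow.

/*
 * Sketch only: simplified stand-ins for the structures this patch adds
 * to include/linux/ftrace_event.h, not the real kernel definitions.
 */
enum trace_reg {
	TRACE_REG_REGISTER,		/* enable the ftrace probe */
	TRACE_REG_UNREGISTER,		/* disable the ftrace probe */
	TRACE_REG_PERF_REGISTER,	/* enable the perf probe */
	TRACE_REG_PERF_UNREGISTER,	/* disable the perf probe */
};

struct ftrace_event_call;

struct ftrace_event_class {
	char *system;
	void *probe;		/* ftrace callback shared by every event in the class */
	void *perf_probe;	/* perf callback (under CONFIG_PERF_EVENTS) */
	int (*reg)(struct ftrace_event_call *event, enum trace_reg type);
};

struct ftrace_event_call {
	const char *name;
	struct ftrace_event_class *class;
};

/* Provided by the tracepoint layer; shown here as a prototype only. */
extern int tracepoint_probe_register(const char *name, void *probe, void *data);

/*
 * Illustrative enable path: use the class ->reg() hook when one exists
 * (kprobes, syscalls), otherwise hand the shared class probe straight to
 * the tracepoint layer, passing the event call as the probe's data.
 */
static int event_enable(struct ftrace_event_call *call)
{
	if (call->class->reg)
		return call->class->reg(call, TRACE_REG_REGISTER);
	return tracepoint_probe_register(call->name, call->class->probe, call);
}

The real enable/disable paths, including the perf side and the TRACE_REG_PERF_* cases, are in the trace_events.c, trace_event_perf.c, trace_kprobe.c and trace_syscalls.c hunks below.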
-rw-r--r--	include/linux/ftrace_event.h	 19
-rw-r--r--	include/linux/syscalls.h	 29
-rw-r--r--	include/trace/ftrace.h		133
-rw-r--r--	kernel/trace/trace_event_perf.c	 15
-rw-r--r--	kernel/trace/trace_events.c	 32
-rw-r--r--	kernel/trace/trace_kprobe.c	 34
-rw-r--r--	kernel/trace/trace_syscalls.c	 56
7 files changed, 171 insertions, 147 deletions
diff --git a/include/linux/ftrace_event.h b/include/linux/ftrace_event.h
index 496eea898ee4..e665ed38b4bf 100644
--- a/include/linux/ftrace_event.h
+++ b/include/linux/ftrace_event.h
@@ -113,8 +113,23 @@ void tracing_record_cmdline(struct task_struct *tsk);
 
 struct event_filter;
 
+enum trace_reg {
+	TRACE_REG_REGISTER,
+	TRACE_REG_UNREGISTER,
+	TRACE_REG_PERF_REGISTER,
+	TRACE_REG_PERF_UNREGISTER,
+};
+
+struct ftrace_event_call;
+
 struct ftrace_event_class {
 	char *system;
+	void *probe;
+#ifdef CONFIG_PERF_EVENTS
+	void *perf_probe;
+#endif
+	int (*reg)(struct ftrace_event_call *event,
+		   enum trace_reg type);
 };
 
 struct ftrace_event_call {
@@ -124,8 +139,6 @@ struct ftrace_event_call {
 	struct dentry *dir;
 	struct trace_event *event;
 	int enabled;
-	int (*regfunc)(struct ftrace_event_call *);
-	void (*unregfunc)(struct ftrace_event_call *);
 	int id;
 	const char *print_fmt;
 	int (*raw_init)(struct ftrace_event_call *);
@@ -137,8 +150,6 @@ struct ftrace_event_call {
 	void *data;
 
 	int perf_refcount;
-	int (*perf_event_enable)(struct ftrace_event_call *);
-	void (*perf_event_disable)(struct ftrace_event_call *);
 };
 
 #define PERF_MAX_TRACE_SIZE 2048
diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h
index ac5791df2506..e3348c4c22e8 100644
--- a/include/linux/syscalls.h
+++ b/include/linux/syscalls.h
@@ -103,22 +103,6 @@ struct perf_event_attr;
 #define __SC_TEST5(t5, a5, ...)	__SC_TEST(t5); __SC_TEST4(__VA_ARGS__)
 #define __SC_TEST6(t6, a6, ...)	__SC_TEST(t6); __SC_TEST5(__VA_ARGS__)
 
-#ifdef CONFIG_PERF_EVENTS
-
-#define TRACE_SYS_ENTER_PERF_INIT(sname) \
-	.perf_event_enable = perf_sysenter_enable, \
-	.perf_event_disable = perf_sysenter_disable,
-
-#define TRACE_SYS_EXIT_PERF_INIT(sname) \
-	.perf_event_enable = perf_sysexit_enable, \
-	.perf_event_disable = perf_sysexit_disable,
-#else
-#define TRACE_SYS_ENTER_PERF(sname)
-#define TRACE_SYS_ENTER_PERF_INIT(sname)
-#define TRACE_SYS_EXIT_PERF(sname)
-#define TRACE_SYS_EXIT_PERF_INIT(sname)
-#endif /* CONFIG_PERF_EVENTS */
-
 #ifdef CONFIG_FTRACE_SYSCALLS
 #define __SC_STR_ADECL1(t, a) #a
 #define __SC_STR_ADECL2(t, a, ...) #a, __SC_STR_ADECL1(__VA_ARGS__)
@@ -134,7 +118,8 @@ struct perf_event_attr;
 #define __SC_STR_TDECL5(t, a, ...) #t, __SC_STR_TDECL4(__VA_ARGS__)
 #define __SC_STR_TDECL6(t, a, ...) #t, __SC_STR_TDECL5(__VA_ARGS__)
 
-extern struct ftrace_event_class event_class_syscalls;
+extern struct ftrace_event_class event_class_syscall_enter;
+extern struct ftrace_event_class event_class_syscall_exit;
 
 #define SYSCALL_TRACE_ENTER_EVENT(sname) \
 	static const struct syscall_metadata __syscall_meta_##sname; \
@@ -148,14 +133,11 @@ extern struct ftrace_event_class event_class_syscalls;
 		__attribute__((section("_ftrace_events"))) \
 	  event_enter_##sname = { \
 		.name = "sys_enter"#sname, \
-		.class = &event_class_syscalls, \
+		.class = &event_class_syscall_enter, \
 		.event = &enter_syscall_print_##sname, \
 		.raw_init = init_syscall_trace, \
 		.define_fields = syscall_enter_define_fields, \
-		.regfunc = reg_event_syscall_enter, \
-		.unregfunc = unreg_event_syscall_enter, \
 		.data = (void *)&__syscall_meta_##sname,\
-		TRACE_SYS_ENTER_PERF_INIT(sname) \
 	}
 
 #define SYSCALL_TRACE_EXIT_EVENT(sname) \
@@ -170,14 +152,11 @@ extern struct ftrace_event_class event_class_syscalls;
 		__attribute__((section("_ftrace_events"))) \
 	  event_exit_##sname = { \
 		.name = "sys_exit"#sname, \
-		.class = &event_class_syscalls, \
+		.class = &event_class_syscall_exit, \
 		.event = &exit_syscall_print_##sname, \
 		.raw_init = init_syscall_trace, \
 		.define_fields = syscall_exit_define_fields, \
-		.regfunc = reg_event_syscall_exit, \
-		.unregfunc = unreg_event_syscall_exit, \
 		.data = (void *)&__syscall_meta_##sname,\
-		TRACE_SYS_EXIT_PERF_INIT(sname) \
 	}
 
 #define SYSCALL_METADATA(sname, nb) \
diff --git a/include/trace/ftrace.h b/include/trace/ftrace.h
index ba28b644f41b..26d132418f92 100644
--- a/include/trace/ftrace.h
+++ b/include/trace/ftrace.h
@@ -381,53 +381,6 @@ static inline notrace int ftrace_get_offsets_##call( \
 
 #include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
 
-#ifdef CONFIG_PERF_EVENTS
-
-/*
- * Generate the functions needed for tracepoint perf_event support.
- *
- * NOTE: The insertion profile callback (ftrace_profile_<call>) is defined later
- *
- * static int ftrace_profile_enable_<call>(void)
- * {
- *	return register_trace_<call>(ftrace_profile_<call>);
- * }
- *
- * static void ftrace_profile_disable_<call>(void)
- * {
- *	unregister_trace_<call>(ftrace_profile_<call>);
- * }
- *
- */
-
-#undef DECLARE_EVENT_CLASS
-#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print)
-
-#undef DEFINE_EVENT
-#define DEFINE_EVENT(template, name, proto, args) \
-	\
-static void perf_trace_##name(void *, proto); \
-	\
-static notrace int \
-perf_trace_enable_##name(struct ftrace_event_call *unused) \
-{ \
-	return register_trace_##name(perf_trace_##name, NULL); \
-} \
-	\
-static notrace void \
-perf_trace_disable_##name(struct ftrace_event_call *unused) \
-{ \
-	unregister_trace_##name(perf_trace_##name, NULL); \
-}
-
-#undef DEFINE_EVENT_PRINT
-#define DEFINE_EVENT_PRINT(template, name, proto, args, print) \
-	DEFINE_EVENT(template, name, PARAMS(proto), PARAMS(args))
-
-#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
-
-#endif /* CONFIG_PERF_EVENTS */
-
 /*
  * Stage 4 of the trace events.
  *
@@ -437,8 +390,9 @@ perf_trace_disable_##name(struct ftrace_event_call *unused) \
  *
  * static struct ftrace_event_call event_<call>;
  *
- * static void ftrace_raw_event_<call>(proto)
+ * static void ftrace_raw_event_<call>(void *__data, proto)
  * {
+ *	struct ftrace_event_call *event_call = __data;
  *	struct ftrace_data_offsets_<call> __maybe_unused __data_offsets;
  *	struct ring_buffer_event *event;
  *	struct ftrace_raw_<call> *entry; <-- defined in stage 1
@@ -468,16 +422,6 @@ perf_trace_disable_##name(struct ftrace_event_call *unused) \
  *				       event, irq_flags, pc);
  * }
  *
- * static int ftrace_raw_reg_event_<call>(struct ftrace_event_call *unused)
- * {
- *	return register_trace_<call>(ftrace_raw_event_<call>);
- * }
- *
- * static void ftrace_unreg_event_<call>(struct ftrace_event_call *unused)
- * {
- *	unregister_trace_<call>(ftrace_raw_event_<call>);
- * }
- *
  * static struct trace_event ftrace_event_type_<call> = {
  *	.trace = ftrace_raw_output_<call>, <-- stage 2
  * };
@@ -504,11 +448,15 @@ perf_trace_disable_##name(struct ftrace_event_call *unused) \
 
 #ifdef CONFIG_PERF_EVENTS
 
+#define _TRACE_PERF_PROTO(call, proto) \
+	static notrace void \
+	perf_trace_##call(void *__data, proto);
+
 #define _TRACE_PERF_INIT(call) \
-	.perf_event_enable = perf_trace_enable_##call, \
-	.perf_event_disable = perf_trace_disable_##call,
+	.perf_probe = perf_trace_##call,
 
 #else
+#define _TRACE_PERF_PROTO(call, proto)
 #define _TRACE_PERF_INIT(call)
 #endif /* CONFIG_PERF_EVENTS */
 
@@ -542,9 +490,9 @@ perf_trace_disable_##name(struct ftrace_event_call *unused) \
 #define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print) \
 	\
 static notrace void \
-ftrace_raw_event_id_##call(struct ftrace_event_call *event_call, \
-			   proto) \
+ftrace_raw_event_##call(void *__data, proto) \
 { \
+	struct ftrace_event_call *event_call = __data; \
 	struct ftrace_data_offsets_##call __maybe_unused __data_offsets;\
 	struct ring_buffer_event *event; \
 	struct ftrace_raw_##call *entry; \
@@ -574,30 +522,23 @@ ftrace_raw_event_id_##call(struct ftrace_event_call *event_call, \
 	trace_nowake_buffer_unlock_commit(buffer, \
 					  event, irq_flags, pc); \
 }
+/*
+ * The ftrace_test_probe is compiled out, it is only here as a build time check
+ * to make sure that if the tracepoint handling changes, the ftrace probe will
+ * fail to compile unless it too is updated.
+ */
 
 #undef DEFINE_EVENT
 #define DEFINE_EVENT(template, call, proto, args) \
 	\
-static notrace void ftrace_raw_event_##call(void *__ignore, proto) \
-{ \
-	ftrace_raw_event_id_##template(&event_##call, args); \
-} \
-	\
-static notrace int \
-ftrace_raw_reg_event_##call(struct ftrace_event_call *unused) \
-{ \
-	return register_trace_##call(ftrace_raw_event_##call, NULL); \
-} \
-	\
-static notrace void \
-ftrace_raw_unreg_event_##call(struct ftrace_event_call *unused) \
-{ \
-	unregister_trace_##call(ftrace_raw_event_##call, NULL); \
-} \
-	\
 static struct trace_event ftrace_event_type_##call = { \
 	.trace = ftrace_raw_output_##call, \
-};
+}; \
+	\
+static inline void ftrace_test_probe_##call(void) \
+{ \
+	check_trace_callback_type_##call(ftrace_raw_event_##template); \
+}
 
 #undef DEFINE_EVENT_PRINT
 #define DEFINE_EVENT_PRINT(template, name, proto, args, print) \
@@ -618,9 +559,12 @@ static struct trace_event ftrace_event_type_##call = { \
 
 #undef DECLARE_EVENT_CLASS
 #define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print) \
+_TRACE_PERF_PROTO(call, PARAMS(proto)); \
 static const char print_fmt_##call[] = print; \
 static struct ftrace_event_class __used event_class_##call = { \
-	.system = __stringify(TRACE_SYSTEM) \
+	.system = __stringify(TRACE_SYSTEM), \
+	.probe = ftrace_raw_event_##call, \
+	_TRACE_PERF_INIT(call) \
 };
 
 #undef DEFINE_EVENT
@@ -633,11 +577,8 @@ __attribute__((section("_ftrace_events"))) event_##call = { \
 	.class = &event_class_##template, \
 	.event = &ftrace_event_type_##call, \
 	.raw_init = trace_event_raw_init, \
-	.regfunc = ftrace_raw_reg_event_##call, \
-	.unregfunc = ftrace_raw_unreg_event_##call, \
 	.print_fmt = print_fmt_##template, \
 	.define_fields = ftrace_define_fields_##template, \
-	_TRACE_PERF_INIT(call) \
 };
 
 #undef DEFINE_EVENT_PRINT
@@ -652,11 +593,8 @@ __attribute__((section("_ftrace_events"))) event_##call = { \
 	.class = &event_class_##template, \
 	.event = &ftrace_event_type_##call, \
 	.raw_init = trace_event_raw_init, \
-	.regfunc = ftrace_raw_reg_event_##call, \
-	.unregfunc = ftrace_raw_unreg_event_##call, \
 	.print_fmt = print_fmt_##call, \
 	.define_fields = ftrace_define_fields_##template, \
-	_TRACE_PERF_INIT(call) \
 }
 
 #include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
@@ -756,9 +694,9 @@ __attribute__((section("_ftrace_events"))) event_##call = { \
 #undef DECLARE_EVENT_CLASS
 #define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print) \
 static notrace void \
-perf_trace_templ_##call(struct ftrace_event_call *event_call, \
-			proto) \
+perf_trace_##call(void *__data, proto) \
 { \
+	struct ftrace_event_call *event_call = __data; \
 	struct ftrace_data_offsets_##call __maybe_unused __data_offsets;\
 	struct ftrace_raw_##call *entry; \
 	u64 __addr = 0, __count = 1; \
@@ -791,15 +729,20 @@ perf_trace_templ_##call(struct ftrace_event_call *event_call, \
 			       __count, irq_flags, __regs); \
 }
 
+/*
+ * This part is compiled out, it is only here as a build time check
+ * to make sure that if the tracepoint handling changes, the
+ * perf probe will fail to compile unless it too is updated.
+ */
 #undef DEFINE_EVENT
 #define DEFINE_EVENT(template, call, proto, args) \
-static notrace void perf_trace_##call(void *__ignore, proto) \
+static inline void perf_test_probe_##call(void) \
 { \
-	struct ftrace_event_call *event_call = &event_##call; \
+	check_trace_callback_type_##call(perf_trace_##template); \
 	\
-	perf_trace_templ_##template(event_call, args); \
 }
 
+
 #undef DEFINE_EVENT_PRINT
 #define DEFINE_EVENT_PRINT(template, name, proto, args, print) \
 	DEFINE_EVENT(template, name, PARAMS(proto), PARAMS(args))
diff --git a/kernel/trace/trace_event_perf.c b/kernel/trace/trace_event_perf.c
index 0565bb42566f..196fe9d26773 100644
--- a/kernel/trace/trace_event_perf.c
+++ b/kernel/trace/trace_event_perf.c
@@ -49,7 +49,12 @@ static int perf_trace_event_enable(struct ftrace_event_call *event)
 		rcu_assign_pointer(perf_trace_buf_nmi, buf);
 	}
 
-	ret = event->perf_event_enable(event);
+	if (event->class->reg)
+		ret = event->class->reg(event, TRACE_REG_PERF_REGISTER);
+	else
+		ret = tracepoint_probe_register(event->name,
+						event->class->perf_probe,
+						event);
 	if (!ret) {
 		total_ref_count++;
 		return 0;
@@ -75,7 +80,8 @@ int perf_trace_enable(int event_id)
 
 	mutex_lock(&event_mutex);
 	list_for_each_entry(event, &ftrace_events, list) {
-		if (event->id == event_id && event->perf_event_enable &&
+		if (event->id == event_id &&
+		    event->class && event->class->perf_probe &&
 		    try_module_get(event->mod)) {
 			ret = perf_trace_event_enable(event);
 			break;
@@ -93,7 +99,10 @@ static void perf_trace_event_disable(struct ftrace_event_call *event)
 	if (--event->perf_refcount > 0)
 		return;
 
-	event->perf_event_disable(event);
+	if (event->class->reg)
+		event->class->reg(event, TRACE_REG_PERF_UNREGISTER);
+	else
+		tracepoint_probe_unregister(event->name, event->class->perf_probe, event);
 
 	if (!--total_ref_count) {
 		buf = perf_trace_buf;
diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
index 2f54b48d3632..19d1eb0a7188 100644
--- a/kernel/trace/trace_events.c
+++ b/kernel/trace/trace_events.c
@@ -127,13 +127,23 @@ static int ftrace_event_enable_disable(struct ftrace_event_call *call,
 		if (call->enabled) {
 			call->enabled = 0;
 			tracing_stop_cmdline_record();
-			call->unregfunc(call);
+			if (call->class->reg)
+				call->class->reg(call, TRACE_REG_UNREGISTER);
+			else
+				tracepoint_probe_unregister(call->name,
+							    call->class->probe,
+							    call);
 		}
 		break;
 	case 1:
 		if (!call->enabled) {
 			tracing_start_cmdline_record();
-			ret = call->regfunc(call);
+			if (call->class->reg)
+				ret = call->class->reg(call, TRACE_REG_REGISTER);
+			else
+				ret = tracepoint_probe_register(call->name,
+								call->class->probe,
+								call);
 			if (ret) {
 				tracing_stop_cmdline_record();
 				pr_info("event trace: Could not enable event "
@@ -171,7 +181,8 @@ static int __ftrace_set_clr_event(const char *match, const char *sub,
 	mutex_lock(&event_mutex);
 	list_for_each_entry(call, &ftrace_events, list) {
 
-		if (!call->name || !call->regfunc)
+		if (!call->name || !call->class ||
+		    (!call->class->probe && !call->class->reg))
 			continue;
 
 		if (match &&
@@ -297,7 +308,7 @@ t_next(struct seq_file *m, void *v, loff_t *pos)
 		 * The ftrace subsystem is for showing formats only.
 		 * They can not be enabled or disabled via the event files.
 		 */
-		if (call->regfunc)
+		if (call->class && (call->class->probe || call->class->reg))
 			return call;
 	}
 
@@ -450,7 +461,8 @@ system_enable_read(struct file *filp, char __user *ubuf, size_t cnt,
 
 	mutex_lock(&event_mutex);
 	list_for_each_entry(call, &ftrace_events, list) {
-		if (!call->name || !call->regfunc)
+		if (!call->name || !call->class ||
+		    (!call->class->probe && !call->class->reg))
 			continue;
 
 		if (system && strcmp(call->class->system, system) != 0)
@@ -935,13 +947,15 @@ event_create_dir(struct ftrace_event_call *call, struct dentry *d_events,
 		return -1;
 	}
 
-	if (call->regfunc)
+	if (call->class->probe || call->class->reg)
 		trace_create_file("enable", 0644, call->dir, call,
 				  enable);
 
-	if (call->id && call->perf_event_enable)
+#ifdef CONFIG_PERF_EVENTS
+	if (call->id && (call->class->perf_probe || call->class->reg))
 		trace_create_file("id", 0444, call->dir, call,
 				  id);
+#endif
 
 	if (call->define_fields) {
 		ret = trace_define_common_fields(call);
@@ -1388,8 +1402,8 @@ static __init void event_trace_self_tests(void)
 
 	list_for_each_entry(call, &ftrace_events, list) {
 
-		/* Only test those that have a regfunc */
-		if (!call->regfunc)
+		/* Only test those that have a probe */
+		if (!call->class || !call->class->probe)
 			continue;
 
 /*
diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c
index eda220bf2065..f8af21a53f0c 100644
--- a/kernel/trace/trace_kprobe.c
+++ b/kernel/trace/trace_kprobe.c
@@ -202,6 +202,7 @@ struct trace_probe {
 	unsigned long		nhit;
 	unsigned int		flags;	/* For TP_FLAG_* */
 	const char		*symbol;	/* symbol name */
+	struct ftrace_event_class	class;
 	struct ftrace_event_call	call;
 	struct trace_event		event;
 	unsigned int		nr_args;
@@ -323,6 +324,7 @@ static struct trace_probe *alloc_trace_probe(const char *group,
 		goto error;
 	}
 
+	tp->call.class = &tp->class;
 	tp->call.name = kstrdup(event, GFP_KERNEL);
 	if (!tp->call.name)
 		goto error;
@@ -332,8 +334,8 @@ static struct trace_probe *alloc_trace_probe(const char *group,
 		goto error;
 	}
 
-	tp->call.class->system = kstrdup(group, GFP_KERNEL);
-	if (!tp->call.class->system)
+	tp->class.system = kstrdup(group, GFP_KERNEL);
+	if (!tp->class.system)
 		goto error;
 
 	INIT_LIST_HEAD(&tp->list);
@@ -1302,6 +1304,26 @@ static void probe_perf_disable(struct ftrace_event_call *call)
 }
 #endif /* CONFIG_PERF_EVENTS */
 
+static __kprobes
+int kprobe_register(struct ftrace_event_call *event, enum trace_reg type)
+{
+	switch (type) {
+	case TRACE_REG_REGISTER:
+		return probe_event_enable(event);
+	case TRACE_REG_UNREGISTER:
+		probe_event_disable(event);
+		return 0;
+
+#ifdef CONFIG_PERF_EVENTS
+	case TRACE_REG_PERF_REGISTER:
+		return probe_perf_enable(event);
+	case TRACE_REG_PERF_UNREGISTER:
+		probe_perf_disable(event);
+		return 0;
+#endif
+	}
+	return 0;
+}
 
 static __kprobes
 int kprobe_dispatcher(struct kprobe *kp, struct pt_regs *regs)
@@ -1355,13 +1377,7 @@ static int register_probe_event(struct trace_probe *tp)
 		return -ENODEV;
 	}
 	call->enabled = 0;
-	call->regfunc = probe_event_enable;
-	call->unregfunc = probe_event_disable;
-
-#ifdef CONFIG_PERF_EVENTS
-	call->perf_event_enable = probe_perf_enable;
-	call->perf_event_disable = probe_perf_disable;
-#endif
+	call->class->reg = kprobe_register;
 	call->data = tp;
 	ret = trace_add_event_call(call);
 	if (ret) {
diff --git a/kernel/trace/trace_syscalls.c b/kernel/trace/trace_syscalls.c
index b8d30e7ecd05..a21d366cae46 100644
--- a/kernel/trace/trace_syscalls.c
+++ b/kernel/trace/trace_syscalls.c
@@ -15,8 +15,19 @@ static int sys_refcount_exit;
 static DECLARE_BITMAP(enabled_enter_syscalls, NR_syscalls);
 static DECLARE_BITMAP(enabled_exit_syscalls, NR_syscalls);
 
-struct ftrace_event_class event_class_syscalls = {
-	.system = "syscalls"
+static int syscall_enter_register(struct ftrace_event_call *event,
+				 enum trace_reg type);
+static int syscall_exit_register(struct ftrace_event_call *event,
+				 enum trace_reg type);
+
+struct ftrace_event_class event_class_syscall_enter = {
+	.system = "syscalls",
+	.reg = syscall_enter_register
+};
+
+struct ftrace_event_class event_class_syscall_exit = {
+	.system = "syscalls",
+	.reg = syscall_exit_register
 };
 
 extern unsigned long __start_syscalls_metadata[];
@@ -587,3 +598,44 @@ void perf_sysexit_disable(struct ftrace_event_call *call)
 
 #endif /* CONFIG_PERF_EVENTS */
 
+static int syscall_enter_register(struct ftrace_event_call *event,
+				 enum trace_reg type)
+{
+	switch (type) {
+	case TRACE_REG_REGISTER:
+		return reg_event_syscall_enter(event);
+	case TRACE_REG_UNREGISTER:
+		unreg_event_syscall_enter(event);
+		return 0;
+
+#ifdef CONFIG_PERF_EVENTS
+	case TRACE_REG_PERF_REGISTER:
+		return perf_sysenter_enable(event);
+	case TRACE_REG_PERF_UNREGISTER:
+		perf_sysenter_disable(event);
+		return 0;
+#endif
+	}
+	return 0;
+}
+
+static int syscall_exit_register(struct ftrace_event_call *event,
+				 enum trace_reg type)
+{
+	switch (type) {
+	case TRACE_REG_REGISTER:
+		return reg_event_syscall_exit(event);
+	case TRACE_REG_UNREGISTER:
+		unreg_event_syscall_exit(event);
+		return 0;
+
+#ifdef CONFIG_PERF_EVENTS
+	case TRACE_REG_PERF_REGISTER:
+		return perf_sysexit_enable(event);
+	case TRACE_REG_PERF_UNREGISTER:
+		perf_sysexit_disable(event);
+		return 0;
+#endif
+	}
+	return 0;
+}