Diffstat (limited to 'include/trace/ftrace.h')
-rw-r--r--  include/trace/ftrace.h | 289
1 file changed, 84 insertions(+), 205 deletions(-)
diff --git a/include/trace/ftrace.h b/include/trace/ftrace.h
index c6fe03e902ca..ea6f9d4a20e9 100644
--- a/include/trace/ftrace.h
+++ b/include/trace/ftrace.h
@@ -65,7 +65,8 @@
 	};
 #undef DEFINE_EVENT
 #define DEFINE_EVENT(template, name, proto, args)	\
-	static struct ftrace_event_call event_##name
+	static struct ftrace_event_call		\
+	__attribute__((__aligned__(4))) event_##name
 
 #undef DEFINE_EVENT_PRINT
 #define DEFINE_EVENT_PRINT(template, name, proto, args, print)	\
@@ -131,130 +132,6 @@
 #include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
 
 /*
- * Setup the showing format of trace point.
- *
- * int
- * ftrace_format_##call(struct trace_seq *s)
- * {
- *	struct ftrace_raw_##call field;
- *	int ret;
- *
- *	ret = trace_seq_printf(s, #type " " #item ";"
- *	                       " offset:%u; size:%u;\n",
- *	                       offsetof(struct ftrace_raw_##call, item),
- *	                       sizeof(field.type));
- *
- * }
- */
-
-#undef TP_STRUCT__entry
-#define TP_STRUCT__entry(args...) args
-
-#undef __field
-#define __field(type, item) \
-	ret = trace_seq_printf(s, "\tfield:" #type " " #item ";\t" \
-			       "offset:%u;\tsize:%u;\tsigned:%u;\n", \
-			       (unsigned int)offsetof(typeof(field), item), \
-			       (unsigned int)sizeof(field.item), \
-			       (unsigned int)is_signed_type(type)); \
-	if (!ret) \
-		return 0;
-
-#undef __field_ext
-#define __field_ext(type, item, filter_type) __field(type, item)
-
-#undef __array
-#define __array(type, item, len) \
-	ret = trace_seq_printf(s, "\tfield:" #type " " #item "[" #len "];\t" \
-			       "offset:%u;\tsize:%u;\tsigned:%u;\n", \
-			       (unsigned int)offsetof(typeof(field), item), \
-			       (unsigned int)sizeof(field.item), \
-			       (unsigned int)is_signed_type(type)); \
-	if (!ret) \
-		return 0;
-
-#undef __dynamic_array
-#define __dynamic_array(type, item, len) \
-	ret = trace_seq_printf(s, "\tfield:__data_loc " #type "[] " #item ";\t"\
-			       "offset:%u;\tsize:%u;\tsigned:%u;\n", \
-			       (unsigned int)offsetof(typeof(field), \
-					__data_loc_##item), \
-			       (unsigned int)sizeof(field.__data_loc_##item), \
-			       (unsigned int)is_signed_type(type)); \
-	if (!ret) \
-		return 0;
-
-#undef __string
-#define __string(item, src) __dynamic_array(char, item, -1)
-
-#undef __entry
-#define __entry REC
-
-#undef __print_symbolic
-#undef __get_dynamic_array
-#undef __get_str
-
-#undef TP_printk
-#define TP_printk(fmt, args...) "\"%s\", %s\n", fmt, __stringify(args)
-
-#undef TP_fast_assign
-#define TP_fast_assign(args...) args
-
-#undef TP_perf_assign
-#define TP_perf_assign(args...)
-
-#undef DECLARE_EVENT_CLASS
-#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, func, print) \
-static int \
-ftrace_format_setup_##call(struct ftrace_event_call *unused, \
-			   struct trace_seq *s) \
-{ \
-	struct ftrace_raw_##call field __attribute__((unused)); \
-	int ret = 0; \
-	\
-	tstruct; \
-	\
-	return ret; \
-} \
-	\
-static int \
-ftrace_format_##call(struct ftrace_event_call *unused, \
-		     struct trace_seq *s) \
-{ \
-	int ret = 0; \
-	\
-	ret = ftrace_format_setup_##call(unused, s); \
-	if (!ret) \
-		return ret; \
-	\
-	ret = trace_seq_printf(s, "\nprint fmt: " print); \
-	\
-	return ret; \
-}
-
-#undef DEFINE_EVENT
-#define DEFINE_EVENT(template, name, proto, args)
-
-#undef DEFINE_EVENT_PRINT
-#define DEFINE_EVENT_PRINT(template, name, proto, args, print) \
-static int \
-ftrace_format_##name(struct ftrace_event_call *unused, \
-		     struct trace_seq *s) \
-{ \
-	int ret = 0; \
-	\
-	ret = ftrace_format_setup_##template(unused, s); \
-	if (!ret) \
-		return ret; \
-	\
-	trace_seq_printf(s, "\nprint fmt: " print); \
-	\
-	return ret; \
-}
-
-#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
-
-/*
  * Stage 3 of the trace events.
  *
  * Override the macros in <trace/trace_events.h> to include the following:
@@ -323,7 +200,7 @@ ftrace_format_##name(struct ftrace_event_call *unused, \
 
 #undef DECLARE_EVENT_CLASS
 #define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print) \
-static enum print_line_t \
+static notrace enum print_line_t \
 ftrace_raw_output_id_##call(int event_id, const char *name, \
 			    struct trace_iterator *iter, int flags) \
 { \
@@ -356,7 +233,7 @@ ftrace_raw_output_id_##call(int event_id, const char *name, \
 
 #undef DEFINE_EVENT
 #define DEFINE_EVENT(template, name, proto, args) \
-static enum print_line_t \
+static notrace enum print_line_t \
 ftrace_raw_output_##name(struct trace_iterator *iter, int flags) \
 { \
 	return ftrace_raw_output_id_##template(event_##name.id, \
@@ -365,7 +242,7 @@ ftrace_raw_output_##name(struct trace_iterator *iter, int flags) \
 
 #undef DEFINE_EVENT_PRINT
 #define DEFINE_EVENT_PRINT(template, call, proto, args, print) \
-static enum print_line_t \
+static notrace enum print_line_t \
 ftrace_raw_output_##call(struct trace_iterator *iter, int flags) \
 { \
 	struct trace_seq *s = &iter->seq; \
@@ -431,7 +308,7 @@ ftrace_raw_output_##call(struct trace_iterator *iter, int flags) \
 
 #undef DECLARE_EVENT_CLASS
 #define DECLARE_EVENT_CLASS(call, proto, args, tstruct, func, print) \
-static int \
+static int notrace \
 ftrace_define_fields_##call(struct ftrace_event_call *event_call) \
 { \
 	struct ftrace_raw_##call field; \
@@ -479,7 +356,7 @@ ftrace_define_fields_##call(struct ftrace_event_call *event_call) \
 
 #undef DECLARE_EVENT_CLASS
 #define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print) \
-static inline int ftrace_get_offsets_##call( \
+static inline notrace int ftrace_get_offsets_##call( \
 	struct ftrace_data_offsets_##call *__data_offsets, proto) \
 { \
 	int __data_size = 0; \
@@ -499,7 +376,7 @@ static inline int ftrace_get_offsets_##call( \
 
 #include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
 
-#ifdef CONFIG_EVENT_PROFILE
+#ifdef CONFIG_PERF_EVENTS
 
 /*
  * Generate the functions needed for tracepoint perf_event support.
@@ -524,16 +401,18 @@ static inline int ftrace_get_offsets_##call( \
 #undef DEFINE_EVENT
 #define DEFINE_EVENT(template, name, proto, args) \
 	\
-static void ftrace_profile_##name(proto); \
+static void perf_trace_##name(proto); \
 	\
-static int ftrace_profile_enable_##name(struct ftrace_event_call *unused)\
+static notrace int \
+perf_trace_enable_##name(struct ftrace_event_call *unused) \
 { \
-	return register_trace_##name(ftrace_profile_##name); \
+	return register_trace_##name(perf_trace_##name); \
 } \
 	\
-static void ftrace_profile_disable_##name(struct ftrace_event_call *unused)\
+static notrace void \
+perf_trace_disable_##name(struct ftrace_event_call *unused) \
 { \
-	unregister_trace_##name(ftrace_profile_##name); \
+	unregister_trace_##name(perf_trace_##name); \
 }
 
 #undef DEFINE_EVENT_PRINT
@@ -542,7 +421,7 @@ static void ftrace_profile_disable_##name(struct ftrace_event_call *unused)\
 
 #include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
 
-#endif
+#endif /* CONFIG_PERF_EVENTS */
 
 /*
  * Stage 4 of the trace events.
@@ -622,20 +501,19 @@ static void ftrace_profile_disable_##name(struct ftrace_event_call *unused)\
  *	.raw_init		= trace_event_raw_init,
  *	.regfunc		= ftrace_reg_event_<call>,
  *	.unregfunc		= ftrace_unreg_event_<call>,
- *	.show_format		= ftrace_format_<call>,
  * }
  *
  */
 
-#ifdef CONFIG_EVENT_PROFILE
+#ifdef CONFIG_PERF_EVENTS
 
-#define _TRACE_PROFILE_INIT(call) \
-	.profile_enable = ftrace_profile_enable_##call, \
-	.profile_disable = ftrace_profile_disable_##call,
+#define _TRACE_PERF_INIT(call) \
+	.perf_event_enable = perf_trace_enable_##call, \
+	.perf_event_disable = perf_trace_disable_##call,
 
 #else
-#define _TRACE_PROFILE_INIT(call)
-#endif
+#define _TRACE_PERF_INIT(call)
+#endif /* CONFIG_PERF_EVENTS */
 
 #undef __entry
 #define __entry entry
@@ -657,10 +535,17 @@ static void ftrace_profile_disable_##name(struct ftrace_event_call *unused)\
 #define __assign_str(dst, src) \
 	strcpy(__get_str(dst), src);
 
+#undef TP_fast_assign
+#define TP_fast_assign(args...) args
+
+#undef TP_perf_assign
+#define TP_perf_assign(args...)
+
 #undef DECLARE_EVENT_CLASS
 #define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print) \
 	\
-static void ftrace_raw_event_id_##call(struct ftrace_event_call *event_call, \
+static notrace void \
+ftrace_raw_event_id_##call(struct ftrace_event_call *event_call, \
 				       proto) \
 { \
 	struct ftrace_data_offsets_##call __maybe_unused __data_offsets;\
@@ -697,17 +582,19 @@ static void ftrace_raw_event_id_##call(struct ftrace_event_call *event_call, \
 #undef DEFINE_EVENT
 #define DEFINE_EVENT(template, call, proto, args) \
 	\
-static void ftrace_raw_event_##call(proto) \
+static notrace void ftrace_raw_event_##call(proto) \
 { \
 	ftrace_raw_event_id_##template(&event_##call, args); \
 } \
 	\
-static int ftrace_raw_reg_event_##call(struct ftrace_event_call *unused)\
+static notrace int \
+ftrace_raw_reg_event_##call(struct ftrace_event_call *unused) \
 { \
 	return register_trace_##call(ftrace_raw_event_##call); \
 } \
 	\
-static void ftrace_raw_unreg_event_##call(struct ftrace_event_call *unused)\
+static notrace void \
+ftrace_raw_unreg_event_##call(struct ftrace_event_call *unused) \
 { \
 	unregister_trace_##call(ftrace_raw_event_##call); \
 } \
@@ -722,8 +609,20 @@ static struct trace_event ftrace_event_type_##call = { \
 
 #include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
 
+#undef __entry
+#define __entry REC
+
+#undef __print_flags
+#undef __print_symbolic
+#undef __get_dynamic_array
+#undef __get_str
+
+#undef TP_printk
+#define TP_printk(fmt, args...) "\"" fmt "\", " __stringify(args)
+
 #undef DECLARE_EVENT_CLASS
-#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print)
+#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print) \
+static const char print_fmt_##call[] = print;
 
 #undef DEFINE_EVENT
 #define DEFINE_EVENT(template, call, proto, args) \
@@ -737,14 +636,16 @@ __attribute__((section("_ftrace_events"))) event_##call = { \
 	.raw_init		= trace_event_raw_init, \
 	.regfunc		= ftrace_raw_reg_event_##call, \
 	.unregfunc		= ftrace_raw_unreg_event_##call, \
-	.show_format		= ftrace_format_##template, \
+	.print_fmt		= print_fmt_##template, \
 	.define_fields		= ftrace_define_fields_##template, \
-	_TRACE_PROFILE_INIT(call) \
+	_TRACE_PERF_INIT(call) \
 }
 
 #undef DEFINE_EVENT_PRINT
 #define DEFINE_EVENT_PRINT(template, call, proto, args, print) \
 	\
+static const char print_fmt_##call[] = print; \
+	\
 static struct ftrace_event_call __used \
 __attribute__((__aligned__(4))) \
 __attribute__((section("_ftrace_events"))) event_##call = { \
@@ -754,20 +655,20 @@ __attribute__((section("_ftrace_events"))) event_##call = { \
 	.raw_init		= trace_event_raw_init, \
 	.regfunc		= ftrace_raw_reg_event_##call, \
 	.unregfunc		= ftrace_raw_unreg_event_##call, \
-	.show_format		= ftrace_format_##call, \
+	.print_fmt		= print_fmt_##call, \
 	.define_fields		= ftrace_define_fields_##template, \
-	_TRACE_PROFILE_INIT(call) \
+	_TRACE_PERF_INIT(call) \
 }
 
 #include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
 
 /*
- * Define the insertion callback to profile events
+ * Define the insertion callback to perf events
  *
  * The job is very similar to ftrace_raw_event_<call> except that we don't
  * insert in the ring buffer but in a perf counter.
 *
- * static void ftrace_profile_<call>(proto)
+ * static void ftrace_perf_<call>(proto)
 * {
 *	struct ftrace_data_offsets_<call> __maybe_unused __data_offsets;
 *	struct ftrace_event_call *event_call = &event_<call>;
@@ -798,9 +699,9 @@ __attribute__((section("_ftrace_events"))) event_##call = { \
  *	__cpu = smp_processor_id();
  *
  *	if (in_nmi())
- *		trace_buf = rcu_dereference(perf_trace_buf_nmi);
+ *		trace_buf = rcu_dereference_sched(perf_trace_buf_nmi);
  *	else
- *		trace_buf = rcu_dereference(perf_trace_buf);
+ *		trace_buf = rcu_dereference_sched(perf_trace_buf);
  *
  *	if (!trace_buf)
  *		goto end;
@@ -835,7 +736,17 @@ __attribute__((section("_ftrace_events"))) event_##call = { \
  * }
  */
 
-#ifdef CONFIG_EVENT_PROFILE
+#ifdef CONFIG_PERF_EVENTS
+
+#undef __entry
+#define __entry entry
+
+#undef __get_dynamic_array
+#define __get_dynamic_array(field) \
+		((void *)__entry + (__entry->__data_loc_##field & 0xffff))
+
+#undef __get_str
+#define __get_str(field) (char *)__get_dynamic_array(field)
 
 #undef __perf_addr
 #define __perf_addr(a) __addr = (a)
@@ -845,81 +756,49 @@ __attribute__((section("_ftrace_events"))) event_##call = { \
 
 #undef DECLARE_EVENT_CLASS
 #define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print) \
-static void \
-ftrace_profile_templ_##call(struct ftrace_event_call *event_call, \
+static notrace void \
+perf_trace_templ_##call(struct ftrace_event_call *event_call, \
 			    proto) \
 { \
 	struct ftrace_data_offsets_##call __maybe_unused __data_offsets;\
-	extern int perf_swevent_get_recursion_context(void); \
-	extern void perf_swevent_put_recursion_context(int rctx); \
-	extern void perf_tp_event(int, u64, u64, void *, int); \
 	struct ftrace_raw_##call *entry; \
 	u64 __addr = 0, __count = 1; \
 	unsigned long irq_flags; \
-	struct trace_entry *ent; \
+	struct pt_regs *__regs; \
 	int __entry_size; \
 	int __data_size; \
-	char *trace_buf; \
-	char *raw_data; \
-	int __cpu; \
 	int rctx; \
-	int pc; \
-	\
-	pc = preempt_count(); \
 	\
 	__data_size = ftrace_get_offsets_##call(&__data_offsets, args); \
 	__entry_size = ALIGN(__data_size + sizeof(*entry) + sizeof(u32),\
 			     sizeof(u64)); \
 	__entry_size -= sizeof(u32); \
 	\
-	if (WARN_ONCE(__entry_size > FTRACE_MAX_PROFILE_SIZE, \
+	if (WARN_ONCE(__entry_size > PERF_MAX_TRACE_SIZE, \
 		      "profile buffer not large enough")) \
 		return; \
-	\
-	local_irq_save(irq_flags); \
-	\
-	rctx = perf_swevent_get_recursion_context(); \
-	if (rctx < 0) \
-		goto end_recursion; \
-	\
-	__cpu = smp_processor_id(); \
-	\
-	if (in_nmi()) \
-		trace_buf = rcu_dereference(perf_trace_buf_nmi); \
-	else \
-		trace_buf = rcu_dereference(perf_trace_buf); \
-	\
-	if (!trace_buf) \
-		goto end; \
-	\
-	raw_data = per_cpu_ptr(trace_buf, __cpu); \
-	\
-	*(u64 *)(&raw_data[__entry_size - sizeof(u64)]) = 0ULL; \
-	entry = (struct ftrace_raw_##call *)raw_data; \
-	ent = &entry->ent; \
-	tracing_generic_entry_update(ent, irq_flags, pc); \
-	ent->type = event_call->id; \
-	\
+	entry = (struct ftrace_raw_##call *)perf_trace_buf_prepare( \
+		__entry_size, event_call->id, &rctx, &irq_flags); \
+	if (!entry) \
+		return; \
 	tstruct \
 	\
 	{ assign; } \
 	\
-	perf_tp_event(event_call->id, __addr, __count, entry, \
-		       __entry_size); \
+	__regs = &__get_cpu_var(perf_trace_regs); \
+	perf_fetch_caller_regs(__regs, 2); \
 	\
-end: \
-	perf_swevent_put_recursion_context(rctx); \
-end_recursion: \
-	local_irq_restore(irq_flags); \
+	perf_trace_buf_submit(entry, __entry_size, rctx, __addr, \
+		       __count, irq_flags, __regs); \
 }
 
 #undef DEFINE_EVENT
 #define DEFINE_EVENT(template, call, proto, args) \
-static void ftrace_profile_##call(proto) \
+static notrace void perf_trace_##call(proto) \
 { \
 	struct ftrace_event_call *event_call = &event_##call; \
 	\
-	ftrace_profile_templ_##template(event_call, args); \
+	perf_trace_templ_##template(event_call, args); \
 }
 
 #undef DEFINE_EVENT_PRINT
@@ -927,7 +806,7 @@ static void ftrace_profile_##call(proto) \
 	DEFINE_EVENT(template, name, PARAMS(proto), PARAMS(args))
 
 #include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
-#endif /* CONFIG_EVENT_PROFILE */
+#endif /* CONFIG_PERF_EVENTS */
 
 #undef _TRACE_PROFILE_INIT
 
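
For orientation only, and not part of the patch above: a minimal sketch of the kind of tracepoint header that include/trace/ftrace.h expands. The header name, event name, and fields below are hypothetical. With this patch, stage 4 emits a static print_fmt_<event>[] string instead of a ftrace_format_<event>() show_format callback, and the perf glue is generated as perf_trace_<event>() plus perf_trace_enable_<event>()/perf_trace_disable_<event>() (wired up through _TRACE_PERF_INIT) in place of the old ftrace_profile_*() names, with the generated helpers marked notrace so they are not themselves traced.

/* Hypothetical include/trace/events/sample.h -- illustration only. */
#undef TRACE_SYSTEM
#define TRACE_SYSTEM sample

#if !defined(_TRACE_SAMPLE_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_SAMPLE_H

#include <linux/tracepoint.h>

TRACE_EVENT(sample_event,

	TP_PROTO(int value, const char *name),

	TP_ARGS(value, name),

	/* TP_STRUCT__entry drives ftrace_define_fields_sample_event() */
	TP_STRUCT__entry(
		__field(int, value)
		__string(name, name)
	),

	TP_fast_assign(
		__entry->value = value;
		__assign_str(name, name);
	),

	/* TP_printk now lands in the static print_fmt_sample_event[] */
	TP_printk("value=%d name=%s", __entry->value, __get_str(name))
);

#endif /* _TRACE_SAMPLE_H */

/* This part must be outside protection */
#include <trace/define_trace.h>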