Diffstat (limited to 'include/trace/ftrace.h')
-rw-r--r--  include/trace/ftrace.h  48
1 file changed, 6 insertions(+), 42 deletions(-)
diff --git a/include/trace/ftrace.h b/include/trace/ftrace.h
index 4a46a60c2077..f2c09e4d656c 100644
--- a/include/trace/ftrace.h
+++ b/include/trace/ftrace.h
@@ -850,22 +850,12 @@ ftrace_profile_templ_##call(struct ftrace_event_call *event_call, \
 				    proto) \
 { \
 	struct ftrace_data_offsets_##call __maybe_unused __data_offsets; \
-	extern int perf_swevent_get_recursion_context(void); \
-	extern void perf_swevent_put_recursion_context(int rctx); \
-	extern void perf_tp_event(int, u64, u64, void *, int); \
 	struct ftrace_raw_##call *entry; \
 	u64 __addr = 0, __count = 1; \
 	unsigned long irq_flags; \
-	struct trace_entry *ent; \
 	int __entry_size; \
 	int __data_size; \
-	char *trace_buf; \
-	char *raw_data; \
-	int __cpu; \
 	int rctx; \
-	int pc; \
-	\
-	pc = preempt_count(); \
 	\
 	__data_size = ftrace_get_offsets_##call(&__data_offsets, args); \
 	__entry_size = ALIGN(__data_size + sizeof(*entry) + sizeof(u32), \
@@ -875,42 +865,16 @@ ftrace_profile_templ_##call(struct ftrace_event_call *event_call, \
 	if (WARN_ONCE(__entry_size > FTRACE_MAX_PROFILE_SIZE, \
 		      "profile buffer not large enough")) \
 		return; \
-	\
-	local_irq_save(irq_flags); \
-	\
-	rctx = perf_swevent_get_recursion_context(); \
-	if (rctx < 0) \
-		goto end_recursion; \
-	\
-	__cpu = smp_processor_id(); \
-	\
-	if (in_nmi()) \
-		trace_buf = rcu_dereference(perf_trace_buf_nmi); \
-	else \
-		trace_buf = rcu_dereference(perf_trace_buf); \
-	\
-	if (!trace_buf) \
-		goto end; \
-	\
-	raw_data = per_cpu_ptr(trace_buf, __cpu); \
-	\
-	*(u64 *)(&raw_data[__entry_size - sizeof(u64)]) = 0ULL; \
-	entry = (struct ftrace_raw_##call *)raw_data; \
-	ent = &entry->ent; \
-	tracing_generic_entry_update(ent, irq_flags, pc); \
-	ent->type = event_call->id; \
-	\
+	entry = (struct ftrace_raw_##call *)ftrace_perf_buf_prepare( \
+		__entry_size, event_call->id, &rctx, &irq_flags); \
+	if (!entry) \
+		return; \
 	tstruct \
 	\
 	{ assign; } \
 	\
-	perf_tp_event(event_call->id, __addr, __count, entry, \
-		      __entry_size); \
-	\
-end: \
-	perf_swevent_put_recursion_context(rctx); \
-end_recursion: \
-	local_irq_restore(irq_flags); \
+	ftrace_perf_buf_submit(entry, __entry_size, rctx, __addr, \
+			       __count, irq_flags); \
 }
 
 #undef DEFINE_EVENT
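
Note: the diffstat above is limited to 'include/trace/ftrace.h', so this page shows only the caller side of the change; the definitions of ftrace_perf_buf_prepare() and ftrace_perf_buf_submit() live elsewhere in the commit and are not part of this diff. As a reading aid, here is a minimal sketch of what the two helpers presumably encapsulate, reconstructed purely from the code deleted above. The signatures are inferred from the two call sites in the hunks; this is not the commit's actual implementation.

/*
 * Sketch only: reassembled from the lines removed in the diff above.
 * The real helpers are defined outside this file and may differ.
 */
void *ftrace_perf_buf_prepare(int size, unsigned short type,
			      int *rctxp, unsigned long *irq_flags)
{
	struct trace_entry *entry;
	char *trace_buf, *raw_data;
	int pc, cpu;

	pc = preempt_count();

	/* Interrupts stay off until ftrace_perf_buf_submit(). */
	local_irq_save(*irq_flags);

	*rctxp = perf_swevent_get_recursion_context();
	if (*rctxp < 0)
		goto err_recursion;

	cpu = smp_processor_id();

	/* NMI context uses its own per-cpu buffer. */
	if (in_nmi())
		trace_buf = rcu_dereference(perf_trace_buf_nmi);
	else
		trace_buf = rcu_dereference(perf_trace_buf);
	if (!trace_buf)
		goto err;

	raw_data = per_cpu_ptr(trace_buf, cpu);

	/* Zero the tail u64 so the ALIGN() padding cannot leak stack data. */
	*(u64 *)(&raw_data[size - sizeof(u64)]) = 0ULL;

	entry = (struct trace_entry *)raw_data;
	tracing_generic_entry_update(entry, *irq_flags, pc);
	entry->type = type;

	return raw_data;

err:
	perf_swevent_put_recursion_context(*rctxp);
err_recursion:
	local_irq_restore(*irq_flags);
	return NULL;
}

/* Mirrors the removed tail: submit the event, drop the recursion
 * context, restore interrupts. */
static inline void
ftrace_perf_buf_submit(void *raw_data, int size, int rctx, u64 addr,
		       u64 count, unsigned long irq_flags)
{
	struct trace_entry *entry = raw_data;

	perf_tp_event(entry->type, addr, count, raw_data, size);
	perf_swevent_put_recursion_context(rctx);
	local_irq_restore(irq_flags);
}

The payoff of the factoring is visible in the diffstat: this macro expands once per defined tracepoint, so the prepare/submit sequence used to be duplicated into every generated ftrace_profile_templ_##call() body. Hoisting it into shared helpers removes a net 36 lines from each expansion.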