Diffstat (limited to 'include/trace/ftrace.h')
-rw-r--r--	include/trace/ftrace.h	218
1 file changed, 132 insertions(+), 86 deletions(-)
diff --git a/include/trace/ftrace.h b/include/trace/ftrace.h
index f64fbaae781a..cc0d9667e182 100644
--- a/include/trace/ftrace.h
+++ b/include/trace/ftrace.h
@@ -21,11 +21,14 @@
 #undef __field
 #define __field(type, item)		type	item;
 
+#undef __field_ext
+#define __field_ext(type, item, filter_type)	type	item;
+
 #undef __array
 #define __array(type, item, len)	type	item[len];
 
 #undef __dynamic_array
-#define __dynamic_array(type, item, len) unsigned short __data_loc_##item;
+#define __dynamic_array(type, item, len) u32 __data_loc_##item;
 
 #undef __string
 #define __string(item, src) __dynamic_array(char, item, -1)
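For orientation, here is roughly what stage 1 produces after this change for a hypothetical event (the event and field names are invented for illustration, not taken from this patch): each __dynamic_array now contributes a packed u32 location word instead of an unsigned short offset.

	/* Hypothetical TP_STRUCT__entry: */
	TP_STRUCT__entry(
		__field(	int,	fd	)
		__string(	name,	filename)
	)

	/* ...would expand, roughly, to: */
	struct ftrace_raw_sample_event {
		struct trace_entry	ent;
		int			fd;
		u32			__data_loc_name; /* offset | (length << 16) */
		char			__data[0];	 /* dynamic payload follows */
	};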
@@ -42,6 +45,16 @@
 	};							\
 	static struct ftrace_event_call event_##name
 
+#undef __cpparg
+#define __cpparg(arg...) arg
+
+/* Callbacks are meaningless to ftrace. */
+#undef TRACE_EVENT_FN
+#define TRACE_EVENT_FN(name, proto, args, tstruct,		\
+		assign, print, reg, unreg)			\
+	TRACE_EVENT(name, __cpparg(proto), __cpparg(args),	\
+		__cpparg(tstruct), __cpparg(assign), __cpparg(print)) \
+
 #include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
 
 
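A note on __cpparg(): proto, args and friends contain commas, so passing them bare into TRACE_EVENT would split each one into several macro arguments. The parentheses of __cpparg(...) protect the embedded commas while TRACE_EVENT's argument list is delimited, and the variadic macro then expands back to the bare token list. A minimal sketch (the proto is illustrative):

	#define __cpparg(arg...) arg

	/* proto may be: struct inode *inode, struct file *file
	 * Bare, the preprocessor would split it at the comma into two
	 * arguments; as __cpparg(proto) it arrives as one and expands
	 * in place, intact. */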
@@ -51,23 +64,27 @@
  * Include the following:
  *
  * struct ftrace_data_offsets_<call> {
- *	int				<item1>;
- *	int				<item2>;
+ *	u32				<item1>;
+ *	u32				<item2>;
  *	[...]
  * };
  *
- * The __dynamic_array() macro will create each int <item>, this is
+ * The __dynamic_array() macro will create each u32 <item>, this is
  * to keep the offset of each array from the beginning of the event.
+ * The size of an array is also encoded, in the higher 16 bits of <item>.
  */
 
 #undef __field
-#define __field(type, item);
+#define __field(type, item)
+
+#undef __field_ext
+#define __field_ext(type, item, filter_type)
 
 #undef __array
 #define __array(type, item, len)
 
 #undef __dynamic_array
-#define __dynamic_array(type, item, len)	int item;
+#define __dynamic_array(type, item, len)	u32 item;
 
 #undef __string
 #define __string(item, src) __dynamic_array(char, item, -1)
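Continuing the hypothetical event from above, this stage would now generate something like:

	struct ftrace_data_offsets_sample_event {
		u32 name;	/* low 16 bits: offset of the string from the
				 * start of the entry; high 16 bits: length */
	};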
@@ -109,6 +126,9 @@
 	if (!ret)							\
 		return 0;
 
+#undef __field_ext
+#define __field_ext(type, item, filter_type)	__field(type, item)
+
 #undef __array
 #define __array(type, item, len)					\
 	ret = trace_seq_printf(s, "\tfield:" #type " " #item "[" #len "];\t" \
@@ -120,7 +140,7 @@
 
 #undef __dynamic_array
 #define __dynamic_array(type, item, len)				       \
-	ret = trace_seq_printf(s, "\tfield:__data_loc " #item ";\t"	       \
+	ret = trace_seq_printf(s, "\tfield:__data_loc " #type "[] " #item ";\t"\
 			       "offset:%u;\tsize:%u;\n",		       \
 			       (unsigned int)offsetof(typeof(field),	       \
 					__data_loc_##item),		       \
@@ -150,7 +170,8 @@
 #undef TRACE_EVENT
 #define TRACE_EVENT(call, proto, args, tstruct, func, print)		\
 static int								\
-ftrace_format_##call(struct trace_seq *s)				\
+ftrace_format_##call(struct ftrace_event_call *unused,			\
+		     struct trace_seq *s)				\
 {									\
 	struct ftrace_raw_##call field __attribute__((unused));	\
 	int ret = 0;							\
@@ -210,7 +231,7 @@ ftrace_format_##call(struct trace_seq *s) \
 
 #undef __get_dynamic_array
 #define __get_dynamic_array(field)					\
-		((void *)__entry + __entry->__data_loc_##field)
+		((void *)__entry + (__entry->__data_loc_##field & 0xffff))
 
 #undef __get_str
 #define __get_str(field) (char *)__get_dynamic_array(field)
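A sketch of the decode side this macro relies on, given the 16/16 packing introduced above (__get_dynamic_array is the real macro; the named helpers below are invented for illustration):

	/* loc = __entry->__data_loc_<field> */
	static inline void *sample_dyn_array_ptr(void *entry, u32 loc)
	{
		return entry + (loc & 0xffff);	/* offset from entry start */
	}

	static inline unsigned int sample_dyn_array_len(u32 loc)
	{
		return loc >> 16;		/* length in bytes */
	}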
@@ -218,9 +239,9 @@ ftrace_format_##call(struct trace_seq *s) \
 #undef __print_flags
 #define __print_flags(flag, delim, flag_array...)			\
 	({								\
-		static const struct trace_print_flags flags[] =		\
+		static const struct trace_print_flags __flags[] =	\
 			{ flag_array, { -1, NULL }};			\
-		ftrace_print_flags_seq(p, delim, flag, flags);		\
+		ftrace_print_flags_seq(p, delim, flag, __flags);	\
 	})
 
 #undef __print_symbolic
226#undef __print_symbolic 247#undef __print_symbolic
@@ -233,7 +254,7 @@ ftrace_format_##call(struct trace_seq *s) \
233 254
234#undef TRACE_EVENT 255#undef TRACE_EVENT
235#define TRACE_EVENT(call, proto, args, tstruct, assign, print) \ 256#define TRACE_EVENT(call, proto, args, tstruct, assign, print) \
236enum print_line_t \ 257static enum print_line_t \
237ftrace_raw_output_##call(struct trace_iterator *iter, int flags) \ 258ftrace_raw_output_##call(struct trace_iterator *iter, int flags) \
238{ \ 259{ \
239 struct trace_seq *s = &iter->seq; \ 260 struct trace_seq *s = &iter->seq; \
@@ -263,46 +284,48 @@ ftrace_raw_output_##call(struct trace_iterator *iter, int flags) \
 
 #include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
 
-#undef __field
-#define __field(type, item)						\
+#undef __field_ext
+#define __field_ext(type, item, filter_type)				\
 	ret = trace_define_field(event_call, #type, #item,		\
 				 offsetof(typeof(field), item),		\
-				 sizeof(field.item), is_signed_type(type));	\
+				 sizeof(field.item),			\
+				 is_signed_type(type), filter_type);	\
 	if (ret)							\
 		return ret;
 
+#undef __field
+#define __field(type, item)	__field_ext(type, item, FILTER_OTHER)
+
 #undef __array
 #define __array(type, item, len)					\
 	BUILD_BUG_ON(len > MAX_FILTER_STR_VAL);				\
 	ret = trace_define_field(event_call, #type "[" #len "]", #item,	\
 				 offsetof(typeof(field), item),		\
-				 sizeof(field.item), 0);		\
+				 sizeof(field.item), 0, FILTER_OTHER);	\
 	if (ret)							\
 		return ret;
 
 #undef __dynamic_array
 #define __dynamic_array(type, item, len)				       \
-	ret = trace_define_field(event_call, "__data_loc" "[" #type "]", #item,\
+	ret = trace_define_field(event_call, "__data_loc " #type "[]", #item, \
 				 offsetof(typeof(field), __data_loc_##item),   \
-				 sizeof(field.__data_loc_##item), 0);
+				 sizeof(field.__data_loc_##item), 0,	       \
+				 FILTER_OTHER);
 
 #undef __string
 #define __string(item, src) __dynamic_array(char, item, -1)
 
 #undef TRACE_EVENT
 #define TRACE_EVENT(call, proto, args, tstruct, func, print)		\
-int									\
-ftrace_define_fields_##call(void)					\
+static int								\
+ftrace_define_fields_##call(struct ftrace_event_call *event_call)	\
 {									\
 	struct ftrace_raw_##call field;					\
-	struct ftrace_event_call *event_call = &event_##call;		\
 	int ret;							\
 									\
-	__common_field(int, type, 1);					\
-	__common_field(unsigned char, flags, 0);			\
-	__common_field(unsigned char, preempt_count, 0);		\
-	__common_field(int, pid, 1);					\
-	__common_field(int, tgid, 1);					\
+	ret = trace_define_common_fields(event_call);			\
+	if (ret)							\
+		return ret;						\
 									\
 	tstruct;							\
 									\
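With the hypothetical event from stage 1, the generated function would now read roughly as follows (a sketch under the invented names; the event_call parameter, trace_define_common_fields() and FILTER_OTHER are what this hunk introduces):

	static int
	ftrace_define_fields_sample_event(struct ftrace_event_call *event_call)
	{
		struct ftrace_raw_sample_event field;
		int ret;

		ret = trace_define_common_fields(event_call);
		if (ret)
			return ret;

		/* __field(int, fd) */
		ret = trace_define_field(event_call, "int", "fd",
					 offsetof(typeof(field), fd),
					 sizeof(field.fd),
					 is_signed_type(int), FILTER_OTHER);
		if (ret)
			return ret;

		/* __string(name, filename) */
		ret = trace_define_field(event_call, "__data_loc char[]", "name",
					 offsetof(typeof(field), __data_loc_name),
					 sizeof(field.__data_loc_name), 0,
					 FILTER_OTHER);
		return ret;
	}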
@@ -321,6 +344,9 @@ ftrace_define_fields_##call(void) \
 #undef __field
 #define __field(type, item)
 
+#undef __field_ext
+#define __field_ext(type, item, filter_type)
+
 #undef __array
 #define __array(type, item, len)
 
@@ -328,6 +354,7 @@ ftrace_define_fields_##call(void) \
 #define __dynamic_array(type, item, len)				\
 	__data_offsets->item = __data_size +				\
 			       offsetof(typeof(*entry), __data);	\
+	__data_offsets->item |= (len * sizeof(type)) << 16;		\
 	__data_size += (len) * sizeof(type);
 
 #undef __string
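Worked through for the hypothetical string field: assuming __data starts at offset 16 within the entry, and the string is "init" (4 characters plus NUL, so this stage's __string passes len = 5):

	__data_offsets->name  = 0 + 16;			  /* first dynamic item */
	__data_offsets->name |= (5 * sizeof(char)) << 16; /* word = 0x00050010 */
	__data_size += 5;			/* next item would start at 16+5 */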
@@ -351,24 +378,18 @@ static inline int ftrace_get_offsets_##call( \
 #ifdef CONFIG_EVENT_PROFILE
 
 /*
- * Generate the functions needed for tracepoint perf_counter support.
+ * Generate the functions needed for tracepoint perf_event support.
  *
  * NOTE: The insertion profile callback (ftrace_profile_<call>) is defined later
  *
- * static int ftrace_profile_enable_<call>(struct ftrace_event_call *event_call)
+ * static int ftrace_profile_enable_<call>(void)
  * {
- * 	int ret = 0;
- *
- * 	if (!atomic_inc_return(&event_call->profile_count))
- * 		ret = register_trace_<call>(ftrace_profile_<call>);
- *
- * 	return ret;
+ * 	return register_trace_<call>(ftrace_profile_<call>);
  * }
 *
- * static void ftrace_profile_disable_<call>(struct ftrace_event_call *event_call)
+ * static void ftrace_profile_disable_<call>(void)
 * {
- * 	if (atomic_add_negative(-1, &event->call->profile_count))
- * 		unregister_trace_<call>(ftrace_profile_<call>);
+ * 	unregister_trace_<call>(ftrace_profile_<call>);
 * }
 *
 */
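The per-event profile_count bookkeeping deleted here has to live somewhere; presumably the core now does it once for all events rather than in each generated callback. A hypothetical sketch of such a factored-out helper (names and placement invented, not part of this patch):

	/* Core-side, once instead of per-event: */
	static int profile_enable_event(struct ftrace_event_call *event)
	{
		if (atomic_inc_return(&event->profile_count) > 1)
			return 0;		/* already registered */
		return event->profile_enable();	/* first user hooks the tracepoint */
	}

	static void profile_disable_event(struct ftrace_event_call *event)
	{
		if (atomic_dec_return(&event->profile_count) > 0)
			return;			/* other users remain */
		event->profile_disable();	/* last user unhooks */
	}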
@@ -378,20 +399,14 @@ static inline int ftrace_get_offsets_##call( \
 									\
 static void ftrace_profile_##call(proto);				\
 									\
-static int ftrace_profile_enable_##call(struct ftrace_event_call *event_call) \
+static int ftrace_profile_enable_##call(void)				\
 {									\
-	int ret = 0;							\
-									\
-	if (!atomic_inc_return(&event_call->profile_count))		\
-		ret = register_trace_##call(ftrace_profile_##call);	\
-									\
-	return ret;							\
+	return register_trace_##call(ftrace_profile_##call);		\
 }									\
 									\
-static void ftrace_profile_disable_##call(struct ftrace_event_call *event_call)\
+static void ftrace_profile_disable_##call(void)			\
 {									\
-	if (atomic_add_negative(-1, &event_call->profile_count))	\
-		unregister_trace_##call(ftrace_profile_##call);		\
+	unregister_trace_##call(ftrace_profile_##call);			\
 }
 
 #include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
@@ -433,13 +448,15 @@ static void ftrace_profile_disable_##call(struct ftrace_event_call *event_call)\
 * {
 *	struct ring_buffer_event *event;
 *	struct ftrace_raw_<call> *entry; <-- defined in stage 1
+ *	struct ring_buffer *buffer;
 *	unsigned long irq_flags;
 *	int pc;
 *
 *	local_save_flags(irq_flags);
 *	pc = preempt_count();
 *
- *	event = trace_current_buffer_lock_reserve(event_<call>.id,
+ *	event = trace_current_buffer_lock_reserve(&buffer,
+ *				  event_<call>.id,
 *				  sizeof(struct ftrace_raw_<call>),
 *				  irq_flags, pc);
 *	if (!event)
@@ -449,7 +466,7 @@ static void ftrace_profile_disable_##call(struct ftrace_event_call *event_call)\
 *	<assign>;  <-- Here we assign the entries by the __field and
 *			__array macros.
 *
- *	trace_current_buffer_unlock_commit(event, irq_flags, pc);
+ *	trace_current_buffer_unlock_commit(buffer, event, irq_flags, pc);
 * }
 *
 * static int ftrace_raw_reg_event_<call>(void)
@@ -541,6 +558,7 @@ static void ftrace_raw_event_##call(proto) \
 	struct ftrace_event_call *event_call = &event_##call;		\
 	struct ring_buffer_event *event;				\
 	struct ftrace_raw_##call *entry;				\
+	struct ring_buffer *buffer;					\
 	unsigned long irq_flags;					\
 	int __data_size;						\
 	int pc;								\
@@ -550,7 +568,8 @@ static void ftrace_raw_event_##call(proto) \
 									\
 	__data_size = ftrace_get_offsets_##call(&__data_offsets, args); \
 									\
-	event = trace_current_buffer_lock_reserve(event_##call.id,	\
+	event = trace_current_buffer_lock_reserve(&buffer,		\
+				 event_##call.id,			\
 				 sizeof(*entry) + __data_size,		\
 				 irq_flags, pc);			\
 	if (!event)							\
@@ -562,11 +581,12 @@ static void ftrace_raw_event_##call(proto) \
 									\
 	{ assign; }							\
 									\
-	if (!filter_current_check_discard(event_call, entry, event))	\
-		trace_nowake_buffer_unlock_commit(event, irq_flags, pc); \
+	if (!filter_current_check_discard(buffer, event_call, entry, event)) \
+		trace_nowake_buffer_unlock_commit(buffer,		\
+						  event, irq_flags, pc); \
 }									\
 									\
-static int ftrace_raw_reg_event_##call(void)				\
+static int ftrace_raw_reg_event_##call(void *ptr)			\
 {									\
 	int ret;							\
 									\
@@ -577,7 +597,7 @@ static int ftrace_raw_reg_event_##call(void) \
 	return ret;							\
 }									\
 									\
-static void ftrace_raw_unreg_event_##call(void)			\
+static void ftrace_raw_unreg_event_##call(void *ptr)			\
 {									\
 	unregister_trace_##call(ftrace_raw_event_##call);		\
 }									\
@@ -595,7 +615,6 @@ static int ftrace_raw_init_event_##call(void) \
 		return -ENODEV;						\
 	event_##call.id = id;						\
 	INIT_LIST_HEAD(&event_##call.fields);				\
-	init_preds(&event_##call);					\
 	return 0;							\
 }									\
 									\
@@ -625,15 +644,16 @@ __attribute__((section("_ftrace_events"))) event_##call = { \
 * {
 *	struct ftrace_data_offsets_<call> __maybe_unused __data_offsets;
 *	struct ftrace_event_call *event_call = &event_<call>;
- *	extern void perf_tpcounter_event(int, u64, u64, void *, int);
+ *	extern void perf_tp_event(int, u64, u64, void *, int);
 *	struct ftrace_raw_##call *entry;
 *	u64 __addr = 0, __count = 1;
 *	unsigned long irq_flags;
+ *	struct trace_entry *ent;
 *	int __entry_size;
 *	int __data_size;
+ *	int __cpu
 *	int pc;
 *
- *	local_save_flags(irq_flags);
 *	pc = preempt_count();
 *
 *	__data_size = ftrace_get_offsets_<call>(&__data_offsets, args);
@@ -644,25 +664,34 @@ __attribute__((section("_ftrace_events"))) event_##call = { \
 *				sizeof(u64));
 *	__entry_size -= sizeof(u32);
 *
- *	do {
- *		char raw_data[__entry_size]; <- allocate our sample in the stack
- *		struct trace_entry *ent;
+ *	// Protect the non nmi buffer
+ *	// This also protects the rcu read side
+ *	local_irq_save(irq_flags);
+ *	__cpu = smp_processor_id();
 *
- *		zero dead bytes from alignment to avoid stack leak to userspace:
+ *	if (in_nmi())
+ *		raw_data = rcu_dereference(trace_profile_buf_nmi);
+ *	else
+ *		raw_data = rcu_dereference(trace_profile_buf);
 *
- *		*(u64 *)(&raw_data[__entry_size - sizeof(u64)]) = 0ULL;
- *		entry = (struct ftrace_raw_<call> *)raw_data;
- *		ent = &entry->ent;
- *		tracing_generic_entry_update(ent, irq_flags, pc);
- *		ent->type = event_call->id;
+ *	if (!raw_data)
+ *		goto end;
 *
- *		<tstruct> <- do some jobs with dynamic arrays
+ *	raw_data = per_cpu_ptr(raw_data, __cpu);
 *
- *		<assign>  <- affect our values
+ *	//zero dead bytes from alignment to avoid stack leak to userspace:
+ *	*(u64 *)(&raw_data[__entry_size - sizeof(u64)]) = 0ULL;
+ *	entry = (struct ftrace_raw_<call> *)raw_data;
+ *	ent = &entry->ent;
+ *	tracing_generic_entry_update(ent, irq_flags, pc);
+ *	ent->type = event_call->id;
 *
- *	perf_tpcounter_event(event_call->id, __addr, __count, entry,
- *		     __entry_size);  <- submit them to perf counter
- *	} while (0);
+ *	<tstruct> <- do some jobs with dynamic arrays
+ *
+ *	<assign>  <- affect our values
+ *
+ *	perf_tp_event(event_call->id, __addr, __count, entry,
+ *		     __entry_size);  <- submit them to perf counter
 *
 * }
 */
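Why two buffers: an NMI can fire after ftrace_profile_<call>() has started writing the regular per-cpu buffer on the same CPU, so NMI context gets its own copy. Illustrative interleaving only:

	/*
	 *   normal context                     NMI
	 *   raw_data = per-cpu buffer
	 *   entry half-written ...
	 *                                      same event fires, writes the
	 *                                      same per-cpu buffer
	 *   ... write finishes         <- sample now mixes two events
	 *
	 * Hence: in_nmi() ? trace_profile_buf_nmi : trace_profile_buf, and
	 * local_irq_save() keeps ordinary interrupts from doing the same.
	 */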
@@ -681,15 +710,17 @@ static void ftrace_profile_##call(proto) \
 {									\
 	struct ftrace_data_offsets_##call __maybe_unused __data_offsets;\
 	struct ftrace_event_call *event_call = &event_##call;		\
-	extern void perf_tpcounter_event(int, u64, u64, void *, int);	\
+	extern void perf_tp_event(int, u64, u64, void *, int);		\
 	struct ftrace_raw_##call *entry;				\
 	u64 __addr = 0, __count = 1;					\
 	unsigned long irq_flags;					\
+	struct trace_entry *ent;					\
 	int __entry_size;						\
 	int __data_size;						\
+	char *raw_data;							\
+	int __cpu;							\
 	int pc;								\
 									\
-	local_save_flags(irq_flags);					\
 	pc = preempt_count();						\
 									\
 	__data_size = ftrace_get_offsets_##call(&__data_offsets, args); \
@@ -697,23 +728,38 @@ static void ftrace_profile_##call(proto) \
 			     sizeof(u64));				\
 	__entry_size -= sizeof(u32);					\
 									\
-	do {								\
-		char raw_data[__entry_size];				\
-		struct trace_entry *ent;				\
+	if (WARN_ONCE(__entry_size > FTRACE_MAX_PROFILE_SIZE,		\
+		      "profile buffer not large enough"))		\
+		return;							\
 									\
-		*(u64 *)(&raw_data[__entry_size - sizeof(u64)]) = 0ULL;	\
-		entry = (struct ftrace_raw_##call *)raw_data;		\
-		ent = &entry->ent;					\
-		tracing_generic_entry_update(ent, irq_flags, pc);	\
-		ent->type = event_call->id;				\
+	local_irq_save(irq_flags);					\
+	__cpu = smp_processor_id();					\
 									\
-		tstruct							\
+	if (in_nmi())							\
+		raw_data = rcu_dereference(trace_profile_buf_nmi);	\
+	else								\
+		raw_data = rcu_dereference(trace_profile_buf);		\
 									\
-		{ assign; }						\
+	if (!raw_data)							\
+		goto end;						\
 									\
-		perf_tpcounter_event(event_call->id, __addr, __count, entry,\
+	raw_data = per_cpu_ptr(raw_data, __cpu);			\
+									\
+	*(u64 *)(&raw_data[__entry_size - sizeof(u64)]) = 0ULL;	\
+	entry = (struct ftrace_raw_##call *)raw_data;			\
+	ent = &entry->ent;						\
+	tracing_generic_entry_update(ent, irq_flags, pc);		\
+	ent->type = event_call->id;					\
+									\
+	tstruct								\
+									\
+	{ assign; }							\
+									\
+	perf_tp_event(event_call->id, __addr, __count, entry,		\
 			     __entry_size);				\
-	} while (0);							\
+									\
+end:									\
+	local_irq_restore(irq_flags);					\
 									\
 }
 
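A worked example of the size computation guarded above (sizes assumed for illustration): with sizeof(*entry) = 20 and __data_size = 5,

	__entry_size = ALIGN(5 + 20 + sizeof(u32), sizeof(u64)); /* ALIGN(29, 8) = 32 */
	__entry_size -= sizeof(u32);				  /* 32 - 4 = 28      */

	/* perf prepends its own u32 size header, so 28 + 4 = 32 keeps the
	 * record u64-aligned; the u64 store into the final 8 bytes zeroes
	 * the alignment padding so no stale buffer bytes leak to userspace. */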