Diffstat (limited to 'include/trace/ftrace.h')
 -rw-r--r--   include/trace/ftrace.h | 299
 1 file changed, 239 insertions, 60 deletions
diff --git a/include/trace/ftrace.h b/include/trace/ftrace.h
index 1867553c61e5..cc0d9667e182 100644
--- a/include/trace/ftrace.h
+++ b/include/trace/ftrace.h
@@ -21,11 +21,14 @@
 #undef __field
 #define __field(type, item)		type	item;
 
+#undef __field_ext
+#define __field_ext(type, item, filter_type)	type	item;
+
 #undef __array
 #define __array(type, item, len)	type	item[len];
 
 #undef __dynamic_array
-#define __dynamic_array(type, item, len) unsigned short __data_loc_##item;
+#define __dynamic_array(type, item, len) u32 __data_loc_##item;
 
 #undef __string
 #define __string(item, src) __dynamic_array(char, item, -1)
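With the stage 1 macros above, a hypothetical event mixing a fixed field with a string would expand to roughly the following entry layout (a sketch; the event and field names are invented, and the common header plus trailing flexible array come from the surrounding TRACE_EVENT expansion):

        struct ftrace_raw_foo_bar {                     /* hypothetical event 'foo_bar' */
                struct trace_entry      ent;            /* common header */
                int                     bar;            /* __field(int, bar) */
                u32                     __data_loc_name;/* __string(name, ...): offset of the
                                                         * string in the low 16 bits, its
                                                         * length in the high 16 bits */
                char                    __data[0];      /* dynamic data lands here */
        };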
@@ -42,6 +45,16 @@
 	}; \
 static struct ftrace_event_call event_##name
 
+#undef __cpparg
+#define __cpparg(arg...) arg
+
+/* Callbacks are meaningless to ftrace. */
+#undef TRACE_EVENT_FN
+#define TRACE_EVENT_FN(name, proto, args, tstruct, \
+		assign, print, reg, unreg) \
+	TRACE_EVENT(name, __cpparg(proto), __cpparg(args), \
+		__cpparg(tstruct), __cpparg(assign), __cpparg(print)) \
+
 #include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
 
 
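__cpparg() is the usual variadic-forwarding trick: an argument that carries commas stays protected by the parentheses of the __cpparg(...) invocation while the outer macro's argument list is collected, and is only flattened when the receiving macro finally expands it. A minimal standalone sketch (every macro name below other than __cpparg is invented):

        #define __cpparg(arg...) arg

        #define DECLARE_HOOK(proto)     void hook(proto);
        #define FORWARD_HOOK(proto...)  DECLARE_HOOK(__cpparg(proto))

        FORWARD_HOOK(int cpu, unsigned long flags)
        /* expands to: void hook(int cpu, unsigned long flags); */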
@@ -51,23 +64,27 @@
 * Include the following:
 *
 * struct ftrace_data_offsets_<call> {
-*	int	<item1>;
-*	int	<item2>;
+*	u32	<item1>;
+*	u32	<item2>;
 *	[...]
 * };
 *
-* The __dynamic_array() macro will create each int <item>, this is
+* The __dynamic_array() macro will create each u32 <item>, this is
 * to keep the offset of each array from the beginning of the event.
+* The size of an array is also encoded, in the higher 16 bits of <item>.
 */
 
 #undef __field
-#define __field(type, item);
+#define __field(type, item)
+
+#undef __field_ext
+#define __field_ext(type, item, filter_type)
 
 #undef __array
 #define __array(type, item, len)
 
 #undef __dynamic_array
-#define __dynamic_array(type, item, len)	int item;
+#define __dynamic_array(type, item, len)	u32 item;
 
 #undef __string
 #define __string(item, src) __dynamic_array(char, item, -1)
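The packing convention, written out: for each dynamic array, the offset of the array from the start of the entry goes in the low 16 bits of the u32 and its byte length in the high 16 bits (the encoding site is the __dynamic_array() variant in ftrace_get_offsets_<call> further down). A sketch with invented helper names; the real code open-codes this in the macros:

        static inline u32 __data_loc_pack(unsigned int offset, unsigned int len)
        {
                return (len << 16) | (offset & 0xffff);
        }

        static inline unsigned int __data_loc_offset(u32 loc)
        {
                return loc & 0xffff;    /* where the array starts */
        }

        static inline unsigned int __data_loc_len(u32 loc)
        {
                return loc >> 16;       /* how many bytes it occupies */
        }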
@@ -109,6 +126,9 @@
 	if (!ret) \
 		return 0;
 
+#undef __field_ext
+#define __field_ext(type, item, filter_type)	__field(type, item)
+
 #undef __array
 #define __array(type, item, len) \
 	ret = trace_seq_printf(s, "\tfield:" #type " " #item "[" #len "];\t" \
@@ -120,7 +140,7 @@
 
 #undef __dynamic_array
 #define __dynamic_array(type, item, len) \
-	ret = trace_seq_printf(s, "\tfield:__data_loc " #item ";\t" \
+	ret = trace_seq_printf(s, "\tfield:__data_loc " #type "[] " #item ";\t"\
 			       "offset:%u;\tsize:%u;\n", \
 			       (unsigned int)offsetof(typeof(field), \
 					__data_loc_##item), \
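With the `#type "[] "` addition, the format file now names the element type of a dynamic array instead of only the field name. For a hypothetical __string(name, ...) field, the emitted line would look like this (the offset value is invented):

        field:__data_loc char[] name;	offset:16;	size:4;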
@@ -144,10 +164,14 @@
 #undef TP_fast_assign
 #define TP_fast_assign(args...) args
 
+#undef TP_perf_assign
+#define TP_perf_assign(args...)
+
 #undef TRACE_EVENT
 #define TRACE_EVENT(call, proto, args, tstruct, func, print) \
 static int \
-ftrace_format_##call(struct trace_seq *s) \
+ftrace_format_##call(struct ftrace_event_call *unused, \
+		     struct trace_seq *s) \
 { \
 	struct ftrace_raw_##call field __attribute__((unused)); \
 	int ret = 0; \
@@ -207,7 +231,7 @@ ftrace_format_##call(struct trace_seq *s) \
 
 #undef __get_dynamic_array
 #define __get_dynamic_array(field) \
-		((void *)__entry + __entry->__data_loc_##field)
+		((void *)__entry + (__entry->__data_loc_##field & 0xffff))
 
 #undef __get_str
 #define __get_str(field) (char *)__get_dynamic_array(field)
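Since __data_loc now also carries the array length, readers must mask it off; __get_dynamic_array() keeps only the low 16 offset bits. For a hypothetical __string(name, ...) field, __get_str(name) inside TP_printk() expands to approximately:

        (char *)((void *)__entry + (__entry->__data_loc_name & 0xffff))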
@@ -215,9 +239,9 @@ ftrace_format_##call(struct trace_seq *s) \
 #undef __print_flags
 #define __print_flags(flag, delim, flag_array...) \
 	({ \
-		static const struct trace_print_flags flags[] = \
+		static const struct trace_print_flags __flags[] = \
 			{ flag_array, { -1, NULL }}; \
-		ftrace_print_flags_seq(p, delim, flag, flags); \
+		ftrace_print_flags_seq(p, delim, flag, __flags); \
 	})
 
 #undef __print_symbolic
@@ -230,7 +254,7 @@ ftrace_format_##call(struct trace_seq *s) \
 
 #undef TRACE_EVENT
 #define TRACE_EVENT(call, proto, args, tstruct, assign, print) \
-enum print_line_t \
+static enum print_line_t \
 ftrace_raw_output_##call(struct trace_iterator *iter, int flags) \
 { \
 	struct trace_seq *s = &iter->seq; \
@@ -260,46 +284,48 @@ ftrace_raw_output_##call(struct trace_iterator *iter, int flags) \
 
 #include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
 
-#undef __field
-#define __field(type, item) \
+#undef __field_ext
+#define __field_ext(type, item, filter_type) \
 	ret = trace_define_field(event_call, #type, #item, \
 				 offsetof(typeof(field), item), \
-				 sizeof(field.item), is_signed_type(type)); \
+				 sizeof(field.item), \
+				 is_signed_type(type), filter_type); \
 	if (ret) \
 		return ret;
 
+#undef __field
+#define __field(type, item)	__field_ext(type, item, FILTER_OTHER)
+
 #undef __array
 #define __array(type, item, len) \
 	BUILD_BUG_ON(len > MAX_FILTER_STR_VAL); \
 	ret = trace_define_field(event_call, #type "[" #len "]", #item, \
 				 offsetof(typeof(field), item), \
-				 sizeof(field.item), 0); \
+				 sizeof(field.item), 0, FILTER_OTHER); \
 	if (ret) \
 		return ret;
 
 #undef __dynamic_array
 #define __dynamic_array(type, item, len) \
-	ret = trace_define_field(event_call, "__data_loc" "[" #type "]", #item,\
+	ret = trace_define_field(event_call, "__data_loc " #type "[]", #item, \
 				 offsetof(typeof(field), __data_loc_##item), \
-				 sizeof(field.__data_loc_##item), 0);
+				 sizeof(field.__data_loc_##item), 0, \
+				 FILTER_OTHER);
 
 #undef __string
 #define __string(item, src) __dynamic_array(char, item, -1)
 
 #undef TRACE_EVENT
 #define TRACE_EVENT(call, proto, args, tstruct, func, print) \
-int \
-ftrace_define_fields_##call(void) \
+static int \
+ftrace_define_fields_##call(struct ftrace_event_call *event_call) \
 { \
 	struct ftrace_raw_##call field; \
-	struct ftrace_event_call *event_call = &event_##call; \
 	int ret; \
 	\
-	__common_field(int, type, 1); \
-	__common_field(unsigned char, flags, 0); \
-	__common_field(unsigned char, preempt_count, 0); \
-	__common_field(int, pid, 1); \
-	__common_field(int, tgid, 1); \
+	ret = trace_define_common_fields(event_call); \
+	if (ret) \
+		return ret; \
 	\
 	tstruct; \
 	\
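Every field definition now hands a filter type to trace_define_field(), with plain __field() defaulting to FILTER_OTHER and __field_ext() letting an event pick something else. For a hypothetical __field(pid_t, pid), the macro above expands to roughly:

        ret = trace_define_field(event_call, "pid_t", "pid",
                                 offsetof(typeof(field), pid),
                                 sizeof(field.pid),
                                 is_signed_type(pid_t), FILTER_OTHER);
        if (ret)
                return ret;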
@@ -318,6 +344,9 @@ ftrace_define_fields_##call(void) \
 #undef __field
 #define __field(type, item)
 
+#undef __field_ext
+#define __field_ext(type, item, filter_type)
+
 #undef __array
 #define __array(type, item, len)
 
@@ -325,6 +354,7 @@ ftrace_define_fields_##call(void) \
 #define __dynamic_array(type, item, len) \
 	__data_offsets->item = __data_size + \
 			       offsetof(typeof(*entry), __data); \
+	__data_offsets->item |= (len * sizeof(type)) << 16; \
 	__data_size += (len) * sizeof(type);
 
 #undef __string
@@ -345,6 +375,44 @@ static inline int ftrace_get_offsets_##call( \
 
 #include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
 
+#ifdef CONFIG_EVENT_PROFILE
+
+/*
+ * Generate the functions needed for tracepoint perf_event support.
+ *
+ * NOTE: The insertion profile callback (ftrace_profile_<call>) is defined later
+ *
+ * static int ftrace_profile_enable_<call>(void)
+ * {
+ * 	return register_trace_<call>(ftrace_profile_<call>);
+ * }
+ *
+ * static void ftrace_profile_disable_<call>(void)
+ * {
+ * 	unregister_trace_<call>(ftrace_profile_<call>);
+ * }
+ *
+ */
+
+#undef TRACE_EVENT
+#define TRACE_EVENT(call, proto, args, tstruct, assign, print) \
+ \
+static void ftrace_profile_##call(proto); \
+ \
+static int ftrace_profile_enable_##call(void) \
+{ \
+	return register_trace_##call(ftrace_profile_##call); \
+} \
+ \
+static void ftrace_profile_disable_##call(void) \
+{ \
+	unregister_trace_##call(ftrace_profile_##call); \
+}
+
+#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
+
+#endif
+
 /*
 * Stage 4 of the trace events.
 *
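For a concrete picture, a hypothetical event foo_bar gets the following pair out of the block above; the payload callback ftrace_profile_foo_bar() itself is only emitted by the CONFIG_EVENT_PROFILE stage at the end of the file:

        static int ftrace_profile_enable_foo_bar(void)
        {
                return register_trace_foo_bar(ftrace_profile_foo_bar);
        }

        static void ftrace_profile_disable_foo_bar(void)
        {
                unregister_trace_foo_bar(ftrace_profile_foo_bar);
        }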
@@ -380,13 +448,15 @@ static inline int ftrace_get_offsets_##call( \
 * {
 *	struct ring_buffer_event *event;
 *	struct ftrace_raw_<call> *entry; <-- defined in stage 1
+*	struct ring_buffer *buffer;
 *	unsigned long irq_flags;
 *	int pc;
 *
 *	local_save_flags(irq_flags);
 *	pc = preempt_count();
 *
-*	event = trace_current_buffer_lock_reserve(event_<call>.id,
+*	event = trace_current_buffer_lock_reserve(&buffer,
+*				  event_<call>.id,
 *				  sizeof(struct ftrace_raw_<call>),
 *				  irq_flags, pc);
 *	if (!event)
@@ -396,7 +466,7 @@ static inline int ftrace_get_offsets_##call( \
 *	<assign>; <-- Here we assign the entries by the __field and
 *		   __array macros.
 *
-*	trace_current_buffer_unlock_commit(event, irq_flags, pc);
+*	trace_current_buffer_unlock_commit(buffer, event, irq_flags, pc);
 * }
 *
 * static int ftrace_raw_reg_event_<call>(void)
@@ -447,28 +517,6 @@ static inline int ftrace_get_offsets_##call( \
 #define TP_FMT(fmt, args...)	fmt "\n", ##args
 
 #ifdef CONFIG_EVENT_PROFILE
-#define _TRACE_PROFILE(call, proto, args) \
-static void ftrace_profile_##call(proto) \
-{ \
-	extern void perf_tpcounter_event(int); \
-	perf_tpcounter_event(event_##call.id); \
-} \
- \
-static int ftrace_profile_enable_##call(struct ftrace_event_call *event_call) \
-{ \
-	int ret = 0; \
- \
-	if (!atomic_inc_return(&event_call->profile_count)) \
-		ret = register_trace_##call(ftrace_profile_##call); \
- \
-	return ret; \
-} \
- \
-static void ftrace_profile_disable_##call(struct ftrace_event_call *event_call)\
-{ \
-	if (atomic_add_negative(-1, &event_call->profile_count)) \
-		unregister_trace_##call(ftrace_profile_##call); \
-}
 
 #define _TRACE_PROFILE_INIT(call) \
 	.profile_count = ATOMIC_INIT(-1), \
@@ -476,7 +524,6 @@ static void ftrace_profile_disable_##call(struct ftrace_event_call *event_call)\
 	.profile_disable = ftrace_profile_disable_##call,
 
 #else
-#define _TRACE_PROFILE(call, proto, args)
 #define _TRACE_PROFILE_INIT(call)
 #endif
 
@@ -502,7 +549,6 @@ static void ftrace_profile_disable_##call(struct ftrace_event_call *event_call)\
 
 #undef TRACE_EVENT
 #define TRACE_EVENT(call, proto, args, tstruct, assign, print) \
-_TRACE_PROFILE(call, PARAMS(proto), PARAMS(args)) \
 \
 static struct ftrace_event_call event_##call; \
 \
@@ -512,6 +558,7 @@ static void ftrace_raw_event_##call(proto) \
 	struct ftrace_event_call *event_call = &event_##call; \
 	struct ring_buffer_event *event; \
 	struct ftrace_raw_##call *entry; \
+	struct ring_buffer *buffer; \
 	unsigned long irq_flags; \
 	int __data_size; \
 	int pc; \
@@ -521,7 +568,8 @@ static void ftrace_raw_event_##call(proto) \
 \
 	__data_size = ftrace_get_offsets_##call(&__data_offsets, args); \
 \
-	event = trace_current_buffer_lock_reserve(event_##call.id, \
+	event = trace_current_buffer_lock_reserve(&buffer, \
+						  event_##call.id, \
 						  sizeof(*entry) + __data_size, \
 						  irq_flags, pc); \
 	if (!event) \
@@ -533,11 +581,12 @@ static void ftrace_raw_event_##call(proto) \
 \
 	{ assign; } \
 \
-	if (!filter_current_check_discard(event_call, entry, event)) \
-		trace_nowake_buffer_unlock_commit(event, irq_flags, pc); \
+	if (!filter_current_check_discard(buffer, event_call, entry, event)) \
+		trace_nowake_buffer_unlock_commit(buffer, \
+						  event, irq_flags, pc); \
 } \
 \
-static int ftrace_raw_reg_event_##call(void) \
+static int ftrace_raw_reg_event_##call(void *ptr) \
 { \
 	int ret; \
 \
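The reserve/commit API change threads a struct ring_buffer through the whole path: the buffer is returned via the new out-parameter and must be handed back on both the discard check and the commit. Condensed control flow of the generated function ('foo_bar' is a hypothetical event; the tstruct/assign filling is represented by a comment):

        struct ring_buffer *buffer;
        struct ring_buffer_event *event;

        event = trace_current_buffer_lock_reserve(&buffer, event_foo_bar.id,
                                                  sizeof(*entry) + __data_size,
                                                  irq_flags, pc);
        if (!event)
                return;
        entry = ring_buffer_event_data(event);

        /* tstruct and assign fill *entry here */

        if (!filter_current_check_discard(buffer, event_call, entry, event))
                trace_nowake_buffer_unlock_commit(buffer, event, irq_flags, pc);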
@@ -548,7 +597,7 @@ static int ftrace_raw_reg_event_##call(void) \
 	return ret; \
 } \
 \
-static void ftrace_raw_unreg_event_##call(void) \
+static void ftrace_raw_unreg_event_##call(void *ptr) \
 { \
 	unregister_trace_##call(ftrace_raw_event_##call); \
 } \
@@ -566,7 +615,6 @@ static int ftrace_raw_init_event_##call(void) \
 		return -ENODEV; \
 	event_##call.id = id; \
 	INIT_LIST_HEAD(&event_##call.fields); \
-	init_preds(&event_##call); \
 	return 0; \
 } \
 \
@@ -586,6 +634,137 @@ __attribute__((section("_ftrace_events"))) event_##call = { \
 
 #include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
 
-#undef _TRACE_PROFILE
+/*
+ * Define the insertion callback to profile events
+ *
+ * The job is very similar to ftrace_raw_event_<call> except that we don't
+ * insert in the ring buffer but in a perf counter.
+ *
+ * static void ftrace_profile_<call>(proto)
+ * {
+ *	struct ftrace_data_offsets_<call> __maybe_unused __data_offsets;
+ *	struct ftrace_event_call *event_call = &event_<call>;
+ *	extern void perf_tp_event(int, u64, u64, void *, int);
+ *	struct ftrace_raw_##call *entry;
+ *	u64 __addr = 0, __count = 1;
+ *	unsigned long irq_flags;
+ *	struct trace_entry *ent;
+ *	int __entry_size;
+ *	int __data_size;
+ *	int __cpu
+ *	int pc;
+ *
+ *	pc = preempt_count();
+ *
+ *	__data_size = ftrace_get_offsets_<call>(&__data_offsets, args);
+ *
+ *	// Below we want to get the aligned size by taking into account
+ *	// the u32 field that will later store the buffer size
+ *	__entry_size = ALIGN(__data_size + sizeof(*entry) + sizeof(u32),
+ *			     sizeof(u64));
+ *	__entry_size -= sizeof(u32);
+ *
+ *	// Protect the non nmi buffer
+ *	// This also protects the rcu read side
+ *	local_irq_save(irq_flags);
+ *	__cpu = smp_processor_id();
+ *
+ *	if (in_nmi())
+ *		raw_data = rcu_dereference(trace_profile_buf_nmi);
+ *	else
+ *		raw_data = rcu_dereference(trace_profile_buf);
+ *
+ *	if (!raw_data)
+ *		goto end;
+ *
+ *	raw_data = per_cpu_ptr(raw_data, __cpu);
+ *
+ *	//zero dead bytes from alignment to avoid stack leak to userspace:
+ *	*(u64 *)(&raw_data[__entry_size - sizeof(u64)]) = 0ULL;
+ *	entry = (struct ftrace_raw_<call> *)raw_data;
+ *	ent = &entry->ent;
+ *	tracing_generic_entry_update(ent, irq_flags, pc);
+ *	ent->type = event_call->id;
+ *
+ *	<tstruct> <- do some jobs with dynamic arrays
+ *
+ *	<assign>  <- affect our values
+ *
+ *	perf_tp_event(event_call->id, __addr, __count, entry,
+ *		     __entry_size); <- submit them to perf counter
+ *
+ * }
+ */
+
+#ifdef CONFIG_EVENT_PROFILE
+
+#undef __perf_addr
+#define __perf_addr(a) __addr = (a)
+
+#undef __perf_count
+#define __perf_count(c) __count = (c)
+
+#undef TRACE_EVENT
+#define TRACE_EVENT(call, proto, args, tstruct, assign, print) \
+static void ftrace_profile_##call(proto) \
+{ \
+	struct ftrace_data_offsets_##call __maybe_unused __data_offsets; \
+	struct ftrace_event_call *event_call = &event_##call; \
+	extern void perf_tp_event(int, u64, u64, void *, int); \
+	struct ftrace_raw_##call *entry; \
+	u64 __addr = 0, __count = 1; \
+	unsigned long irq_flags; \
+	struct trace_entry *ent; \
+	int __entry_size; \
+	int __data_size; \
+	char *raw_data; \
+	int __cpu; \
+	int pc; \
+ \
+	pc = preempt_count(); \
+ \
+	__data_size = ftrace_get_offsets_##call(&__data_offsets, args); \
+	__entry_size = ALIGN(__data_size + sizeof(*entry) + sizeof(u32), \
+			     sizeof(u64)); \
+	__entry_size -= sizeof(u32); \
+ \
+	if (WARN_ONCE(__entry_size > FTRACE_MAX_PROFILE_SIZE, \
+		      "profile buffer not large enough")) \
+		return; \
+ \
+	local_irq_save(irq_flags); \
+	__cpu = smp_processor_id(); \
+ \
+	if (in_nmi()) \
+		raw_data = rcu_dereference(trace_profile_buf_nmi); \
+	else \
+		raw_data = rcu_dereference(trace_profile_buf); \
+ \
+	if (!raw_data) \
+		goto end; \
+ \
+	raw_data = per_cpu_ptr(raw_data, __cpu); \
+ \
+	*(u64 *)(&raw_data[__entry_size - sizeof(u64)]) = 0ULL; \
+	entry = (struct ftrace_raw_##call *)raw_data; \
+	ent = &entry->ent; \
+	tracing_generic_entry_update(ent, irq_flags, pc); \
+	ent->type = event_call->id; \
+ \
+	tstruct \
+ \
+	{ assign; } \
+ \
+	perf_tp_event(event_call->id, __addr, __count, entry, \
+		      __entry_size); \
+ \
+end: \
+	local_irq_restore(irq_flags); \
+ \
+}
+
+#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
+#endif /* CONFIG_EVENT_PROFILE */
+
 #undef _TRACE_PROFILE_INIT
 
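The size arithmetic in ftrace_profile_<call>() deserves a worked example: perf prepends a u32 size header to the raw record, so __entry_size is chosen so that header plus data together end on a u64 boundary. Assuming sizeof(*entry) == 28 and __data_size == 13 (numbers invented for illustration):

        /* 28 + 13 + sizeof(u32) = 45      raw payload plus the u32 size header */
        /* ALIGN(45, sizeof(u64)) = 48     round the pair up to 8 bytes         */
        /* 48 - sizeof(u32)       = 44     the __entry_size actually used       */
        /* 4-byte header + 44     = 48     so the next record stays u64-aligned */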