Diffstat (limited to 'include/trace/ftrace.h')

 include/trace/ftrace.h | 276 ++++++++++++++++++++++++++++++++++++++----------
 1 file changed, 220 insertions(+), 56 deletions(-)
diff --git a/include/trace/ftrace.h b/include/trace/ftrace.h
index 1867553c61e5..308bafd93325 100644
--- a/include/trace/ftrace.h
+++ b/include/trace/ftrace.h
@@ -21,11 +21,14 @@
 #undef __field
 #define __field(type, item) type item;
 
+#undef __field_ext
+#define __field_ext(type, item, filter_type) type item;
+
 #undef __array
 #define __array(type, item, len) type item[len];
 
 #undef __dynamic_array
-#define __dynamic_array(type, item, len) unsigned short __data_loc_##item;
+#define __dynamic_array(type, item, len) u32 __data_loc_##item;
 
 #undef __string
 #define __string(item, src) __dynamic_array(char, item, -1)
@@ -42,6 +45,16 @@
 }; \
 static struct ftrace_event_call event_##name
 
+#undef __cpparg
+#define __cpparg(arg...) arg
+
+/* Callbacks are meaningless to ftrace. */
+#undef TRACE_EVENT_FN
+#define TRACE_EVENT_FN(name, proto, args, tstruct, \
+                assign, print, reg, unreg) \
+        TRACE_EVENT(name, __cpparg(proto), __cpparg(args), \
+                __cpparg(tstruct), __cpparg(assign), __cpparg(print)) \
+
 #include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
 
 
@@ -51,23 +64,27 @@
  * Include the following:
  *
  * struct ftrace_data_offsets_<call> {
- *        int <item1>;
- *        int <item2>;
+ *        u32 <item1>;
+ *        u32 <item2>;
  *        [...]
  * };
  *
- * The __dynamic_array() macro will create each int <item>, this is
+ * The __dynamic_array() macro will create each u32 <item>, this is
  * to keep the offset of each array from the beginning of the event.
+ * The size of an array is also encoded, in the higher 16 bits of <item>.
  */
 
 #undef __field
-#define __field(type, item);
+#define __field(type, item)
+
+#undef __field_ext
+#define __field_ext(type, item, filter_type)
 
 #undef __array
 #define __array(type, item, len)
 
 #undef __dynamic_array
-#define __dynamic_array(type, item, len) int item;
+#define __dynamic_array(type, item, len) u32 item;
 
 #undef __string
 #define __string(item, src) __dynamic_array(char, item, -1)
@@ -109,6 +126,9 @@
         if (!ret) \
                 return 0;
 
+#undef __field_ext
+#define __field_ext(type, item, filter_type) __field(type, item)
+
 #undef __array
 #define __array(type, item, len) \
         ret = trace_seq_printf(s, "\tfield:" #type " " #item "[" #len "];\t" \
@@ -120,7 +140,7 @@
 
 #undef __dynamic_array
 #define __dynamic_array(type, item, len) \
-        ret = trace_seq_printf(s, "\tfield:__data_loc " #item ";\t" \
+        ret = trace_seq_printf(s, "\tfield:__data_loc " #type "[] " #item ";\t"\
                                "offset:%u;\tsize:%u;\n", \
                                (unsigned int)offsetof(typeof(field), \
                                         __data_loc_##item), \
@@ -144,10 +164,14 @@
 #undef TP_fast_assign
 #define TP_fast_assign(args...) args
 
+#undef TP_perf_assign
+#define TP_perf_assign(args...)
+
 #undef TRACE_EVENT
 #define TRACE_EVENT(call, proto, args, tstruct, func, print) \
 static int \
-ftrace_format_##call(struct trace_seq *s) \
+ftrace_format_##call(struct ftrace_event_call *unused, \
+                     struct trace_seq *s) \
 { \
         struct ftrace_raw_##call field __attribute__((unused)); \
         int ret = 0; \
@@ -207,7 +231,7 @@ ftrace_format_##call(struct trace_seq *s) \
 
 #undef __get_dynamic_array
 #define __get_dynamic_array(field) \
-                ((void *)__entry + __entry->__data_loc_##field)
+                ((void *)__entry + (__entry->__data_loc_##field & 0xffff))
 
 #undef __get_str
 #define __get_str(field) (char *)__get_dynamic_array(field)
@@ -260,28 +284,33 @@ ftrace_raw_output_##call(struct trace_iterator *iter, int flags) \
 
 #include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
 
-#undef __field
-#define __field(type, item) \
+#undef __field_ext
+#define __field_ext(type, item, filter_type) \
         ret = trace_define_field(event_call, #type, #item, \
                                  offsetof(typeof(field), item), \
-                                 sizeof(field.item), is_signed_type(type)); \
+                                 sizeof(field.item), \
+                                 is_signed_type(type), filter_type); \
         if (ret) \
                 return ret;
 
+#undef __field
+#define __field(type, item) __field_ext(type, item, FILTER_OTHER)
+
 #undef __array
 #define __array(type, item, len) \
         BUILD_BUG_ON(len > MAX_FILTER_STR_VAL); \
         ret = trace_define_field(event_call, #type "[" #len "]", #item, \
                                  offsetof(typeof(field), item), \
-                                 sizeof(field.item), 0); \
+                                 sizeof(field.item), 0, FILTER_OTHER); \
         if (ret) \
                 return ret;
 
 #undef __dynamic_array
 #define __dynamic_array(type, item, len) \
-        ret = trace_define_field(event_call, "__data_loc" "[" #type "]", #item,\
+        ret = trace_define_field(event_call, "__data_loc " #type "[]", #item, \
                                  offsetof(typeof(field), __data_loc_##item), \
-                                 sizeof(field.__data_loc_##item), 0);
+                                 sizeof(field.__data_loc_##item), 0, \
+                                 FILTER_OTHER);
 
 #undef __string
 #define __string(item, src) __dynamic_array(char, item, -1)
@@ -289,17 +318,14 @@ ftrace_raw_output_##call(struct trace_iterator *iter, int flags) \
 #undef TRACE_EVENT
 #define TRACE_EVENT(call, proto, args, tstruct, func, print) \
 int \
-ftrace_define_fields_##call(void) \
+ftrace_define_fields_##call(struct ftrace_event_call *event_call) \
 { \
         struct ftrace_raw_##call field; \
-        struct ftrace_event_call *event_call = &event_##call; \
         int ret; \
         \
-        __common_field(int, type, 1); \
-        __common_field(unsigned char, flags, 0); \
-        __common_field(unsigned char, preempt_count, 0); \
-        __common_field(int, pid, 1); \
-        __common_field(int, tgid, 1); \
+        ret = trace_define_common_fields(event_call); \
+        if (ret) \
+                return ret; \
         \
         tstruct; \
         \
@@ -318,6 +344,9 @@ ftrace_define_fields_##call(void) \
 #undef __field
 #define __field(type, item)
 
+#undef __field_ext
+#define __field_ext(type, item, filter_type)
+
 #undef __array
 #define __array(type, item, len)
 
@@ -325,6 +354,7 @@ ftrace_define_fields_##call(void) \
 #define __dynamic_array(type, item, len) \
         __data_offsets->item = __data_size + \
                                offsetof(typeof(*entry), __data); \
+        __data_offsets->item |= (len * sizeof(type)) << 16; \
         __data_size += (len) * sizeof(type);
 
 #undef __string
@@ -345,6 +375,56 @@ static inline int ftrace_get_offsets_##call( \
 
 #include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
 
+#ifdef CONFIG_EVENT_PROFILE
+
+/*
+ * Generate the functions needed for tracepoint perf_counter support.
+ *
+ * NOTE: The insertion profile callback (ftrace_profile_<call>) is defined later
+ *
+ * static int ftrace_profile_enable_<call>(struct ftrace_event_call *event_call)
+ * {
+ *        int ret = 0;
+ *
+ *        if (!atomic_inc_return(&event_call->profile_count))
+ *                ret = register_trace_<call>(ftrace_profile_<call>);
+ *
+ *        return ret;
+ * }
+ *
+ * static void ftrace_profile_disable_<call>(struct ftrace_event_call *event_call)
+ * {
+ *        if (atomic_add_negative(-1, &event_call->profile_count))
+ *                unregister_trace_<call>(ftrace_profile_<call>);
+ * }
+ *
+ */
+
+#undef TRACE_EVENT
+#define TRACE_EVENT(call, proto, args, tstruct, assign, print) \
+\
+static void ftrace_profile_##call(proto); \
+\
+static int ftrace_profile_enable_##call(struct ftrace_event_call *event_call) \
+{ \
+        int ret = 0; \
+        \
+        if (!atomic_inc_return(&event_call->profile_count)) \
+                ret = register_trace_##call(ftrace_profile_##call); \
+        \
+        return ret; \
+} \
+\
+static void ftrace_profile_disable_##call(struct ftrace_event_call *event_call)\
+{ \
+        if (atomic_add_negative(-1, &event_call->profile_count)) \
+                unregister_trace_##call(ftrace_profile_##call); \
+}
+
+#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
+
+#endif
+
 /*
  * Stage 4 of the trace events.
  *
@@ -380,13 +460,15 @@ static inline int ftrace_get_offsets_##call( \
  * {
  *        struct ring_buffer_event *event;
  *        struct ftrace_raw_<call> *entry; <-- defined in stage 1
+ *        struct ring_buffer *buffer;
  *        unsigned long irq_flags;
  *        int pc;
  *
  *        local_save_flags(irq_flags);
  *        pc = preempt_count();
  *
- *        event = trace_current_buffer_lock_reserve(event_<call>.id,
+ *        event = trace_current_buffer_lock_reserve(&buffer,
+ *                                  event_<call>.id,
  *                                  sizeof(struct ftrace_raw_<call>),
  *                                  irq_flags, pc);
  *        if (!event)
@@ -396,7 +478,7 @@ static inline int ftrace_get_offsets_##call( \
  *        <assign>; <-- Here we assign the entries by the __field and
  *                      __array macros.
  *
- *        trace_current_buffer_unlock_commit(event, irq_flags, pc);
+ *        trace_current_buffer_unlock_commit(buffer, event, irq_flags, pc);
  * }
  *
  * static int ftrace_raw_reg_event_<call>(void)
@@ -447,28 +529,6 @@ static inline int ftrace_get_offsets_##call( \
 #define TP_FMT(fmt, args...) fmt "\n", ##args
 
 #ifdef CONFIG_EVENT_PROFILE
-#define _TRACE_PROFILE(call, proto, args) \
-static void ftrace_profile_##call(proto) \
-{ \
-        extern void perf_tpcounter_event(int); \
-        perf_tpcounter_event(event_##call.id); \
-} \
-\
-static int ftrace_profile_enable_##call(struct ftrace_event_call *event_call) \
-{ \
-        int ret = 0; \
-        \
-        if (!atomic_inc_return(&event_call->profile_count)) \
-                ret = register_trace_##call(ftrace_profile_##call); \
-        \
-        return ret; \
-} \
-\
-static void ftrace_profile_disable_##call(struct ftrace_event_call *event_call)\
-{ \
-        if (atomic_add_negative(-1, &event_call->profile_count)) \
-                unregister_trace_##call(ftrace_profile_##call); \
-}
 
 #define _TRACE_PROFILE_INIT(call) \
         .profile_count = ATOMIC_INIT(-1), \
@@ -476,7 +536,6 @@ static void ftrace_profile_disable_##call(struct ftrace_event_call *event_call)\
         .profile_disable = ftrace_profile_disable_##call,
 
 #else
-#define _TRACE_PROFILE(call, proto, args)
 #define _TRACE_PROFILE_INIT(call)
 #endif
 
@@ -502,7 +561,6 @@ static void ftrace_profile_disable_##call(struct ftrace_event_call *event_call)\
 
 #undef TRACE_EVENT
 #define TRACE_EVENT(call, proto, args, tstruct, assign, print) \
-_TRACE_PROFILE(call, PARAMS(proto), PARAMS(args)) \
 \
 static struct ftrace_event_call event_##call; \
 \
@@ -512,6 +570,7 @@ static void ftrace_raw_event_##call(proto) \
         struct ftrace_event_call *event_call = &event_##call; \
         struct ring_buffer_event *event; \
         struct ftrace_raw_##call *entry; \
+        struct ring_buffer *buffer; \
         unsigned long irq_flags; \
         int __data_size; \
         int pc; \
@@ -521,7 +580,8 @@ static void ftrace_raw_event_##call(proto) \
         \
         __data_size = ftrace_get_offsets_##call(&__data_offsets, args); \
         \
-        event = trace_current_buffer_lock_reserve(event_##call.id, \
+        event = trace_current_buffer_lock_reserve(&buffer, \
+                                 event_##call.id, \
                                  sizeof(*entry) + __data_size, \
                                  irq_flags, pc); \
         if (!event) \
@@ -533,11 +593,12 @@ static void ftrace_raw_event_##call(proto) \
         \
         { assign; } \
         \
-        if (!filter_current_check_discard(event_call, entry, event)) \
-                trace_nowake_buffer_unlock_commit(event, irq_flags, pc); \
+        if (!filter_current_check_discard(buffer, event_call, entry, event)) \
+                trace_nowake_buffer_unlock_commit(buffer, \
+                                                  event, irq_flags, pc); \
 } \
 \
-static int ftrace_raw_reg_event_##call(void) \
+static int ftrace_raw_reg_event_##call(void *ptr) \
 { \
         int ret; \
         \
@@ -548,7 +609,7 @@ static int ftrace_raw_reg_event_##call(void) \
         return ret; \
 } \
 \
-static void ftrace_raw_unreg_event_##call(void) \
+static void ftrace_raw_unreg_event_##call(void *ptr) \
 { \
         unregister_trace_##call(ftrace_raw_event_##call); \
 } \
@@ -566,7 +627,6 @@ static int ftrace_raw_init_event_##call(void) \
                 return -ENODEV; \
         event_##call.id = id; \
         INIT_LIST_HEAD(&event_##call.fields); \
-        init_preds(&event_##call); \
         return 0; \
 } \
 \
@@ -586,6 +646,110 @@ __attribute__((section("_ftrace_events"))) event_##call = { \
 
 #include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
 
-#undef _TRACE_PROFILE
+/*
+ * Define the insertion callback to profile events
+ *
+ * The job is very similar to ftrace_raw_event_<call> except that we don't
+ * insert in the ring buffer but in a perf counter.
+ *
+ * static void ftrace_profile_<call>(proto)
+ * {
+ *        struct ftrace_data_offsets_<call> __maybe_unused __data_offsets;
+ *        struct ftrace_event_call *event_call = &event_<call>;
+ *        extern void perf_tpcounter_event(int, u64, u64, void *, int);
+ *        struct ftrace_raw_##call *entry;
+ *        u64 __addr = 0, __count = 1;
+ *        unsigned long irq_flags;
+ *        int __entry_size;
+ *        int __data_size;
+ *        int pc;
+ *
+ *        local_save_flags(irq_flags);
+ *        pc = preempt_count();
+ *
+ *        __data_size = ftrace_get_offsets_<call>(&__data_offsets, args);
+ *
+ *        // Below we want to get the aligned size by taking into account
+ *        // the u32 field that will later store the buffer size
+ *        __entry_size = ALIGN(__data_size + sizeof(*entry) + sizeof(u32),
+ *                             sizeof(u64));
+ *        __entry_size -= sizeof(u32);
+ *
+ *        do {
+ *                char raw_data[__entry_size]; <- allocate our sample in the stack
+ *                struct trace_entry *ent;
+ *
+ *                zero dead bytes from alignment to avoid stack leak to userspace:
+ *
+ *                *(u64 *)(&raw_data[__entry_size - sizeof(u64)]) = 0ULL;
+ *                entry = (struct ftrace_raw_<call> *)raw_data;
+ *                ent = &entry->ent;
+ *                tracing_generic_entry_update(ent, irq_flags, pc);
+ *                ent->type = event_call->id;
+ *
+ *                <tstruct> <- do some jobs with dynamic arrays
+ *
+ *                <assign> <- affect our values
+ *
+ *                perf_tpcounter_event(event_call->id, __addr, __count, entry,
+ *                             __entry_size); <- submit them to perf counter
+ *        } while (0);
+ *
+ * }
+ */
+
+#ifdef CONFIG_EVENT_PROFILE
+
+#undef __perf_addr
+#define __perf_addr(a) __addr = (a)
+
+#undef __perf_count
+#define __perf_count(c) __count = (c)
+
+#undef TRACE_EVENT
+#define TRACE_EVENT(call, proto, args, tstruct, assign, print) \
+static void ftrace_profile_##call(proto) \
+{ \
+        struct ftrace_data_offsets_##call __maybe_unused __data_offsets;\
+        struct ftrace_event_call *event_call = &event_##call; \
+        extern void perf_tpcounter_event(int, u64, u64, void *, int); \
+        struct ftrace_raw_##call *entry; \
+        u64 __addr = 0, __count = 1; \
+        unsigned long irq_flags; \
+        int __entry_size; \
+        int __data_size; \
+        int pc; \
+        \
+        local_save_flags(irq_flags); \
+        pc = preempt_count(); \
+        \
+        __data_size = ftrace_get_offsets_##call(&__data_offsets, args); \
+        __entry_size = ALIGN(__data_size + sizeof(*entry) + sizeof(u32),\
+                             sizeof(u64)); \
+        __entry_size -= sizeof(u32); \
+        \
+        do { \
+                char raw_data[__entry_size]; \
+                struct trace_entry *ent; \
+                \
+                *(u64 *)(&raw_data[__entry_size - sizeof(u64)]) = 0ULL; \
+                entry = (struct ftrace_raw_##call *)raw_data; \
+                ent = &entry->ent; \
+                tracing_generic_entry_update(ent, irq_flags, pc); \
+                ent->type = event_call->id; \
+                \
+                tstruct \
+                \
+                { assign; } \
+                \
+                perf_tpcounter_event(event_call->id, __addr, __count, entry,\
+                             __entry_size); \
+        } while (0); \
+        \
+}
+
+#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
+#endif /* CONFIG_EVENT_PROFILE */
+
 #undef _TRACE_PROFILE_INIT
 
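The core of this patch is the widening of __data_loc from an unsigned short to a u32 that carries two values at once: the low 16 bits keep the offset of the dynamic array from the beginning of the event, and the high 16 bits now also encode the array's byte length ((len * sizeof(type)) << 16 in __dynamic_array()), which is why __get_dynamic_array() gains the & 0xffff mask. Below is a minimal stand-alone C sketch of that packing; the helper names are illustrative only and are not kernel API.

/* demo_data_loc.c -- illustrative only; mirrors the encoding this patch adds */
#include <stdint.h>
#include <stdio.h>

/* Pack a dynamic array's location the way __dynamic_array() now does:
 * byte length in the upper 16 bits, offset within the event in the
 * lower 16 bits. */
static uint32_t data_loc_make(uint16_t offset, uint16_t len)
{
        return ((uint32_t)len << 16) | offset;
}

/* Recover the offset with the same mask __get_dynamic_array() uses. */
static uint16_t data_loc_offset(uint32_t loc)
{
        return loc & 0xffff;
}

/* The size lives in the higher 16 bits, as the stage-1 comment notes. */
static uint16_t data_loc_len(uint32_t loc)
{
        return loc >> 16;
}

int main(void)
{
        /* e.g. a 13-byte string stored 24 bytes into the event */
        uint32_t loc = data_loc_make(24, 13);

        printf("offset=%u len=%u\n",
               (unsigned)data_loc_offset(loc), (unsigned)data_loc_len(loc));
        return 0;
}

The 16-bit fields cap both the offset and the length of a dynamic array at 64KB, a comfortable bound given that a trace event must fit in the ring buffer anyway.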
