Diffstat (limited to 'include/trace/ftrace.h')
 -rw-r--r--  include/trace/ftrace.h | 49 +++++++++++++++++++++++--------------------------
 1 file changed, 23 insertions(+), 26 deletions(-)
diff --git a/include/trace/ftrace.h b/include/trace/ftrace.h
index 40dc5e8fe340..19edd7facaa1 100644
--- a/include/trace/ftrace.h
+++ b/include/trace/ftrace.h
@@ -227,29 +227,18 @@ static notrace enum print_line_t				\
 ftrace_raw_output_##call(struct trace_iterator *iter, int flags,	\
			 struct trace_event *trace_event)		\
 {									\
-	struct ftrace_event_call *event;				\
 	struct trace_seq *s = &iter->seq;				\
+	struct trace_seq __maybe_unused *p = &iter->tmp_seq;		\
 	struct ftrace_raw_##call *field;				\
-	struct trace_entry *entry;					\
-	struct trace_seq *p = &iter->tmp_seq;				\
 	int ret;							\
									\
-	event = container_of(trace_event, struct ftrace_event_call,	\
-			     event);					\
-									\
-	entry = iter->ent;						\
-									\
-	if (entry->type != event->event.type) {				\
-		WARN_ON_ONCE(1);					\
-		return TRACE_TYPE_UNHANDLED;				\
-	}								\
-									\
-	field = (typeof(field))entry;					\
+	field = (typeof(field))iter->ent;				\
									\
-	trace_seq_init(p);						\
-	ret = trace_seq_printf(s, "%s: ", event->name);			\
+	ret = ftrace_raw_output_prep(iter, trace_event);		\
 	if (ret)							\
-		ret = trace_seq_printf(s, print);			\
+		return ret;						\
+									\
+	ret = trace_seq_printf(s, print);				\
 	if (!ret)							\
		return TRACE_TYPE_PARTIAL_LINE;				\
									\
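The boilerplate deleted above is not gone, it is consolidated: every event's output
callback previously open-coded the same event lookup, type sanity check, and
"name: " prefix, and the new ftrace_raw_output_prep() helper factors that out so
each per-event macro expansion shrinks. A minimal sketch of what such a helper
would contain, reconstructed from the removed lines (the actual implementation
lives outside this header, so treat placement and details as assumptions):

    /* Sketch only: reconstructed from the lines removed above. */
    int ftrace_raw_output_prep(struct trace_iterator *iter,
    			       struct trace_event *trace_event)
    {
    	struct ftrace_event_call *event;
    	struct trace_seq *s = &iter->seq;
    	struct trace_seq *p = &iter->tmp_seq;
    	struct trace_entry *entry = iter->ent;
    	int ret;

    	/* Recover the event_call that embeds this struct trace_event. */
    	event = container_of(trace_event, struct ftrace_event_call, event);

    	/* A mismatched type means this buffer entry is not ours. */
    	if (entry->type != event->event.type) {
    		WARN_ON_ONCE(1);
    		return TRACE_TYPE_UNHANDLED;
    	}

    	trace_seq_init(p);
    	ret = trace_seq_printf(s, "%s: ", event->name);
    	if (!ret)
    		return TRACE_TYPE_PARTIAL_LINE;

    	return 0;	/* caller goes on to print the event fields */
    }

Since TRACE_TYPE_UNHANDLED and TRACE_TYPE_PARTIAL_LINE are both non-zero, the
macro body above can simply forward either error with `if (ret) return ret;`.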
@@ -335,7 +324,7 @@ static struct trace_event_functions ftrace_event_type_funcs_##call = { \
 
 #undef DECLARE_EVENT_CLASS
 #define DECLARE_EVENT_CLASS(call, proto, args, tstruct, func, print)	\
-static int notrace							\
+static int notrace __init						\
 ftrace_define_fields_##call(struct ftrace_event_call *event_call)	\
 {									\
 	struct ftrace_raw_##call field;					\
@@ -414,7 +403,8 @@ static inline notrace int ftrace_get_offsets_##call( \
  *
  * static void ftrace_raw_event_<call>(void *__data, proto)
  * {
- *	struct ftrace_event_call *event_call = __data;
+ *	struct ftrace_event_file *ftrace_file = __data;
+ *	struct ftrace_event_call *event_call = ftrace_file->event_call;
  *	struct ftrace_data_offsets_<call> __maybe_unused __data_offsets;
  *	struct ring_buffer_event *event;
  *	struct ftrace_raw_<call> *entry; <-- defined in stage 1
@@ -423,12 +413,16 @@ static inline notrace int ftrace_get_offsets_##call( \
  *	int __data_size;
  *	int pc;
  *
+ *	if (test_bit(FTRACE_EVENT_FL_SOFT_DISABLED_BIT,
+ *		     &ftrace_file->flags))
+ *		return;
+ *
  *	local_save_flags(irq_flags);
  *	pc = preempt_count();
  *
  *	__data_size = ftrace_get_offsets_<call>(&__data_offsets, args);
  *
- *	event = trace_current_buffer_lock_reserve(&buffer,
+ *	event = trace_event_buffer_lock_reserve(&buffer, ftrace_file,
  *				  event_<call>->event.type,
  *				  sizeof(*entry) + __data_size,
  *				  irq_flags, pc);
@@ -440,7 +434,7 @@ static inline notrace int ftrace_get_offsets_##call( \
  *	__array macros.
  *
  *	if (!filter_current_check_discard(buffer, event_call, entry, event))
- *		trace_current_buffer_unlock_commit(buffer,
+ *		trace_nowake_buffer_unlock_commit(buffer,
  *						   event, irq_flags, pc);
  * }
  *
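For orientation, the `call`, `proto`, and `args` parameters threaded through these
macros come from TRACE_EVENT() definitions in the tracepoint headers. A
hypothetical event in the style of samples/trace_events/trace-events-sample.h
shows what feeds the machinery documented above:

    TRACE_EVENT(foo_bar,			/* <call>  */

    	TP_PROTO(char *foo, int bar),		/* <proto> */

    	TP_ARGS(foo, bar),			/* <args>  */

    	TP_STRUCT__entry(		/* becomes struct ftrace_raw_foo_bar */
    		__array(char,	foo,	10)
    		__field(int,	bar)
    	),

    	TP_fast_assign(
    		strncpy(__entry->foo, foo, 10);
    		__entry->bar	= bar;
    	),

    	TP_printk("foo %s %d", __entry->foo, __entry->bar)
    );

DECLARE_EVENT_CLASS()/DEFINE_EVENT() then expand this, stage by stage, into
ftrace_raw_foo_bar, ftrace_raw_event_foo_bar(), ftrace_define_fields_foo_bar(),
and the other symbols being patched in this file.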
@@ -518,7 +512,8 @@ static inline notrace int ftrace_get_offsets_##call( \
 static notrace void							\
 ftrace_raw_event_##call(void *__data, proto)				\
 {									\
-	struct ftrace_event_call *event_call = __data;			\
+	struct ftrace_event_file *ftrace_file = __data;			\
+	struct ftrace_event_call *event_call = ftrace_file->event_call;\
 	struct ftrace_data_offsets_##call __maybe_unused __data_offsets;\
 	struct ring_buffer_event *event;				\
 	struct ftrace_raw_##call *entry;				\
@@ -527,12 +522,16 @@ ftrace_raw_event_##call(void *__data, proto)		\
 	int __data_size;						\
 	int pc;								\
									\
+	if (test_bit(FTRACE_EVENT_FL_SOFT_DISABLED_BIT,			\
+		     &ftrace_file->flags))				\
+		return;							\
+									\
 	local_save_flags(irq_flags);					\
 	pc = preempt_count();						\
									\
 	__data_size = ftrace_get_offsets_##call(&__data_offsets, args);\
									\
-	event = trace_current_buffer_lock_reserve(&buffer,		\
+	event = trace_event_buffer_lock_reserve(&buffer, ftrace_file,	\
				 event_call->event.type,		\
				 sizeof(*entry) + __data_size,		\
				 irq_flags, pc);			\
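Two behavioural changes land in this hunk. The soft-disable test lets an event
stay registered while its output is suppressed: when
FTRACE_EVENT_FL_SOFT_DISABLED_BIT is set in the per-file flags, the probe
returns before reserving any ring-buffer space. And reserving through
trace_event_buffer_lock_reserve() with the ftrace_event_file lets the helper
resolve the buffer belonging to that file's trace instance instead of assuming
the single global buffer. A sketch of the wrapper's likely shape, with the
signature inferred from the call site above (the body, including the
ftrace_file->tr->buffer layout, is an assumption; the real helper lives in the
tracing core, not in this header):

    /* Sketch only: signature inferred from the call site above. */
    struct ring_buffer_event *
    trace_event_buffer_lock_reserve(struct ring_buffer **current_rb,
    				struct ftrace_event_file *ftrace_file,
    				int type, unsigned long len,
    				unsigned long flags, int pc)
    {
    	/* Pick the buffer of the file's trace_array (assumed layout), so
    	 * events recorded via an instance land in that instance's buffer. */
    	*current_rb = ftrace_file->tr->buffer;
    	return trace_buffer_lock_reserve(*current_rb, type, len, flags, pc);
    }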
@@ -581,7 +580,7 @@ static inline void ftrace_test_probe_##call(void)	\
 #define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print)	\
 _TRACE_PERF_PROTO(call, PARAMS(proto));					\
 static const char print_fmt_##call[] = print;				\
-static struct ftrace_event_class __used event_class_##call = {		\
+static struct ftrace_event_class __used __refdata event_class_##call = { \
 	.system			= __stringify(TRACE_SYSTEM),		\
 	.define_fields		= ftrace_define_fields_##call,		\
 	.fields			= LIST_HEAD_INIT(event_class_##call.fields),\
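The __refdata annotation here pairs with the __init added to
ftrace_define_fields_##call() earlier in this diff: the event class is
long-lived, but its .define_fields member now points at a function that is
discarded after boot, and __refdata tells modpost that this cross-section
reference is intentional rather than a section-mismatch bug.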
@@ -705,5 +704,3 @@ static inline void perf_test_probe_##call(void)	\
 #include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
 #endif /* CONFIG_PERF_EVENTS */
 
-#undef _TRACE_PROFILE_INIT
-