Diffstat (limited to 'include/trace')

 -rw-r--r--   include/trace/ftrace.h  | 59
 -rw-r--r--   include/trace/syscall.h | 11

 2 files changed, 47 insertions(+), 23 deletions(-)
diff --git a/include/trace/ftrace.h b/include/trace/ftrace.h
index c9bbcab95fbe..4945d1c99864 100644
--- a/include/trace/ftrace.h
+++ b/include/trace/ftrace.h
@@ -402,12 +402,12 @@ static inline int ftrace_get_offsets_##call( \
  \
 static void ftrace_profile_##call(proto); \
  \
-static int ftrace_profile_enable_##call(void) \
+static int ftrace_profile_enable_##call(struct ftrace_event_call *unused)\
 { \
         return register_trace_##call(ftrace_profile_##call); \
 } \
  \
-static void ftrace_profile_disable_##call(void) \
+static void ftrace_profile_disable_##call(struct ftrace_event_call *unused)\
 { \
         unregister_trace_##call(ftrace_profile_##call); \
 }
@@ -426,7 +426,7 @@ static void ftrace_profile_disable_##call(void) \
  *         event_trace_printk(_RET_IP_, "<call>: " <fmt>);
  * }
  *
- * static int ftrace_reg_event_<call>(void)
+ * static int ftrace_reg_event_<call>(struct ftrace_event_call *unused)
  * {
  *         int ret;
  *
@@ -437,7 +437,7 @@ static void ftrace_profile_disable_##call(void) \
  *         return ret;
  * }
  *
- * static void ftrace_unreg_event_<call>(void)
+ * static void ftrace_unreg_event_<call>(struct ftrace_event_call *unused)
  * {
  *         unregister_trace_<call>(ftrace_event_<call>);
  * }
@@ -472,7 +472,7 @@ static void ftrace_profile_disable_##call(void) \
  *         trace_current_buffer_unlock_commit(buffer, event, irq_flags, pc);
  * }
  *
- * static int ftrace_raw_reg_event_<call>(void)
+ * static int ftrace_raw_reg_event_<call>(struct ftrace_event_call *unused)
  * {
  *         int ret;
  *
@@ -483,7 +483,7 @@ static void ftrace_profile_disable_##call(void) \
  *         return ret;
  * }
  *
- * static void ftrace_unreg_event_<call>(void)
+ * static void ftrace_unreg_event_<call>(struct ftrace_event_call *unused)
  * {
  *         unregister_trace_<call>(ftrace_raw_event_<call>);
  * }
@@ -492,7 +492,7 @@ static void ftrace_profile_disable_##call(void) \
  *         .trace = ftrace_raw_output_<call>, <-- stage 2
  * };
  *
- * static int ftrace_raw_init_event_<call>(void)
+ * static int ftrace_raw_init_event_<call>(struct ftrace_event_call *unused)
  * {
  *         int id;
  *
@@ -589,7 +589,7 @@ static void ftrace_raw_event_##call(proto) \
                                 event, irq_flags, pc); \
 } \
  \
-static int ftrace_raw_reg_event_##call(void *ptr) \
+static int ftrace_raw_reg_event_##call(struct ftrace_event_call *unused)\
 { \
         int ret; \
  \
@@ -600,7 +600,7 @@ static int ftrace_raw_reg_event_##call(void *ptr) \
         return ret; \
 } \
  \
-static void ftrace_raw_unreg_event_##call(void *ptr) \
+static void ftrace_raw_unreg_event_##call(struct ftrace_event_call *unused)\
 { \
         unregister_trace_##call(ftrace_raw_event_##call); \
 } \
@@ -609,7 +609,7 @@ static struct trace_event ftrace_event_type_##call = { \
         .trace = ftrace_raw_output_##call, \
 }; \
  \
-static int ftrace_raw_init_event_##call(void) \
+static int ftrace_raw_init_event_##call(struct ftrace_event_call *unused)\
 { \
         int id; \
  \
@@ -649,6 +649,7 @@ __attribute__((section("_ftrace_events"))) event_##call = { \
  *         struct ftrace_event_call *event_call = &event_<call>;
  *         extern void perf_tp_event(int, u64, u64, void *, int);
  *         struct ftrace_raw_##call *entry;
+ *         struct perf_trace_buf *trace_buf;
  *         u64 __addr = 0, __count = 1;
  *         unsigned long irq_flags;
  *         struct trace_entry *ent;
@@ -673,14 +674,25 @@ __attribute__((section("_ftrace_events"))) event_##call = { \
  *         __cpu = smp_processor_id();
  *
  *         if (in_nmi())
- *                 raw_data = rcu_dereference(trace_profile_buf_nmi);
+ *                 trace_buf = rcu_dereference(perf_trace_buf_nmi);
  *         else
- *                 raw_data = rcu_dereference(trace_profile_buf);
+ *                 trace_buf = rcu_dereference(perf_trace_buf);
  *
- *         if (!raw_data)
+ *         if (!trace_buf)
  *                 goto end;
  *
- *         raw_data = per_cpu_ptr(raw_data, __cpu);
+ *         trace_buf = per_cpu_ptr(trace_buf, __cpu);
+ *
+ *         // Avoid recursion from perf that could mess up the buffer
+ *         if (trace_buf->recursion++)
+ *                 goto end_recursion;
+ *
+ *         raw_data = trace_buf->buf;
+ *
+ *         // Make recursion update visible before entering perf_tp_event
+ *         // so that we protect from perf recursions.
+ *
+ *         barrier();
  *
  *         //zero dead bytes from alignment to avoid stack leak to userspace:
  *         *(u64 *)(&raw_data[__entry_size - sizeof(u64)]) = 0ULL;
@@ -713,8 +725,9 @@ static void ftrace_profile_##call(proto) \
 { \
         struct ftrace_data_offsets_##call __maybe_unused __data_offsets;\
         struct ftrace_event_call *event_call = &event_##call; \
         extern void perf_tp_event(int, u64, u64, void *, int); \
         struct ftrace_raw_##call *entry; \
+        struct perf_trace_buf *trace_buf; \
         u64 __addr = 0, __count = 1; \
         unsigned long irq_flags; \
         struct trace_entry *ent; \
@@ -739,14 +752,20 @@ static void ftrace_profile_##call(proto) \
         __cpu = smp_processor_id(); \
  \
         if (in_nmi()) \
-                raw_data = rcu_dereference(trace_profile_buf_nmi); \
+                trace_buf = rcu_dereference(perf_trace_buf_nmi); \
         else \
-                raw_data = rcu_dereference(trace_profile_buf); \
+                trace_buf = rcu_dereference(perf_trace_buf); \
  \
-        if (!raw_data) \
+        if (!trace_buf) \
                 goto end; \
  \
-        raw_data = per_cpu_ptr(raw_data, __cpu); \
+        trace_buf = per_cpu_ptr(trace_buf, __cpu); \
+        if (trace_buf->recursion++) \
+                goto end_recursion; \
+ \
+        barrier(); \
+ \
+        raw_data = trace_buf->buf; \
  \
         *(u64 *)(&raw_data[__entry_size - sizeof(u64)]) = 0ULL; \
         entry = (struct ftrace_raw_##call *)raw_data; \
@@ -761,6 +780,8 @@ static void ftrace_profile_##call(proto) \
         perf_tp_event(event_call->id, __addr, __count, entry, \
                         __entry_size); \
  \
+end_recursion: \
+        trace_buf->recursion--; \
 end: \
         local_irq_restore(irq_flags); \
  \
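
The change to ftrace_profile_##call above replaces the bare per-cpu raw_data pointer with a struct perf_trace_buf that pairs the scratch buffer with a recursion counter: the handler claims the buffer by incrementing the counter, bails out if it was already non-zero, and only releases it after perf_tp_event() has consumed the record, with barrier() keeping the compiler from reordering the claim past the submission. The following is a minimal user-space model of that guard, not kernel code: the buf/recursion pairing follows the diff, while the buffer size, the emit_event() stand-in and the simulated nested event are illustrative assumptions.

#include <stdio.h>

/* Simplified stand-in for the per-cpu buffer introduced by the patch:
 * a scratch area plus a recursion counter that guards it. */
struct perf_trace_buf {
        char buf[256];          /* scratch space for one event record  */
        int  recursion;         /* non-zero while the buffer is in use */
};

static struct perf_trace_buf trace_buf; /* one CPU's buffer in this model */

static void emit_event(const char *record, int depth);

/* Model of the profiling handler: claim the buffer, fill it, hand it off. */
static void profile_event(const char *name, int depth)
{
        char *raw_data;

        /* Avoid recursion that could overwrite a half-built record. */
        if (trace_buf.recursion++)
                goto end_recursion;

        raw_data = trace_buf.buf;
        snprintf(raw_data, sizeof(trace_buf.buf), "event: %s", name);

        /* In the kernel this is where perf_tp_event() runs (preceded by a
         * compiler barrier()); if it triggers another traced event, the
         * guard above rejects the nested record instead of corrupting buf. */
        emit_event(raw_data, depth);

end_recursion:
        trace_buf.recursion--;
}

static void emit_event(const char *record, int depth)
{
        printf("%s\n", record);
        if (depth > 0)                  /* simulate a re-entrant event */
                profile_event("nested", depth - 1);
}

int main(void)
{
        profile_event("top-level", 1);  /* the nested event is dropped */
        return 0;
}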
diff --git a/include/trace/syscall.h b/include/trace/syscall.h
index e972f0a40f8d..51ee17d3632a 100644
--- a/include/trace/syscall.h
+++ b/include/trace/syscall.h
@@ -39,16 +39,19 @@ void set_syscall_enter_id(int num, int id);
 void set_syscall_exit_id(int num, int id);
 extern struct trace_event event_syscall_enter;
 extern struct trace_event event_syscall_exit;
-extern int reg_event_syscall_enter(void *ptr);
-extern void unreg_event_syscall_enter(void *ptr);
-extern int reg_event_syscall_exit(void *ptr);
-extern void unreg_event_syscall_exit(void *ptr);
+
 extern int syscall_enter_format(struct ftrace_event_call *call,
                                 struct trace_seq *s);
 extern int syscall_exit_format(struct ftrace_event_call *call,
                                 struct trace_seq *s);
 extern int syscall_enter_define_fields(struct ftrace_event_call *call);
 extern int syscall_exit_define_fields(struct ftrace_event_call *call);
+extern int reg_event_syscall_enter(struct ftrace_event_call *call);
+extern void unreg_event_syscall_enter(struct ftrace_event_call *call);
+extern int reg_event_syscall_exit(struct ftrace_event_call *call);
+extern void unreg_event_syscall_exit(struct ftrace_event_call *call);
+extern int
+ftrace_format_syscall(struct ftrace_event_call *call, struct trace_seq *s);
 enum print_line_t print_syscall_enter(struct trace_iterator *iter, int flags);
 enum print_line_t print_syscall_exit(struct trace_iterator *iter, int flags);
 #endif
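
Across both files the register/unregister callbacks move from void or void * parameters to an explicit struct ftrace_event_call *, so a single callback type can serve every event and generic code can hand each callback the descriptor it is acting on. The sketch below illustrates that calling convention under stated assumptions: the reduced descriptor and the regfunc/unregfunc member names are for illustration only and omit most of the real kernel structure.

#include <stdio.h>

/* Reduced model of an event descriptor; the real struct ftrace_event_call
 * carries many more fields.  The callback types mirror the new signatures
 * in the diff. */
struct ftrace_event_call {
        const char *name;
        int (*regfunc)(struct ftrace_event_call *call);
        void (*unregfunc)(struct ftrace_event_call *call);
};

/* With the old void/void * signatures a shared helper could not tell the
 * callback which event it was acting on; now the descriptor is passed in. */
static int reg_event_demo(struct ftrace_event_call *call)
{
        printf("registering tracepoint for %s\n", call->name);
        return 0;
}

static void unreg_event_demo(struct ftrace_event_call *call)
{
        printf("unregistering tracepoint for %s\n", call->name);
}

static struct ftrace_event_call demo_event = {
        .name      = "demo",
        .regfunc   = reg_event_demo,
        .unregfunc = unreg_event_demo,
};

/* Generic enable/disable path, as core code might drive it. */
static int event_enable(struct ftrace_event_call *call, int enable)
{
        if (enable)
                return call->regfunc(call);
        call->unregfunc(call);
        return 0;
}

int main(void)
{
        event_enable(&demo_event, 1);
        event_enable(&demo_event, 0);
        return 0;
}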