author		Frederic Weisbecker <fweisbec@gmail.com>	2009-09-11 21:59:01 -0400
committer	Ingo Molnar <mingo@elte.hu>			2009-09-13 04:22:42 -0400
commit		419ab0d6a959f41ec7fde807fe311aaafb05c3be
tree		e64f1de57d50401c743a076d96f9a8c3f2bd1287	/tools/perf
parent		46538818023e8ea94f656acfa1e38297e2df20e2
perf sched: Make it easier to plug in new sub profilers
Create a sched event structure of handlers into which the various
sched event readers can plug their own callbacks.
This makes it easier to add new perf sched sub-commands.
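As a sketch of how such a sub-profiler would plug in (the latency_* names below are hypothetical and not part of this patch), a new reader only has to fill a trace_sched_handler with its callbacks and get selected in cmd_sched(), the way replay_ops is wired up in the diff. All three callbacks are stubbed here because the process_sched_*_event() dispatchers invoke them unconditionally:

	/* hypothetical example -- illustration only, not part of this patch */
	static void latency_wakeup_event(struct trace_wakeup_event *wakeup_event __used,
					 struct event *event __used, int cpu __used,
					 u64 timestamp __used, struct thread *thread __used)
	{
		/* record the wakeup timestamp for later latency accounting */
	}

	static void latency_switch_event(struct trace_switch_event *switch_event __used,
					 struct event *event __used, int cpu __used,
					 u64 timestamp __used, struct thread *thread __used)
	{
		/* charge run/wait time to the tasks named in switch_event */
	}

	static void latency_fork_event(struct trace_fork_event *fork_event __used,
				       struct event *event __used, int cpu __used,
				       u64 timestamp __used, struct thread *thread __used)
	{
		/* nothing to do yet */
	}

	static struct trace_sched_handler latency_ops = {
		.wakeup_event	= latency_wakeup_event,
		.switch_event	= latency_switch_event,
		.fork_event	= latency_fork_event,
	};

A matching OPT_BOOLEAN() entry in options[] would then set trace_handler = &latency_ops in cmd_sched(), mirroring the new -r/replay switch.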
Signed-off-by: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Arnaldo Carvalho de Melo <acme@redhat.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'tools/perf')
-rw-r--r--	tools/perf/builtin-sched.c	243
1 file changed, 165 insertions(+), 78 deletions(-)
diff --git a/tools/perf/builtin-sched.c b/tools/perf/builtin-sched.c
index 0215936696ed..756fe62eb046 100644
--- a/tools/perf/builtin-sched.c
+++ b/tools/perf/builtin-sched.c
@@ -14,6 +14,9 @@
 #include "util/trace-event.h"
 #include <sys/types.h>
 
+
+#define MAX_CPUS 4096
+
 static char const *input_name = "perf.data";
 static int input;
 static unsigned long page_size;
@@ -27,6 +30,8 @@ static struct thread *last_match;
 static struct perf_header *header;
 static u64 sample_type;
 
+static int replay_mode;
+
 
 /*
  * Scheduler benchmarks
@@ -677,6 +682,27 @@ do {						\
 	FILL_FIELD(ptr, common_tgid, event, data);	\
 } while (0)
 
+
+
+struct trace_switch_event {
+	u32 size;
+
+	u16 common_type;
+	u8 common_flags;
+	u8 common_preempt_count;
+	u32 common_pid;
+	u32 common_tgid;
+
+	char prev_comm[16];
+	u32 prev_pid;
+	u32 prev_prio;
+	u64 prev_state;
+	char next_comm[16];
+	u32 next_pid;
+	u32 next_prio;
+};
+
+
 struct trace_wakeup_event {
 	u32 size;
 
@@ -694,78 +720,79 @@ struct trace_wakeup_event {
 	u32 cpu;
 };
 
-static void
-process_sched_wakeup_event(struct raw_event_sample *raw, struct event *event,
-	int cpu __used, u64 timestamp __used, struct thread *thread __used)
-{
-	struct task_desc *waker, *wakee;
-	struct trace_wakeup_event wakeup_event;
+struct trace_fork_event {
+	u32 size;
 
-	FILL_COMMON_FIELDS(wakeup_event, event, raw->data);
+	u16 common_type;
+	u8 common_flags;
+	u8 common_preempt_count;
+	u32 common_pid;
+	u32 common_tgid;
+
+	char parent_comm[16];
+	u32 parent_pid;
+	char child_comm[16];
+	u32 child_pid;
+};
+
+struct trace_sched_handler {
+	void (*switch_event)(struct trace_switch_event *,
+			     struct event *,
+			     int cpu,
+			     u64 timestamp,
+			     struct thread *thread);
+
+	void (*wakeup_event)(struct trace_wakeup_event *,
+			     struct event *,
+			     int cpu,
+			     u64 timestamp,
+			     struct thread *thread);
+
+	void (*fork_event)(struct trace_fork_event *,
+			   struct event *,
+			   int cpu,
+			   u64 timestamp,
+			   struct thread *thread);
+};
 
-	FILL_ARRAY(wakeup_event, comm, event, raw->data);
-	FILL_FIELD(wakeup_event, pid, event, raw->data);
-	FILL_FIELD(wakeup_event, prio, event, raw->data);
-	FILL_FIELD(wakeup_event, success, event, raw->data);
-	FILL_FIELD(wakeup_event, cpu, event, raw->data);
 
+static void
+replay_wakeup_event(struct trace_wakeup_event *wakeup_event,
+		    struct event *event,
+		    int cpu __used,
+		    u64 timestamp __used,
+		    struct thread *thread __used)
+{
+	struct task_desc *waker, *wakee;
 
 	if (verbose) {
 		printf("sched_wakeup event %p\n", event);
 
 		printf(" ... pid %d woke up %s/%d\n",
-			wakeup_event.common_pid,
-			wakeup_event.comm,
-			wakeup_event.pid);
+			wakeup_event->common_pid,
+			wakeup_event->comm,
+			wakeup_event->pid);
 	}
 
-	waker = register_pid(wakeup_event.common_pid, "<unknown>");
-	wakee = register_pid(wakeup_event.pid, wakeup_event.comm);
+	waker = register_pid(wakeup_event->common_pid, "<unknown>");
+	wakee = register_pid(wakeup_event->pid, wakeup_event->comm);
 
 	add_sched_event_wakeup(waker, timestamp, wakee);
 }
 
-struct trace_switch_event {
-	u32 size;
-
-	u16 common_type;
-	u8 common_flags;
-	u8 common_preempt_count;
-	u32 common_pid;
-	u32 common_tgid;
-
-	char prev_comm[16];
-	u32 prev_pid;
-	u32 prev_prio;
-	u64 prev_state;
-	char next_comm[16];
-	u32 next_pid;
-	u32 next_prio;
-};
-
-#define MAX_CPUS 4096
-
-unsigned long cpu_last_switched[MAX_CPUS];
+static unsigned long cpu_last_switched[MAX_CPUS];
 
 static void
-process_sched_switch_event(struct raw_event_sample *raw, struct event *event,
-	int cpu __used, u64 timestamp __used, struct thread *thread __used)
+replay_switch_event(struct trace_switch_event *switch_event,
+		    struct event *event,
+		    int cpu,
+		    u64 timestamp,
+		    struct thread *thread __used)
 {
-	struct trace_switch_event switch_event;
 	struct task_desc *prev, *next;
 	u64 timestamp0;
 	s64 delta;
 
-	FILL_COMMON_FIELDS(switch_event, event, raw->data);
-
-	FILL_ARRAY(switch_event, prev_comm, event, raw->data);
-	FILL_FIELD(switch_event, prev_pid, event, raw->data);
-	FILL_FIELD(switch_event, prev_prio, event, raw->data);
-	FILL_FIELD(switch_event, prev_state, event, raw->data);
-	FILL_ARRAY(switch_event, next_comm, event, raw->data);
-	FILL_FIELD(switch_event, next_pid, event, raw->data);
-	FILL_FIELD(switch_event, next_prio, event, raw->data);
-
 	if (verbose)
 		printf("sched_switch event %p\n", event);
 
@@ -783,38 +810,94 @@ process_sched_switch_event(struct raw_event_sample *raw, struct event *event,
 
 	if (verbose) {
 		printf(" ... switch from %s/%d to %s/%d [ran %Ld nsecs]\n",
-			switch_event.prev_comm, switch_event.prev_pid,
-			switch_event.next_comm, switch_event.next_pid,
+			switch_event->prev_comm, switch_event->prev_pid,
+			switch_event->next_comm, switch_event->next_pid,
 			delta);
 	}
 
-	prev = register_pid(switch_event.prev_pid, switch_event.prev_comm);
-	next = register_pid(switch_event.next_pid, switch_event.next_comm);
+	prev = register_pid(switch_event->prev_pid, switch_event->prev_comm);
+	next = register_pid(switch_event->next_pid, switch_event->next_comm);
 
 	cpu_last_switched[cpu] = timestamp;
 
 	add_sched_event_run(prev, timestamp, delta);
-	add_sched_event_sleep(prev, timestamp, switch_event.prev_state);
+	add_sched_event_sleep(prev, timestamp, switch_event->prev_state);
 }
 
-struct trace_fork_event {
-	u32 size;
 
-	u16 common_type;
-	u8 common_flags;
-	u8 common_preempt_count;
-	u32 common_pid;
-	u32 common_tgid;
+static void
+replay_fork_event(struct trace_fork_event *fork_event,
+		  struct event *event,
+		  int cpu __used,
+		  u64 timestamp __used,
+		  struct thread *thread __used)
+{
+	if (verbose) {
+		printf("sched_fork event %p\n", event);
+		printf("... parent: %s/%d\n", fork_event->parent_comm, fork_event->parent_pid);
+		printf("... child: %s/%d\n", fork_event->child_comm, fork_event->child_pid);
+	}
+	register_pid(fork_event->parent_pid, fork_event->parent_comm);
+	register_pid(fork_event->child_pid, fork_event->child_comm);
+}
 
-	char parent_comm[16];
-	u32 parent_pid;
-	char child_comm[16];
-	u32 child_pid;
+static struct trace_sched_handler replay_ops = {
+	.wakeup_event	= replay_wakeup_event,
+	.switch_event	= replay_switch_event,
+	.fork_event	= replay_fork_event,
 };
 
+
+static struct trace_sched_handler *trace_handler;
+
 static void
-process_sched_fork_event(struct raw_event_sample *raw, struct event *event,
-	int cpu __used, u64 timestamp __used, struct thread *thread __used)
+process_sched_wakeup_event(struct raw_event_sample *raw,
+			   struct event *event,
+			   int cpu __used,
+			   u64 timestamp __used,
+			   struct thread *thread __used)
+{
+	struct trace_wakeup_event wakeup_event;
+
+	FILL_COMMON_FIELDS(wakeup_event, event, raw->data);
+
+	FILL_ARRAY(wakeup_event, comm, event, raw->data);
+	FILL_FIELD(wakeup_event, pid, event, raw->data);
+	FILL_FIELD(wakeup_event, prio, event, raw->data);
+	FILL_FIELD(wakeup_event, success, event, raw->data);
+	FILL_FIELD(wakeup_event, cpu, event, raw->data);
+
+	trace_handler->wakeup_event(&wakeup_event, event, cpu, timestamp, thread);
+}
+
+static void
+process_sched_switch_event(struct raw_event_sample *raw,
+			   struct event *event,
+			   int cpu __used,
+			   u64 timestamp __used,
+			   struct thread *thread __used)
+{
+	struct trace_switch_event switch_event;
+
+	FILL_COMMON_FIELDS(switch_event, event, raw->data);
+
+	FILL_ARRAY(switch_event, prev_comm, event, raw->data);
+	FILL_FIELD(switch_event, prev_pid, event, raw->data);
+	FILL_FIELD(switch_event, prev_prio, event, raw->data);
+	FILL_FIELD(switch_event, prev_state, event, raw->data);
+	FILL_ARRAY(switch_event, next_comm, event, raw->data);
+	FILL_FIELD(switch_event, next_pid, event, raw->data);
+	FILL_FIELD(switch_event, next_prio, event, raw->data);
+
+	trace_handler->switch_event(&switch_event, event, cpu, timestamp, thread);
+}
+
+static void
+process_sched_fork_event(struct raw_event_sample *raw,
+			 struct event *event,
+			 int cpu __used,
+			 u64 timestamp __used,
+			 struct thread *thread __used)
 {
 	struct trace_fork_event fork_event;
 
@@ -825,17 +908,14 @@ process_sched_fork_event(struct raw_event_sample *raw, struct event *event,
 	FILL_ARRAY(fork_event, child_comm, event, raw->data);
 	FILL_FIELD(fork_event, child_pid, event, raw->data);
 
-	if (verbose) {
-		printf("sched_fork event %p\n", event);
-		printf("... parent: %s/%d\n", fork_event.parent_comm, fork_event.parent_pid);
-		printf("... child: %s/%d\n", fork_event.child_comm, fork_event.child_pid);
-	}
-	register_pid(fork_event.parent_pid, fork_event.parent_comm);
-	register_pid(fork_event.child_pid, fork_event.child_comm);
+	trace_handler->fork_event(&fork_event, event, cpu, timestamp, thread);
 }
 
-static void process_sched_exit_event(struct event *event,
-	int cpu __used, u64 timestamp __used, struct thread *thread __used)
+static void
+process_sched_exit_event(struct event *event,
+			 int cpu __used,
+			 u64 timestamp __used,
+			 struct thread *thread __used)
 {
 	if (verbose)
 		printf("sched_exit event %p\n", event);
@@ -1072,6 +1152,8 @@ static const char * const annotate_usage[] = {
 static const struct option options[] = {
 	OPT_BOOLEAN('D', "dump-raw-trace", &dump_trace,
 		    "dump raw trace in ASCII"),
+	OPT_BOOLEAN('r', "replay", &replay_mode,
+		    "replay sched behaviour from traces"),
 	OPT_BOOLEAN('v', "verbose", &verbose,
 		    "be more verbose (show symbol address, etc)"),
 	OPT_END()
@@ -1096,6 +1178,11 @@ int cmd_sched(int argc, const char **argv, const char *prefix __used)
 
 //	setup_pager();
 
+	if (replay_mode)
+		trace_handler = &replay_ops;
+	else /* We may need a default subcommand */
+		die("Please select a sub command (-r)\n");
+
 	calibrate_run_measurement_overhead();
 	calibrate_sleep_measurement_overhead();
 
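Usage note: with this patch applied, the replay sub-profiler is selected with "perf sched -r", reading events from the perf.data file named by input_name above; invoking perf sched without -r now dies with "Please select a sub command (-r)", since no default sub command is wired up yet.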