author     David Ahern <dsahern@gmail.com>              2013-08-05 21:41:34 -0400
committer  Arnaldo Carvalho de Melo <acme@redhat.com>   2013-08-07 16:35:38 -0400
commit     1afe1d148491069ef51ed69fa53b09e1cb3ec30d (patch)
tree       2514c6312ce93e24f7d37819d7fab48d1117e2b7 /tools/perf
parent     e30b88a77cc8ae2a1febf268c8443a6cdd696417 (diff)
perf kvm: Add live mode
perf kvm stat currently requires back-to-back record and report commands
to see stats, e.g.:
perf kvm stat record -p $pid -- sleep 1
perf kvm stat report
This is inconvenient for on-box monitoring of a VM. This patch
introduces a 'live' mode that in effect combines the record plus report
into one command. e.g., to monitor a single VM:
perf kvm stat live -p $pid
or all VMs:
perf kvm stat live
The same stats options as the record+report path work with live mode.
The display rate defaults to 1 second and can be changed using the -d
option.
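For example, to monitor all VMs but refresh the display every 5 seconds
instead of the default 1 second (the interval value here is only
illustrative):
perf kvm stat live -d 5
Pressing 'q' while live mode is running exits the display loop.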
v4:
- address comments from Xiao -- the verify_vcpu check should not look at
the host's online processors; prune configurable options.
- set attr->{mmap,comm,task} to 0 -- task events are not needed, so trim
the events we have to deal with
- better control of time for queue event flushing to reduce frequency of
"Timestamp below last timeslice flush" failures.
v3:
updated to use existing tracepoint parsing code
v2:
removed ABSTIME arg from timerfd_settime as mentioned by Namhyung
only call perf_kvm__handle_stdin when poll returns activity.
Signed-off-by: David Ahern <dsahern@gmail.com>
Reviewed-by: Xiao Guangrong <xiaoguangrong@linux.vnet.ibm.com>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Ingo Molnar <mingo@kernel.org>
Cc: Jiri Olsa <jolsa@redhat.com>
Cc: Namhyung Kim <namhyung@kernel.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Runzhen Wang <runzhen@linux.vnet.ibm.com>
Cc: Xiao Guangrong <xiaoguangrong@linux.vnet.ibm.com>
Link: http://lkml.kernel.org/r/1375753297-69645-3-git-send-email-dsahern@gmail.com
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
Diffstat (limited to 'tools/perf')
-rw-r--r--   tools/perf/builtin-kvm.c   659
1 file changed, 633 insertions, 26 deletions
diff --git a/tools/perf/builtin-kvm.c b/tools/perf/builtin-kvm.c
index 7d14a3a45401..29bfca78613e 100644
--- a/tools/perf/builtin-kvm.c
+++ b/tools/perf/builtin-kvm.c
@@ -2,6 +2,7 @@
 #include "perf.h"
 
 #include "util/evsel.h"
+#include "util/evlist.h"
 #include "util/util.h"
 #include "util/cache.h"
 #include "util/symbol.h"
@@ -15,9 +16,12 @@
 #include <lk/debugfs.h>
 #include "util/tool.h"
 #include "util/stat.h"
+#include "util/top.h"
 
 #include <sys/prctl.h>
+#include <sys/timerfd.h>
 
+#include <termios.h>
 #include <semaphore.h>
 #include <pthread.h>
 #include <math.h>
@@ -82,6 +86,8 @@ struct exit_reasons_table {
 
 struct perf_kvm_stat {
 	struct perf_tool tool;
+	struct perf_record_opts opts;
+	struct perf_evlist *evlist;
 	struct perf_session *session;
 
 	const char *file_name;
@@ -96,10 +102,16 @@ struct perf_kvm_stat {
 	struct kvm_events_ops *events_ops;
 	key_cmp_fun compare;
 	struct list_head kvm_events_cache[EVENTS_CACHE_SIZE];
+
 	u64 total_time;
 	u64 total_count;
+	u64 lost_events;
 
 	struct rb_root result;
+
+	int timerfd;
+	unsigned int display_time;
+	bool live;
 };
 
 
@@ -320,6 +332,23 @@ static void init_kvm_event_record(struct perf_kvm_stat *kvm)
 		INIT_LIST_HEAD(&kvm->kvm_events_cache[i]);
 }
 
+static void clear_events_cache_stats(struct list_head *kvm_events_cache)
+{
+	struct list_head *head;
+	struct kvm_event *event;
+	unsigned int i;
+
+	for (i = 0; i < EVENTS_CACHE_SIZE; i++) {
+		head = &kvm_events_cache[i];
+		list_for_each_entry(event, head, hash_entry) {
+			/* reset stats for event */
+			memset(&event->total, 0, sizeof(event->total));
+			memset(event->vcpu, 0,
+			       event->max_vcpu * sizeof(*event->vcpu));
+		}
+	}
+}
+
 static int kvm_events_hash_fn(u64 key)
 {
 	return key & (EVENTS_CACHE_SIZE - 1);
@@ -472,7 +501,11 @@ static bool handle_end_event(struct perf_kvm_stat *kvm,
 	vcpu_record->last_event = NULL;
 	vcpu_record->start_time = 0;
 
-	BUG_ON(timestamp < time_begin);
+	/* seems to happen once in a while during live mode */
+	if (timestamp < time_begin) {
+		pr_debug("End time before begin time; skipping event.\n");
+		return true;
+	}
 
 	time_diff = timestamp - time_begin;
 	return update_kvm_event(event, vcpu, time_diff);
@@ -639,24 +672,56 @@ static struct kvm_event *pop_from_result(struct rb_root *result)
 	return container_of(node, struct kvm_event, rb);
 }
 
-static void print_vcpu_info(int vcpu)
+static void print_vcpu_info(struct perf_kvm_stat *kvm)
 {
+	int vcpu = kvm->trace_vcpu;
+
 	pr_info("Analyze events for ");
 
+	if (kvm->live) {
+		if (kvm->opts.target.system_wide)
+			pr_info("all VMs, ");
+		else if (kvm->opts.target.pid)
+			pr_info("pid(s) %s, ", kvm->opts.target.pid);
+		else
+			pr_info("dazed and confused on what is monitored, ");
+	}
+
 	if (vcpu == -1)
 		pr_info("all VCPUs:\n\n");
 	else
 		pr_info("VCPU %d:\n\n", vcpu);
 }
 
+static void show_timeofday(void)
+{
+	char date[64];
+	struct timeval tv;
+	struct tm ltime;
+
+	gettimeofday(&tv, NULL);
+	if (localtime_r(&tv.tv_sec, &ltime)) {
+		strftime(date, sizeof(date), "%H:%M:%S", &ltime);
+		pr_info("%s.%06ld", date, tv.tv_usec);
+	} else
+		pr_info("00:00:00.000000");
+
+	return;
+}
+
 static void print_result(struct perf_kvm_stat *kvm)
 {
 	char decode[20];
 	struct kvm_event *event;
 	int vcpu = kvm->trace_vcpu;
 
+	if (kvm->live) {
+		puts(CONSOLE_CLEAR);
+		show_timeofday();
+	}
+
 	pr_info("\n\n");
-	print_vcpu_info(vcpu);
+	print_vcpu_info(kvm);
 	pr_info("%20s ", kvm->events_ops->name);
 	pr_info("%10s ", "Samples");
 	pr_info("%9s ", "Samples%");
@@ -683,6 +748,20 @@ static void print_result(struct perf_kvm_stat *kvm)
 
 	pr_info("\nTotal Samples:%" PRIu64 ", Total events handled time:%.2fus.\n\n",
 		kvm->total_count, kvm->total_time / 1e3);
+
+	if (kvm->lost_events)
+		pr_info("\nLost events: %" PRIu64 "\n\n", kvm->lost_events);
+}
+
+static int process_lost_event(struct perf_tool *tool,
+			      union perf_event *event __maybe_unused,
+			      struct perf_sample *sample __maybe_unused,
+			      struct machine *machine __maybe_unused)
+{
+	struct perf_kvm_stat *kvm = container_of(tool, struct perf_kvm_stat, tool);
+
+	kvm->lost_events++;
+	return 0;
 }
 
 static int process_sample_event(struct perf_tool *tool,
@@ -707,10 +786,20 @@ static int process_sample_event(struct perf_tool *tool,
 	return 0;
 }
 
-static int get_cpu_isa(struct perf_session *session)
+static int cpu_isa_config(struct perf_kvm_stat *kvm)
 {
-	char *cpuid = session->header.env.cpuid;
-	int isa;
+	char buf[64], *cpuid;
+	int err, isa;
+
+	if (kvm->live) {
+		err = get_cpuid(buf, sizeof(buf));
+		if (err != 0) {
+			pr_err("Failed to look up CPU type (Intel or AMD)\n");
+			return err;
+		}
+		cpuid = buf;
+	} else
+		cpuid = kvm->session->header.env.cpuid;
 
 	if (strstr(cpuid, "Intel"))
 		isa = 1;
@@ -718,10 +807,361 @@ static int get_cpu_isa(struct perf_session *session)
 		isa = 0;
 	else {
 		pr_err("CPU %s is not supported.\n", cpuid);
-		isa = -ENOTSUP;
+		return -ENOTSUP;
+	}
+
+	if (isa == 1) {
+		kvm->exit_reasons = vmx_exit_reasons;
+		kvm->exit_reasons_size = ARRAY_SIZE(vmx_exit_reasons);
+		kvm->exit_reasons_isa = "VMX";
+	}
+
+	return 0;
+}
+
+static bool verify_vcpu(int vcpu)
+{
+	if (vcpu != -1 && vcpu < 0) {
+		pr_err("Invalid vcpu:%d.\n", vcpu);
+		return false;
+	}
+
+	return true;
+}
+
+/* keeping the max events to a modest level to keep
+ * the processing of samples per mmap smooth.
+ */
+#define PERF_KVM__MAX_EVENTS_PER_MMAP 25
+
+static s64 perf_kvm__mmap_read_idx(struct perf_kvm_stat *kvm, int idx,
+				   u64 *mmap_time)
+{
+	union perf_event *event;
+	struct perf_sample sample;
+	s64 n = 0;
+	int err;
+
+	*mmap_time = ULLONG_MAX;
+	while ((event = perf_evlist__mmap_read(kvm->evlist, idx)) != NULL) {
+		err = perf_evlist__parse_sample(kvm->evlist, event, &sample);
+		if (err) {
+			pr_err("Failed to parse sample\n");
+			return -1;
+		}
+
+		err = perf_session_queue_event(kvm->session, event, &sample, 0);
+		if (err) {
+			pr_err("Failed to enqueue sample: %d\n", err);
+			return -1;
+		}
+
+		/* save time stamp of our first sample for this mmap */
+		if (n == 0)
+			*mmap_time = sample.time;
+
+		/* limit events per mmap handled all at once */
+		n++;
+		if (n == PERF_KVM__MAX_EVENTS_PER_MMAP)
+			break;
+	}
+
+	return n;
+}
+
+static int perf_kvm__mmap_read(struct perf_kvm_stat *kvm)
+{
+	int i, err, throttled = 0;
+	s64 n, ntotal = 0;
+	u64 flush_time = ULLONG_MAX, mmap_time;
+
+	for (i = 0; i < kvm->evlist->nr_mmaps; i++) {
+		n = perf_kvm__mmap_read_idx(kvm, i, &mmap_time);
+		if (n < 0)
+			return -1;
+
+		/* flush time is going to be the minimum of all the individual
+		 * mmap times. Essentially, we flush all the samples queued up
+		 * from the last pass under our minimal start time -- that leaves
+		 * a very small race for samples to come in with a lower timestamp.
+		 * The ioctl to return the perf_clock timestamp should close the
+		 * race entirely.
+		 */
+		if (mmap_time < flush_time)
+			flush_time = mmap_time;
+
+		ntotal += n;
+		if (n == PERF_KVM__MAX_EVENTS_PER_MMAP)
+			throttled = 1;
+	}
+
+	/* flush queue after each round in which we processed events */
+	if (ntotal) {
+		kvm->session->ordered_samples.next_flush = flush_time;
+		err = kvm->tool.finished_round(&kvm->tool, NULL, kvm->session);
+		if (err) {
+			if (kvm->lost_events)
+				pr_info("\nLost events: %" PRIu64 "\n\n",
+					kvm->lost_events);
+			return err;
+		}
+	}
+
+	return throttled;
+}
+
+static volatile int done;
+
+static void sig_handler(int sig __maybe_unused)
+{
+	done = 1;
+}
+
+static int perf_kvm__timerfd_create(struct perf_kvm_stat *kvm)
+{
+	struct itimerspec new_value;
+	int rc = -1;
+
+	kvm->timerfd = timerfd_create(CLOCK_MONOTONIC, TFD_NONBLOCK);
+	if (kvm->timerfd < 0) {
+		pr_err("timerfd_create failed\n");
+		goto out;
+	}
+
+	new_value.it_value.tv_sec = kvm->display_time;
+	new_value.it_value.tv_nsec = 0;
+	new_value.it_interval.tv_sec = kvm->display_time;
+	new_value.it_interval.tv_nsec = 0;
+
+	if (timerfd_settime(kvm->timerfd, 0, &new_value, NULL) != 0) {
+		pr_err("timerfd_settime failed: %d\n", errno);
+		close(kvm->timerfd);
+		goto out;
+	}
+
+	rc = 0;
+out:
+	return rc;
+}
+
+static int perf_kvm__handle_timerfd(struct perf_kvm_stat *kvm)
+{
+	uint64_t c;
+	int rc;
+
+	rc = read(kvm->timerfd, &c, sizeof(uint64_t));
+	if (rc < 0) {
+		if (errno == EAGAIN)
+			return 0;
+
+		pr_err("Failed to read timer fd: %d\n", errno);
+		return -1;
+	}
+
+	if (rc != sizeof(uint64_t)) {
+		pr_err("Error reading timer fd - invalid size returned\n");
+		return -1;
+	}
+
+	if (c != 1)
+		pr_debug("Missed timer beats: %" PRIu64 "\n", c-1);
+
+	/* update display */
+	sort_result(kvm);
+	print_result(kvm);
+
+	/* reset counts */
+	clear_events_cache_stats(kvm->kvm_events_cache);
+	kvm->total_count = 0;
+	kvm->total_time = 0;
+	kvm->lost_events = 0;
+
+	return 0;
+}
+
+static int fd_set_nonblock(int fd)
+{
+	long arg = 0;
+
+	arg = fcntl(fd, F_GETFL);
+	if (arg < 0) {
+		pr_err("Failed to get current flags for fd %d\n", fd);
+		return -1;
+	}
+
+	if (fcntl(fd, F_SETFL, arg | O_NONBLOCK) < 0) {
+		pr_err("Failed to set non-block option on fd %d\n", fd);
+		return -1;
+	}
+
+	return 0;
+}
+
+static
+int perf_kvm__handle_stdin(struct termios *tc_now, struct termios *tc_save)
+{
+	int c;
+
+	tcsetattr(0, TCSANOW, tc_now);
+	c = getc(stdin);
+	tcsetattr(0, TCSAFLUSH, tc_save);
+
+	if (c == 'q')
+		return 1;
+
+	return 0;
+}
+
+static int kvm_events_live_report(struct perf_kvm_stat *kvm)
+{
+	struct pollfd *pollfds = NULL;
+	int nr_fds, nr_stdin, ret, err = -EINVAL;
+	struct termios tc, save;
+
+	/* live flag must be set first */
+	kvm->live = true;
+
+	ret = cpu_isa_config(kvm);
+	if (ret < 0)
+		return ret;
+
+	if (!verify_vcpu(kvm->trace_vcpu) ||
+	    !select_key(kvm) ||
+	    !register_kvm_events_ops(kvm)) {
+		goto out;
+	}
+
+	init_kvm_event_record(kvm);
+
+	tcgetattr(0, &save);
+	tc = save;
+	tc.c_lflag &= ~(ICANON | ECHO);
+	tc.c_cc[VMIN] = 0;
+	tc.c_cc[VTIME] = 0;
+
+	signal(SIGINT, sig_handler);
+	signal(SIGTERM, sig_handler);
+
+	/* copy pollfds -- need to add timerfd and stdin */
+	nr_fds = kvm->evlist->nr_fds;
+	pollfds = zalloc(sizeof(struct pollfd) * (nr_fds + 2));
+	if (!pollfds) {
+		err = -ENOMEM;
+		goto out;
+	}
+	memcpy(pollfds, kvm->evlist->pollfd,
+	       sizeof(struct pollfd) * kvm->evlist->nr_fds);
+
+	/* add timer fd */
+	if (perf_kvm__timerfd_create(kvm) < 0) {
+		err = -1;
+		goto out;
+	}
+
+	pollfds[nr_fds].fd = kvm->timerfd;
+	pollfds[nr_fds].events = POLLIN;
+	nr_fds++;
+
+	pollfds[nr_fds].fd = fileno(stdin);
+	pollfds[nr_fds].events = POLLIN;
+	nr_stdin = nr_fds;
+	nr_fds++;
+	if (fd_set_nonblock(fileno(stdin)) != 0)
+		goto out;
+
+	/* everything is good - enable the events and process */
+	perf_evlist__enable(kvm->evlist);
+
+	while (!done) {
+		int rc;
+
+		rc = perf_kvm__mmap_read(kvm);
+		if (rc < 0)
+			break;
+
+		err = perf_kvm__handle_timerfd(kvm);
+		if (err)
+			goto out;
+
+		if (pollfds[nr_stdin].revents & POLLIN)
+			done = perf_kvm__handle_stdin(&tc, &save);
+
+		if (!rc && !done)
+			err = poll(pollfds, nr_fds, 100);
+	}
+
+	perf_evlist__disable(kvm->evlist);
+
+	if (err == 0) {
+		sort_result(kvm);
+		print_result(kvm);
+	}
+
+out:
+	if (kvm->timerfd >= 0)
+		close(kvm->timerfd);
+
+	if (pollfds)
+		free(pollfds);
+
+	return err;
+}
+
+static int kvm_live_open_events(struct perf_kvm_stat *kvm)
+{
+	int err, rc = -1;
+	struct perf_evsel *pos;
+	struct perf_evlist *evlist = kvm->evlist;
+
+	perf_evlist__config(evlist, &kvm->opts);
+
+	/*
+	 * Note: exclude_{guest,host} do not apply here.
+	 * This command processes KVM tracepoints from host only
+	 */
+	list_for_each_entry(pos, &evlist->entries, node) {
+		struct perf_event_attr *attr = &pos->attr;
+
+		/* make sure these *are* set */
+		attr->sample_type |= PERF_SAMPLE_TID;
+		attr->sample_type |= PERF_SAMPLE_TIME;
+		attr->sample_type |= PERF_SAMPLE_CPU;
+		attr->sample_type |= PERF_SAMPLE_RAW;
+		/* make sure these are *not*; want as small a sample as possible */
+		attr->sample_type &= ~PERF_SAMPLE_PERIOD;
+		attr->sample_type &= ~PERF_SAMPLE_IP;
+		attr->sample_type &= ~PERF_SAMPLE_CALLCHAIN;
+		attr->sample_type &= ~PERF_SAMPLE_ADDR;
+		attr->sample_type &= ~PERF_SAMPLE_READ;
+		attr->mmap = 0;
+		attr->comm = 0;
+		attr->task = 0;
+
+		attr->sample_period = 1;
+
+		attr->watermark = 0;
+		attr->wakeup_events = 1000;
+
+		/* will enable all once we are ready */
+		attr->disabled = 1;
+	}
+
+	err = perf_evlist__open(evlist);
+	if (err < 0) {
+		printf("Couldn't create the events: %s\n", strerror(errno));
+		goto out;
 	}
 
-	return isa;
+	if (perf_evlist__mmap(evlist, kvm->opts.mmap_pages, false) < 0) {
+		ui__error("Failed to mmap the events: %s\n", strerror(errno));
+		perf_evlist__close(evlist);
+		goto out;
+	}
+
+	rc = 0;
+
+out:
+	return rc;
 }
 
 static int read_events(struct perf_kvm_stat *kvm)
@@ -749,30 +1189,13 @@ static int read_events(struct perf_kvm_stat *kvm)
 	 * Do not use 'isa' recorded in kvm_exit tracepoint since it is not
 	 * traced in the old kernel.
 	 */
-	ret = get_cpu_isa(kvm->session);
-
+	ret = cpu_isa_config(kvm);
 	if (ret < 0)
 		return ret;
 
-	if (ret == 1) {
-		kvm->exit_reasons = vmx_exit_reasons;
-		kvm->exit_reasons_size = ARRAY_SIZE(vmx_exit_reasons);
-		kvm->exit_reasons_isa = "VMX";
-	}
-
 	return perf_session__process_events(kvm->session, &kvm->tool);
 }
 
-static bool verify_vcpu(int vcpu)
-{
-	if (vcpu != -1 && vcpu < 0) {
-		pr_err("Invalid vcpu:%d.\n", vcpu);
-		return false;
-	}
-
-	return true;
-}
-
 static int kvm_events_report_vcpu(struct perf_kvm_stat *kvm)
 {
 	int ret = -EINVAL;
@@ -886,6 +1309,186 @@ kvm_events_report(struct perf_kvm_stat *kvm, int argc, const char **argv)
 	return kvm_events_report_vcpu(kvm);
 }
 
+static struct perf_evlist *kvm_live_event_list(void)
+{
+	struct perf_evlist *evlist;
+	char *tp, *name, *sys;
+	unsigned int j;
+	int err = -1;
+
+	evlist = perf_evlist__new();
+	if (evlist == NULL)
+		return NULL;
+
+	for (j = 0; j < ARRAY_SIZE(kvm_events_tp); j++) {
+
+		tp = strdup(kvm_events_tp[j]);
+		if (tp == NULL)
+			goto out;
+
+		/* split tracepoint into subsystem and name */
+		sys = tp;
+		name = strchr(tp, ':');
+		if (name == NULL) {
+			pr_err("Error parsing %s tracepoint: subsystem delimiter not found\n",
+			       kvm_events_tp[j]);
+			free(tp);
+			goto out;
+		}
+		*name = '\0';
+		name++;
+
+		if (perf_evlist__add_newtp(evlist, sys, name, NULL)) {
+			pr_err("Failed to add %s tracepoint to the list\n", kvm_events_tp[j]);
+			free(tp);
+			goto out;
+		}
+
+		free(tp);
+	}
+
+	err = 0;
+
+out:
+	if (err) {
+		perf_evlist__delete(evlist);
+		evlist = NULL;
+	}
+
+	return evlist;
+}
+
+static int kvm_events_live(struct perf_kvm_stat *kvm,
+			   int argc, const char **argv)
+{
+	char errbuf[BUFSIZ];
+	int err;
+
+	const struct option live_options[] = {
+		OPT_STRING('p', "pid", &kvm->opts.target.pid, "pid",
+			"record events on existing process id"),
+		OPT_UINTEGER('m', "mmap-pages", &kvm->opts.mmap_pages,
+			"number of mmap data pages"),
+		OPT_INCR('v', "verbose", &verbose,
+			"be more verbose (show counter open errors, etc)"),
+		OPT_BOOLEAN('a', "all-cpus", &kvm->opts.target.system_wide,
+			"system-wide collection from all CPUs"),
+		OPT_UINTEGER('d', "display", &kvm->display_time,
+			"time in seconds between display updates"),
+		OPT_STRING(0, "event", &kvm->report_event, "report event",
+			"event for reporting: vmexit, mmio, ioport"),
+		OPT_INTEGER(0, "vcpu", &kvm->trace_vcpu,
+			"vcpu id to report"),
+		OPT_STRING('k', "key", &kvm->sort_key, "sort-key",
+			"key for sorting: sample(sort by samples number)"
+			" time (sort by avg time)"),
+		OPT_END()
+	};
+	const char * const live_usage[] = {
+		"perf kvm stat live [<options>]",
+		NULL
+	};
+
+
+	/* event handling */
+	kvm->tool.sample = process_sample_event;
+	kvm->tool.comm   = perf_event__process_comm;
+	kvm->tool.exit   = perf_event__process_exit;
+	kvm->tool.fork   = perf_event__process_fork;
+	kvm->tool.lost   = process_lost_event;
+	kvm->tool.ordered_samples = true;
+	perf_tool__fill_defaults(&kvm->tool);
+
+	/* set defaults */
+	kvm->display_time = 1;
+	kvm->opts.user_interval = 1;
+	kvm->opts.mmap_pages = 512;
+	kvm->opts.target.uses_mmap = false;
+	kvm->opts.target.uid_str = NULL;
+	kvm->opts.target.uid = UINT_MAX;
+
+	symbol__init();
+	disable_buildid_cache();
+
+	use_browser = 0;
+	setup_browser(false);
+
+	if (argc) {
+		argc = parse_options(argc, argv, live_options,
+				     live_usage, 0);
+		if (argc)
+			usage_with_options(live_usage, live_options);
+	}
+
+	/*
+	 * target related setups
+	 */
+	err = perf_target__validate(&kvm->opts.target);
+	if (err) {
+		perf_target__strerror(&kvm->opts.target, err, errbuf, BUFSIZ);
+		ui__warning("%s", errbuf);
+	}
+
+	if (perf_target__none(&kvm->opts.target))
+		kvm->opts.target.system_wide = true;
+
+
+	/*
+	 * generate the event list
+	 */
+	kvm->evlist = kvm_live_event_list();
+	if (kvm->evlist == NULL) {
+		err = -1;
+		goto out;
+	}
+
+	symbol_conf.nr_events = kvm->evlist->nr_entries;
+
+	if (perf_evlist__create_maps(kvm->evlist, &kvm->opts.target) < 0)
+		usage_with_options(live_usage, live_options);
+
+	/*
+	 * perf session
+	 */
+	kvm->session = perf_session__new(NULL, O_WRONLY, false, false, &kvm->tool);
+	if (kvm->session == NULL) {
+		err = -ENOMEM;
+		goto out;
+	}
+	kvm->session->evlist = kvm->evlist;
+	perf_session__set_id_hdr_size(kvm->session);
+
+
+	if (perf_target__has_task(&kvm->opts.target))
+		perf_event__synthesize_thread_map(&kvm->tool,
+						  kvm->evlist->threads,
+						  perf_event__process,
+						  &kvm->session->machines.host);
+	else
+		perf_event__synthesize_threads(&kvm->tool, perf_event__process,
+					       &kvm->session->machines.host);
+
+
+	err = kvm_live_open_events(kvm);
+	if (err)
+		goto out;
+
+	err = kvm_events_live_report(kvm);
+
+out:
+	exit_browser(0);
+
+	if (kvm->session)
+		perf_session__delete(kvm->session);
+	kvm->session = NULL;
+	if (kvm->evlist) {
+		perf_evlist__delete_maps(kvm->evlist);
+		perf_evlist__delete(kvm->evlist);
+	}
+
+	return err;
+}
+
 static void print_kvm_stat_usage(void)
 {
 	printf("Usage: perf kvm stat <command>\n\n");
@@ -893,6 +1496,7 @@ static void print_kvm_stat_usage(void)
 	printf("# Available commands:\n");
 	printf("\trecord: record kvm events\n");
 	printf("\treport: report statistical data of kvm events\n");
+	printf("\tlive: live reporting of statistical data of kvm events\n");
 
 	printf("\nOtherwise, it is the alias of 'perf stat':\n");
 }
@@ -922,6 +1526,9 @@ static int kvm_cmd_stat(const char *file_name, int argc, const char **argv)
 	if (!strncmp(argv[1], "rep", 3))
 		return kvm_events_report(&kvm, argc - 1 , argv + 1);
 
+	if (!strncmp(argv[1], "live", 4))
+		return kvm_events_live(&kvm, argc - 1 , argv + 1);
+
 perf_stat:
 	return cmd_stat(argc, argv, NULL);
 }