diff options
Diffstat (limited to 'tools/perf/util/evlist.c')
-rw-r--r-- | tools/perf/util/evlist.c | 70 |
1 file changed, 43 insertions, 27 deletions
diff --git a/tools/perf/util/evlist.c b/tools/perf/util/evlist.c index b62e523a7035..ac35cd214feb 100644 --- a/tools/perf/util/evlist.c +++ b/tools/perf/util/evlist.c | |||
@@ -25,6 +25,7 @@ | |||
25 | #include "parse-events.h" | 25 | #include "parse-events.h" |
26 | #include <subcmd/parse-options.h> | 26 | #include <subcmd/parse-options.h> |
27 | 27 | ||
28 | #include <fcntl.h> | ||
28 | #include <sys/ioctl.h> | 29 | #include <sys/ioctl.h> |
29 | #include <sys/mman.h> | 30 | #include <sys/mman.h> |
30 | 31 | ||
@@ -125,7 +126,7 @@ static void perf_evlist__purge(struct perf_evlist *evlist) | |||
125 | void perf_evlist__exit(struct perf_evlist *evlist) | 126 | void perf_evlist__exit(struct perf_evlist *evlist) |
126 | { | 127 | { |
127 | zfree(&evlist->mmap); | 128 | zfree(&evlist->mmap); |
128 | zfree(&evlist->backward_mmap); | 129 | zfree(&evlist->overwrite_mmap); |
129 | fdarray__exit(&evlist->pollfd); | 130 | fdarray__exit(&evlist->pollfd); |
130 | } | 131 | } |
131 | 132 | ||
@@ -675,11 +676,11 @@ static int perf_evlist__set_paused(struct perf_evlist *evlist, bool value) | |||
675 | { | 676 | { |
676 | int i; | 677 | int i; |
677 | 678 | ||
678 | if (!evlist->backward_mmap) | 679 | if (!evlist->overwrite_mmap) |
679 | return 0; | 680 | return 0; |
680 | 681 | ||
681 | for (i = 0; i < evlist->nr_mmaps; i++) { | 682 | for (i = 0; i < evlist->nr_mmaps; i++) { |
682 | int fd = evlist->backward_mmap[i].fd; | 683 | int fd = evlist->overwrite_mmap[i].fd; |
683 | int err; | 684 | int err; |
684 | 685 | ||
685 | if (fd < 0) | 686 | if (fd < 0) |
@@ -711,7 +712,7 @@ union perf_event *perf_evlist__mmap_read_forward(struct perf_evlist *evlist, int | |||
711 | * No need for read-write ring buffer: kernel stop outputting when | 712 | * No need for read-write ring buffer: kernel stop outputting when |
712 | * it hit md->prev (perf_mmap__consume()). | 713 | * it hit md->prev (perf_mmap__consume()). |
713 | */ | 714 | */ |
714 | return perf_mmap__read_forward(md, evlist->overwrite); | 715 | return perf_mmap__read_forward(md); |
715 | } | 716 | } |
716 | 717 | ||
717 | union perf_event *perf_evlist__mmap_read_backward(struct perf_evlist *evlist, int idx) | 718 | union perf_event *perf_evlist__mmap_read_backward(struct perf_evlist *evlist, int idx) |
@@ -738,7 +739,7 @@ void perf_evlist__mmap_read_catchup(struct perf_evlist *evlist, int idx) | |||
738 | 739 | ||
739 | void perf_evlist__mmap_consume(struct perf_evlist *evlist, int idx) | 740 | void perf_evlist__mmap_consume(struct perf_evlist *evlist, int idx) |
740 | { | 741 | { |
741 | perf_mmap__consume(&evlist->mmap[idx], evlist->overwrite); | 742 | perf_mmap__consume(&evlist->mmap[idx], false); |
742 | } | 743 | } |
743 | 744 | ||
744 | static void perf_evlist__munmap_nofree(struct perf_evlist *evlist) | 745 | static void perf_evlist__munmap_nofree(struct perf_evlist *evlist) |
@@ -749,16 +750,16 @@ static void perf_evlist__munmap_nofree(struct perf_evlist *evlist) | |||
749 | for (i = 0; i < evlist->nr_mmaps; i++) | 750 | for (i = 0; i < evlist->nr_mmaps; i++) |
750 | perf_mmap__munmap(&evlist->mmap[i]); | 751 | perf_mmap__munmap(&evlist->mmap[i]); |
751 | 752 | ||
752 | if (evlist->backward_mmap) | 753 | if (evlist->overwrite_mmap) |
753 | for (i = 0; i < evlist->nr_mmaps; i++) | 754 | for (i = 0; i < evlist->nr_mmaps; i++) |
754 | perf_mmap__munmap(&evlist->backward_mmap[i]); | 755 | perf_mmap__munmap(&evlist->overwrite_mmap[i]); |
755 | } | 756 | } |
756 | 757 | ||
757 | void perf_evlist__munmap(struct perf_evlist *evlist) | 758 | void perf_evlist__munmap(struct perf_evlist *evlist) |
758 | { | 759 | { |
759 | perf_evlist__munmap_nofree(evlist); | 760 | perf_evlist__munmap_nofree(evlist); |
760 | zfree(&evlist->mmap); | 761 | zfree(&evlist->mmap); |
761 | zfree(&evlist->backward_mmap); | 762 | zfree(&evlist->overwrite_mmap); |
762 | } | 763 | } |
763 | 764 | ||
764 | static struct perf_mmap *perf_evlist__alloc_mmap(struct perf_evlist *evlist) | 765 | static struct perf_mmap *perf_evlist__alloc_mmap(struct perf_evlist *evlist) |
@@ -800,7 +801,7 @@ perf_evlist__should_poll(struct perf_evlist *evlist __maybe_unused, | |||
800 | 801 | ||
801 | static int perf_evlist__mmap_per_evsel(struct perf_evlist *evlist, int idx, | 802 | static int perf_evlist__mmap_per_evsel(struct perf_evlist *evlist, int idx, |
802 | struct mmap_params *mp, int cpu_idx, | 803 | struct mmap_params *mp, int cpu_idx, |
803 | int thread, int *_output, int *_output_backward) | 804 | int thread, int *_output, int *_output_overwrite) |
804 | { | 805 | { |
805 | struct perf_evsel *evsel; | 806 | struct perf_evsel *evsel; |
806 | int revent; | 807 | int revent; |
@@ -812,18 +813,20 @@ static int perf_evlist__mmap_per_evsel(struct perf_evlist *evlist, int idx, | |||
812 | int fd; | 813 | int fd; |
813 | int cpu; | 814 | int cpu; |
814 | 815 | ||
816 | mp->prot = PROT_READ | PROT_WRITE; | ||
815 | if (evsel->attr.write_backward) { | 817 | if (evsel->attr.write_backward) { |
816 | output = _output_backward; | 818 | output = _output_overwrite; |
817 | maps = evlist->backward_mmap; | 819 | maps = evlist->overwrite_mmap; |
818 | 820 | ||
819 | if (!maps) { | 821 | if (!maps) { |
820 | maps = perf_evlist__alloc_mmap(evlist); | 822 | maps = perf_evlist__alloc_mmap(evlist); |
821 | if (!maps) | 823 | if (!maps) |
822 | return -1; | 824 | return -1; |
823 | evlist->backward_mmap = maps; | 825 | evlist->overwrite_mmap = maps; |
824 | if (evlist->bkw_mmap_state == BKW_MMAP_NOTREADY) | 826 | if (evlist->bkw_mmap_state == BKW_MMAP_NOTREADY) |
825 | perf_evlist__toggle_bkw_mmap(evlist, BKW_MMAP_RUNNING); | 827 | perf_evlist__toggle_bkw_mmap(evlist, BKW_MMAP_RUNNING); |
826 | } | 828 | } |
829 | mp->prot &= ~PROT_WRITE; | ||
827 | } | 830 | } |
828 | 831 | ||
829 | if (evsel->system_wide && thread) | 832 | if (evsel->system_wide && thread) |
@@ -884,14 +887,14 @@ static int perf_evlist__mmap_per_cpu(struct perf_evlist *evlist, | |||
884 | pr_debug2("perf event ring buffer mmapped per cpu\n"); | 887 | pr_debug2("perf event ring buffer mmapped per cpu\n"); |
885 | for (cpu = 0; cpu < nr_cpus; cpu++) { | 888 | for (cpu = 0; cpu < nr_cpus; cpu++) { |
886 | int output = -1; | 889 | int output = -1; |
887 | int output_backward = -1; | 890 | int output_overwrite = -1; |
888 | 891 | ||
889 | auxtrace_mmap_params__set_idx(&mp->auxtrace_mp, evlist, cpu, | 892 | auxtrace_mmap_params__set_idx(&mp->auxtrace_mp, evlist, cpu, |
890 | true); | 893 | true); |
891 | 894 | ||
892 | for (thread = 0; thread < nr_threads; thread++) { | 895 | for (thread = 0; thread < nr_threads; thread++) { |
893 | if (perf_evlist__mmap_per_evsel(evlist, cpu, mp, cpu, | 896 | if (perf_evlist__mmap_per_evsel(evlist, cpu, mp, cpu, |
894 | thread, &output, &output_backward)) | 897 | thread, &output, &output_overwrite)) |
895 | goto out_unmap; | 898 | goto out_unmap; |
896 | } | 899 | } |
897 | } | 900 | } |
@@ -912,13 +915,13 @@ static int perf_evlist__mmap_per_thread(struct perf_evlist *evlist, | |||
912 | pr_debug2("perf event ring buffer mmapped per thread\n"); | 915 | pr_debug2("perf event ring buffer mmapped per thread\n"); |
913 | for (thread = 0; thread < nr_threads; thread++) { | 916 | for (thread = 0; thread < nr_threads; thread++) { |
914 | int output = -1; | 917 | int output = -1; |
915 | int output_backward = -1; | 918 | int output_overwrite = -1; |
916 | 919 | ||
917 | auxtrace_mmap_params__set_idx(&mp->auxtrace_mp, evlist, thread, | 920 | auxtrace_mmap_params__set_idx(&mp->auxtrace_mp, evlist, thread, |
918 | false); | 921 | false); |
919 | 922 | ||
920 | if (perf_evlist__mmap_per_evsel(evlist, thread, mp, 0, thread, | 923 | if (perf_evlist__mmap_per_evsel(evlist, thread, mp, 0, thread, |
921 | &output, &output_backward)) | 924 | &output, &output_overwrite)) |
922 | goto out_unmap; | 925 | goto out_unmap; |
923 | } | 926 | } |
924 | 927 | ||
@@ -1052,15 +1055,18 @@ int perf_evlist__parse_mmap_pages(const struct option *opt, const char *str, | |||
1052 | * Return: %0 on success, negative error code otherwise. | 1055 | * Return: %0 on success, negative error code otherwise. |
1053 | */ | 1056 | */ |
1054 | int perf_evlist__mmap_ex(struct perf_evlist *evlist, unsigned int pages, | 1057 | int perf_evlist__mmap_ex(struct perf_evlist *evlist, unsigned int pages, |
1055 | bool overwrite, unsigned int auxtrace_pages, | 1058 | unsigned int auxtrace_pages, |
1056 | bool auxtrace_overwrite) | 1059 | bool auxtrace_overwrite) |
1057 | { | 1060 | { |
1058 | struct perf_evsel *evsel; | 1061 | struct perf_evsel *evsel; |
1059 | const struct cpu_map *cpus = evlist->cpus; | 1062 | const struct cpu_map *cpus = evlist->cpus; |
1060 | const struct thread_map *threads = evlist->threads; | 1063 | const struct thread_map *threads = evlist->threads; |
1061 | struct mmap_params mp = { | 1064 | /* |
1062 | .prot = PROT_READ | (overwrite ? 0 : PROT_WRITE), | 1065 | * Delay setting mp.prot: set it before calling perf_mmap__mmap. |
1063 | }; | 1066 | * Its value is decided by evsel's write_backward. |
1067 | * So &mp should not be passed through const pointer. | ||
1068 | */ | ||
1069 | struct mmap_params mp; | ||
1064 | 1070 | ||
1065 | if (!evlist->mmap) | 1071 | if (!evlist->mmap) |
1066 | evlist->mmap = perf_evlist__alloc_mmap(evlist); | 1072 | evlist->mmap = perf_evlist__alloc_mmap(evlist); |
@@ -1070,7 +1076,6 @@ int perf_evlist__mmap_ex(struct perf_evlist *evlist, unsigned int pages, | |||
1070 | if (evlist->pollfd.entries == NULL && perf_evlist__alloc_pollfd(evlist) < 0) | 1076 | if (evlist->pollfd.entries == NULL && perf_evlist__alloc_pollfd(evlist) < 0) |
1071 | return -ENOMEM; | 1077 | return -ENOMEM; |
1072 | 1078 | ||
1073 | evlist->overwrite = overwrite; | ||
1074 | evlist->mmap_len = perf_evlist__mmap_size(pages); | 1079 | evlist->mmap_len = perf_evlist__mmap_size(pages); |
1075 | pr_debug("mmap size %zuB\n", evlist->mmap_len); | 1080 | pr_debug("mmap size %zuB\n", evlist->mmap_len); |
1076 | mp.mask = evlist->mmap_len - page_size - 1; | 1081 | mp.mask = evlist->mmap_len - page_size - 1; |
@@ -1091,10 +1096,9 @@ int perf_evlist__mmap_ex(struct perf_evlist *evlist, unsigned int pages, | |||
1091 | return perf_evlist__mmap_per_cpu(evlist, &mp); | 1096 | return perf_evlist__mmap_per_cpu(evlist, &mp); |
1092 | } | 1097 | } |
1093 | 1098 | ||
1094 | int perf_evlist__mmap(struct perf_evlist *evlist, unsigned int pages, | 1099 | int perf_evlist__mmap(struct perf_evlist *evlist, unsigned int pages) |
1095 | bool overwrite) | ||
1096 | { | 1100 | { |
1097 | return perf_evlist__mmap_ex(evlist, pages, overwrite, 0, false); | 1101 | return perf_evlist__mmap_ex(evlist, pages, 0, false); |
1098 | } | 1102 | } |
1099 | 1103 | ||
1100 | int perf_evlist__create_maps(struct perf_evlist *evlist, struct target *target) | 1104 | int perf_evlist__create_maps(struct perf_evlist *evlist, struct target *target) |
@@ -1102,7 +1106,8 @@ int perf_evlist__create_maps(struct perf_evlist *evlist, struct target *target) | |||
1102 | struct cpu_map *cpus; | 1106 | struct cpu_map *cpus; |
1103 | struct thread_map *threads; | 1107 | struct thread_map *threads; |
1104 | 1108 | ||
1105 | threads = thread_map__new_str(target->pid, target->tid, target->uid); | 1109 | threads = thread_map__new_str(target->pid, target->tid, target->uid, |
1110 | target->per_thread); | ||
1106 | 1111 | ||
1107 | if (!threads) | 1112 | if (!threads) |
1108 | return -1; | 1113 | return -1; |
@@ -1582,6 +1587,17 @@ int perf_evlist__parse_sample(struct perf_evlist *evlist, union perf_event *even | |||
1582 | return perf_evsel__parse_sample(evsel, event, sample); | 1587 | return perf_evsel__parse_sample(evsel, event, sample); |
1583 | } | 1588 | } |
1584 | 1589 | ||
1590 | int perf_evlist__parse_sample_timestamp(struct perf_evlist *evlist, | ||
1591 | union perf_event *event, | ||
1592 | u64 *timestamp) | ||
1593 | { | ||
1594 | struct perf_evsel *evsel = perf_evlist__event2evsel(evlist, event); | ||
1595 | |||
1596 | if (!evsel) | ||
1597 | return -EFAULT; | ||
1598 | return perf_evsel__parse_sample_timestamp(evsel, event, timestamp); | ||
1599 | } | ||
1600 | |||
1585 | size_t perf_evlist__fprintf(struct perf_evlist *evlist, FILE *fp) | 1601 | size_t perf_evlist__fprintf(struct perf_evlist *evlist, FILE *fp) |
1586 | { | 1602 | { |
1587 | struct perf_evsel *evsel; | 1603 | struct perf_evsel *evsel; |
@@ -1739,13 +1755,13 @@ void perf_evlist__toggle_bkw_mmap(struct perf_evlist *evlist, | |||
1739 | RESUME, | 1755 | RESUME, |
1740 | } action = NONE; | 1756 | } action = NONE; |
1741 | 1757 | ||
1742 | if (!evlist->backward_mmap) | 1758 | if (!evlist->overwrite_mmap) |
1743 | return; | 1759 | return; |
1744 | 1760 | ||
1745 | switch (old_state) { | 1761 | switch (old_state) { |
1746 | case BKW_MMAP_NOTREADY: { | 1762 | case BKW_MMAP_NOTREADY: { |
1747 | if (state != BKW_MMAP_RUNNING) | 1763 | if (state != BKW_MMAP_RUNNING) |
1748 | goto state_err;; | 1764 | goto state_err; |
1749 | break; | 1765 | break; |
1750 | } | 1766 | } |
1751 | case BKW_MMAP_RUNNING: { | 1767 | case BKW_MMAP_RUNNING: { |