diff options
author | Wang Nan <wangnan0@huawei.com> | 2016-07-14 04:34:40 -0400 |
---|---|---|
committer | Arnaldo Carvalho de Melo <acme@redhat.com> | 2016-07-15 16:27:48 -0400 |
commit | 078c33862e042b3778dce3bcc8eaef84ab40715c (patch) | |
tree | cabb0ccd2b78aa02662f4671fb3885abaa12666a | |
parent | b2cb615d8aaba520fe351ff456f6c7730828b3fe (diff) |
perf evlist: Map backward events to backward_mmap
In perf_evlist__mmap_per_evsel(), select backward_mmap for backward
events. Utilize the new perf_mmap APIs. Dynamically allocate backward_mmap.
Remove now-unused functions.
Signed-off-by: Wang Nan <wangnan0@huawei.com>
Acked-by: Jiri Olsa <jolsa@kernel.org>
Cc: He Kuang <hekuang@huawei.com>
Cc: Masami Hiramatsu <mhiramat@kernel.org>
Cc: Namhyung Kim <namhyung@kernel.org>
Cc: Nilay Vaish <nilayvaish@gmail.com>
Cc: Zefan Li <lizefan@huawei.com>
Cc: pi3orama@163.com
Link: http://lkml.kernel.org/r/1468485287-33422-9-git-send-email-wangnan0@huawei.com
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
-rw-r--r-- | tools/perf/tests/backward-ring-buffer.c | 4 | ||||
-rw-r--r-- | tools/perf/util/evlist.c | 54 |
2 files changed, 29 insertions, 29 deletions
diff --git a/tools/perf/tests/backward-ring-buffer.c b/tools/perf/tests/backward-ring-buffer.c index 5cee3873f2b5..b2c634815f6b 100644 --- a/tools/perf/tests/backward-ring-buffer.c +++ b/tools/perf/tests/backward-ring-buffer.c | |||
@@ -31,8 +31,8 @@ static int count_samples(struct perf_evlist *evlist, int *sample_count, | |||
31 | for (i = 0; i < evlist->nr_mmaps; i++) { | 31 | for (i = 0; i < evlist->nr_mmaps; i++) { |
32 | union perf_event *event; | 32 | union perf_event *event; |
33 | 33 | ||
34 | perf_evlist__mmap_read_catchup(evlist, i); | 34 | perf_mmap__read_catchup(&evlist->backward_mmap[i]); |
35 | while ((event = perf_evlist__mmap_read_backward(evlist, i)) != NULL) { | 35 | while ((event = perf_mmap__read_backward(&evlist->backward_mmap[i])) != NULL) { |
36 | const u32 type = event->header.type; | 36 | const u32 type = event->header.type; |
37 | 37 | ||
38 | switch (type) { | 38 | switch (type) { |
diff --git a/tools/perf/util/evlist.c b/tools/perf/util/evlist.c index 24927e111d17..7570f903200e 100644 --- a/tools/perf/util/evlist.c +++ b/tools/perf/util/evlist.c | |||
@@ -27,7 +27,6 @@ | |||
27 | #include <linux/log2.h> | 27 | #include <linux/log2.h> |
28 | #include <linux/err.h> | 28 | #include <linux/err.h> |
29 | 29 | ||
30 | static void perf_evlist__mmap_put(struct perf_evlist *evlist, int idx); | ||
31 | static void perf_mmap__munmap(struct perf_mmap *map); | 30 | static void perf_mmap__munmap(struct perf_mmap *map); |
32 | static void perf_mmap__put(struct perf_mmap *map); | 31 | static void perf_mmap__put(struct perf_mmap *map); |
33 | 32 | ||
@@ -692,8 +691,11 @@ static int perf_evlist__set_paused(struct perf_evlist *evlist, bool value) | |||
692 | { | 691 | { |
693 | int i; | 692 | int i; |
694 | 693 | ||
694 | if (!evlist->backward_mmap) | ||
695 | return 0; | ||
696 | |||
695 | for (i = 0; i < evlist->nr_mmaps; i++) { | 697 | for (i = 0; i < evlist->nr_mmaps; i++) { |
696 | int fd = evlist->mmap[i].fd; | 698 | int fd = evlist->backward_mmap[i].fd; |
697 | int err; | 699 | int err; |
698 | 700 | ||
699 | if (fd < 0) | 701 | if (fd < 0) |
@@ -904,16 +906,6 @@ static void perf_mmap__put(struct perf_mmap *md) | |||
904 | perf_mmap__munmap(md); | 906 | perf_mmap__munmap(md); |
905 | } | 907 | } |
906 | 908 | ||
907 | static void perf_evlist__mmap_get(struct perf_evlist *evlist, int idx) | ||
908 | { | ||
909 | perf_mmap__get(&evlist->mmap[idx]); | ||
910 | } | ||
911 | |||
912 | static void perf_evlist__mmap_put(struct perf_evlist *evlist, int idx) | ||
913 | { | ||
914 | perf_mmap__put(&evlist->mmap[idx]); | ||
915 | } | ||
916 | |||
917 | void perf_mmap__consume(struct perf_mmap *md, bool overwrite) | 909 | void perf_mmap__consume(struct perf_mmap *md, bool overwrite) |
918 | { | 910 | { |
919 | if (!overwrite) { | 911 | if (!overwrite) { |
@@ -1049,12 +1041,6 @@ static int perf_mmap__mmap(struct perf_mmap *map, | |||
1049 | return 0; | 1041 | return 0; |
1050 | } | 1042 | } |
1051 | 1043 | ||
1052 | static int __perf_evlist__mmap(struct perf_evlist *evlist, int idx, | ||
1053 | struct mmap_params *mp, int fd) | ||
1054 | { | ||
1055 | return perf_mmap__mmap(&evlist->mmap[idx], mp, fd); | ||
1056 | } | ||
1057 | |||
1058 | static bool | 1044 | static bool |
1059 | perf_evlist__should_poll(struct perf_evlist *evlist __maybe_unused, | 1045 | perf_evlist__should_poll(struct perf_evlist *evlist __maybe_unused, |
1060 | struct perf_evsel *evsel) | 1046 | struct perf_evsel *evsel) |
@@ -1066,16 +1052,27 @@ perf_evlist__should_poll(struct perf_evlist *evlist __maybe_unused, | |||
1066 | 1052 | ||
1067 | static int perf_evlist__mmap_per_evsel(struct perf_evlist *evlist, int idx, | 1053 | static int perf_evlist__mmap_per_evsel(struct perf_evlist *evlist, int idx, |
1068 | struct mmap_params *mp, int cpu, | 1054 | struct mmap_params *mp, int cpu, |
1069 | int thread, int *output) | 1055 | int thread, int *_output, int *_output_backward) |
1070 | { | 1056 | { |
1071 | struct perf_evsel *evsel; | 1057 | struct perf_evsel *evsel; |
1072 | int revent; | 1058 | int revent; |
1073 | 1059 | ||
1074 | evlist__for_each_entry(evlist, evsel) { | 1060 | evlist__for_each_entry(evlist, evsel) { |
1061 | struct perf_mmap *maps = evlist->mmap; | ||
1062 | int *output = _output; | ||
1075 | int fd; | 1063 | int fd; |
1076 | 1064 | ||
1077 | if (!!evsel->attr.write_backward != (evlist->overwrite && evlist->backward)) | 1065 | if (evsel->attr.write_backward) { |
1078 | continue; | 1066 | output = _output_backward; |
1067 | maps = evlist->backward_mmap; | ||
1068 | |||
1069 | if (!maps) { | ||
1070 | maps = perf_evlist__alloc_mmap(evlist); | ||
1071 | if (!maps) | ||
1072 | return -1; | ||
1073 | evlist->backward_mmap = maps; | ||
1074 | } | ||
1075 | } | ||
1079 | 1076 | ||
1080 | if (evsel->system_wide && thread) | 1077 | if (evsel->system_wide && thread) |
1081 | continue; | 1078 | continue; |
@@ -1084,13 +1081,14 @@ static int perf_evlist__mmap_per_evsel(struct perf_evlist *evlist, int idx, | |||
1084 | 1081 | ||
1085 | if (*output == -1) { | 1082 | if (*output == -1) { |
1086 | *output = fd; | 1083 | *output = fd; |
1087 | if (__perf_evlist__mmap(evlist, idx, mp, *output) < 0) | 1084 | |
1085 | if (perf_mmap__mmap(&maps[idx], mp, *output) < 0) | ||
1088 | return -1; | 1086 | return -1; |
1089 | } else { | 1087 | } else { |
1090 | if (ioctl(fd, PERF_EVENT_IOC_SET_OUTPUT, *output) != 0) | 1088 | if (ioctl(fd, PERF_EVENT_IOC_SET_OUTPUT, *output) != 0) |
1091 | return -1; | 1089 | return -1; |
1092 | 1090 | ||
1093 | perf_evlist__mmap_get(evlist, idx); | 1091 | perf_mmap__get(&maps[idx]); |
1094 | } | 1092 | } |
1095 | 1093 | ||
1096 | revent = perf_evlist__should_poll(evlist, evsel) ? POLLIN : 0; | 1094 | revent = perf_evlist__should_poll(evlist, evsel) ? POLLIN : 0; |
@@ -1103,8 +1101,8 @@ static int perf_evlist__mmap_per_evsel(struct perf_evlist *evlist, int idx, | |||
1103 | * Therefore don't add it for polling. | 1101 | * Therefore don't add it for polling. |
1104 | */ | 1102 | */ |
1105 | if (!evsel->system_wide && | 1103 | if (!evsel->system_wide && |
1106 | __perf_evlist__add_pollfd(evlist, fd, &evlist->mmap[idx], revent) < 0) { | 1104 | __perf_evlist__add_pollfd(evlist, fd, &maps[idx], revent) < 0) { |
1107 | perf_evlist__mmap_put(evlist, idx); | 1105 | perf_mmap__put(&maps[idx]); |
1108 | return -1; | 1106 | return -1; |
1109 | } | 1107 | } |
1110 | 1108 | ||
@@ -1130,13 +1128,14 @@ static int perf_evlist__mmap_per_cpu(struct perf_evlist *evlist, | |||
1130 | pr_debug2("perf event ring buffer mmapped per cpu\n"); | 1128 | pr_debug2("perf event ring buffer mmapped per cpu\n"); |
1131 | for (cpu = 0; cpu < nr_cpus; cpu++) { | 1129 | for (cpu = 0; cpu < nr_cpus; cpu++) { |
1132 | int output = -1; | 1130 | int output = -1; |
1131 | int output_backward = -1; | ||
1133 | 1132 | ||
1134 | auxtrace_mmap_params__set_idx(&mp->auxtrace_mp, evlist, cpu, | 1133 | auxtrace_mmap_params__set_idx(&mp->auxtrace_mp, evlist, cpu, |
1135 | true); | 1134 | true); |
1136 | 1135 | ||
1137 | for (thread = 0; thread < nr_threads; thread++) { | 1136 | for (thread = 0; thread < nr_threads; thread++) { |
1138 | if (perf_evlist__mmap_per_evsel(evlist, cpu, mp, cpu, | 1137 | if (perf_evlist__mmap_per_evsel(evlist, cpu, mp, cpu, |
1139 | thread, &output)) | 1138 | thread, &output, &output_backward)) |
1140 | goto out_unmap; | 1139 | goto out_unmap; |
1141 | } | 1140 | } |
1142 | } | 1141 | } |
@@ -1157,12 +1156,13 @@ static int perf_evlist__mmap_per_thread(struct perf_evlist *evlist, | |||
1157 | pr_debug2("perf event ring buffer mmapped per thread\n"); | 1156 | pr_debug2("perf event ring buffer mmapped per thread\n"); |
1158 | for (thread = 0; thread < nr_threads; thread++) { | 1157 | for (thread = 0; thread < nr_threads; thread++) { |
1159 | int output = -1; | 1158 | int output = -1; |
1159 | int output_backward = -1; | ||
1160 | 1160 | ||
1161 | auxtrace_mmap_params__set_idx(&mp->auxtrace_mp, evlist, thread, | 1161 | auxtrace_mmap_params__set_idx(&mp->auxtrace_mp, evlist, thread, |
1162 | false); | 1162 | false); |
1163 | 1163 | ||
1164 | if (perf_evlist__mmap_per_evsel(evlist, thread, mp, 0, thread, | 1164 | if (perf_evlist__mmap_per_evsel(evlist, thread, mp, 0, thread, |
1165 | &output)) | 1165 | &output, &output_backward)) |
1166 | goto out_unmap; | 1166 | goto out_unmap; |
1167 | } | 1167 | } |
1168 | 1168 | ||