author		Wang Nan <wangnan0@huawei.com>	2016-07-14 04:34:35 -0400
committer	Arnaldo Carvalho de Melo <acme@redhat.com>	2016-07-15 16:27:46 -0400
commit		8db6d6b19e486eef3db41bbd74a1f4c2b82d7706 (patch)
tree		719f07ba8567f25333b292519b8d6864c40b09e7 /tools/perf/util/evlist.c
parent		2b4383470675c85add72725cc08840d36b409276 (diff)
perf evlist: Update mmap related APIs and helpers
Currently, the evlist mmap-related helpers and APIs accept an evlist and an
idx, and dereference 'struct perf_mmap' through evlist->mmap[idx]. This is
unnecessary, and it forces each evlist to contain only one mmap array.
Following commits are going to introduce multiple mmap arrays in an evlist.

This patch refactors these APIs and helpers, introducing functions that
accept a perf_mmap pointer directly. The new helpers and APIs are decoupled
from perf_evlist and become perf_mmap functions (so they carry a perf_mmap
prefix). The old functions are reimplemented in terms of the new ones; some
of them will be removed in following commits.

Signed-off-by: Wang Nan <wangnan0@huawei.com>
Acked-by: Jiri Olsa <jolsa@kernel.org>
Cc: He Kuang <hekuang@huawei.com>
Cc: Masami Hiramatsu <mhiramat@kernel.org>
Cc: Namhyung Kim <namhyung@kernel.org>
Cc: Nilay Vaish <nilayvaish@gmail.com>
Cc: Zefan Li <lizefan@huawei.com>
Cc: pi3orama@163.com
Link: http://lkml.kernel.org/r/1468485287-33422-4-git-send-email-wangnan0@huawei.com
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
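[Editor's note: to make the wrapper pattern concrete, below is a minimal,
self-contained C sketch of the same refactoring idea. The struct and
function names (mmap_like, evlist_like and their helpers) are simplified
stand-ins invented for illustration, not the real perf definitions.]

/*
 * Sketch of the refactoring pattern in this patch: an index-based helper
 * on a container is reimplemented as a thin wrapper around a new function
 * that takes the element pointer directly.
 */
#include <stdio.h>

struct mmap_like {
	int refcnt;
};

struct evlist_like {
	struct mmap_like mmap[4];
	int nr_mmaps;
};

/* New style: operate on the element pointer, no container needed. */
static void mmap_like__get(struct mmap_like *map)
{
	map->refcnt++;
}

/* Old style, kept for existing callers: a one-line wrapper over the new function. */
static void evlist_like__mmap_get(struct evlist_like *evlist, int idx)
{
	mmap_like__get(&evlist->mmap[idx]);
}

int main(void)
{
	struct evlist_like evlist = { .nr_mmaps = 4 };

	evlist_like__mmap_get(&evlist, 0);	/* old API still works */
	mmap_like__get(&evlist.mmap[0]);	/* new API, container-free */
	printf("refcnt = %d\n", evlist.mmap[0].refcnt);	/* prints 2 */
	return 0;
}

Keeping the old index-based entry points as one-line wrappers lets existing
callers compile unchanged, while new code can hold a perf_mmap pointer
directly and no longer cares which array (or evlist) the mmap came from.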
Diffstat (limited to 'tools/perf/util/evlist.c')
-rw-r--r--	tools/perf/util/evlist.c	139
1 file changed, 96 insertions(+), 43 deletions(-)
diff --git a/tools/perf/util/evlist.c b/tools/perf/util/evlist.c
index 6803f5ccd15e..a4137e02eab8 100644
--- a/tools/perf/util/evlist.c
+++ b/tools/perf/util/evlist.c
@@ -29,6 +29,7 @@
 
 static void perf_evlist__mmap_put(struct perf_evlist *evlist, int idx);
 static void __perf_evlist__munmap(struct perf_evlist *evlist, int idx);
+static void perf_mmap__munmap(struct perf_mmap *map);
 
 #define FD(e, x, y) (*(int *)xyarray__entry(e->fd, x, y))
 #define SID(e, x, y) xyarray__entry(e->sample_id, x, y)
@@ -781,9 +782,8 @@ broken_event:
 	return event;
 }
 
-union perf_event *perf_evlist__mmap_read_forward(struct perf_evlist *evlist, int idx)
+union perf_event *perf_mmap__read_forward(struct perf_mmap *md, bool check_messup)
 {
-	struct perf_mmap *md = &evlist->mmap[idx];
 	u64 head;
 	u64 old = md->prev;
 
@@ -795,13 +795,12 @@ union perf_event *perf_evlist__mmap_read_forward(struct perf_evlist *evlist, int
 
 	head = perf_mmap__read_head(md);
 
-	return perf_mmap__read(md, evlist->overwrite, old, head, &md->prev);
+	return perf_mmap__read(md, check_messup, old, head, &md->prev);
 }
 
 union perf_event *
-perf_evlist__mmap_read_backward(struct perf_evlist *evlist, int idx)
+perf_mmap__read_backward(struct perf_mmap *md)
 {
-	struct perf_mmap *md = &evlist->mmap[idx];
 	u64 head, end;
 	u64 start = md->prev;
 
@@ -836,6 +835,31 @@ perf_evlist__mmap_read_backward(struct perf_evlist *evlist, int idx)
 	return perf_mmap__read(md, false, start, end, &md->prev);
 }
 
+union perf_event *perf_evlist__mmap_read_forward(struct perf_evlist *evlist, int idx)
+{
+	struct perf_mmap *md = &evlist->mmap[idx];
+
+	/*
+	 * Check messup is required for forward overwritable ring buffer:
+	 * memory pointed by md->prev can be overwritten in this case.
+	 * No need for read-write ring buffer: kernel stop outputting when
+	 * it hit md->prev (perf_mmap__consume()).
+	 */
+	return perf_mmap__read_forward(md, evlist->overwrite);
+}
+
+union perf_event *perf_evlist__mmap_read_backward(struct perf_evlist *evlist, int idx)
+{
+	struct perf_mmap *md = &evlist->mmap[idx];
+
+	/*
+	 * No need to check messup for backward ring buffer:
+	 * We can always read arbitrary long data from a backward
+	 * ring buffer unless we forget to pause it before reading.
+	 */
+	return perf_mmap__read_backward(md);
+}
+
 union perf_event *perf_evlist__mmap_read(struct perf_evlist *evlist, int idx)
 {
 	if (!evlist->backward)
@@ -843,9 +867,8 @@ union perf_event *perf_evlist__mmap_read(struct perf_evlist *evlist, int idx)
 	return perf_evlist__mmap_read_backward(evlist, idx);
 }
 
-void perf_evlist__mmap_read_catchup(struct perf_evlist *evlist, int idx)
+void perf_mmap__read_catchup(struct perf_mmap *md)
 {
-	struct perf_mmap *md = &evlist->mmap[idx];
 	u64 head;
 
 	if (!atomic_read(&md->refcnt))
@@ -855,38 +878,54 @@ void perf_evlist__mmap_read_catchup(struct perf_evlist *evlist, int idx)
 	md->prev = head;
 }
 
+void perf_evlist__mmap_read_catchup(struct perf_evlist *evlist, int idx)
+{
+	perf_mmap__read_catchup(&evlist->mmap[idx]);
+}
+
 static bool perf_mmap__empty(struct perf_mmap *md)
 {
 	return perf_mmap__read_head(md) == md->prev && !md->auxtrace_mmap.base;
 }
 
-static void perf_evlist__mmap_get(struct perf_evlist *evlist, int idx)
+static void perf_mmap__get(struct perf_mmap *map)
 {
-	atomic_inc(&evlist->mmap[idx].refcnt);
+	atomic_inc(&map->refcnt);
 }
 
-static void perf_evlist__mmap_put(struct perf_evlist *evlist, int idx)
+static void perf_mmap__put(struct perf_mmap *md)
 {
-	struct perf_mmap *md = &evlist->mmap[idx];
-
 	BUG_ON(md->base && atomic_read(&md->refcnt) == 0);
 
 	if (atomic_dec_and_test(&md->refcnt))
-		__perf_evlist__munmap(evlist, idx);
+		perf_mmap__munmap(md);
 }
 
-void perf_evlist__mmap_consume(struct perf_evlist *evlist, int idx)
+static void perf_evlist__mmap_get(struct perf_evlist *evlist, int idx)
 {
-	struct perf_mmap *md = &evlist->mmap[idx];
+	perf_mmap__get(&evlist->mmap[idx]);
+}
+
+static void perf_evlist__mmap_put(struct perf_evlist *evlist, int idx)
+{
+	perf_mmap__put(&evlist->mmap[idx]);
+}
 
-	if (!evlist->overwrite) {
+void perf_mmap__consume(struct perf_mmap *md, bool overwrite)
+{
+	if (!overwrite) {
 		u64 old = md->prev;
 
 		perf_mmap__write_tail(md, old);
 	}
 
 	if (atomic_read(&md->refcnt) == 1 && perf_mmap__empty(md))
-		perf_evlist__mmap_put(evlist, idx);
+		perf_mmap__put(md);
+}
+
+void perf_evlist__mmap_consume(struct perf_evlist *evlist, int idx)
+{
+	perf_mmap__consume(&evlist->mmap[idx], evlist->overwrite);
 }
 
 int __weak auxtrace_mmap__mmap(struct auxtrace_mmap *mm __maybe_unused,
@@ -917,15 +956,20 @@ void __weak auxtrace_mmap_params__set_idx(
 {
 }
 
-static void __perf_evlist__munmap(struct perf_evlist *evlist, int idx)
+static void perf_mmap__munmap(struct perf_mmap *map)
 {
-	if (evlist->mmap[idx].base != NULL) {
-		munmap(evlist->mmap[idx].base, evlist->mmap_len);
-		evlist->mmap[idx].base = NULL;
-		evlist->mmap[idx].fd = -1;
-		atomic_set(&evlist->mmap[idx].refcnt, 0);
+	if (map->base != NULL) {
+		munmap(map->base, perf_mmap__mmap_len(map));
+		map->base = NULL;
+		map->fd = -1;
+		atomic_set(&map->refcnt, 0);
 	}
-	auxtrace_mmap__munmap(&evlist->mmap[idx].auxtrace_mmap);
+	auxtrace_mmap__munmap(&map->auxtrace_mmap);
+}
+
+static void __perf_evlist__munmap(struct perf_evlist *evlist, int idx)
+{
+	perf_mmap__munmap(&evlist->mmap[idx]);
 }
 
 void perf_evlist__munmap(struct perf_evlist *evlist)
@@ -941,20 +985,21 @@ void perf_evlist__munmap(struct perf_evlist *evlist)
 	zfree(&evlist->mmap);
 }
 
-static int perf_evlist__alloc_mmap(struct perf_evlist *evlist)
+static struct perf_mmap *perf_evlist__alloc_mmap(struct perf_evlist *evlist)
 {
 	int i;
+	struct perf_mmap *map;
 
 	evlist->nr_mmaps = cpu_map__nr(evlist->cpus);
 	if (cpu_map__empty(evlist->cpus))
 		evlist->nr_mmaps = thread_map__nr(evlist->threads);
-	evlist->mmap = zalloc(evlist->nr_mmaps * sizeof(struct perf_mmap));
-	if (!evlist->mmap)
-		return -ENOMEM;
+	map = zalloc(evlist->nr_mmaps * sizeof(struct perf_mmap));
+	if (!map)
+		return NULL;
 
 	for (i = 0; i < evlist->nr_mmaps; i++)
-		evlist->mmap[i].fd = -1;
-	return 0;
+		map[i].fd = -1;
+	return map;
 }
 
 struct mmap_params {
@@ -963,8 +1008,8 @@ struct mmap_params {
 	struct auxtrace_mmap_params auxtrace_mp;
 };
 
-static int __perf_evlist__mmap(struct perf_evlist *evlist, int idx,
-			       struct mmap_params *mp, int fd)
+static int perf_mmap__mmap(struct perf_mmap *map,
+			   struct mmap_params *mp, int fd)
 {
 	/*
 	 * The last one will be done at perf_evlist__mmap_consume(), so that we
@@ -979,26 +1024,32 @@ static int __perf_evlist__mmap(struct perf_evlist *evlist, int idx,
 	 * evlist layer can't just drop it when filtering events in
 	 * perf_evlist__filter_pollfd().
	 */
-	atomic_set(&evlist->mmap[idx].refcnt, 2);
-	evlist->mmap[idx].prev = 0;
-	evlist->mmap[idx].mask = mp->mask;
-	evlist->mmap[idx].base = mmap(NULL, evlist->mmap_len, mp->prot,
-				      MAP_SHARED, fd, 0);
-	if (evlist->mmap[idx].base == MAP_FAILED) {
+	atomic_set(&map->refcnt, 2);
+	map->prev = 0;
+	map->mask = mp->mask;
+	map->base = mmap(NULL, perf_mmap__mmap_len(map), mp->prot,
+			 MAP_SHARED, fd, 0);
+	if (map->base == MAP_FAILED) {
 		pr_debug2("failed to mmap perf event ring buffer, error %d\n",
 			  errno);
-		evlist->mmap[idx].base = NULL;
+		map->base = NULL;
 		return -1;
 	}
-	evlist->mmap[idx].fd = fd;
+	map->fd = fd;
 
-	if (auxtrace_mmap__mmap(&evlist->mmap[idx].auxtrace_mmap,
-				&mp->auxtrace_mp, evlist->mmap[idx].base, fd))
+	if (auxtrace_mmap__mmap(&map->auxtrace_mmap,
+				&mp->auxtrace_mp, map->base, fd))
 		return -1;
 
 	return 0;
 }
 
+static int __perf_evlist__mmap(struct perf_evlist *evlist, int idx,
+			       struct mmap_params *mp, int fd)
+{
+	return perf_mmap__mmap(&evlist->mmap[idx], mp, fd);
+}
+
 static bool
 perf_evlist__should_poll(struct perf_evlist *evlist __maybe_unused,
 			 struct perf_evsel *evsel)
@@ -1248,7 +1299,9 @@ int perf_evlist__mmap_ex(struct perf_evlist *evlist, unsigned int pages,
 		.prot = PROT_READ | (overwrite ? 0 : PROT_WRITE),
 	};
 
-	if (evlist->mmap == NULL && perf_evlist__alloc_mmap(evlist) < 0)
+	if (!evlist->mmap)
+		evlist->mmap = perf_evlist__alloc_mmap(evlist);
+	if (!evlist->mmap)
 		return -ENOMEM;
 
 	if (evlist->pollfd.entries == NULL && perf_evlist__alloc_pollfd(evlist) < 0)
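
[Editor's note: for orientation, here is a rough, hypothetical sketch of how
a consumer loop could drive the new perf_mmap functions introduced above.
The drain_mmaps() helper, its mmaps/nr_mmaps parameters, and the event
handling are assumptions for illustration; only perf_mmap__read_forward()
and perf_mmap__consume() come from this patch.]

/*
 * Hypothetical consumer loop over an array of perf_mmap, using the
 * functions this patch introduces. 'mmaps' and 'nr_mmaps' stand in for
 * whatever mmap array a future multi-array evlist will expose.
 */
static void drain_mmaps(struct perf_mmap *mmaps, int nr_mmaps, bool overwrite)
{
	int i;

	for (i = 0; i < nr_mmaps; i++) {
		union perf_event *event;

		/* check_messup == overwrite, as in perf_evlist__mmap_read_forward() */
		while ((event = perf_mmap__read_forward(&mmaps[i], overwrite)) != NULL) {
			/* ... deliver the event to the tool ... */
			perf_mmap__consume(&mmaps[i], overwrite);
		}
	}
}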