author		Jakub Kicinski <jakub.kicinski@netronome.com>	2018-05-10 13:24:40 -0400
committer	Daniel Borkmann <daniel@iogearbox.net>	2018-05-10 19:40:52 -0400
commit		d0cabbb021bee5c4b831a0235af9534ad07f8d3d (patch)
tree		7843337206d80588110d9112d49836624b09be64 /tools/bpf
parent		5f9380572b4bb24f60cd492b17331db6ee34a516 (diff)
tools: bpf: move the event reading loop to libbpf
There are two copies of the event reading loop - one in bpftool and one in
the trace_helpers "library". Consolidate them and move the code to libbpf.
Return codes from trace_helpers are kept, but renamed to include a LIBBPF
prefix.

Suggested-by: Alexei Starovoitov <ast@kernel.org>
Signed-off-by: Jakub Kicinski <jakub.kicinski@netronome.com>
Reviewed-by: Quentin Monnet <quentin.monnet@netronome.com>
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
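For context, the consolidated loop lands in libbpf as bpf_perf_event_read_simple(),
driven by a per-record callback. The sketch below shows the calling pattern as it
appears at the bpftool call site in the diff; the handle_event() and drain_ring()
names, the NULL private-data argument, and the include lines are illustrative
assumptions, not part of the patch.

/* Minimal caller sketch for the consolidated libbpf reading loop.
 * Assumes the libbpf.h from this series is on the include path.
 */
#include <stdio.h>
#include <linux/perf_event.h>
#include "libbpf.h"

/* Invoked once per record found in the mmap'ed perf ring.
 * Returning LIBBPF_PERF_EVENT_CONT tells libbpf to keep reading;
 * any other value stops the loop and is reported to the caller.
 */
static enum bpf_perf_event_ret handle_event(void *event, void *priv)
{
	struct perf_event_header *hdr = event;

	printf("record: type %u, size %u\n", hdr->type, hdr->size);
	return LIBBPF_PERF_EVENT_CONT;
}

/* mem is the mmap'ed region (control page followed by data pages),
 * size the data area size in bytes. buf/buf_len is a caller-owned
 * scratch buffer which libbpf reallocates as needed to linearize
 * records that wrap around the end of the ring.
 */
static int drain_ring(void *mem, size_t size, size_t page_size,
		      void **buf, size_t *buf_len)
{
	enum bpf_perf_event_ret ret;

	ret = bpf_perf_event_read_simple(mem, size, page_size,
					 buf, buf_len, handle_event, NULL);
	return ret == LIBBPF_PERF_EVENT_CONT ? 0 : -1;
}

This mirrors the convention visible in the diff below, where bpftool treats any
return other than LIBBPF_PERF_EVENT_CONT as fatal and stops its polling loop.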
Diffstat (limited to 'tools/bpf')
-rw-r--r--	tools/bpf/bpftool/map_perf_ring.c	66
1 file changed, 15 insertions, 51 deletions
diff --git a/tools/bpf/bpftool/map_perf_ring.c b/tools/bpf/bpftool/map_perf_ring.c
index 9ae4bb8a2cad..1832100d1b27 100644
--- a/tools/bpf/bpftool/map_perf_ring.c
+++ b/tools/bpf/bpftool/map_perf_ring.c
@@ -50,14 +50,15 @@ static void int_exit(int signo)
 	stop = true;
 }
 
-static void
-print_bpf_output(struct event_ring_info *ring, struct perf_event_sample *e)
+static enum bpf_perf_event_ret print_bpf_output(void *event, void *priv)
 {
+	struct event_ring_info *ring = priv;
+	struct perf_event_sample *e = event;
 	struct {
 		struct perf_event_header header;
 		__u64 id;
 		__u64 lost;
-	} *lost = (void *)e;
+	} *lost = event;
 
 	if (json_output) {
 		jsonw_start_object(json_wtr);
@@ -96,60 +97,23 @@ print_bpf_output(struct event_ring_info *ring, struct perf_event_sample *e)
 			e->header.type, e->header.size);
 		}
 	}
+
+	return LIBBPF_PERF_EVENT_CONT;
 }
 
 static void
 perf_event_read(struct event_ring_info *ring, void **buf, size_t *buf_len)
 {
-	volatile struct perf_event_mmap_page *header = ring->mem;
-	__u64 buffer_size = MMAP_PAGE_CNT * get_page_size();
-	__u64 data_tail = header->data_tail;
-	__u64 data_head = header->data_head;
-	void *base, *begin, *end;
-
-	asm volatile("" ::: "memory"); /* in real code it should be smp_rmb() */
-	if (data_head == data_tail)
-		return;
-
-	base = ((char *)header) + get_page_size();
-
-	begin = base + data_tail % buffer_size;
-	end = base + data_head % buffer_size;
-
-	while (begin != end) {
-		struct perf_event_sample *e;
-
-		e = begin;
-		if (begin + e->header.size > base + buffer_size) {
-			long len = base + buffer_size - begin;
-
-			if (*buf_len < e->header.size) {
-				free(*buf);
-				*buf = malloc(e->header.size);
-				if (!*buf) {
-					fprintf(stderr,
-						"can't allocate memory");
-					stop = true;
-					return;
-				}
-				*buf_len = e->header.size;
-			}
-
-			memcpy(*buf, begin, len);
-			memcpy(*buf + len, base, e->header.size - len);
-			e = (void *)*buf;
-			begin = base + e->header.size - len;
-		} else if (begin + e->header.size == base + buffer_size) {
-			begin = base;
-		} else {
-			begin += e->header.size;
-		}
-
-		print_bpf_output(ring, e);
-	}
-
-	__sync_synchronize(); /* smp_mb() */
-	header->data_tail = data_head;
+	enum bpf_perf_event_ret ret;
+
+	ret = bpf_perf_event_read_simple(ring->mem,
+					 MMAP_PAGE_CNT * get_page_size(),
+					 get_page_size(), buf, buf_len,
+					 print_bpf_output, ring);
+	if (ret != LIBBPF_PERF_EVENT_CONT) {
+		fprintf(stderr, "perf read loop failed with %d\n", ret);
+		stop = true;
+	}
 }
 
 static int perf_mmap_size(void)