author	Arnaldo Carvalho de Melo <acme@redhat.com>	2011-01-14 12:50:51 -0500
committer	Arnaldo Carvalho de Melo <acme@redhat.com>	2011-01-22 16:56:30 -0500
commit	0a27d7f9f417c0305f7efa70631764a53c7af219 (patch)
tree	60d126b9732e93596eaa4a923de785b351af5de7
parent	70db7533caef02350ec8d6852e589491bca3a951 (diff)
perf record: Use perf_evlist__mmap
There is more stuff that can go to the perf_ev{sel,list} layer, like
detecting if sample_id_all is available, etc, but let's try using this in
'perf test' first.

Cc: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Ingo Molnar <mingo@elte.hu>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Stephane Eranian <eranian@google.com>
Cc: Tom Zanussi <tzanussi@gmail.com>
LKML-Reference: <new-submission>
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
-rw-r--r--	tools/perf/builtin-record.c	59
1 file changed, 11 insertions(+), 48 deletions(-)
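For orientation, a minimal sketch (not part of the patch) of how 'perf record' is expected to drive the ring buffers after this change. It assumes the internal tools/perf APIs visible in the diff below (perf_evlist__mmap(), evlist->mmap[].base, evlist->pollfd/nr_fds, mmap_read(), die()), and a hypothetical 'done' flag standing in for the real exit condition, so treat it as an outline rather than a drop-in piece of builtin-record.c:

/*
 * Sketch of the record loop once perf_evlist__mmap() owns the rings.
 * The perf_evlist/cpu_map/thread_map types and mmap_read() are the
 * internal tools/perf ones seen in the diff; 'done' is a hypothetical
 * flag, normally set by a signal handler.
 */
#include <errno.h>
#include <poll.h>
#include <string.h>

static volatile int done;

static void record_loop(struct perf_evlist *evlist, struct cpu_map *cpus,
			struct thread_map *threads, int mmap_pages)
{
	int i;

	/*
	 * One call replaces the per-cpu mmap()/pollfd bookkeeping that
	 * create_counter() used to do by hand via mmap_array[].
	 */
	if (perf_evlist__mmap(evlist, cpus, threads, mmap_pages, false) < 0)
		die("failed to mmap with %d (%s)\n", errno, strerror(errno));

	while (!done) {
		/* Drain every per-cpu ring now owned by the evlist. */
		for (i = 0; i < cpus->nr; i++) {
			if (evlist->mmap[i].base)
				mmap_read(&evlist->mmap[i]);
		}

		/* Sleep on the fds perf_evlist__mmap() added to pollfd[]. */
		poll(evlist->pollfd, evlist->nr_fds, -1);
	}
}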
diff --git a/tools/perf/builtin-record.c b/tools/perf/builtin-record.c
index 109f3b269ac5..45a3689f9ed6 100644
--- a/tools/perf/builtin-record.c
+++ b/tools/perf/builtin-record.c
@@ -30,6 +30,7 @@
 #include <sys/mman.h>
 
 #define FD(e, x, y) (*(int *)xyarray__entry(e->fd, x, y))
+#define SID(e, x, y) xyarray__entry(e->id, x, y)
 
 enum write_mode_t {
 	WRITE_FORCE,
@@ -78,8 +79,6 @@ static off_t post_processing_offset;
 static struct perf_session *session;
 static const char *cpu_list;
 
-static struct perf_mmap mmap_array[MAX_NR_CPUS];
-
 static void advance_output(size_t size)
 {
 	bytes_written += size;
@@ -196,20 +195,14 @@ static struct perf_header_attr *get_header_attr(struct perf_event_attr *a, int n
 	return h_attr;
 }
 
-static void create_counter(struct perf_evlist *evlist,
-			   struct perf_evsel *evsel, int cpu)
+static void create_counter(struct perf_evsel *evsel, int cpu)
 {
 	char *filter = evsel->filter;
 	struct perf_event_attr *attr = &evsel->attr;
 	struct perf_header_attr *h_attr;
+	struct perf_sample_id *sid;
 	int thread_index;
 	int ret;
-	struct {
-		u64 count;
-		u64 time_enabled;
-		u64 time_running;
-		u64 id;
-	} read_data;
 
 	for (thread_index = 0; thread_index < threads->nr; thread_index++) {
 		h_attr = get_header_attr(attr, evsel->idx);
@@ -223,45 +216,12 @@ static void create_counter(struct perf_evlist *evlist,
 			}
 		}
 
-		if (read(FD(evsel, cpu, thread_index), &read_data, sizeof(read_data)) == -1) {
-			perror("Unable to read perf file descriptor");
-			exit(-1);
-		}
-
-		if (perf_header_attr__add_id(h_attr, read_data.id) < 0) {
+		sid = SID(evsel, cpu, thread_index);
+		if (perf_header_attr__add_id(h_attr, sid->id) < 0) {
 			pr_warning("Not enough memory to add id\n");
 			exit(-1);
 		}
 
-		assert(FD(evsel, cpu, thread_index) >= 0);
-		fcntl(FD(evsel, cpu, thread_index), F_SETFL, O_NONBLOCK);
-
-		if (evsel->idx || thread_index) {
-			struct perf_evsel *first;
-			first = list_entry(evlist->entries.next, struct perf_evsel, node);
-			ret = ioctl(FD(evsel, cpu, thread_index),
-				    PERF_EVENT_IOC_SET_OUTPUT,
-				    FD(first, cpu, 0));
-			if (ret) {
-				error("failed to set output: %d (%s)\n", errno,
-				      strerror(errno));
-				exit(-1);
-			}
-		} else {
-			mmap_array[cpu].prev = 0;
-			mmap_array[cpu].mask = mmap_pages*page_size - 1;
-			mmap_array[cpu].base = mmap(NULL, (mmap_pages+1)*page_size,
-				PROT_READ | PROT_WRITE, MAP_SHARED, FD(evsel, cpu, thread_index), 0);
-			if (mmap_array[cpu].base == MAP_FAILED) {
-				error("failed to mmap with %d (%s)\n", errno, strerror(errno));
-				exit(-1);
-			}
-
-			evlist->pollfd[evlist->nr_fds].fd = FD(evsel, cpu, thread_index);
-			evlist->pollfd[evlist->nr_fds].events = POLLIN;
-			evlist->nr_fds++;
-		}
-
 		if (filter != NULL) {
 			ret = ioctl(FD(evsel, cpu, thread_index),
 				    PERF_EVENT_IOC_SET_FILTER, filter);
@@ -423,9 +383,12 @@ try_again:
 		}
 	}
 
+	if (perf_evlist__mmap(evlist, cpus, threads, mmap_pages, false) < 0)
+		die("failed to mmap with %d (%s)\n", errno, strerror(errno));
+
 	for (cpu = 0; cpu < cpus->nr; ++cpu) {
 		list_for_each_entry(pos, &evlist->entries, node)
-			create_counter(evlist, pos, cpu);
+			create_counter(pos, cpu);
 	}
 }
 
@@ -502,8 +465,8 @@ static void mmap_read_all(void)
 	int i;
 
 	for (i = 0; i < cpus->nr; i++) {
-		if (mmap_array[i].base)
-			mmap_read(&mmap_array[i]);
+		if (evsel_list->mmap[i].base)
+			mmap_read(&evsel_list->mmap[i]);
 	}
 
 	if (perf_header__has_feat(&session->header, HEADER_TRACE_INFO))