Diffstat (limited to 'tools/perf/builtin-record.c')
-rw-r--r--  tools/perf/builtin-record.c | 383
1 file changed, 215 insertions(+), 168 deletions(-)
diff --git a/tools/perf/builtin-record.c b/tools/perf/builtin-record.c
index ba6777a147c..4642d38b8d1 100644
--- a/tools/perf/builtin-record.c
+++ b/tools/perf/builtin-record.c
@@ -35,43 +35,36 @@ enum write_mode_t {
35 WRITE_APPEND 35 WRITE_APPEND
36}; 36};
37 37
38struct perf_record_opts record_opts = { 38struct perf_record {
39 .target_pid = -1, 39 struct perf_event_ops ops;
40 .target_tid = -1, 40 struct perf_record_opts opts;
41 .mmap_pages = UINT_MAX, 41 u64 bytes_written;
42 .user_freq = UINT_MAX, 42 const char *output_name;
43 .user_interval = ULLONG_MAX, 43 struct perf_evlist *evlist;
44 .freq = 1000, 44 struct perf_session *session;
45 .sample_id_all_avail = true, 45 const char *progname;
46 int output;
47 unsigned int page_size;
48 int realtime_prio;
49 enum write_mode_t write_mode;
50 bool no_buildid;
51 bool no_buildid_cache;
52 bool force;
53 bool file_new;
54 bool append_file;
55 long samples;
56 off_t post_processing_offset;
46}; 57};
47 58
48static unsigned int page_size; 59static void advance_output(struct perf_record *rec, size_t size)
49static int output;
50static const char *output_name = NULL;
51static int realtime_prio = 0;
52static enum write_mode_t write_mode = WRITE_FORCE;
53static bool no_buildid = false;
54static bool no_buildid_cache = false;
55static struct perf_evlist *evsel_list;
56
57static long samples = 0;
58static u64 bytes_written = 0;
59
60static int file_new = 1;
61static off_t post_processing_offset;
62
63static struct perf_session *session;
64static const char *progname;
65
66static void advance_output(size_t size)
67{ 60{
68 bytes_written += size; 61 rec->bytes_written += size;
69} 62}
70 63
71static void write_output(void *buf, size_t size) 64static void write_output(struct perf_record *rec, void *buf, size_t size)
72{ 65{
73 while (size) { 66 while (size) {
74 int ret = write(output, buf, size); 67 int ret = write(rec->output, buf, size);
75 68
76 if (ret < 0) 69 if (ret < 0)
77 die("failed to write"); 70 die("failed to write");
@@ -79,30 +72,33 @@ static void write_output(void *buf, size_t size)
79 size -= ret; 72 size -= ret;
80 buf += ret; 73 buf += ret;
81 74
82 bytes_written += ret; 75 rec->bytes_written += ret;
83 } 76 }
84} 77}
85 78
86static int process_synthesized_event(union perf_event *event, 79static int process_synthesized_event(struct perf_event_ops *ops,
80 union perf_event *event,
87 struct perf_sample *sample __used, 81 struct perf_sample *sample __used,
88 struct perf_session *self __used) 82 struct perf_session *self __used)
89{ 83{
90 write_output(event, event->header.size); 84 struct perf_record *rec = container_of(ops, struct perf_record, ops);
85 write_output(rec, event, event->header.size);
91 return 0; 86 return 0;
92} 87}
93 88
94static void mmap_read(struct perf_mmap *md) 89static void perf_record__mmap_read(struct perf_record *rec,
90 struct perf_mmap *md)
95{ 91{
96 unsigned int head = perf_mmap__read_head(md); 92 unsigned int head = perf_mmap__read_head(md);
97 unsigned int old = md->prev; 93 unsigned int old = md->prev;
98 unsigned char *data = md->base + page_size; 94 unsigned char *data = md->base + rec->page_size;
99 unsigned long size; 95 unsigned long size;
100 void *buf; 96 void *buf;
101 97
102 if (old == head) 98 if (old == head)
103 return; 99 return;
104 100
105 samples++; 101 rec->samples++;
106 102
107 size = head - old; 103 size = head - old;
108 104
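
process_synthesized_event now receives only a pointer to the embedded perf_event_ops member and recovers its struct perf_record with container_of(). A self-contained sketch of that idiom, with a local container_of definition mirroring the kernel macro and illustrative type names:

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct event_ops { int dummy; };

struct recorder {
	struct event_ops ops;               /* embedded; callbacks get &rec->ops */
	unsigned long long bytes_written;
};

static int on_event(struct event_ops *ops, size_t size)
{
	struct recorder *rec = container_of(ops, struct recorder, ops);

	rec->bytes_written += size;         /* outer state recovered from the member */
	return 0;
}

int main(void)
{
	struct recorder rec = { .bytes_written = 0 };

	on_event(&rec.ops, 128);
	printf("%llu\n", rec.bytes_written);
	return 0;
}
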
@@ -111,14 +107,14 @@ static void mmap_read(struct perf_mmap *md)
111 size = md->mask + 1 - (old & md->mask); 107 size = md->mask + 1 - (old & md->mask);
112 old += size; 108 old += size;
113 109
114 write_output(buf, size); 110 write_output(rec, buf, size);
115 } 111 }
116 112
117 buf = &data[old & md->mask]; 113 buf = &data[old & md->mask];
118 size = head - old; 114 size = head - old;
119 old += size; 115 old += size;
120 116
121 write_output(buf, size); 117 write_output(rec, buf, size);
122 118
123 md->prev = old; 119 md->prev = old;
124 perf_mmap__write_tail(md, old); 120 perf_mmap__write_tail(md, old);
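
perf_record__mmap_read() consumes the [old, head) region of a power-of-two ring buffer, splitting the copy in two when the region wraps past the end of the buffer, then advances the stored tail. A minimal sketch of that two-chunk copy on a toy 8-byte ring (assumed sizes and names, not the perf mmap layout):

#include <stdio.h>
#include <string.h>

#define RING_SIZE 8u                      /* must be a power of two */
#define RING_MASK (RING_SIZE - 1)

static void consume(unsigned char *data, unsigned int old, unsigned int head,
		    unsigned char *out)
{
	unsigned int size = head - old;   /* old/head are free-running counters */

	if ((old & RING_MASK) + size != (head & RING_MASK)) {
		/* region wraps: copy the tail end of the buffer first */
		unsigned int first = RING_SIZE - (old & RING_MASK);

		memcpy(out, &data[old & RING_MASK], first);
		out += first;
		old += first;
		size = head - old;
	}
	memcpy(out, &data[old & RING_MASK], size);
}

int main(void)
{
	unsigned char ring[RING_SIZE], out[16] = { 0 };
	unsigned int i;

	for (i = 0; i < 12; i++)          /* producer keeps a free-running index */
		ring[i & RING_MASK] = (unsigned char)i;

	consume(ring, 6, 12, out);        /* consume [6, 12): wraps at index 8 */
	for (i = 0; i < 6; i++)
		printf("%d ", out[i]);    /* prints 6 7 8 9 10 11 */
	printf("\n");
	return 0;
}
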
@@ -137,17 +133,18 @@ static void sig_handler(int sig)
137 signr = sig; 133 signr = sig;
138} 134}
139 135
140static void sig_atexit(void) 136static void perf_record__sig_exit(int exit_status __used, void *arg)
141{ 137{
138 struct perf_record *rec = arg;
142 int status; 139 int status;
143 140
144 if (evsel_list->workload.pid > 0) { 141 if (rec->evlist->workload.pid > 0) {
145 if (!child_finished) 142 if (!child_finished)
146 kill(evsel_list->workload.pid, SIGTERM); 143 kill(rec->evlist->workload.pid, SIGTERM);
147 144
148 wait(&status); 145 wait(&status);
149 if (WIFSIGNALED(status)) 146 if (WIFSIGNALED(status))
150 psignal(WTERMSIG(status), progname); 147 psignal(WTERMSIG(status), rec->progname);
151 } 148 }
152 149
153 if (signr == -1 || signr == SIGUSR1) 150 if (signr == -1 || signr == SIGUSR1)
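
The handler above gains the (exit_status, arg) signature because the patch registers it with on_exit() instead of atexit(), so the perf_record pointer reaches the handler without going through a global. A tiny sketch of that mechanism; on_exit() is a glibc/SunOS extension, hence the feature-test macro:

#define _GNU_SOURCE
#include <stdio.h>
#include <stdlib.h>

struct recorder { long samples; };

static void cleanup(int status, void *arg)
{
	struct recorder *rec = arg;       /* state arrives via the pointer */

	fprintf(stderr, "exit %d after %ld samples\n", status, rec->samples);
}

int main(void)
{
	static struct recorder rec = { .samples = 42 };

	on_exit(cleanup, &rec);           /* runs when main returns or exit() is called */
	return 0;
}
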
@@ -176,13 +173,16 @@ static bool perf_evlist__equal(struct perf_evlist *evlist,
176 return true; 173 return true;
177} 174}
178 175
179static void open_counters(struct perf_evlist *evlist) 176static void perf_record__open(struct perf_record *rec)
180{ 177{
181 struct perf_evsel *pos, *first; 178 struct perf_evsel *pos, *first;
179 struct perf_evlist *evlist = rec->evlist;
180 struct perf_session *session = rec->session;
181 struct perf_record_opts *opts = &rec->opts;
182 182
183 first = list_entry(evlist->entries.next, struct perf_evsel, node); 183 first = list_entry(evlist->entries.next, struct perf_evsel, node);
184 184
185 perf_evlist__config_attrs(evlist, &record_opts); 185 perf_evlist__config_attrs(evlist, opts);
186 186
187 list_for_each_entry(pos, &evlist->entries, node) { 187 list_for_each_entry(pos, &evlist->entries, node) {
188 struct perf_event_attr *attr = &pos->attr; 188 struct perf_event_attr *attr = &pos->attr;
@@ -201,27 +201,27 @@ static void open_counters(struct perf_evlist *evlist)
201 */ 201 */
202 bool time_needed = attr->sample_type & PERF_SAMPLE_TIME; 202 bool time_needed = attr->sample_type & PERF_SAMPLE_TIME;
203 203
204 if (record_opts.group && pos != first) 204 if (opts->group && pos != first)
205 group_fd = first->fd; 205 group_fd = first->fd;
206retry_sample_id: 206retry_sample_id:
207 attr->sample_id_all = record_opts.sample_id_all_avail ? 1 : 0; 207 attr->sample_id_all = opts->sample_id_all_avail ? 1 : 0;
208try_again: 208try_again:
209 if (perf_evsel__open(pos, evlist->cpus, evlist->threads, 209 if (perf_evsel__open(pos, evlist->cpus, evlist->threads,
210 record_opts.group, group_fd) < 0) { 210 opts->group, group_fd) < 0) {
211 int err = errno; 211 int err = errno;
212 212
213 if (err == EPERM || err == EACCES) { 213 if (err == EPERM || err == EACCES) {
214 ui__error_paranoid(); 214 ui__error_paranoid();
215 exit(EXIT_FAILURE); 215 exit(EXIT_FAILURE);
216 } else if (err == ENODEV && record_opts.cpu_list) { 216 } else if (err == ENODEV && opts->cpu_list) {
217 die("No such device - did you specify" 217 die("No such device - did you specify"
218 " an out-of-range profile CPU?\n"); 218 " an out-of-range profile CPU?\n");
219 } else if (err == EINVAL && record_opts.sample_id_all_avail) { 219 } else if (err == EINVAL && opts->sample_id_all_avail) {
220 /* 220 /*
221 * Old kernel, no attr->sample_id_type_all field 221 * Old kernel, no attr->sample_id_type_all field
222 */ 222 */
223 record_opts.sample_id_all_avail = false; 223 opts->sample_id_all_avail = false;
224 if (!record_opts.sample_time && !record_opts.raw_samples && !time_needed) 224 if (!opts->sample_time && !opts->raw_samples && !time_needed)
225 attr->sample_type &= ~PERF_SAMPLE_TIME; 225 attr->sample_type &= ~PERF_SAMPLE_TIME;
226 226
227 goto retry_sample_id; 227 goto retry_sample_id;
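
The retry_sample_id logic above is a probe-and-fallback loop: assume the newer perf_event_attr capability is available, and if the kernel rejects the open with EINVAL, clear the capability flag and retry without it. A simplified sketch of that shape, with a fake open_counter() standing in for perf_evsel__open():

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

struct counter_attr { bool sample_id_all; };

/* Pretend kernel: rejects sample_id_all, accepts everything else. */
static int open_counter(const struct counter_attr *attr)
{
	if (attr->sample_id_all) {
		errno = EINVAL;
		return -1;
	}
	return 3;                          /* fake fd */
}

int main(void)
{
	bool sample_id_all_avail = true;   /* optimistic default, as in the patch */
	struct counter_attr attr;
	int fd;

retry_sample_id:
	attr.sample_id_all = sample_id_all_avail;
	fd = open_counter(&attr);
	if (fd < 0) {
		if (errno == EINVAL && sample_id_all_avail) {
			sample_id_all_avail = false;   /* old kernel: drop the feature */
			goto retry_sample_id;
		}
		perror("open_counter");
		return 1;
	}
	printf("opened fd %d (sample_id_all=%d)\n", fd, attr.sample_id_all);
	return 0;
}
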
@@ -271,10 +271,10 @@ try_again:
271 exit(-1); 271 exit(-1);
272 } 272 }
273 273
274 if (perf_evlist__mmap(evlist, record_opts.mmap_pages, false) < 0) 274 if (perf_evlist__mmap(evlist, opts->mmap_pages, false) < 0)
275 die("failed to mmap with %d (%s)\n", errno, strerror(errno)); 275 die("failed to mmap with %d (%s)\n", errno, strerror(errno));
276 276
277 if (file_new) 277 if (rec->file_new)
278 session->evlist = evlist; 278 session->evlist = evlist;
279 else { 279 else {
280 if (!perf_evlist__equal(session->evlist, evlist)) { 280 if (!perf_evlist__equal(session->evlist, evlist)) {
@@ -286,29 +286,32 @@ try_again:
286 perf_session__update_sample_type(session); 286 perf_session__update_sample_type(session);
287} 287}
288 288
289static int process_buildids(void) 289static int process_buildids(struct perf_record *rec)
290{ 290{
291 u64 size = lseek(output, 0, SEEK_CUR); 291 u64 size = lseek(rec->output, 0, SEEK_CUR);
292 292
293 if (size == 0) 293 if (size == 0)
294 return 0; 294 return 0;
295 295
296 session->fd = output; 296 rec->session->fd = rec->output;
297 return __perf_session__process_events(session, post_processing_offset, 297 return __perf_session__process_events(rec->session, rec->post_processing_offset,
298 size - post_processing_offset, 298 size - rec->post_processing_offset,
299 size, &build_id__mark_dso_hit_ops); 299 size, &build_id__mark_dso_hit_ops);
300} 300}
301 301
302static void atexit_header(void) 302static void perf_record__exit(int status __used, void *arg)
303{ 303{
304 if (!record_opts.pipe_output) { 304 struct perf_record *rec = arg;
305 session->header.data_size += bytes_written; 305
306 306 if (!rec->opts.pipe_output) {
307 if (!no_buildid) 307 rec->session->header.data_size += rec->bytes_written;
308 process_buildids(); 308
309 perf_session__write_header(session, evsel_list, output, true); 309 if (!rec->no_buildid)
310 perf_session__delete(session); 310 process_buildids(rec);
311 perf_evlist__delete(evsel_list); 311 perf_session__write_header(rec->session, rec->evlist,
312 rec->output, true);
313 perf_session__delete(rec->session);
314 perf_evlist__delete(rec->evlist);
312 symbol__exit(); 315 symbol__exit();
313 } 316 }
314} 317}
@@ -316,7 +319,9 @@ static void atexit_header(void)
316static void perf_event__synthesize_guest_os(struct machine *machine, void *data) 319static void perf_event__synthesize_guest_os(struct machine *machine, void *data)
317{ 320{
318 int err; 321 int err;
319 struct perf_session *psession = data; 322 struct perf_event_ops *ops = data;
323 struct perf_record *rec = container_of(ops, struct perf_record, ops);
324 struct perf_session *psession = rec->session;
320 325
321 if (machine__is_host(machine)) 326 if (machine__is_host(machine))
322 return; 327 return;
@@ -329,7 +334,7 @@ static void perf_event__synthesize_guest_os(struct machine *machine, void *data)
329 *method is used to avoid symbol missing when the first addr is 334 *method is used to avoid symbol missing when the first addr is
330 *in module instead of in guest kernel. 335 *in module instead of in guest kernel.
331 */ 336 */
332 err = perf_event__synthesize_modules(process_synthesized_event, 337 err = perf_event__synthesize_modules(ops, process_synthesized_event,
333 psession, machine); 338 psession, machine);
334 if (err < 0) 339 if (err < 0)
335 pr_err("Couldn't record guest kernel [%d]'s reference" 340 pr_err("Couldn't record guest kernel [%d]'s reference"
@@ -339,10 +344,10 @@ static void perf_event__synthesize_guest_os(struct machine *machine, void *data)
339 * We use _stext for guest kernel because guest kernel's /proc/kallsyms 344 * We use _stext for guest kernel because guest kernel's /proc/kallsyms
340 * have no _text sometimes. 345 * have no _text sometimes.
341 */ 346 */
342 err = perf_event__synthesize_kernel_mmap(process_synthesized_event, 347 err = perf_event__synthesize_kernel_mmap(ops, process_synthesized_event,
343 psession, machine, "_text"); 348 psession, machine, "_text");
344 if (err < 0) 349 if (err < 0)
345 err = perf_event__synthesize_kernel_mmap(process_synthesized_event, 350 err = perf_event__synthesize_kernel_mmap(ops, process_synthesized_event,
346 psession, machine, 351 psession, machine,
347 "_stext"); 352 "_stext");
348 if (err < 0) 353 if (err < 0)
@@ -355,66 +360,71 @@ static struct perf_event_header finished_round_event = {
355 .type = PERF_RECORD_FINISHED_ROUND, 360 .type = PERF_RECORD_FINISHED_ROUND,
356}; 361};
357 362
358static void mmap_read_all(void) 363static void perf_record__mmap_read_all(struct perf_record *rec)
359{ 364{
360 int i; 365 int i;
361 366
362 for (i = 0; i < evsel_list->nr_mmaps; i++) { 367 for (i = 0; i < rec->evlist->nr_mmaps; i++) {
363 if (evsel_list->mmap[i].base) 368 if (rec->evlist->mmap[i].base)
364 mmap_read(&evsel_list->mmap[i]); 369 perf_record__mmap_read(rec, &rec->evlist->mmap[i]);
365 } 370 }
366 371
367 if (perf_header__has_feat(&session->header, HEADER_TRACE_INFO)) 372 if (perf_header__has_feat(&rec->session->header, HEADER_TRACE_INFO))
368 write_output(&finished_round_event, sizeof(finished_round_event)); 373 write_output(rec, &finished_round_event, sizeof(finished_round_event));
369} 374}
370 375
371static int __cmd_record(int argc, const char **argv) 376static int __cmd_record(struct perf_record *rec, int argc, const char **argv)
372{ 377{
373 struct stat st; 378 struct stat st;
374 int flags; 379 int flags;
375 int err; 380 int err, output;
376 unsigned long waking = 0; 381 unsigned long waking = 0;
377 const bool forks = argc > 0; 382 const bool forks = argc > 0;
378 struct machine *machine; 383 struct machine *machine;
384 struct perf_event_ops *ops = &rec->ops;
385 struct perf_record_opts *opts = &rec->opts;
386 struct perf_evlist *evsel_list = rec->evlist;
387 const char *output_name = rec->output_name;
388 struct perf_session *session;
379 389
380 progname = argv[0]; 390 rec->progname = argv[0];
381 391
382 page_size = sysconf(_SC_PAGE_SIZE); 392 rec->page_size = sysconf(_SC_PAGE_SIZE);
383 393
384 atexit(sig_atexit); 394 on_exit(perf_record__sig_exit, rec);
385 signal(SIGCHLD, sig_handler); 395 signal(SIGCHLD, sig_handler);
386 signal(SIGINT, sig_handler); 396 signal(SIGINT, sig_handler);
387 signal(SIGUSR1, sig_handler); 397 signal(SIGUSR1, sig_handler);
388 398
389 if (!output_name) { 399 if (!output_name) {
390 if (!fstat(STDOUT_FILENO, &st) && S_ISFIFO(st.st_mode)) 400 if (!fstat(STDOUT_FILENO, &st) && S_ISFIFO(st.st_mode))
391 record_opts.pipe_output = true; 401 opts->pipe_output = true;
392 else 402 else
393 output_name = "perf.data"; 403 rec->output_name = output_name = "perf.data";
394 } 404 }
395 if (output_name) { 405 if (output_name) {
396 if (!strcmp(output_name, "-")) 406 if (!strcmp(output_name, "-"))
397 record_opts.pipe_output = true; 407 opts->pipe_output = true;
398 else if (!stat(output_name, &st) && st.st_size) { 408 else if (!stat(output_name, &st) && st.st_size) {
399 if (write_mode == WRITE_FORCE) { 409 if (rec->write_mode == WRITE_FORCE) {
400 char oldname[PATH_MAX]; 410 char oldname[PATH_MAX];
401 snprintf(oldname, sizeof(oldname), "%s.old", 411 snprintf(oldname, sizeof(oldname), "%s.old",
402 output_name); 412 output_name);
403 unlink(oldname); 413 unlink(oldname);
404 rename(output_name, oldname); 414 rename(output_name, oldname);
405 } 415 }
406 } else if (write_mode == WRITE_APPEND) { 416 } else if (rec->write_mode == WRITE_APPEND) {
407 write_mode = WRITE_FORCE; 417 rec->write_mode = WRITE_FORCE;
408 } 418 }
409 } 419 }
410 420
411 flags = O_CREAT|O_RDWR; 421 flags = O_CREAT|O_RDWR;
412 if (write_mode == WRITE_APPEND) 422 if (rec->write_mode == WRITE_APPEND)
413 file_new = 0; 423 rec->file_new = 0;
414 else 424 else
415 flags |= O_TRUNC; 425 flags |= O_TRUNC;
416 426
417 if (record_opts.pipe_output) 427 if (opts->pipe_output)
418 output = STDOUT_FILENO; 428 output = STDOUT_FILENO;
419 else 429 else
420 output = open(output_name, flags, S_IRUSR | S_IWUSR); 430 output = open(output_name, flags, S_IRUSR | S_IWUSR);
@@ -423,17 +433,21 @@ static int __cmd_record(int argc, const char **argv)
423 exit(-1); 433 exit(-1);
424 } 434 }
425 435
436 rec->output = output;
437
426 session = perf_session__new(output_name, O_WRONLY, 438 session = perf_session__new(output_name, O_WRONLY,
427 write_mode == WRITE_FORCE, false, NULL); 439 rec->write_mode == WRITE_FORCE, false, NULL);
428 if (session == NULL) { 440 if (session == NULL) {
429 pr_err("Not enough memory for reading perf file header\n"); 441 pr_err("Not enough memory for reading perf file header\n");
430 return -1; 442 return -1;
431 } 443 }
432 444
433 if (!no_buildid) 445 rec->session = session;
446
447 if (!rec->no_buildid)
434 perf_header__set_feat(&session->header, HEADER_BUILD_ID); 448 perf_header__set_feat(&session->header, HEADER_BUILD_ID);
435 449
436 if (!file_new) { 450 if (!rec->file_new) {
437 err = perf_session__read_header(session, output); 451 err = perf_session__read_header(session, output);
438 if (err < 0) 452 if (err < 0)
439 goto out_delete_session; 453 goto out_delete_session;
@@ -456,42 +470,42 @@ static int __cmd_record(int argc, const char **argv)
456 perf_header__set_feat(&session->header, HEADER_CPUID); 470 perf_header__set_feat(&session->header, HEADER_CPUID);
457 471
458 if (forks) { 472 if (forks) {
459 err = perf_evlist__prepare_workload(evsel_list, &record_opts, argv); 473 err = perf_evlist__prepare_workload(evsel_list, opts, argv);
460 if (err < 0) { 474 if (err < 0) {
461 pr_err("Couldn't run the workload!\n"); 475 pr_err("Couldn't run the workload!\n");
462 goto out_delete_session; 476 goto out_delete_session;
463 } 477 }
464 } 478 }
465 479
466 open_counters(evsel_list); 480 perf_record__open(rec);
467 481
468 /* 482 /*
469 * perf_session__delete(session) will be called at atexit_header() 483 * perf_session__delete(session) will be called at perf_record__exit()
470 */ 484 */
471 atexit(atexit_header); 485 on_exit(perf_record__exit, rec);
472 486
473 if (record_opts.pipe_output) { 487 if (opts->pipe_output) {
474 err = perf_header__write_pipe(output); 488 err = perf_header__write_pipe(output);
475 if (err < 0) 489 if (err < 0)
476 return err; 490 return err;
477 } else if (file_new) { 491 } else if (rec->file_new) {
478 err = perf_session__write_header(session, evsel_list, 492 err = perf_session__write_header(session, evsel_list,
479 output, false); 493 output, false);
480 if (err < 0) 494 if (err < 0)
481 return err; 495 return err;
482 } 496 }
483 497
484 post_processing_offset = lseek(output, 0, SEEK_CUR); 498 rec->post_processing_offset = lseek(output, 0, SEEK_CUR);
485 499
486 if (record_opts.pipe_output) { 500 if (opts->pipe_output) {
487 err = perf_session__synthesize_attrs(session, 501 err = perf_event__synthesize_attrs(ops, session,
488 process_synthesized_event); 502 process_synthesized_event);
489 if (err < 0) { 503 if (err < 0) {
490 pr_err("Couldn't synthesize attrs.\n"); 504 pr_err("Couldn't synthesize attrs.\n");
491 return err; 505 return err;
492 } 506 }
493 507
494 err = perf_event__synthesize_event_types(process_synthesized_event, 508 err = perf_event__synthesize_event_types(ops, process_synthesized_event,
495 session); 509 session);
496 if (err < 0) { 510 if (err < 0) {
497 pr_err("Couldn't synthesize event_types.\n"); 511 pr_err("Couldn't synthesize event_types.\n");
@@ -507,14 +521,14 @@ static int __cmd_record(int argc, const char **argv)
507 * return this more properly and also 521 * return this more properly and also
508 * propagate errors that now are calling die() 522 * propagate errors that now are calling die()
509 */ 523 */
510 err = perf_event__synthesize_tracing_data(output, evsel_list, 524 err = perf_event__synthesize_tracing_data(ops, output, evsel_list,
511 process_synthesized_event, 525 process_synthesized_event,
512 session); 526 session);
513 if (err <= 0) { 527 if (err <= 0) {
514 pr_err("Couldn't record tracing data.\n"); 528 pr_err("Couldn't record tracing data.\n");
515 return err; 529 return err;
516 } 530 }
517 advance_output(err); 531 advance_output(rec, err);
518 } 532 }
519 } 533 }
520 534
@@ -524,17 +538,17 @@ static int __cmd_record(int argc, const char **argv)
524 return -1; 538 return -1;
525 } 539 }
526 540
527 err = perf_event__synthesize_kernel_mmap(process_synthesized_event, 541 err = perf_event__synthesize_kernel_mmap(ops, process_synthesized_event,
528 session, machine, "_text"); 542 session, machine, "_text");
529 if (err < 0) 543 if (err < 0)
530 err = perf_event__synthesize_kernel_mmap(process_synthesized_event, 544 err = perf_event__synthesize_kernel_mmap(ops, process_synthesized_event,
531 session, machine, "_stext"); 545 session, machine, "_stext");
532 if (err < 0) 546 if (err < 0)
533 pr_err("Couldn't record kernel reference relocation symbol\n" 547 pr_err("Couldn't record kernel reference relocation symbol\n"
534 "Symbol resolution may be skewed if relocation was used (e.g. kexec).\n" 548 "Symbol resolution may be skewed if relocation was used (e.g. kexec).\n"
535 "Check /proc/kallsyms permission or run as root.\n"); 549 "Check /proc/kallsyms permission or run as root.\n");
536 550
537 err = perf_event__synthesize_modules(process_synthesized_event, 551 err = perf_event__synthesize_modules(ops, process_synthesized_event,
538 session, machine); 552 session, machine);
539 if (err < 0) 553 if (err < 0)
540 pr_err("Couldn't record kernel module information.\n" 554 pr_err("Couldn't record kernel module information.\n"
@@ -542,21 +556,21 @@ static int __cmd_record(int argc, const char **argv)
542 "Check /proc/modules permission or run as root.\n"); 556 "Check /proc/modules permission or run as root.\n");
543 557
544 if (perf_guest) 558 if (perf_guest)
545 perf_session__process_machines(session, 559 perf_session__process_machines(session, ops,
546 perf_event__synthesize_guest_os); 560 perf_event__synthesize_guest_os);
547 561
548 if (!record_opts.system_wide) 562 if (!opts->system_wide)
549 perf_event__synthesize_thread_map(evsel_list->threads, 563 perf_event__synthesize_thread_map(ops, evsel_list->threads,
550 process_synthesized_event, 564 process_synthesized_event,
551 session); 565 session);
552 else 566 else
553 perf_event__synthesize_threads(process_synthesized_event, 567 perf_event__synthesize_threads(ops, process_synthesized_event,
554 session); 568 session);
555 569
556 if (realtime_prio) { 570 if (rec->realtime_prio) {
557 struct sched_param param; 571 struct sched_param param;
558 572
559 param.sched_priority = realtime_prio; 573 param.sched_priority = rec->realtime_prio;
560 if (sched_setscheduler(0, SCHED_FIFO, &param)) { 574 if (sched_setscheduler(0, SCHED_FIFO, &param)) {
561 pr_err("Could not set realtime priority.\n"); 575 pr_err("Could not set realtime priority.\n");
562 exit(-1); 576 exit(-1);
@@ -572,11 +586,11 @@ static int __cmd_record(int argc, const char **argv)
572 perf_evlist__start_workload(evsel_list); 586 perf_evlist__start_workload(evsel_list);
573 587
574 for (;;) { 588 for (;;) {
575 int hits = samples; 589 int hits = rec->samples;
576 590
577 mmap_read_all(); 591 perf_record__mmap_read_all(rec);
578 592
579 if (hits == samples) { 593 if (hits == rec->samples) {
580 if (done) 594 if (done)
581 break; 595 break;
582 err = poll(evsel_list->pollfd, evsel_list->nr_fds, -1); 596 err = poll(evsel_list->pollfd, evsel_list->nr_fds, -1);
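
The main loop above drains every mmap region on each pass and only sleeps in poll() when a full pass produced no new samples; the signal handler's done flag is what eventually breaks it. A much-simplified sketch of that shape, using a non-blocking pipe in place of the perf mmap buffers:

#include <fcntl.h>
#include <poll.h>
#include <signal.h>
#include <stdio.h>
#include <unistd.h>

static volatile sig_atomic_t done;

static void sig_handler(int sig)
{
	(void)sig;
	done = 1;
}

int main(void)
{
	int fds[2];
	long samples = 0;
	char buf[256];

	if (pipe(fds) < 0)
		return 1;
	fcntl(fds[0], F_SETFL, O_NONBLOCK);       /* the drain step must not block */
	signal(SIGINT, sig_handler);

	if (write(fds[1], "hello", 5) < 0)        /* pretend a counter produced data */
		return 1;

	for (;;) {
		long hits = samples;

		if (read(fds[0], buf, sizeof(buf)) > 0)  /* "drain the mmaps" */
			samples++;

		if (hits == samples) {            /* nothing new in this pass */
			struct pollfd pfd = { .fd = fds[0], .events = POLLIN };

			if (done)
				break;
			poll(&pfd, 1, 100);       /* block until data or timeout */
		}

		if (samples)                      /* demo only: stop after one sample */
			done = 1;
	}

	printf("%ld samples\n", samples);
	return 0;
}
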
@@ -597,9 +611,9 @@ static int __cmd_record(int argc, const char **argv)
597 */ 611 */
598 fprintf(stderr, 612 fprintf(stderr,
599 "[ perf record: Captured and wrote %.3f MB %s (~%" PRIu64 " samples) ]\n", 613 "[ perf record: Captured and wrote %.3f MB %s (~%" PRIu64 " samples) ]\n",
600 (double)bytes_written / 1024.0 / 1024.0, 614 (double)rec->bytes_written / 1024.0 / 1024.0,
601 output_name, 615 output_name,
602 bytes_written / 24); 616 rec->bytes_written / 24);
603 617
604 return 0; 618 return 0;
605 619
@@ -614,59 +628,88 @@ static const char * const record_usage[] = {
614 NULL 628 NULL
615}; 629};
616 630
617static bool force, append_file; 631/*
632 * XXX Ideally would be local to cmd_record() and passed to a perf_record__new
633 * because we need to have access to it in perf_record__exit, that is called
634 * after cmd_record() exits, but since record_options need to be accessible to
635 * builtin-script, leave it here.
636 *
637 * At least we don't ouch it in all the other functions here directly.
638 *
639 * Just say no to tons of global variables, sigh.
640 */
641static struct perf_record record = {
642 .opts = {
643 .target_pid = -1,
644 .target_tid = -1,
645 .mmap_pages = UINT_MAX,
646 .user_freq = UINT_MAX,
647 .user_interval = ULLONG_MAX,
648 .freq = 1000,
649 .sample_id_all_avail = true,
650 },
651 .write_mode = WRITE_FORCE,
652 .file_new = true,
653};
618 654
655/*
656 * XXX Will stay a global variable till we fix builtin-script.c to stop messing
657 * with it and switch to use the library functions in perf_evlist that came
658 * from builtin-record.c, i.e. use perf_record_opts,
659 * perf_evlist__prepare_workload, etc instead of fork+exec'in 'perf record',
660 * using pipes, etc.
661 */
619const struct option record_options[] = { 662const struct option record_options[] = {
620 OPT_CALLBACK('e', "event", &evsel_list, "event", 663 OPT_CALLBACK('e', "event", &record.evlist, "event",
621 "event selector. use 'perf list' to list available events", 664 "event selector. use 'perf list' to list available events",
622 parse_events_option), 665 parse_events_option),
623 OPT_CALLBACK(0, "filter", &evsel_list, "filter", 666 OPT_CALLBACK(0, "filter", &record.evlist, "filter",
624 "event filter", parse_filter), 667 "event filter", parse_filter),
625 OPT_INTEGER('p', "pid", &record_opts.target_pid, 668 OPT_INTEGER('p', "pid", &record.opts.target_pid,
626 "record events on existing process id"), 669 "record events on existing process id"),
627 OPT_INTEGER('t', "tid", &record_opts.target_tid, 670 OPT_INTEGER('t', "tid", &record.opts.target_tid,
628 "record events on existing thread id"), 671 "record events on existing thread id"),
629 OPT_INTEGER('r', "realtime", &realtime_prio, 672 OPT_INTEGER('r', "realtime", &record.realtime_prio,
630 "collect data with this RT SCHED_FIFO priority"), 673 "collect data with this RT SCHED_FIFO priority"),
631 OPT_BOOLEAN('D', "no-delay", &record_opts.no_delay, 674 OPT_BOOLEAN('D', "no-delay", &record.opts.no_delay,
632 "collect data without buffering"), 675 "collect data without buffering"),
633 OPT_BOOLEAN('R', "raw-samples", &record_opts.raw_samples, 676 OPT_BOOLEAN('R', "raw-samples", &record.opts.raw_samples,
634 "collect raw sample records from all opened counters"), 677 "collect raw sample records from all opened counters"),
635 OPT_BOOLEAN('a', "all-cpus", &record_opts.system_wide, 678 OPT_BOOLEAN('a', "all-cpus", &record.opts.system_wide,
636 "system-wide collection from all CPUs"), 679 "system-wide collection from all CPUs"),
637 OPT_BOOLEAN('A', "append", &append_file, 680 OPT_BOOLEAN('A', "append", &record.append_file,
638 "append to the output file to do incremental profiling"), 681 "append to the output file to do incremental profiling"),
639 OPT_STRING('C', "cpu", &record_opts.cpu_list, "cpu", 682 OPT_STRING('C', "cpu", &record.opts.cpu_list, "cpu",
640 "list of cpus to monitor"), 683 "list of cpus to monitor"),
641 OPT_BOOLEAN('f', "force", &force, 684 OPT_BOOLEAN('f', "force", &record.force,
642 "overwrite existing data file (deprecated)"), 685 "overwrite existing data file (deprecated)"),
643 OPT_U64('c', "count", &record_opts.user_interval, "event period to sample"), 686 OPT_U64('c', "count", &record.opts.user_interval, "event period to sample"),
644 OPT_STRING('o', "output", &output_name, "file", 687 OPT_STRING('o', "output", &record.output_name, "file",
645 "output file name"), 688 "output file name"),
646 OPT_BOOLEAN('i', "no-inherit", &record_opts.no_inherit, 689 OPT_BOOLEAN('i', "no-inherit", &record.opts.no_inherit,
647 "child tasks do not inherit counters"), 690 "child tasks do not inherit counters"),
648 OPT_UINTEGER('F', "freq", &record_opts.user_freq, "profile at this frequency"), 691 OPT_UINTEGER('F', "freq", &record.opts.user_freq, "profile at this frequency"),
649 OPT_UINTEGER('m', "mmap-pages", &record_opts.mmap_pages, 692 OPT_UINTEGER('m', "mmap-pages", &record.opts.mmap_pages,
650 "number of mmap data pages"), 693 "number of mmap data pages"),
651 OPT_BOOLEAN(0, "group", &record_opts.group, 694 OPT_BOOLEAN(0, "group", &record.opts.group,
652 "put the counters into a counter group"), 695 "put the counters into a counter group"),
653 OPT_BOOLEAN('g', "call-graph", &record_opts.call_graph, 696 OPT_BOOLEAN('g', "call-graph", &record.opts.call_graph,
654 "do call-graph (stack chain/backtrace) recording"), 697 "do call-graph (stack chain/backtrace) recording"),
655 OPT_INCR('v', "verbose", &verbose, 698 OPT_INCR('v', "verbose", &verbose,
656 "be more verbose (show counter open errors, etc)"), 699 "be more verbose (show counter open errors, etc)"),
657 OPT_BOOLEAN('q', "quiet", &quiet, "don't print any message"), 700 OPT_BOOLEAN('q', "quiet", &quiet, "don't print any message"),
658 OPT_BOOLEAN('s', "stat", &record_opts.inherit_stat, 701 OPT_BOOLEAN('s', "stat", &record.opts.inherit_stat,
659 "per thread counts"), 702 "per thread counts"),
660 OPT_BOOLEAN('d', "data", &record_opts.sample_address, 703 OPT_BOOLEAN('d', "data", &record.opts.sample_address,
661 "Sample addresses"), 704 "Sample addresses"),
662 OPT_BOOLEAN('T', "timestamp", &record_opts.sample_time, "Sample timestamps"), 705 OPT_BOOLEAN('T', "timestamp", &record.opts.sample_time, "Sample timestamps"),
663 OPT_BOOLEAN('n', "no-samples", &record_opts.no_samples, 706 OPT_BOOLEAN('n', "no-samples", &record.opts.no_samples,
664 "don't sample"), 707 "don't sample"),
665 OPT_BOOLEAN('N', "no-buildid-cache", &no_buildid_cache, 708 OPT_BOOLEAN('N', "no-buildid-cache", &record.no_buildid_cache,
666 "do not update the buildid cache"), 709 "do not update the buildid cache"),
667 OPT_BOOLEAN('B', "no-buildid", &no_buildid, 710 OPT_BOOLEAN('B', "no-buildid", &record.no_buildid,
668 "do not collect buildids in perf.data"), 711 "do not collect buildids in perf.data"),
669 OPT_CALLBACK('G', "cgroup", &evsel_list, "name", 712 OPT_CALLBACK('G', "cgroup", &record.evlist, "name",
670 "monitor event in cgroup name only", 713 "monitor event in cgroup name only",
671 parse_cgroups), 714 parse_cgroups),
672 OPT_END() 715 OPT_END()
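
In the static struct perf_record defaults above, UINT_MAX and ULLONG_MAX act as "not set on the command line" sentinels that cmd_record() later resolves against the built-in defaults (see the last hunk of this patch). A standalone sketch of that idiom with illustrative field names:

#include <limits.h>
#include <stdio.h>

struct record_opts {
	unsigned long long user_interval;  /* -c */
	unsigned int user_freq;            /* -F */
	unsigned long long default_interval;
	unsigned int freq;
};

static struct record_opts opts = {
	.user_interval = ULLONG_MAX,       /* sentinel: unset */
	.user_freq     = UINT_MAX,         /* sentinel: unset */
	.freq          = 1000,             /* built-in default */
};

int main(void)
{
	if (opts.user_interval != ULLONG_MAX)
		opts.default_interval = opts.user_interval;
	if (opts.user_freq != UINT_MAX)
		opts.freq = opts.user_freq;

	if (opts.default_interval)         /* a fixed period overrides frequency */
		opts.freq = 0;

	printf("freq=%u interval=%llu\n", opts.freq, opts.default_interval);
	return 0;
}
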
@@ -676,6 +719,8 @@ int cmd_record(int argc, const char **argv, const char *prefix __used)
676{ 719{
677 int err = -ENOMEM; 720 int err = -ENOMEM;
678 struct perf_evsel *pos; 721 struct perf_evsel *pos;
722 struct perf_evlist *evsel_list;
723 struct perf_record *rec = &record;
679 724
680 perf_header__set_cmdline(argc, argv); 725 perf_header__set_cmdline(argc, argv);
681 726
@@ -683,23 +728,25 @@ int cmd_record(int argc, const char **argv, const char *prefix __used)
683 if (evsel_list == NULL) 728 if (evsel_list == NULL)
684 return -ENOMEM; 729 return -ENOMEM;
685 730
731 rec->evlist = evsel_list;
732
686 argc = parse_options(argc, argv, record_options, record_usage, 733 argc = parse_options(argc, argv, record_options, record_usage,
687 PARSE_OPT_STOP_AT_NON_OPTION); 734 PARSE_OPT_STOP_AT_NON_OPTION);
688 if (!argc && record_opts.target_pid == -1 && record_opts.target_tid == -1 && 735 if (!argc && rec->opts.target_pid == -1 && rec->opts.target_tid == -1 &&
689 !record_opts.system_wide && !record_opts.cpu_list) 736 !rec->opts.system_wide && !rec->opts.cpu_list)
690 usage_with_options(record_usage, record_options); 737 usage_with_options(record_usage, record_options);
691 738
692 if (force && append_file) { 739 if (rec->force && rec->append_file) {
693 fprintf(stderr, "Can't overwrite and append at the same time." 740 fprintf(stderr, "Can't overwrite and append at the same time."
694 " You need to choose between -f and -A"); 741 " You need to choose between -f and -A");
695 usage_with_options(record_usage, record_options); 742 usage_with_options(record_usage, record_options);
696 } else if (append_file) { 743 } else if (rec->append_file) {
697 write_mode = WRITE_APPEND; 744 rec->write_mode = WRITE_APPEND;
698 } else { 745 } else {
699 write_mode = WRITE_FORCE; 746 rec->write_mode = WRITE_FORCE;
700 } 747 }
701 748
702 if (nr_cgroups && !record_opts.system_wide) { 749 if (nr_cgroups && !rec->opts.system_wide) {
703 fprintf(stderr, "cgroup monitoring only available in" 750 fprintf(stderr, "cgroup monitoring only available in"
704 " system-wide mode\n"); 751 " system-wide mode\n");
705 usage_with_options(record_usage, record_options); 752 usage_with_options(record_usage, record_options);
@@ -717,7 +764,7 @@ int cmd_record(int argc, const char **argv, const char *prefix __used)
717"If some relocation was applied (e.g. kexec) symbols may be misresolved\n" 764"If some relocation was applied (e.g. kexec) symbols may be misresolved\n"
718"even with a suitable vmlinux or kallsyms file.\n\n"); 765"even with a suitable vmlinux or kallsyms file.\n\n");
719 766
720 if (no_buildid_cache || no_buildid) 767 if (rec->no_buildid_cache || rec->no_buildid)
721 disable_buildid_cache(); 768 disable_buildid_cache();
722 769
723 if (evsel_list->nr_entries == 0 && 770 if (evsel_list->nr_entries == 0 &&
@@ -726,11 +773,11 @@ int cmd_record(int argc, const char **argv, const char *prefix __used)
726 goto out_symbol_exit; 773 goto out_symbol_exit;
727 } 774 }
728 775
729 if (record_opts.target_pid != -1) 776 if (rec->opts.target_pid != -1)
730 record_opts.target_tid = record_opts.target_pid; 777 rec->opts.target_tid = rec->opts.target_pid;
731 778
732 if (perf_evlist__create_maps(evsel_list, record_opts.target_pid, 779 if (perf_evlist__create_maps(evsel_list, rec->opts.target_pid,
733 record_opts.target_tid, record_opts.cpu_list) < 0) 780 rec->opts.target_tid, rec->opts.cpu_list) < 0)
734 usage_with_options(record_usage, record_options); 781 usage_with_options(record_usage, record_options);
735 782
736 list_for_each_entry(pos, &evsel_list->entries, node) { 783 list_for_each_entry(pos, &evsel_list->entries, node) {
@@ -744,25 +791,25 @@ int cmd_record(int argc, const char **argv, const char *prefix __used)
744 if (perf_evlist__alloc_pollfd(evsel_list) < 0) 791 if (perf_evlist__alloc_pollfd(evsel_list) < 0)
745 goto out_free_fd; 792 goto out_free_fd;
746 793
747 if (record_opts.user_interval != ULLONG_MAX) 794 if (rec->opts.user_interval != ULLONG_MAX)
748 record_opts.default_interval = record_opts.user_interval; 795 rec->opts.default_interval = rec->opts.user_interval;
749 if (record_opts.user_freq != UINT_MAX) 796 if (rec->opts.user_freq != UINT_MAX)
750 record_opts.freq = record_opts.user_freq; 797 rec->opts.freq = rec->opts.user_freq;
751 798
752 /* 799 /*
753 * User specified count overrides default frequency. 800 * User specified count overrides default frequency.
754 */ 801 */
755 if (record_opts.default_interval) 802 if (rec->opts.default_interval)
756 record_opts.freq = 0; 803 rec->opts.freq = 0;
757 else if (record_opts.freq) { 804 else if (rec->opts.freq) {
758 record_opts.default_interval = record_opts.freq; 805 rec->opts.default_interval = rec->opts.freq;
759 } else { 806 } else {
760 fprintf(stderr, "frequency and count are zero, aborting\n"); 807 fprintf(stderr, "frequency and count are zero, aborting\n");
761 err = -EINVAL; 808 err = -EINVAL;
762 goto out_free_fd; 809 goto out_free_fd;
763 } 810 }
764 811
765 err = __cmd_record(argc, argv); 812 err = __cmd_record(&record, argc, argv);
766out_free_fd: 813out_free_fd:
767 perf_evlist__delete_maps(evsel_list); 814 perf_evlist__delete_maps(evsel_list);
768out_symbol_exit: 815out_symbol_exit: