Diffstat (limited to 'tools/perf/builtin-timechart.c')
 -rw-r--r--  tools/perf/builtin-timechart.c | 169
 1 file changed, 145 insertions(+), 24 deletions(-)
diff --git a/tools/perf/builtin-timechart.c b/tools/perf/builtin-timechart.c
index 1c60ed3f5b97..491662bdfe0b 100644
--- a/tools/perf/builtin-timechart.c
+++ b/tools/perf/builtin-timechart.c
@@ -52,6 +52,7 @@ static u64 first_time, last_time;
 
 static bool power_only;
 static bool tasks_only;
+static bool with_backtrace;
 
 
 struct per_pid;
@@ -126,6 +127,7 @@ struct cpu_sample {
 	u64 end_time;
 	int type;
 	int cpu;
+	const char *backtrace;
 };
 
 static struct per_pid *all_data;
@@ -147,6 +149,7 @@ struct wake_event {
 	int waker;
 	int wakee;
 	u64 time;
+	const char *backtrace;
 };
 
 static struct power_event *power_events;
@@ -231,7 +234,8 @@ static void pid_exit(int pid, u64 timestamp)
 }
 
 static void
-pid_put_sample(int pid, int type, unsigned int cpu, u64 start, u64 end)
+pid_put_sample(int pid, int type, unsigned int cpu, u64 start, u64 end,
+	       const char *backtrace)
 {
 	struct per_pid *p;
 	struct per_pidcomm *c;
@@ -254,6 +258,7 @@ pid_put_sample(int pid, int type, unsigned int cpu, u64 start, u64 end)
 	sample->type = type;
 	sample->next = c->samples;
 	sample->cpu = cpu;
+	sample->backtrace = backtrace;
 	c->samples = sample;
 
 	if (sample->type == TYPE_RUNNING && end > start && start > 0) {
@@ -405,7 +410,8 @@ static void p_state_change(int cpu, u64 timestamp, u64 new_freq)
 }
 
 static void
-sched_wakeup(int cpu, u64 timestamp, int pid, struct trace_entry *te)
+sched_wakeup(int cpu, u64 timestamp, int pid, struct trace_entry *te,
+	     const char *backtrace)
 {
 	struct per_pid *p;
 	struct wakeup_entry *wake = (void *)te;
@@ -416,6 +422,7 @@ sched_wakeup(int cpu, u64 timestamp, int pid, struct trace_entry *te)
 
 	we->time = timestamp;
 	we->waker = pid;
+	we->backtrace = backtrace;
 
 	if ((te->flags & TRACE_FLAG_HARDIRQ) || (te->flags & TRACE_FLAG_SOFTIRQ))
 		we->waker = -1;
@@ -430,13 +437,15 @@ sched_wakeup(int cpu, u64 timestamp, int pid, struct trace_entry *te)
 		p->current->state = TYPE_WAITING;
 	}
 	if (p && p->current && p->current->state == TYPE_BLOCKED) {
-		pid_put_sample(p->pid, p->current->state, cpu, p->current->state_since, timestamp);
+		pid_put_sample(p->pid, p->current->state, cpu,
+			       p->current->state_since, timestamp, NULL);
 		p->current->state_since = timestamp;
 		p->current->state = TYPE_WAITING;
 	}
 }
 
-static void sched_switch(int cpu, u64 timestamp, struct trace_entry *te)
+static void sched_switch(int cpu, u64 timestamp, struct trace_entry *te,
+			 const char *backtrace)
 {
 	struct per_pid *p = NULL, *prev_p;
 	struct sched_switch *sw = (void *)te;
@@ -447,10 +456,14 @@ static void sched_switch(int cpu, u64 timestamp, struct trace_entry *te)
 	p = find_create_pid(sw->next_pid);
 
 	if (prev_p->current && prev_p->current->state != TYPE_NONE)
-		pid_put_sample(sw->prev_pid, TYPE_RUNNING, cpu, prev_p->current->state_since, timestamp);
+		pid_put_sample(sw->prev_pid, TYPE_RUNNING, cpu,
+			       prev_p->current->state_since, timestamp,
+			       backtrace);
 	if (p && p->current) {
 		if (p->current->state != TYPE_NONE)
-			pid_put_sample(sw->next_pid, p->current->state, cpu, p->current->state_since, timestamp);
+			pid_put_sample(sw->next_pid, p->current->state, cpu,
+				       p->current->state_since, timestamp,
+				       backtrace);
 
 		p->current->state_since = timestamp;
 		p->current->state = TYPE_RUNNING;
@@ -466,8 +479,87 @@ static void sched_switch(int cpu, u64 timestamp, struct trace_entry *te)
 	}
 }
 
+static const char *cat_backtrace(union perf_event *event,
+				 struct perf_sample *sample,
+				 struct machine *machine)
+{
+	struct addr_location al;
+	unsigned int i;
+	char *p = NULL;
+	size_t p_len;
+	u8 cpumode = PERF_RECORD_MISC_USER;
+	struct addr_location tal;
+	struct ip_callchain *chain = sample->callchain;
+	FILE *f = open_memstream(&p, &p_len);
+
+	if (!f) {
+		perror("open_memstream error");
+		return NULL;
+	}
+
+	if (!chain)
+		goto exit;
+
+	if (perf_event__preprocess_sample(event, machine, &al, sample) < 0) {
+		fprintf(stderr, "problem processing %d event, skipping it.\n",
+			event->header.type);
+		goto exit;
+	}
+
+	for (i = 0; i < chain->nr; i++) {
+		u64 ip;
+
+		if (callchain_param.order == ORDER_CALLEE)
+			ip = chain->ips[i];
+		else
+			ip = chain->ips[chain->nr - i - 1];
+
+		if (ip >= PERF_CONTEXT_MAX) {
+			switch (ip) {
+			case PERF_CONTEXT_HV:
+				cpumode = PERF_RECORD_MISC_HYPERVISOR;
+				break;
+			case PERF_CONTEXT_KERNEL:
+				cpumode = PERF_RECORD_MISC_KERNEL;
+				break;
+			case PERF_CONTEXT_USER:
+				cpumode = PERF_RECORD_MISC_USER;
+				break;
+			default:
+				pr_debug("invalid callchain context: "
+					 "%"PRId64"\n", (s64) ip);
+
+				/*
+				 * It seems the callchain is corrupted.
+				 * Discard all.
+				 */
+				free(p);
+				p = NULL;
+				goto exit;
+			}
+			continue;
+		}
+
+		tal.filtered = false;
+		thread__find_addr_location(al.thread, machine, cpumode,
+					   MAP__FUNCTION, ip, &tal);
+
+		if (tal.sym)
+			fprintf(f, "..... %016" PRIx64 " %s\n", ip,
+				tal.sym->name);
+		else
+			fprintf(f, "..... %016" PRIx64 "\n", ip);
+	}
+
+exit:
+	fclose(f);
+
+	return p;
+}
+
 typedef int (*tracepoint_handler)(struct perf_evsel *evsel,
-				  struct perf_sample *sample);
+				  struct perf_sample *sample,
+				  const char *backtrace);
 
 static int process_sample_event(struct perf_tool *tool __maybe_unused,
 				union perf_event *event __maybe_unused,
@@ -487,7 +579,7 @@ static int process_sample_event(struct perf_tool *tool __maybe_unused,
 
 	if (evsel->handler != NULL) {
 		tracepoint_handler f = evsel->handler;
-		return f(evsel, sample);
+		return f(evsel, sample, cat_backtrace(event, sample, machine));
 	}
 
 	return 0;
@@ -495,7 +587,8 @@ static int process_sample_event(struct perf_tool *tool __maybe_unused,
 
 static int
 process_sample_cpu_idle(struct perf_evsel *evsel __maybe_unused,
-			struct perf_sample *sample)
+			struct perf_sample *sample,
+			const char *backtrace __maybe_unused)
 {
 	struct power_processor_entry *ppe = sample->raw_data;
 
@@ -508,7 +601,8 @@ process_sample_cpu_idle(struct perf_evsel *evsel __maybe_unused,
 
 static int
 process_sample_cpu_frequency(struct perf_evsel *evsel __maybe_unused,
-			     struct perf_sample *sample)
+			     struct perf_sample *sample,
+			     const char *backtrace __maybe_unused)
 {
 	struct power_processor_entry *ppe = sample->raw_data;
 
@@ -518,28 +612,31 @@ process_sample_cpu_frequency(struct perf_evsel *evsel __maybe_unused,
 
 static int
 process_sample_sched_wakeup(struct perf_evsel *evsel __maybe_unused,
-			    struct perf_sample *sample)
+			    struct perf_sample *sample,
+			    const char *backtrace)
 {
 	struct trace_entry *te = sample->raw_data;
 
-	sched_wakeup(sample->cpu, sample->time, sample->pid, te);
+	sched_wakeup(sample->cpu, sample->time, sample->pid, te, backtrace);
 	return 0;
 }
 
 static int
 process_sample_sched_switch(struct perf_evsel *evsel __maybe_unused,
-			    struct perf_sample *sample)
+			    struct perf_sample *sample,
+			    const char *backtrace)
 {
 	struct trace_entry *te = sample->raw_data;
 
-	sched_switch(sample->cpu, sample->time, te);
+	sched_switch(sample->cpu, sample->time, te, backtrace);
 	return 0;
 }
 
 #ifdef SUPPORT_OLD_POWER_EVENTS
 static int
 process_sample_power_start(struct perf_evsel *evsel __maybe_unused,
-			   struct perf_sample *sample)
+			   struct perf_sample *sample,
+			   const char *backtrace __maybe_unused)
 {
 	struct power_entry_old *peo = sample->raw_data;
 
@@ -549,7 +646,8 @@ process_sample_power_start(struct perf_evsel *evsel __maybe_unused,
 
 static int
 process_sample_power_end(struct perf_evsel *evsel __maybe_unused,
-			 struct perf_sample *sample)
+			 struct perf_sample *sample,
+			 const char *backtrace __maybe_unused)
 {
 	c_state_end(sample->cpu, sample->time);
 	return 0;
@@ -557,7 +655,8 @@ process_sample_power_end(struct perf_evsel *evsel __maybe_unused,
 
 static int
 process_sample_power_frequency(struct perf_evsel *evsel __maybe_unused,
-			       struct perf_sample *sample)
+			       struct perf_sample *sample,
+			       const char *backtrace __maybe_unused)
 {
 	struct power_entry_old *peo = sample->raw_data;
 
@@ -741,11 +840,12 @@ static void draw_wakeups(void)
 		}
 
 		if (we->waker == -1)
-			svg_interrupt(we->time, to);
+			svg_interrupt(we->time, to, we->backtrace);
 		else if (from && to && abs(from - to) == 1)
-			svg_wakeline(we->time, from, to);
+			svg_wakeline(we->time, from, to, we->backtrace);
 		else
-			svg_partial_wakeline(we->time, from, task_from, to, task_to);
+			svg_partial_wakeline(we->time, from, task_from, to,
+					     task_to, we->backtrace);
 		we = we->next;
 
 		free(task_from);
@@ -798,11 +898,20 @@ static void draw_process_bars(void)
 			sample = c->samples;
 			while (sample) {
 				if (sample->type == TYPE_RUNNING)
-					svg_running(Y, sample->cpu, sample->start_time, sample->end_time);
+					svg_running(Y, sample->cpu,
+						    sample->start_time,
+						    sample->end_time,
+						    sample->backtrace);
 				if (sample->type == TYPE_BLOCKED)
-					svg_blocked(Y, sample->cpu, sample->start_time, sample->end_time);
+					svg_blocked(Y, sample->cpu,
+						    sample->start_time,
+						    sample->end_time,
+						    sample->backtrace);
 				if (sample->type == TYPE_WAITING)
-					svg_waiting(Y, sample->cpu, sample->start_time, sample->end_time);
+					svg_waiting(Y, sample->cpu,
+						    sample->start_time,
+						    sample->end_time,
+						    sample->backtrace);
 				sample = sample->next;
 			}
 
@@ -1050,6 +1159,11 @@ static int __cmd_record(int argc, const char **argv)
 	};
 	unsigned int common_args_nr = ARRAY_SIZE(common_args);
 
+	const char * const backtrace_args[] = {
+		"-g",
+	};
+	unsigned int backtrace_args_no = ARRAY_SIZE(backtrace_args);
+
 	const char * const power_args[] = {
 		"-e", "power:cpu_frequency",
 		"-e", "power:cpu_idle",
@@ -1089,8 +1203,11 @@ static int __cmd_record(int argc, const char **argv)
 		old_power_args_nr = 0;
 	}
 
+	if (!with_backtrace)
+		backtrace_args_no = 0;
+
 	record_elems = common_args_nr + tasks_args_nr +
-		       power_args_nr + old_power_args_nr;
+		       power_args_nr + old_power_args_nr + backtrace_args_no;
 
 	rec_argc = record_elems + argc;
 	rec_argv = calloc(rec_argc + 1, sizeof(char *));
@@ -1102,6 +1219,9 @@ static int __cmd_record(int argc, const char **argv)
 	for (i = 0; i < common_args_nr; i++)
 		*p++ = strdup(common_args[i]);
 
+	for (i = 0; i < backtrace_args_no; i++)
+		*p++ = strdup(backtrace_args[i]);
+
 	for (i = 0; i < tasks_args_nr; i++)
 		*p++ = strdup(tasks_args[i]);
 
@@ -1155,6 +1275,7 @@ int cmd_timechart(int argc, const char **argv,
 	OPT_BOOLEAN('P', "power-only", &power_only, "output power data only"),
 	OPT_BOOLEAN('T', "tasks-only", &tasks_only,
 		    "output processes data only"),
+	OPT_BOOLEAN('g', "callchain", &with_backtrace, "record callchain"),
 	OPT_END()
 };
 const char * const record_usage[] = {
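
Note: the new cat_backtrace() helper formats the call chain through an open_memstream() buffer, so every fprintf() lands in a heap-allocated string that fclose() finalizes; that string is what gets stored in sample->backtrace and we->backtrace and handed to the svg_*() calls once "perf timechart record -g" has collected callchains. The following is a minimal standalone sketch of that open_memstream() idiom only, not code from the patch; the addresses and the symbol name are made up.

/*
 * Sketch of the open_memstream() pattern used by cat_backtrace():
 * write formatted lines into an in-memory FILE stream, then take
 * ownership of the resulting malloc'ed buffer after fclose().
 */
#define _GNU_SOURCE
#include <inttypes.h>
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	char *buf = NULL;
	size_t len = 0;
	FILE *f = open_memstream(&buf, &len);	/* stream backed by buf/len */

	if (!f) {
		perror("open_memstream");
		return 1;
	}

	/* Same "..... <addr> [<symbol>]" line format the patch emits. */
	fprintf(f, "..... %016" PRIx64 " %s\n",
		(uint64_t)0xffffffff810a1b20ULL, "schedule");
	fprintf(f, "..... %016" PRIx64 "\n", (uint64_t)0x00007f1234567890ULL);

	fclose(f);		/* flushes the stream and finalizes buf/len */
	printf("%zu bytes:\n%s", len, buf);
	free(buf);		/* the caller owns the returned buffer */
	return 0;
}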