Diffstat (limited to 'tools/perf/builtin-timechart.c')
-rw-r--r--  tools/perf/builtin-timechart.c  176
1 files changed, 99 insertions, 77 deletions
diff --git a/tools/perf/builtin-timechart.c b/tools/perf/builtin-timechart.c
index 4536a92b18f3..c2e02319347a 100644
--- a/tools/perf/builtin-timechart.c
+++ b/tools/perf/builtin-timechart.c
@@ -12,6 +12,8 @@
  * of the License.
  */
 
+#include <traceevent/event-parse.h>
+
 #include "builtin.h"
 
 #include "util/util.h"
@@ -19,6 +21,7 @@
 #include "util/color.h"
 #include <linux/list.h>
 #include "util/cache.h"
+#include "util/evlist.h"
 #include "util/evsel.h"
 #include <linux/rbtree.h>
 #include "util/symbol.h"
@@ -328,25 +331,6 @@ struct wakeup_entry {
 	int success;
 };
 
-/*
- * trace_flag_type is an enumeration that holds different
- * states when a trace occurs. These are:
- *  IRQS_OFF - interrupts were disabled
- *  IRQS_NOSUPPORT - arch does not support irqs_disabled_flags
- *  NEED_RESCED - reschedule is requested
- *  HARDIRQ - inside an interrupt handler
- *  SOFTIRQ - inside a softirq handler
- */
-enum trace_flag_type {
-	TRACE_FLAG_IRQS_OFF = 0x01,
-	TRACE_FLAG_IRQS_NOSUPPORT = 0x02,
-	TRACE_FLAG_NEED_RESCHED = 0x04,
-	TRACE_FLAG_HARDIRQ = 0x08,
-	TRACE_FLAG_SOFTIRQ = 0x10,
-};
-
-
-
 struct sched_switch {
 	struct trace_entry te;
 	char prev_comm[TASK_COMM_LEN];
@@ -479,6 +463,8 @@ static void sched_switch(int cpu, u64 timestamp, struct trace_entry *te)
 	}
 }
 
+typedef int (*tracepoint_handler)(struct perf_evsel *evsel,
+				  struct perf_sample *sample);
 
 static int process_sample_event(struct perf_tool *tool __maybe_unused,
 				union perf_event *event __maybe_unused,
@@ -486,8 +472,6 @@ static int process_sample_event(struct perf_tool *tool __maybe_unused,
 				struct perf_evsel *evsel,
 				struct machine *machine __maybe_unused)
 {
-	struct trace_entry *te;
-
 	if (evsel->attr.sample_type & PERF_SAMPLE_TIME) {
 		if (!first_time || first_time > sample->time)
 			first_time = sample->time;
@@ -495,69 +479,90 @@ static int process_sample_event(struct perf_tool *tool __maybe_unused,
 			last_time = sample->time;
 	}
 
-	te = (void *)sample->raw_data;
-	if ((evsel->attr.sample_type & PERF_SAMPLE_RAW) && sample->raw_size > 0) {
-		char *event_str;
-#ifdef SUPPORT_OLD_POWER_EVENTS
-		struct power_entry_old *peo;
-		peo = (void *)te;
-#endif
-		/*
-		 * FIXME: use evsel, its already mapped from id to perf_evsel,
-		 * remove perf_header__find_event infrastructure bits.
-		 * Mapping all these "power:cpu_idle" strings to the tracepoint
-		 * ID and then just comparing against evsel->attr.config.
-		 *
-		 * e.g.:
-		 *
-		 * if (evsel->attr.config == power_cpu_idle_id)
-		 */
-		event_str = perf_header__find_event(te->type);
-
-		if (!event_str)
-			return 0;
-
-		if (sample->cpu > numcpus)
-			numcpus = sample->cpu;
-
-		if (strcmp(event_str, "power:cpu_idle") == 0) {
-			struct power_processor_entry *ppe = (void *)te;
-			if (ppe->state == (u32)PWR_EVENT_EXIT)
-				c_state_end(ppe->cpu_id, sample->time);
-			else
-				c_state_start(ppe->cpu_id, sample->time,
-					      ppe->state);
-		}
-		else if (strcmp(event_str, "power:cpu_frequency") == 0) {
-			struct power_processor_entry *ppe = (void *)te;
-			p_state_change(ppe->cpu_id, sample->time, ppe->state);
-		}
-
-		else if (strcmp(event_str, "sched:sched_wakeup") == 0)
-			sched_wakeup(sample->cpu, sample->time, sample->pid, te);
-
-		else if (strcmp(event_str, "sched:sched_switch") == 0)
-			sched_switch(sample->cpu, sample->time, te);
-
-#ifdef SUPPORT_OLD_POWER_EVENTS
-		if (use_old_power_events) {
-			if (strcmp(event_str, "power:power_start") == 0)
-				c_state_start(peo->cpu_id, sample->time,
-					      peo->value);
-
-			else if (strcmp(event_str, "power:power_end") == 0)
-				c_state_end(sample->cpu, sample->time);
-
-			else if (strcmp(event_str,
-					"power:power_frequency") == 0)
-				p_state_change(peo->cpu_id, sample->time,
-					       peo->value);
-		}
-#endif
-	}
+	if (sample->cpu > numcpus)
+		numcpus = sample->cpu;
+
+	if (evsel->handler.func != NULL) {
+		tracepoint_handler f = evsel->handler.func;
+		return f(evsel, sample);
+	}
+
+	return 0;
+}
+
+static int
+process_sample_cpu_idle(struct perf_evsel *evsel __maybe_unused,
+			struct perf_sample *sample)
+{
+	struct power_processor_entry *ppe = sample->raw_data;
+
+	if (ppe->state == (u32) PWR_EVENT_EXIT)
+		c_state_end(ppe->cpu_id, sample->time);
+	else
+		c_state_start(ppe->cpu_id, sample->time, ppe->state);
+	return 0;
+}
+
+static int
+process_sample_cpu_frequency(struct perf_evsel *evsel __maybe_unused,
+			     struct perf_sample *sample)
+{
+	struct power_processor_entry *ppe = sample->raw_data;
+
+	p_state_change(ppe->cpu_id, sample->time, ppe->state);
+	return 0;
+}
+
+static int
+process_sample_sched_wakeup(struct perf_evsel *evsel __maybe_unused,
+			    struct perf_sample *sample)
+{
+	struct trace_entry *te = sample->raw_data;
+
+	sched_wakeup(sample->cpu, sample->time, sample->pid, te);
+	return 0;
+}
+
+static int
+process_sample_sched_switch(struct perf_evsel *evsel __maybe_unused,
+			    struct perf_sample *sample)
+{
+	struct trace_entry *te = sample->raw_data;
+
+	sched_switch(sample->cpu, sample->time, te);
+	return 0;
+}
+
+#ifdef SUPPORT_OLD_POWER_EVENTS
+static int
+process_sample_power_start(struct perf_evsel *evsel __maybe_unused,
+			   struct perf_sample *sample)
+{
+	struct power_entry_old *peo = sample->raw_data;
+
+	c_state_start(peo->cpu_id, sample->time, peo->value);
 	return 0;
 }
 
+static int
+process_sample_power_end(struct perf_evsel *evsel __maybe_unused,
+			 struct perf_sample *sample)
+{
+	c_state_end(sample->cpu, sample->time);
+	return 0;
+}
+
+static int
+process_sample_power_frequency(struct perf_evsel *evsel __maybe_unused,
+			       struct perf_sample *sample)
+{
+	struct power_entry_old *peo = sample->raw_data;
+
+	p_state_change(peo->cpu_id, sample->time, peo->value);
+	return 0;
+}
+#endif /* SUPPORT_OLD_POWER_EVENTS */
+
 /*
  * After the last sample we need to wrap up the current C/P state
  * and close out each CPU for these.
@@ -974,6 +979,17 @@ static int __cmd_timechart(const char *output_name)
 		.sample		 = process_sample_event,
 		.ordered_samples = true,
 	};
+	const struct perf_evsel_str_handler power_tracepoints[] = {
+		{ "power:cpu_idle",		process_sample_cpu_idle },
+		{ "power:cpu_frequency",	process_sample_cpu_frequency },
+		{ "sched:sched_wakeup",		process_sample_sched_wakeup },
+		{ "sched:sched_switch",		process_sample_sched_switch },
+#ifdef SUPPORT_OLD_POWER_EVENTS
+		{ "power:power_start",		process_sample_power_start },
+		{ "power:power_end",		process_sample_power_end },
+		{ "power:power_frequency",	process_sample_power_frequency },
+#endif
+	};
 	struct perf_session *session = perf_session__new(input_name, O_RDONLY,
 							 0, false, &perf_timechart);
 	int ret = -EINVAL;
@@ -984,6 +1000,12 @@ static int __cmd_timechart(const char *output_name)
 	if (!perf_session__has_traces(session, "timechart record"))
 		goto out_delete;
 
+	if (perf_session__set_tracepoints_handlers(session,
+						   power_tracepoints)) {
+		pr_err("Initializing session tracepoint handlers failed\n");
+		goto out_delete;
+	}
+
 	ret = perf_session__process_events(session, &perf_timechart);
 	if (ret)
 		goto out_delete;
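The essence of the patch is replacing the strcmp() chain in process_sample_event() with a table that binds each tracepoint name to its own handler once, before event processing starts. Below is a minimal, self-contained sketch of that name-to-handler dispatch pattern; the names used here (struct sample_t, struct str_handler, lookup()) are illustrative stand-ins and not the actual perf structures or API.

/*
 * Sketch of the dispatch pattern adopted by this patch: a table maps
 * tracepoint names to callbacks once, and the per-sample path makes a
 * single indirect call instead of running a strcmp() chain.
 */
#include <stdio.h>
#include <string.h>

struct sample_t {			/* stand-in for struct perf_sample */
	unsigned int cpu;
	unsigned long long time;
};

typedef int (*tracepoint_handler)(struct sample_t *sample);

static int handle_cpu_idle(struct sample_t *s)
{
	printf("cpu_idle     cpu=%u t=%llu\n", s->cpu, s->time);
	return 0;
}

static int handle_sched_switch(struct sample_t *s)
{
	printf("sched_switch cpu=%u t=%llu\n", s->cpu, s->time);
	return 0;
}

/* analogous to the power_tracepoints[] table added in __cmd_timechart() */
static const struct str_handler {
	const char *name;
	tracepoint_handler handler;
} handlers[] = {
	{ "power:cpu_idle",	handle_cpu_idle },
	{ "sched:sched_switch",	handle_sched_switch },
};

/* done once per event type, like perf_session__set_tracepoints_handlers() */
static tracepoint_handler lookup(const char *name)
{
	for (size_t i = 0; i < sizeof(handlers) / sizeof(handlers[0]); i++)
		if (strcmp(handlers[i].name, name) == 0)
			return handlers[i].handler;
	return NULL;
}

int main(void)
{
	struct sample_t s = { .cpu = 1, .time = 12345 };
	tracepoint_handler f = lookup("sched:sched_switch");

	/* hot path: one indirect call, no string comparisons per sample */
	if (f)
		return f(&s);
	return 0;
}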