aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorJiri Olsa <jolsa@redhat.com>2013-07-11 11:28:30 -0400
committerArnaldo Carvalho de Melo <acme@redhat.com>2013-07-15 15:14:47 -0400
commit5936678e7da5f8d2944a2ad45d66c88b4a7ccb67 (patch)
treef4fb96dbe64589cb8b5d151c73ef1d5f3519d750
parentc85cffa5894fad8ad7c8051ccf7dd73a3a3f92b6 (diff)
perf timechart: Remove event types framework only user
The only user of the event types data is the 'perf timechart' command, which uses this info to identify the proper tracepoints based on their names. Switch this code to use tracepoint callback handlers, the same as other commands like builtin-{kmem,lock,sched}.c, using the perf_session__set_tracepoints_handlers function. This way we get rid of the only event types user and can remove them completely in the next patches. Signed-off-by: Jiri Olsa <jolsa@redhat.com> Acked-by: Namhyung Kim <namhyung@kernel.org> Cc: Corey Ashford <cjashfor@linux.vnet.ibm.com> Cc: David Ahern <dsahern@gmail.com> Cc: Frederic Weisbecker <fweisbec@gmail.com> Cc: Namhyung Kim <namhyung@kernel.org> Cc: Paul Mackerras <paulus@samba.org> Cc: Peter Zijlstra <a.p.zijlstra@chello.nl> Cc: Thomas Renninger <trenn@suse.de> Link: http://lkml.kernel.org/r/1373556513-3000-3-git-send-email-jolsa@redhat.com Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
-rw-r--r--tools/perf/builtin-timechart.c155
1 files changed, 97 insertions, 58 deletions
diff --git a/tools/perf/builtin-timechart.c b/tools/perf/builtin-timechart.c
index a5120095978e..c2e02319347a 100644
--- a/tools/perf/builtin-timechart.c
+++ b/tools/perf/builtin-timechart.c
@@ -21,6 +21,7 @@
21#include "util/color.h" 21#include "util/color.h"
22#include <linux/list.h> 22#include <linux/list.h>
23#include "util/cache.h" 23#include "util/cache.h"
24#include "util/evlist.h"
24#include "util/evsel.h" 25#include "util/evsel.h"
25#include <linux/rbtree.h> 26#include <linux/rbtree.h>
26#include "util/symbol.h" 27#include "util/symbol.h"
@@ -462,6 +463,8 @@ static void sched_switch(int cpu, u64 timestamp, struct trace_entry *te)
462 } 463 }
463} 464}
464 465
466typedef int (*tracepoint_handler)(struct perf_evsel *evsel,
467 struct perf_sample *sample);
465 468
466static int process_sample_event(struct perf_tool *tool __maybe_unused, 469static int process_sample_event(struct perf_tool *tool __maybe_unused,
467 union perf_event *event __maybe_unused, 470 union perf_event *event __maybe_unused,
@@ -469,8 +472,6 @@ static int process_sample_event(struct perf_tool *tool __maybe_unused,
469 struct perf_evsel *evsel, 472 struct perf_evsel *evsel,
470 struct machine *machine __maybe_unused) 473 struct machine *machine __maybe_unused)
471{ 474{
472 struct trace_entry *te;
473
474 if (evsel->attr.sample_type & PERF_SAMPLE_TIME) { 475 if (evsel->attr.sample_type & PERF_SAMPLE_TIME) {
475 if (!first_time || first_time > sample->time) 476 if (!first_time || first_time > sample->time)
476 first_time = sample->time; 477 first_time = sample->time;
@@ -478,69 +479,90 @@ static int process_sample_event(struct perf_tool *tool __maybe_unused,
478 last_time = sample->time; 479 last_time = sample->time;
479 } 480 }
480 481
481 te = (void *)sample->raw_data; 482 if (sample->cpu > numcpus)
482 if ((evsel->attr.sample_type & PERF_SAMPLE_RAW) && sample->raw_size > 0) { 483 numcpus = sample->cpu;
483 char *event_str; 484
484#ifdef SUPPORT_OLD_POWER_EVENTS 485 if (evsel->handler.func != NULL) {
485 struct power_entry_old *peo; 486 tracepoint_handler f = evsel->handler.func;
486 peo = (void *)te; 487 return f(evsel, sample);
487#endif 488 }
488 /* 489
489 * FIXME: use evsel, its already mapped from id to perf_evsel, 490 return 0;
490 * remove perf_header__find_event infrastructure bits. 491}
491 * Mapping all these "power:cpu_idle" strings to the tracepoint 492
492 * ID and then just comparing against evsel->attr.config. 493static int
493 * 494process_sample_cpu_idle(struct perf_evsel *evsel __maybe_unused,
494 * e.g.: 495 struct perf_sample *sample)
495 * 496{
496 * if (evsel->attr.config == power_cpu_idle_id) 497 struct power_processor_entry *ppe = sample->raw_data;
497 */ 498
498 event_str = perf_header__find_event(te->type); 499 if (ppe->state == (u32) PWR_EVENT_EXIT)
499 500 c_state_end(ppe->cpu_id, sample->time);
500 if (!event_str) 501 else
501 return 0; 502 c_state_start(ppe->cpu_id, sample->time, ppe->state);
502 503 return 0;
503 if (sample->cpu > numcpus) 504}
504 numcpus = sample->cpu; 505
505 506static int
506 if (strcmp(event_str, "power:cpu_idle") == 0) { 507process_sample_cpu_frequency(struct perf_evsel *evsel __maybe_unused,
507 struct power_processor_entry *ppe = (void *)te; 508 struct perf_sample *sample)
508 if (ppe->state == (u32)PWR_EVENT_EXIT) 509{
509 c_state_end(ppe->cpu_id, sample->time); 510 struct power_processor_entry *ppe = sample->raw_data;
510 else 511
511 c_state_start(ppe->cpu_id, sample->time, 512 p_state_change(ppe->cpu_id, sample->time, ppe->state);
512 ppe->state); 513 return 0;
513 } 514}
514 else if (strcmp(event_str, "power:cpu_frequency") == 0) {
515 struct power_processor_entry *ppe = (void *)te;
516 p_state_change(ppe->cpu_id, sample->time, ppe->state);
517 }
518 515
519 else if (strcmp(event_str, "sched:sched_wakeup") == 0) 516static int
520 sched_wakeup(sample->cpu, sample->time, sample->pid, te); 517process_sample_sched_wakeup(struct perf_evsel *evsel __maybe_unused,
518 struct perf_sample *sample)
519{
520 struct trace_entry *te = sample->raw_data;
521 521
522 else if (strcmp(event_str, "sched:sched_switch") == 0) 522 sched_wakeup(sample->cpu, sample->time, sample->pid, te);
523 sched_switch(sample->cpu, sample->time, te); 523 return 0;
524}
525
526static int
527process_sample_sched_switch(struct perf_evsel *evsel __maybe_unused,
528 struct perf_sample *sample)
529{
530 struct trace_entry *te = sample->raw_data;
531
532 sched_switch(sample->cpu, sample->time, te);
533 return 0;
534}
524 535
525#ifdef SUPPORT_OLD_POWER_EVENTS 536#ifdef SUPPORT_OLD_POWER_EVENTS
526 if (use_old_power_events) { 537static int
527 if (strcmp(event_str, "power:power_start") == 0) 538process_sample_power_start(struct perf_evsel *evsel __maybe_unused,
528 c_state_start(peo->cpu_id, sample->time, 539 struct perf_sample *sample)
529 peo->value); 540{
530 541 struct power_entry_old *peo = sample->raw_data;
531 else if (strcmp(event_str, "power:power_end") == 0) 542
532 c_state_end(sample->cpu, sample->time); 543 c_state_start(peo->cpu_id, sample->time, peo->value);
533
534 else if (strcmp(event_str,
535 "power:power_frequency") == 0)
536 p_state_change(peo->cpu_id, sample->time,
537 peo->value);
538 }
539#endif
540 }
541 return 0; 544 return 0;
542} 545}
543 546
547static int
548process_sample_power_end(struct perf_evsel *evsel __maybe_unused,
549 struct perf_sample *sample)
550{
551 c_state_end(sample->cpu, sample->time);
552 return 0;
553}
554
555static int
556process_sample_power_frequency(struct perf_evsel *evsel __maybe_unused,
557 struct perf_sample *sample)
558{
559 struct power_entry_old *peo = sample->raw_data;
560
561 p_state_change(peo->cpu_id, sample->time, peo->value);
562 return 0;
563}
564#endif /* SUPPORT_OLD_POWER_EVENTS */
565
544/* 566/*
545 * After the last sample we need to wrap up the current C/P state 567 * After the last sample we need to wrap up the current C/P state
546 * and close out each CPU for these. 568 * and close out each CPU for these.
@@ -957,6 +979,17 @@ static int __cmd_timechart(const char *output_name)
957 .sample = process_sample_event, 979 .sample = process_sample_event,
958 .ordered_samples = true, 980 .ordered_samples = true,
959 }; 981 };
982 const struct perf_evsel_str_handler power_tracepoints[] = {
983 { "power:cpu_idle", process_sample_cpu_idle },
984 { "power:cpu_frequency", process_sample_cpu_frequency },
985 { "sched:sched_wakeup", process_sample_sched_wakeup },
986 { "sched:sched_switch", process_sample_sched_switch },
987#ifdef SUPPORT_OLD_POWER_EVENTS
988 { "power:power_start", process_sample_power_start },
989 { "power:power_end", process_sample_power_end },
990 { "power:power_frequency", process_sample_power_frequency },
991#endif
992 };
960 struct perf_session *session = perf_session__new(input_name, O_RDONLY, 993 struct perf_session *session = perf_session__new(input_name, O_RDONLY,
961 0, false, &perf_timechart); 994 0, false, &perf_timechart);
962 int ret = -EINVAL; 995 int ret = -EINVAL;
@@ -967,6 +1000,12 @@ static int __cmd_timechart(const char *output_name)
967 if (!perf_session__has_traces(session, "timechart record")) 1000 if (!perf_session__has_traces(session, "timechart record"))
968 goto out_delete; 1001 goto out_delete;
969 1002
1003 if (perf_session__set_tracepoints_handlers(session,
1004 power_tracepoints)) {
1005 pr_err("Initializing session tracepoint handlers failed\n");
1006 goto out_delete;
1007 }
1008
970 ret = perf_session__process_events(session, &perf_timechart); 1009 ret = perf_session__process_events(session, &perf_timechart);
971 if (ret) 1010 if (ret)
972 goto out_delete; 1011 goto out_delete;