author	Stanislav Fomichev <stfomichev@yandex-team.ru>	2013-11-27 05:45:00 -0500
committer	Arnaldo Carvalho de Melo <acme@redhat.com>	2013-11-27 13:10:11 -0500
commit	3ed0d21e1103787e21ca38bed2ff50c9f087bedb (patch)
tree	cbff6509269034b33c56cc1fcab90d73ddf0c7b8 /tools/perf/builtin-timechart.c
parent	449867e346bfd52c5df6bba5b706a795c35e78d4 (diff)
perf timechart: dynamically determine event fields offset
Since commit b000c8065a92 ("tracing: Remove the extra 4 bytes of padding in
events") removed those padding bytes, perf timechart's hardcoded copy of the
kernel's trace_entry structure has been out of sync with the kernel.
Convert perf timechart to use dynamic field offsets (via
perf_evsel__intval) instead of relying on a hardcoded copy of the
kernel's field layout.
Signed-off-by: Stanislav Fomichev <stfomichev@yandex-team.ru>
Acked-by: Namhyung Kim <namhyung@kernel.org>
Cc: Chia-I Wu <olvaffe@gmail.com>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Namhyung Kim <namhyung@kernel.org>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Steven Rostedt <rostedt@goodmis.org>
Link: http://lkml.kernel.org/r/20131127104459.GB3309@stfomichev-desktop
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
Diffstat (limited to 'tools/perf/builtin-timechart.c')
-rw-r--r--	tools/perf/builtin-timechart.c	119
1 file changed, 42 insertions(+), 77 deletions(-)
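The heart of the change is visible in each sample handler: rather than casting sample->raw_data to a struct whose layout was copied by hand from the kernel, every field is now looked up by name with perf_evsel__intval(), which resolves the offset from the event's format description at run time. A minimal before/after sketch of the pattern (illustrative only; field and variable names taken from the sched_switch handler in the diff below):

/* Before: layout hardcoded in the tool; breaks when the kernel changes it. */
struct sched_switch *sw = sample->raw_data;
int prev_pid = sw->prev_pid;

/* After: offset resolved at run time from the tracepoint's format description. */
int prev_pid = perf_evsel__intval(evsel, sample, "prev_pid");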
diff --git a/tools/perf/builtin-timechart.c b/tools/perf/builtin-timechart.c
index 491662bdfe0b..436cb5f9d751 100644
--- a/tools/perf/builtin-timechart.c
+++ b/tools/perf/builtin-timechart.c
@@ -306,50 +306,10 @@ static int process_exit_event(struct perf_tool *tool __maybe_unused,
 	return 0;
 }
 
-struct trace_entry {
-	unsigned short type;
-	unsigned char flags;
-	unsigned char preempt_count;
-	int pid;
-	int lock_depth;
-};
-
 #ifdef SUPPORT_OLD_POWER_EVENTS
 static int use_old_power_events;
-struct power_entry_old {
-	struct trace_entry te;
-	u64 type;
-	u64 value;
-	u64 cpu_id;
-};
 #endif
 
-struct power_processor_entry {
-	struct trace_entry te;
-	u32 state;
-	u32 cpu_id;
-};
-
-#define TASK_COMM_LEN 16
-struct wakeup_entry {
-	struct trace_entry te;
-	char comm[TASK_COMM_LEN];
-	int pid;
-	int prio;
-	int success;
-};
-
-struct sched_switch {
-	struct trace_entry te;
-	char prev_comm[TASK_COMM_LEN];
-	int prev_pid;
-	int prev_prio;
-	long prev_state; /* Arjan weeps. */
-	char next_comm[TASK_COMM_LEN];
-	int next_pid;
-	int next_prio;
-};
-
 static void c_state_start(int cpu, u64 timestamp, int state)
 {
 	cpus_cstate_start_times[cpu] = timestamp;
@@ -409,25 +369,23 @@ static void p_state_change(int cpu, u64 timestamp, u64 new_freq)
 		turbo_frequency = max_freq;
 }
 
-static void
-sched_wakeup(int cpu, u64 timestamp, int pid, struct trace_entry *te,
-	     const char *backtrace)
+static void sched_wakeup(int cpu, u64 timestamp, int waker, int wakee,
+			 u8 flags, const char *backtrace)
 {
 	struct per_pid *p;
-	struct wakeup_entry *wake = (void *)te;
 	struct wake_event *we = zalloc(sizeof(*we));
 
 	if (!we)
 		return;
 
 	we->time = timestamp;
-	we->waker = pid;
+	we->waker = waker;
 	we->backtrace = backtrace;
 
-	if ((te->flags & TRACE_FLAG_HARDIRQ) || (te->flags & TRACE_FLAG_SOFTIRQ))
+	if ((flags & TRACE_FLAG_HARDIRQ) || (flags & TRACE_FLAG_SOFTIRQ))
 		we->waker = -1;
 
-	we->wakee = wake->pid;
+	we->wakee = wakee;
 	we->next = wake_events;
 	wake_events = we;
 	p = find_create_pid(we->wakee);
@@ -444,24 +402,22 @@ sched_wakeup(int cpu, u64 timestamp, int pid, struct trace_entry *te,
 	}
 }
 
-static void sched_switch(int cpu, u64 timestamp, struct trace_entry *te,
-			 const char *backtrace)
+static void sched_switch(int cpu, u64 timestamp, int prev_pid, int next_pid,
+			 u64 prev_state, const char *backtrace)
 {
 	struct per_pid *p = NULL, *prev_p;
-	struct sched_switch *sw = (void *)te;
-
 
-	prev_p = find_create_pid(sw->prev_pid);
+	prev_p = find_create_pid(prev_pid);
 
-	p = find_create_pid(sw->next_pid);
+	p = find_create_pid(next_pid);
 
 	if (prev_p->current && prev_p->current->state != TYPE_NONE)
-		pid_put_sample(sw->prev_pid, TYPE_RUNNING, cpu,
+		pid_put_sample(prev_pid, TYPE_RUNNING, cpu,
 			       prev_p->current->state_since, timestamp,
 			       backtrace);
 	if (p && p->current) {
 		if (p->current->state != TYPE_NONE)
-			pid_put_sample(sw->next_pid, p->current->state, cpu,
+			pid_put_sample(next_pid, p->current->state, cpu,
 				       p->current->state_since, timestamp,
 				       backtrace);
 
@@ -472,9 +428,9 @@ static void sched_switch(int cpu, u64 timestamp, struct trace_entry *te,
 	if (prev_p->current) {
 		prev_p->current->state = TYPE_NONE;
 		prev_p->current->state_since = timestamp;
-		if (sw->prev_state & 2)
+		if (prev_state & 2)
 			prev_p->current->state = TYPE_BLOCKED;
-		if (sw->prev_state == 0)
+		if (prev_state == 0)
 			prev_p->current->state = TYPE_WAITING;
 	}
 }
@@ -586,61 +542,69 @@ static int process_sample_event(struct perf_tool *tool __maybe_unused,
 }
 
 static int
-process_sample_cpu_idle(struct perf_evsel *evsel __maybe_unused,
+process_sample_cpu_idle(struct perf_evsel *evsel,
 			struct perf_sample *sample,
 			const char *backtrace __maybe_unused)
 {
-	struct power_processor_entry *ppe = sample->raw_data;
+	u32 state = perf_evsel__intval(evsel, sample, "state");
+	u32 cpu_id = perf_evsel__intval(evsel, sample, "cpu_id");
 
-	if (ppe->state == (u32) PWR_EVENT_EXIT)
-		c_state_end(ppe->cpu_id, sample->time);
+	if (state == (u32)PWR_EVENT_EXIT)
+		c_state_end(cpu_id, sample->time);
 	else
-		c_state_start(ppe->cpu_id, sample->time, ppe->state);
+		c_state_start(cpu_id, sample->time, state);
 	return 0;
 }
 
 static int
-process_sample_cpu_frequency(struct perf_evsel *evsel __maybe_unused,
+process_sample_cpu_frequency(struct perf_evsel *evsel,
 			     struct perf_sample *sample,
 			     const char *backtrace __maybe_unused)
 {
-	struct power_processor_entry *ppe = sample->raw_data;
+	u32 state = perf_evsel__intval(evsel, sample, "state");
+	u32 cpu_id = perf_evsel__intval(evsel, sample, "cpu_id");
 
-	p_state_change(ppe->cpu_id, sample->time, ppe->state);
+	p_state_change(cpu_id, sample->time, state);
 	return 0;
 }
 
 static int
-process_sample_sched_wakeup(struct perf_evsel *evsel __maybe_unused,
+process_sample_sched_wakeup(struct perf_evsel *evsel,
 			    struct perf_sample *sample,
 			    const char *backtrace)
 {
-	struct trace_entry *te = sample->raw_data;
+	u8 flags = perf_evsel__intval(evsel, sample, "common_flags");
+	int waker = perf_evsel__intval(evsel, sample, "common_pid");
+	int wakee = perf_evsel__intval(evsel, sample, "pid");
 
-	sched_wakeup(sample->cpu, sample->time, sample->pid, te, backtrace);
+	sched_wakeup(sample->cpu, sample->time, waker, wakee, flags, backtrace);
 	return 0;
 }
 
 static int
-process_sample_sched_switch(struct perf_evsel *evsel __maybe_unused,
+process_sample_sched_switch(struct perf_evsel *evsel,
 			    struct perf_sample *sample,
 			    const char *backtrace)
 {
-	struct trace_entry *te = sample->raw_data;
+	int prev_pid = perf_evsel__intval(evsel, sample, "prev_pid");
+	int next_pid = perf_evsel__intval(evsel, sample, "next_pid");
+	u64 prev_state = perf_evsel__intval(evsel, sample, "prev_state");
 
-	sched_switch(sample->cpu, sample->time, te, backtrace);
+	sched_switch(sample->cpu, sample->time, prev_pid, next_pid, prev_state,
+		     backtrace);
 	return 0;
 }
 
 #ifdef SUPPORT_OLD_POWER_EVENTS
 static int
-process_sample_power_start(struct perf_evsel *evsel __maybe_unused,
+process_sample_power_start(struct perf_evsel *evsel,
 			   struct perf_sample *sample,
 			   const char *backtrace __maybe_unused)
 {
-	struct power_entry_old *peo = sample->raw_data;
+	u64 cpu_id = perf_evsel__intval(evsel, sample, "cpu_id");
+	u64 value = perf_evsel__intval(evsel, sample, "value");
 
-	c_state_start(peo->cpu_id, sample->time, peo->value);
+	c_state_start(cpu_id, sample->time, value);
 	return 0;
 }
 
@@ -654,13 +618,14 @@ process_sample_power_end(struct perf_evsel *evsel __maybe_unused,
 }
 
 static int
-process_sample_power_frequency(struct perf_evsel *evsel __maybe_unused,
+process_sample_power_frequency(struct perf_evsel *evsel,
 			       struct perf_sample *sample,
 			       const char *backtrace __maybe_unused)
 {
-	struct power_entry_old *peo = sample->raw_data;
+	u64 cpu_id = perf_evsel__intval(evsel, sample, "cpu_id");
+	u64 value = perf_evsel__intval(evsel, sample, "value");
 
-	p_state_change(peo->cpu_id, sample->time, peo->value);
+	p_state_change(cpu_id, sample->time, value);
 	return 0;
 }
 #endif /* SUPPORT_OLD_POWER_EVENTS */