diff options
author    Arnaldo Carvalho de Melo <acme@redhat.com>  2012-08-07 09:59:44 -0400
committer Arnaldo Carvalho de Melo <acme@redhat.com>  2012-08-07 22:45:53 -0400
commit    01d955244b99827814570ed4b675271ca7b8af02 (patch)
tree      f9f80791c82270fd14619b309cc2aadf1336dd5e /tools/perf/builtin-lock.c
parent    22ad798c37cb554afae79a72c1d420ecb4d27b86 (diff)
perf lock: Use evsel->tp_format and perf_sample
To reduce the number of parameters passed to the various event handling
functions.
Cc: Andrey Wagin <avagin@gmail.com>
Cc: David Ahern <dsahern@gmail.com>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Jiri Olsa <jolsa@redhat.com>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Namhyung Kim <namhyung@gmail.com>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Stephane Eranian <eranian@google.com>
Link: http://lkml.kernel.org/n/tip-bipk647rzq357yot9ao6ih73@git.kernel.org
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
Diffstat (limited to 'tools/perf/builtin-lock.c')
-rw-r--r--  tools/perf/builtin-lock.c  116
1 file changed, 42 insertions(+), 74 deletions(-)
diff --git a/tools/perf/builtin-lock.c b/tools/perf/builtin-lock.c
index 142b3033e4be..3f8b9550a6ef 100644
--- a/tools/perf/builtin-lock.c
+++ b/tools/perf/builtin-lock.c
@@ -357,28 +357,16 @@ struct trace_release_event { | |||
357 | 357 | ||
358 | struct trace_lock_handler { | 358 | struct trace_lock_handler { |
359 | void (*acquire_event)(struct trace_acquire_event *, | 359 | void (*acquire_event)(struct trace_acquire_event *, |
360 | struct event_format *, | 360 | const struct perf_sample *sample); |
361 | int cpu, | ||
362 | u64 timestamp, | ||
363 | struct thread *thread); | ||
364 | 361 | ||
365 | void (*acquired_event)(struct trace_acquired_event *, | 362 | void (*acquired_event)(struct trace_acquired_event *, |
366 | struct event_format *, | 363 | const struct perf_sample *sample); |
367 | int cpu, | ||
368 | u64 timestamp, | ||
369 | struct thread *thread); | ||
370 | 364 | ||
371 | void (*contended_event)(struct trace_contended_event *, | 365 | void (*contended_event)(struct trace_contended_event *, |
372 | struct event_format *, | 366 | const struct perf_sample *sample); |
373 | int cpu, | ||
374 | u64 timestamp, | ||
375 | struct thread *thread); | ||
376 | 367 | ||
377 | void (*release_event)(struct trace_release_event *, | 368 | void (*release_event)(struct trace_release_event *, |
378 | struct event_format *, | 369 | const struct perf_sample *sample); |
379 | int cpu, | ||
380 | u64 timestamp, | ||
381 | struct thread *thread); | ||
382 | }; | 370 | }; |
383 | 371 | ||
384 | static struct lock_seq_stat *get_seq(struct thread_stat *ts, void *addr) | 372 | static struct lock_seq_stat *get_seq(struct thread_stat *ts, void *addr) |
@@ -417,10 +405,7 @@ enum acquire_flags { | |||
417 | 405 | ||
418 | static void | 406 | static void |
419 | report_lock_acquire_event(struct trace_acquire_event *acquire_event, | 407 | report_lock_acquire_event(struct trace_acquire_event *acquire_event, |
420 | struct event_format *__event __used, | 408 | const struct perf_sample *sample) |
421 | int cpu __used, | ||
422 | u64 timestamp __used, | ||
423 | struct thread *thread __used) | ||
424 | { | 409 | { |
425 | struct lock_stat *ls; | 410 | struct lock_stat *ls; |
426 | struct thread_stat *ts; | 411 | struct thread_stat *ts; |
@@ -430,7 +415,7 @@ report_lock_acquire_event(struct trace_acquire_event *acquire_event, | |||
430 | if (ls->discard) | 415 | if (ls->discard) |
431 | return; | 416 | return; |
432 | 417 | ||
433 | ts = thread_stat_findnew(thread->pid); | 418 | ts = thread_stat_findnew(sample->tid); |
434 | seq = get_seq(ts, acquire_event->addr); | 419 | seq = get_seq(ts, acquire_event->addr); |
435 | 420 | ||
436 | switch (seq->state) { | 421 | switch (seq->state) { |
@@ -474,18 +459,16 @@ broken: | |||
474 | } | 459 | } |
475 | 460 | ||
476 | ls->nr_acquire++; | 461 | ls->nr_acquire++; |
477 | seq->prev_event_time = timestamp; | 462 | seq->prev_event_time = sample->time; |
478 | end: | 463 | end: |
479 | return; | 464 | return; |
480 | } | 465 | } |
481 | 466 | ||
482 | static void | 467 | static void |
483 | report_lock_acquired_event(struct trace_acquired_event *acquired_event, | 468 | report_lock_acquired_event(struct trace_acquired_event *acquired_event, |
484 | struct event_format *__event __used, | 469 | const struct perf_sample *sample) |
485 | int cpu __used, | ||
486 | u64 timestamp __used, | ||
487 | struct thread *thread __used) | ||
488 | { | 470 | { |
471 | u64 timestamp = sample->time; | ||
489 | struct lock_stat *ls; | 472 | struct lock_stat *ls; |
490 | struct thread_stat *ts; | 473 | struct thread_stat *ts; |
491 | struct lock_seq_stat *seq; | 474 | struct lock_seq_stat *seq; |
@@ -495,7 +478,7 @@ report_lock_acquired_event(struct trace_acquired_event *acquired_event, | |||
495 | if (ls->discard) | 478 | if (ls->discard) |
496 | return; | 479 | return; |
497 | 480 | ||
498 | ts = thread_stat_findnew(thread->pid); | 481 | ts = thread_stat_findnew(sample->tid); |
499 | seq = get_seq(ts, acquired_event->addr); | 482 | seq = get_seq(ts, acquired_event->addr); |
500 | 483 | ||
501 | switch (seq->state) { | 484 | switch (seq->state) { |
@@ -537,10 +520,7 @@ end: | |||
537 | 520 | ||
538 | static void | 521 | static void |
539 | report_lock_contended_event(struct trace_contended_event *contended_event, | 522 | report_lock_contended_event(struct trace_contended_event *contended_event, |
540 | struct event_format *__event __used, | 523 | const struct perf_sample *sample) |
541 | int cpu __used, | ||
542 | u64 timestamp __used, | ||
543 | struct thread *thread __used) | ||
544 | { | 524 | { |
545 | struct lock_stat *ls; | 525 | struct lock_stat *ls; |
546 | struct thread_stat *ts; | 526 | struct thread_stat *ts; |
@@ -550,7 +530,7 @@ report_lock_contended_event(struct trace_contended_event *contended_event, | |||
550 | if (ls->discard) | 530 | if (ls->discard) |
551 | return; | 531 | return; |
552 | 532 | ||
553 | ts = thread_stat_findnew(thread->pid); | 533 | ts = thread_stat_findnew(sample->tid); |
554 | seq = get_seq(ts, contended_event->addr); | 534 | seq = get_seq(ts, contended_event->addr); |
555 | 535 | ||
556 | switch (seq->state) { | 536 | switch (seq->state) { |
@@ -577,17 +557,14 @@ report_lock_contended_event(struct trace_contended_event *contended_event, | |||
577 | 557 | ||
578 | seq->state = SEQ_STATE_CONTENDED; | 558 | seq->state = SEQ_STATE_CONTENDED; |
579 | ls->nr_contended++; | 559 | ls->nr_contended++; |
580 | seq->prev_event_time = timestamp; | 560 | seq->prev_event_time = sample->time; |
581 | end: | 561 | end: |
582 | return; | 562 | return; |
583 | } | 563 | } |
584 | 564 | ||
585 | static void | 565 | static void |
586 | report_lock_release_event(struct trace_release_event *release_event, | 566 | report_lock_release_event(struct trace_release_event *release_event, |
587 | struct event_format *__event __used, | 567 | const struct perf_sample *sample) |
588 | int cpu __used, | ||
589 | u64 timestamp __used, | ||
590 | struct thread *thread __used) | ||
591 | { | 568 | { |
592 | struct lock_stat *ls; | 569 | struct lock_stat *ls; |
593 | struct thread_stat *ts; | 570 | struct thread_stat *ts; |
@@ -597,7 +574,7 @@ report_lock_release_event(struct trace_release_event *release_event, | |||
597 | if (ls->discard) | 574 | if (ls->discard) |
598 | return; | 575 | return; |
599 | 576 | ||
600 | ts = thread_stat_findnew(thread->pid); | 577 | ts = thread_stat_findnew(sample->tid); |
601 | seq = get_seq(ts, release_event->addr); | 578 | seq = get_seq(ts, release_event->addr); |
602 | 579 | ||
603 | switch (seq->state) { | 580 | switch (seq->state) { |
@@ -646,14 +623,12 @@ static struct trace_lock_handler report_lock_ops = { | |||
646 | 623 | ||
647 | static struct trace_lock_handler *trace_handler; | 624 | static struct trace_lock_handler *trace_handler; |
648 | 625 | ||
649 | static void | 626 | static void perf_evsel__process_lock_acquire(struct perf_evsel *evsel, |
650 | process_lock_acquire_event(void *data, | 627 | struct perf_sample *sample) |
651 | struct event_format *event __used, | ||
652 | int cpu __used, | ||
653 | u64 timestamp __used, | ||
654 | struct thread *thread __used) | ||
655 | { | 628 | { |
656 | struct trace_acquire_event acquire_event; | 629 | struct trace_acquire_event acquire_event; |
630 | struct event_format *event = evsel->tp_format; | ||
631 | void *data = sample->raw_data; | ||
657 | u64 tmp; /* this is required for casting... */ | 632 | u64 tmp; /* this is required for casting... */ |
658 | 633 | ||
659 | tmp = raw_field_value(event, "lockdep_addr", data); | 634 | tmp = raw_field_value(event, "lockdep_addr", data); |
@@ -662,17 +637,15 @@ process_lock_acquire_event(void *data, | |||
662 | acquire_event.flag = (int)raw_field_value(event, "flag", data); | 637 | acquire_event.flag = (int)raw_field_value(event, "flag", data); |
663 | 638 | ||
664 | if (trace_handler->acquire_event) | 639 | if (trace_handler->acquire_event) |
665 | trace_handler->acquire_event(&acquire_event, event, cpu, timestamp, thread); | 640 | trace_handler->acquire_event(&acquire_event, sample); |
666 | } | 641 | } |
667 | 642 | ||
668 | static void | 643 | static void perf_evsel__process_lock_acquired(struct perf_evsel *evsel, |
669 | process_lock_acquired_event(void *data, | 644 | struct perf_sample *sample) |
670 | struct event_format *event __used, | ||
671 | int cpu __used, | ||
672 | u64 timestamp __used, | ||
673 | struct thread *thread __used) | ||
674 | { | 645 | { |
675 | struct trace_acquired_event acquired_event; | 646 | struct trace_acquired_event acquired_event; |
647 | struct event_format *event = evsel->tp_format; | ||
648 | void *data = sample->raw_data; | ||
676 | u64 tmp; /* this is required for casting... */ | 649 | u64 tmp; /* this is required for casting... */ |
677 | 650 | ||
678 | tmp = raw_field_value(event, "lockdep_addr", data); | 651 | tmp = raw_field_value(event, "lockdep_addr", data); |
@@ -680,17 +653,15 @@ process_lock_acquired_event(void *data, | |||
680 | acquired_event.name = (char *)raw_field_ptr(event, "name", data); | 653 | acquired_event.name = (char *)raw_field_ptr(event, "name", data); |
681 | 654 | ||
682 | if (trace_handler->acquire_event) | 655 | if (trace_handler->acquire_event) |
683 | trace_handler->acquired_event(&acquired_event, event, cpu, timestamp, thread); | 656 | trace_handler->acquired_event(&acquired_event, sample); |
684 | } | 657 | } |
685 | 658 | ||
686 | static void | 659 | static void perf_evsel__process_lock_contended(struct perf_evsel *evsel, |
687 | process_lock_contended_event(void *data, | 660 | struct perf_sample *sample) |
688 | struct event_format *event __used, | ||
689 | int cpu __used, | ||
690 | u64 timestamp __used, | ||
691 | struct thread *thread __used) | ||
692 | { | 661 | { |
693 | struct trace_contended_event contended_event; | 662 | struct trace_contended_event contended_event; |
663 | struct event_format *event = evsel->tp_format; | ||
664 | void *data = sample->raw_data; | ||
694 | u64 tmp; /* this is required for casting... */ | 665 | u64 tmp; /* this is required for casting... */ |
695 | 666 | ||
696 | tmp = raw_field_value(event, "lockdep_addr", data); | 667 | tmp = raw_field_value(event, "lockdep_addr", data); |
@@ -698,17 +669,15 @@ process_lock_contended_event(void *data, | |||
698 | contended_event.name = (char *)raw_field_ptr(event, "name", data); | 669 | contended_event.name = (char *)raw_field_ptr(event, "name", data); |
699 | 670 | ||
700 | if (trace_handler->acquire_event) | 671 | if (trace_handler->acquire_event) |
701 | trace_handler->contended_event(&contended_event, event, cpu, timestamp, thread); | 672 | trace_handler->contended_event(&contended_event, sample); |
702 | } | 673 | } |
703 | 674 | ||
704 | static void | 675 | static void perf_evsel__process_lock_release(struct perf_evsel *evsel, |
705 | process_lock_release_event(void *data, | 676 | struct perf_sample *sample) |
706 | struct event_format *event __used, | ||
707 | int cpu __used, | ||
708 | u64 timestamp __used, | ||
709 | struct thread *thread __used) | ||
710 | { | 677 | { |
711 | struct trace_release_event release_event; | 678 | struct trace_release_event release_event; |
679 | struct event_format *event = evsel->tp_format; | ||
680 | void *data = sample->raw_data; | ||
712 | u64 tmp; /* this is required for casting... */ | 681 | u64 tmp; /* this is required for casting... */ |
713 | 682 | ||
714 | tmp = raw_field_value(event, "lockdep_addr", data); | 683 | tmp = raw_field_value(event, "lockdep_addr", data); |
@@ -716,22 +685,22 @@ process_lock_release_event(void *data, | |||
716 | release_event.name = (char *)raw_field_ptr(event, "name", data); | 685 | release_event.name = (char *)raw_field_ptr(event, "name", data); |
717 | 686 | ||
718 | if (trace_handler->acquire_event) | 687 | if (trace_handler->acquire_event) |
719 | trace_handler->release_event(&release_event, event, cpu, timestamp, thread); | 688 | trace_handler->release_event(&release_event, sample); |
720 | } | 689 | } |
721 | 690 | ||
722 | static void process_raw_event(struct perf_evsel *evsel, void *data, int cpu, | 691 | static void perf_evsel__process_lock_event(struct perf_evsel *evsel, |
723 | u64 timestamp, struct thread *thread) | 692 | struct perf_sample *sample) |
724 | { | 693 | { |
725 | struct event_format *event = evsel->tp_format; | 694 | struct event_format *event = evsel->tp_format; |
726 | 695 | ||
727 | if (!strcmp(event->name, "lock_acquire")) | 696 | if (!strcmp(event->name, "lock_acquire")) |
728 | process_lock_acquire_event(data, event, cpu, timestamp, thread); | 697 | perf_evsel__process_lock_acquire(evsel, sample); |
729 | if (!strcmp(event->name, "lock_acquired")) | 698 | if (!strcmp(event->name, "lock_acquired")) |
730 | process_lock_acquired_event(data, event, cpu, timestamp, thread); | 699 | perf_evsel__process_lock_acquired(evsel, sample); |
731 | if (!strcmp(event->name, "lock_contended")) | 700 | if (!strcmp(event->name, "lock_contended")) |
732 | process_lock_contended_event(data, event, cpu, timestamp, thread); | 701 | perf_evsel__process_lock_contended(evsel, sample); |
733 | if (!strcmp(event->name, "lock_release")) | 702 | if (!strcmp(event->name, "lock_release")) |
734 | process_lock_release_event(data, event, cpu, timestamp, thread); | 703 | perf_evsel__process_lock_release(evsel, sample); |
735 | } | 704 | } |
736 | 705 | ||
737 | static void print_bad_events(int bad, int total) | 706 | static void print_bad_events(int bad, int total) |
@@ -857,8 +826,7 @@ static int process_sample_event(struct perf_tool *tool __used, | |||
857 | return -1; | 826 | return -1; |
858 | } | 827 | } |
859 | 828 | ||
860 | process_raw_event(evsel, sample->raw_data, sample->cpu, sample->time, thread); | 829 | perf_evsel__process_lock_event(evsel, sample); |
861 | |||
862 | return 0; | 830 | return 0; |
863 | } | 831 | } |
864 | 832 | ||