diff options
author | Arnaldo Carvalho de Melo <acme@redhat.com> | 2012-08-07 10:33:42 -0400 |
---|---|---|
committer | Arnaldo Carvalho de Melo <acme@redhat.com> | 2012-08-07 22:46:19 -0400 |
commit | 7f7f8d0bea5d6bb985f4ae84ca3daff34802fd32 (patch) | |
tree | f1d01ef219719544c234b50dab7c980b6494597d /tools/perf/builtin-sched.c | |
parent | 01d955244b99827814570ed4b675271ca7b8af02 (diff) |
perf sched: Use perf_sample
To reduce the number of parameters passed to the various event handling
functions.
Cc: Andrey Wagin <avagin@gmail.com>
Cc: David Ahern <dsahern@gmail.com>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Jiri Olsa <jolsa@redhat.com>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Namhyung Kim <namhyung@gmail.com>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Stephane Eranian <eranian@google.com>
Link: http://lkml.kernel.org/n/tip-fc537qykjjqzvyol5fecx6ug@git.kernel.org
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
Diffstat (limited to 'tools/perf/builtin-sched.c')
-rw-r--r-- | tools/perf/builtin-sched.c | 113 |
1 file changed, 37 insertions, 76 deletions
diff --git a/tools/perf/builtin-sched.c b/tools/perf/builtin-sched.c index 30ef82aca885..a25a023965bb 100644 --- a/tools/perf/builtin-sched.c +++ b/tools/perf/builtin-sched.c | |||
@@ -729,46 +729,30 @@ struct trace_sched_handler { | |||
729 | void (*switch_event)(struct trace_switch_event *, | 729 | void (*switch_event)(struct trace_switch_event *, |
730 | struct machine *, | 730 | struct machine *, |
731 | struct event_format *, | 731 | struct event_format *, |
732 | int cpu, | 732 | struct perf_sample *sample); |
733 | u64 timestamp, | ||
734 | struct thread *thread); | ||
735 | 733 | ||
736 | void (*runtime_event)(struct trace_runtime_event *, | 734 | void (*runtime_event)(struct trace_runtime_event *, |
737 | struct machine *, | 735 | struct machine *, |
738 | struct event_format *, | 736 | struct perf_sample *sample); |
739 | int cpu, | ||
740 | u64 timestamp, | ||
741 | struct thread *thread); | ||
742 | 737 | ||
743 | void (*wakeup_event)(struct trace_wakeup_event *, | 738 | void (*wakeup_event)(struct trace_wakeup_event *, |
744 | struct machine *, | 739 | struct machine *, |
745 | struct event_format *, | 740 | struct event_format *, |
746 | int cpu, | 741 | struct perf_sample *sample); |
747 | u64 timestamp, | ||
748 | struct thread *thread); | ||
749 | 742 | ||
750 | void (*fork_event)(struct trace_fork_event *, | 743 | void (*fork_event)(struct trace_fork_event *, |
751 | struct event_format *, | 744 | struct event_format *event); |
752 | int cpu, | ||
753 | u64 timestamp, | ||
754 | struct thread *thread); | ||
755 | 745 | ||
756 | void (*migrate_task_event)(struct trace_migrate_task_event *, | 746 | void (*migrate_task_event)(struct trace_migrate_task_event *, |
757 | struct machine *machine, | 747 | struct machine *machine, |
758 | struct event_format *, | 748 | struct perf_sample *sample); |
759 | int cpu, | ||
760 | u64 timestamp, | ||
761 | struct thread *thread); | ||
762 | }; | 749 | }; |
763 | 750 | ||
764 | 751 | ||
765 | static void | 752 | static void |
766 | replay_wakeup_event(struct trace_wakeup_event *wakeup_event, | 753 | replay_wakeup_event(struct trace_wakeup_event *wakeup_event, |
767 | struct machine *machine __used, | 754 | struct machine *machine __used, |
768 | struct event_format *event, | 755 | struct event_format *event, struct perf_sample *sample) |
769 | int cpu __used, | ||
770 | u64 timestamp __used, | ||
771 | struct thread *thread __used) | ||
772 | { | 756 | { |
773 | struct task_desc *waker, *wakee; | 757 | struct task_desc *waker, *wakee; |
774 | 758 | ||
@@ -784,7 +768,7 @@ replay_wakeup_event(struct trace_wakeup_event *wakeup_event, | |||
784 | waker = register_pid(wakeup_event->common_pid, "<unknown>"); | 768 | waker = register_pid(wakeup_event->common_pid, "<unknown>"); |
785 | wakee = register_pid(wakeup_event->pid, wakeup_event->comm); | 769 | wakee = register_pid(wakeup_event->pid, wakeup_event->comm); |
786 | 770 | ||
787 | add_sched_event_wakeup(waker, timestamp, wakee); | 771 | add_sched_event_wakeup(waker, sample->time, wakee); |
788 | } | 772 | } |
789 | 773 | ||
790 | static u64 cpu_last_switched[MAX_CPUS]; | 774 | static u64 cpu_last_switched[MAX_CPUS]; |
@@ -793,12 +777,11 @@ static void | |||
793 | replay_switch_event(struct trace_switch_event *switch_event, | 777 | replay_switch_event(struct trace_switch_event *switch_event, |
794 | struct machine *machine __used, | 778 | struct machine *machine __used, |
795 | struct event_format *event, | 779 | struct event_format *event, |
796 | int cpu, | 780 | struct perf_sample *sample) |
797 | u64 timestamp, | ||
798 | struct thread *thread __used) | ||
799 | { | 781 | { |
800 | struct task_desc *prev, __used *next; | 782 | struct task_desc *prev, __used *next; |
801 | u64 timestamp0; | 783 | u64 timestamp0, timestamp = sample->time; |
784 | int cpu = sample->cpu; | ||
802 | s64 delta; | 785 | s64 delta; |
803 | 786 | ||
804 | if (verbose) | 787 | if (verbose) |
@@ -835,10 +818,7 @@ replay_switch_event(struct trace_switch_event *switch_event, | |||
835 | 818 | ||
836 | static void | 819 | static void |
837 | replay_fork_event(struct trace_fork_event *fork_event, | 820 | replay_fork_event(struct trace_fork_event *fork_event, |
838 | struct event_format *event, | 821 | struct event_format *event) |
839 | int cpu __used, | ||
840 | u64 timestamp __used, | ||
841 | struct thread *thread __used) | ||
842 | { | 822 | { |
843 | if (verbose) { | 823 | if (verbose) { |
844 | printf("sched_fork event %p\n", event); | 824 | printf("sched_fork event %p\n", event); |
@@ -944,10 +924,7 @@ static void thread_atoms_insert(struct thread *thread) | |||
944 | 924 | ||
945 | static void | 925 | static void |
946 | latency_fork_event(struct trace_fork_event *fork_event __used, | 926 | latency_fork_event(struct trace_fork_event *fork_event __used, |
947 | struct event_format *event __used, | 927 | struct event_format *event __used) |
948 | int cpu __used, | ||
949 | u64 timestamp __used, | ||
950 | struct thread *thread __used) | ||
951 | { | 928 | { |
952 | /* should insert the newcomer */ | 929 | /* should insert the newcomer */ |
953 | } | 930 | } |
@@ -1027,13 +1004,12 @@ static void | |||
1027 | latency_switch_event(struct trace_switch_event *switch_event, | 1004 | latency_switch_event(struct trace_switch_event *switch_event, |
1028 | struct machine *machine, | 1005 | struct machine *machine, |
1029 | struct event_format *event __used, | 1006 | struct event_format *event __used, |
1030 | int cpu, | 1007 | struct perf_sample *sample) |
1031 | u64 timestamp, | ||
1032 | struct thread *thread __used) | ||
1033 | { | 1008 | { |
1034 | struct work_atoms *out_events, *in_events; | 1009 | struct work_atoms *out_events, *in_events; |
1035 | struct thread *sched_out, *sched_in; | 1010 | struct thread *sched_out, *sched_in; |
1036 | u64 timestamp0; | 1011 | u64 timestamp0, timestamp = sample->time; |
1012 | int cpu = sample->cpu; | ||
1037 | s64 delta; | 1013 | s64 delta; |
1038 | 1014 | ||
1039 | BUG_ON(cpu >= MAX_CPUS || cpu < 0); | 1015 | BUG_ON(cpu >= MAX_CPUS || cpu < 0); |
@@ -1078,14 +1054,12 @@ latency_switch_event(struct trace_switch_event *switch_event, | |||
1078 | 1054 | ||
1079 | static void | 1055 | static void |
1080 | latency_runtime_event(struct trace_runtime_event *runtime_event, | 1056 | latency_runtime_event(struct trace_runtime_event *runtime_event, |
1081 | struct machine *machine, | 1057 | struct machine *machine, struct perf_sample *sample) |
1082 | struct event_format *event __used, | ||
1083 | int cpu, | ||
1084 | u64 timestamp, | ||
1085 | struct thread *this_thread __used) | ||
1086 | { | 1058 | { |
1087 | struct thread *thread = machine__findnew_thread(machine, runtime_event->pid); | 1059 | struct thread *thread = machine__findnew_thread(machine, runtime_event->pid); |
1088 | struct work_atoms *atoms = thread_atoms_search(&atom_root, thread, &cmp_pid); | 1060 | struct work_atoms *atoms = thread_atoms_search(&atom_root, thread, &cmp_pid); |
1061 | u64 timestamp = sample->time; | ||
1062 | int cpu = sample->cpu; | ||
1089 | 1063 | ||
1090 | BUG_ON(cpu >= MAX_CPUS || cpu < 0); | 1064 | BUG_ON(cpu >= MAX_CPUS || cpu < 0); |
1091 | if (!atoms) { | 1065 | if (!atoms) { |
@@ -1101,15 +1075,13 @@ latency_runtime_event(struct trace_runtime_event *runtime_event, | |||
1101 | 1075 | ||
1102 | static void | 1076 | static void |
1103 | latency_wakeup_event(struct trace_wakeup_event *wakeup_event, | 1077 | latency_wakeup_event(struct trace_wakeup_event *wakeup_event, |
1104 | struct machine *machine, | 1078 | struct machine *machine, struct event_format *event __used, |
1105 | struct event_format *__event __used, | 1079 | struct perf_sample *sample) |
1106 | int cpu __used, | ||
1107 | u64 timestamp, | ||
1108 | struct thread *thread __used) | ||
1109 | { | 1080 | { |
1110 | struct work_atoms *atoms; | 1081 | struct work_atoms *atoms; |
1111 | struct work_atom *atom; | 1082 | struct work_atom *atom; |
1112 | struct thread *wakee; | 1083 | struct thread *wakee; |
1084 | u64 timestamp = sample->time; | ||
1113 | 1085 | ||
1114 | /* Note for later, it may be interesting to observe the failing cases */ | 1086 | /* Note for later, it may be interesting to observe the failing cases */ |
1115 | if (!wakeup_event->success) | 1087 | if (!wakeup_event->success) |
@@ -1149,12 +1121,9 @@ latency_wakeup_event(struct trace_wakeup_event *wakeup_event, | |||
1149 | 1121 | ||
1150 | static void | 1122 | static void |
1151 | latency_migrate_task_event(struct trace_migrate_task_event *migrate_task_event, | 1123 | latency_migrate_task_event(struct trace_migrate_task_event *migrate_task_event, |
1152 | struct machine *machine, | 1124 | struct machine *machine, struct perf_sample *sample) |
1153 | struct event_format *__event __used, | ||
1154 | int cpu __used, | ||
1155 | u64 timestamp, | ||
1156 | struct thread *thread __used) | ||
1157 | { | 1125 | { |
1126 | u64 timestamp = sample->time; | ||
1158 | struct work_atoms *atoms; | 1127 | struct work_atoms *atoms; |
1159 | struct work_atom *atom; | 1128 | struct work_atom *atom; |
1160 | struct thread *migrant; | 1129 | struct thread *migrant; |
@@ -1364,7 +1333,7 @@ process_sched_wakeup_event(struct perf_tool *tool __used, | |||
1364 | struct event_format *event, | 1333 | struct event_format *event, |
1365 | struct perf_sample *sample, | 1334 | struct perf_sample *sample, |
1366 | struct machine *machine, | 1335 | struct machine *machine, |
1367 | struct thread *thread) | 1336 | struct thread *thread __used) |
1368 | { | 1337 | { |
1369 | void *data = sample->raw_data; | 1338 | void *data = sample->raw_data; |
1370 | struct trace_wakeup_event wakeup_event; | 1339 | struct trace_wakeup_event wakeup_event; |
@@ -1378,8 +1347,7 @@ process_sched_wakeup_event(struct perf_tool *tool __used, | |||
1378 | FILL_FIELD(wakeup_event, cpu, event, data); | 1347 | FILL_FIELD(wakeup_event, cpu, event, data); |
1379 | 1348 | ||
1380 | if (trace_handler->wakeup_event) | 1349 | if (trace_handler->wakeup_event) |
1381 | trace_handler->wakeup_event(&wakeup_event, machine, event, | 1350 | trace_handler->wakeup_event(&wakeup_event, machine, event, sample); |
1382 | sample->cpu, sample->time, thread); | ||
1383 | } | 1351 | } |
1384 | 1352 | ||
1385 | /* | 1353 | /* |
@@ -1399,15 +1367,13 @@ static void | |||
1399 | map_switch_event(struct trace_switch_event *switch_event, | 1367 | map_switch_event(struct trace_switch_event *switch_event, |
1400 | struct machine *machine, | 1368 | struct machine *machine, |
1401 | struct event_format *event __used, | 1369 | struct event_format *event __used, |
1402 | int this_cpu, | 1370 | struct perf_sample *sample) |
1403 | u64 timestamp, | ||
1404 | struct thread *thread __used) | ||
1405 | { | 1371 | { |
1406 | struct thread *sched_out __used, *sched_in; | 1372 | struct thread *sched_out __used, *sched_in; |
1407 | int new_shortname; | 1373 | int new_shortname; |
1408 | u64 timestamp0; | 1374 | u64 timestamp0, timestamp = sample->time; |
1409 | s64 delta; | 1375 | s64 delta; |
1410 | int cpu; | 1376 | int cpu, this_cpu = sample->cpu; |
1411 | 1377 | ||
1412 | BUG_ON(this_cpu >= MAX_CPUS || this_cpu < 0); | 1378 | BUG_ON(this_cpu >= MAX_CPUS || this_cpu < 0); |
1413 | 1379 | ||
@@ -1479,7 +1445,7 @@ process_sched_switch_event(struct perf_tool *tool __used, | |||
1479 | struct event_format *event, | 1445 | struct event_format *event, |
1480 | struct perf_sample *sample, | 1446 | struct perf_sample *sample, |
1481 | struct machine *machine, | 1447 | struct machine *machine, |
1482 | struct thread *thread) | 1448 | struct thread *thread __used) |
1483 | { | 1449 | { |
1484 | int this_cpu = sample->cpu; | 1450 | int this_cpu = sample->cpu; |
1485 | void *data = sample->raw_data; | 1451 | void *data = sample->raw_data; |
@@ -1504,8 +1470,7 @@ process_sched_switch_event(struct perf_tool *tool __used, | |||
1504 | nr_context_switch_bugs++; | 1470 | nr_context_switch_bugs++; |
1505 | } | 1471 | } |
1506 | if (trace_handler->switch_event) | 1472 | if (trace_handler->switch_event) |
1507 | trace_handler->switch_event(&switch_event, machine, event, | 1473 | trace_handler->switch_event(&switch_event, machine, event, sample); |
1508 | this_cpu, sample->time, thread); | ||
1509 | 1474 | ||
1510 | curr_pid[this_cpu] = switch_event.next_pid; | 1475 | curr_pid[this_cpu] = switch_event.next_pid; |
1511 | } | 1476 | } |
@@ -1515,7 +1480,7 @@ process_sched_runtime_event(struct perf_tool *tool __used, | |||
1515 | struct event_format *event, | 1480 | struct event_format *event, |
1516 | struct perf_sample *sample, | 1481 | struct perf_sample *sample, |
1517 | struct machine *machine, | 1482 | struct machine *machine, |
1518 | struct thread *thread) | 1483 | struct thread *thread __used) |
1519 | { | 1484 | { |
1520 | void *data = sample->raw_data; | 1485 | void *data = sample->raw_data; |
1521 | struct trace_runtime_event runtime_event; | 1486 | struct trace_runtime_event runtime_event; |
@@ -1526,8 +1491,7 @@ process_sched_runtime_event(struct perf_tool *tool __used, | |||
1526 | FILL_FIELD(runtime_event, vruntime, event, data); | 1491 | FILL_FIELD(runtime_event, vruntime, event, data); |
1527 | 1492 | ||
1528 | if (trace_handler->runtime_event) | 1493 | if (trace_handler->runtime_event) |
1529 | trace_handler->runtime_event(&runtime_event, machine, event, | 1494 | trace_handler->runtime_event(&runtime_event, machine, sample); |
1530 | sample->cpu, sample->time, thread); | ||
1531 | } | 1495 | } |
1532 | 1496 | ||
1533 | static void | 1497 | static void |
@@ -1535,7 +1499,7 @@ process_sched_fork_event(struct perf_tool *tool __used, | |||
1535 | struct event_format *event, | 1499 | struct event_format *event, |
1536 | struct perf_sample *sample, | 1500 | struct perf_sample *sample, |
1537 | struct machine *machine __used, | 1501 | struct machine *machine __used, |
1538 | struct thread *thread) | 1502 | struct thread *thread __used) |
1539 | { | 1503 | { |
1540 | void *data = sample->raw_data; | 1504 | void *data = sample->raw_data; |
1541 | struct trace_fork_event fork_event; | 1505 | struct trace_fork_event fork_event; |
@@ -1548,8 +1512,7 @@ process_sched_fork_event(struct perf_tool *tool __used, | |||
1548 | FILL_FIELD(fork_event, child_pid, event, data); | 1512 | FILL_FIELD(fork_event, child_pid, event, data); |
1549 | 1513 | ||
1550 | if (trace_handler->fork_event) | 1514 | if (trace_handler->fork_event) |
1551 | trace_handler->fork_event(&fork_event, event, | 1515 | trace_handler->fork_event(&fork_event, event); |
1552 | sample->cpu, sample->time, thread); | ||
1553 | } | 1516 | } |
1554 | 1517 | ||
1555 | static void | 1518 | static void |
@@ -1568,7 +1531,7 @@ process_sched_migrate_task_event(struct perf_tool *tool __used, | |||
1568 | struct event_format *event, | 1531 | struct event_format *event, |
1569 | struct perf_sample *sample, | 1532 | struct perf_sample *sample, |
1570 | struct machine *machine, | 1533 | struct machine *machine, |
1571 | struct thread *thread) | 1534 | struct thread *thread __used) |
1572 | { | 1535 | { |
1573 | void *data = sample->raw_data; | 1536 | void *data = sample->raw_data; |
1574 | struct trace_migrate_task_event migrate_task_event; | 1537 | struct trace_migrate_task_event migrate_task_event; |
@@ -1581,9 +1544,7 @@ process_sched_migrate_task_event(struct perf_tool *tool __used, | |||
1581 | FILL_FIELD(migrate_task_event, cpu, event, data); | 1544 | FILL_FIELD(migrate_task_event, cpu, event, data); |
1582 | 1545 | ||
1583 | if (trace_handler->migrate_task_event) | 1546 | if (trace_handler->migrate_task_event) |
1584 | trace_handler->migrate_task_event(&migrate_task_event, machine, | 1547 | trace_handler->migrate_task_event(&migrate_task_event, machine, sample); |
1585 | event, sample->cpu, | ||
1586 | sample->time, thread); | ||
1587 | } | 1548 | } |
1588 | 1549 | ||
1589 | typedef void (*tracepoint_handler)(struct perf_tool *tool, struct event_format *event, | 1550 | typedef void (*tracepoint_handler)(struct perf_tool *tool, struct event_format *event, |