path: root/tools/perf/builtin-sched.c
author     Arnaldo Carvalho de Melo <acme@redhat.com>    2012-09-11 18:29:17 -0400
committer  Arnaldo Carvalho de Melo <acme@redhat.com>    2012-09-11 18:33:51 -0400
commit     2b7fcbc5a9c719a306af1c4986a9f5c2cbfcec65 (patch)
tree       c3a104d55c0a1a77a9b82088e84ca7f7bb261ece /tools/perf/builtin-sched.c
parent     5555ded44698ed82ffa3d8742ec2994f695127bc (diff)
perf sched: Use perf_evsel__{int,str}val
This patch also stops reading the common fields, as they were not being
used except for one ->common_pid case that was replaced by sample->tid,
i.e. the info is already in the perf_sample struct.

Also it only fills the _event structures when there is a handler.

[root@sandy ~]# perf sched record sleep 30s
[ perf record: Woken up 1 times to write data ]
[ perf record: Captured and wrote 8.585 MB perf.data (~375063 samples) ]

Before:

[root@sandy ~]# perf stat -r 10 perf sched lat > /dev/null

 Performance counter stats for 'perf sched lat' (10 runs):

        129.117838 task-clock                #    0.994 CPUs utilized            ( +-  0.28% )
                14 context-switches          #    0.111 K/sec                    ( +-  2.10% )
                 0 cpu-migrations            #    0.002 K/sec                    ( +- 66.67% )
             7,654 page-faults               #    0.059 M/sec                    ( +-  0.67% )
       438,121,661 cycles                    #    3.393 GHz                      ( +-  0.06% ) [83.06%]
       150,808,605 stalled-cycles-frontend   #   34.42% frontend cycles idle     ( +-  0.14% ) [83.10%]
        80,748,941 stalled-cycles-backend    #   18.43% backend cycles idle      ( +-  0.64% ) [66.73%]
       758,605,879 instructions              #    1.73  insns per cycle
                                             #    0.20  stalled cycles per insn  ( +-  0.08% ) [83.54%]
       162,164,321 branches                  # 1255.940 M/sec                    ( +-  0.10% ) [83.70%]
         1,609,903 branch-misses             #    0.99% of all branches          ( +-  0.08% ) [83.62%]

       0.129949153 seconds time elapsed                                          ( +-  0.28% )

After:

[root@sandy ~]# perf stat -r 10 perf sched lat > /dev/null

 Performance counter stats for 'perf sched lat' (10 runs):

        103.592215 task-clock                #    0.993 CPUs utilized            ( +-  0.33% )
                12 context-switches          #    0.114 K/sec                    ( +-  3.29% )
                 0 cpu-migrations            #    0.000 K/sec
             7,605 page-faults               #    0.073 M/sec                    ( +-  0.00% )
       345,796,112 cycles                    #    3.338 GHz                      ( +-  0.07% ) [82.90%]
       106,876,796 stalled-cycles-frontend   #   30.91% frontend cycles idle     ( +-  0.38% ) [83.23%]
        62,060,877 stalled-cycles-backend    #   17.95% backend cycles idle      ( +-  0.80% ) [67.14%]
       628,246,586 instructions              #    1.82  insns per cycle
                                             #    0.17  stalled cycles per insn  ( +-  0.04% ) [83.64%]
       134,962,057 branches                  # 1302.820 M/sec                    ( +-  0.10% ) [83.64%]
         1,233,037 branch-misses             #    0.91% of all branches          ( +-  0.29% ) [83.41%]

       0.104333272 seconds time elapsed                                          ( +-  0.33% )

[root@sandy ~]#

Cc: David Ahern <dsahern@gmail.com>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Jiri Olsa <jolsa@redhat.com>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Namhyung Kim <namhyung@gmail.com>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Stephane Eranian <eranian@google.com>
Link: http://lkml.kernel.org/n/tip-weu9t63zkrfrazkn0gxj48xy@git.kernel.org
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
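For illustration only (not part of the patch), a minimal sketch of the conversion pattern, mirroring the sched_wakeup handler in the diff below but trimmed to two fields; the function name is hypothetical. A handler now resolves only the named tracepoint fields through the evsel with perf_evsel__{int,str}val, builds its _event struct only when a handler is registered, and takes the waker pid from sample->tid rather than from the copied common_pid:

/* Sketch of the pattern this patch adopts; mirrors process_sched_wakeup_event
 * below, reduced to two fields for brevity. */
static int process_sched_wakeup_sketch(struct perf_tool *tool,
                                       struct perf_evsel *evsel,
                                       struct perf_sample *sample,
                                       struct machine *machine)
{
        struct perf_sched *sched = container_of(tool, struct perf_sched, tool);

        if (sched->tp_handler->wakeup_event) {
                struct trace_wakeup_event event = {
                        /* string field looked up by name in the raw sample data */
                        .comm = perf_evsel__strval(evsel, sample, "comm"),
                        /* integer field resolved the same way */
                        .pid  = perf_evsel__intval(evsel, sample, "pid"),
                };
                /* the waker pid is already in sample->tid, so the common_*
                 * fields never need to be copied out of the record */
                return sched->tp_handler->wakeup_event(sched, &event, machine, evsel, sample);
        }

        return 0;
}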
Diffstat (limited to 'tools/perf/builtin-sched.c')
-rw-r--r--  tools/perf/builtin-sched.c  |  249
1 file changed, 90 insertions(+), 159 deletions(-)
diff --git a/tools/perf/builtin-sched.c b/tools/perf/builtin-sched.c
index 79f88fa3f7a3..0df5e7a08c63 100644
--- a/tools/perf/builtin-sched.c
+++ b/tools/perf/builtin-sched.c
@@ -98,82 +98,40 @@ struct work_atoms {
 typedef int (*sort_fn_t)(struct work_atoms *, struct work_atoms *);
 
 struct trace_switch_event {
-        u32 size;
-
-        u16 common_type;
-        u8 common_flags;
-        u8 common_preempt_count;
-        u32 common_pid;
-        u32 common_tgid;
-
-        char prev_comm[16];
+        char *prev_comm;
         u32 prev_pid;
         u32 prev_prio;
         u64 prev_state;
-        char next_comm[16];
+        char *next_comm;
         u32 next_pid;
         u32 next_prio;
 };
 
 struct trace_runtime_event {
-        u32 size;
-
-        u16 common_type;
-        u8 common_flags;
-        u8 common_preempt_count;
-        u32 common_pid;
-        u32 common_tgid;
-
-        char comm[16];
+        char *comm;
         u32 pid;
         u64 runtime;
         u64 vruntime;
 };
 
 struct trace_wakeup_event {
-        u32 size;
-
-        u16 common_type;
-        u8 common_flags;
-        u8 common_preempt_count;
-        u32 common_pid;
-        u32 common_tgid;
-
-        char comm[16];
+        char *comm;
         u32 pid;
-
         u32 prio;
         u32 success;
         u32 cpu;
 };
 
 struct trace_fork_event {
-        u32 size;
-
-        u16 common_type;
-        u8 common_flags;
-        u8 common_preempt_count;
-        u32 common_pid;
-        u32 common_tgid;
-
-        char parent_comm[16];
+        char *parent_comm;
         u32 parent_pid;
-        char child_comm[16];
+        char *child_comm;
         u32 child_pid;
 };
 
 struct trace_migrate_task_event {
-        u32 size;
-
-        u16 common_type;
-        u8 common_flags;
-        u8 common_preempt_count;
-        u32 common_pid;
-        u32 common_tgid;
-
-        char comm[16];
+        char *comm;
         u32 pid;
-
         u32 prio;
         u32 cpu;
 };
@@ -184,7 +142,7 @@ struct trace_sched_handler {
         int (*switch_event)(struct perf_sched *sched,
                             struct trace_switch_event *event,
                             struct machine *machine,
-                            struct event_format *tp_format,
+                            struct perf_evsel *evsel,
                             struct perf_sample *sample);
 
         int (*runtime_event)(struct perf_sched *sched,
@@ -195,12 +153,12 @@ struct trace_sched_handler {
         int (*wakeup_event)(struct perf_sched *sched,
                             struct trace_wakeup_event *event,
                             struct machine *machine,
-                            struct event_format *tp_format,
+                            struct perf_evsel *evsel,
                             struct perf_sample *sample);
 
         int (*fork_event)(struct perf_sched *sched,
                           struct trace_fork_event *event,
-                          struct event_format *tp_format);
+                          struct perf_evsel *evsel);
 
         int (*migrate_task_event)(struct perf_sched *sched,
                                   struct trace_migrate_task_event *event,
@@ -740,40 +698,22 @@ static void test_calibrations(struct perf_sched *sched)
         printf("the sleep test took %" PRIu64 " nsecs\n", T1 - T0);
 }
 
-#define FILL_FIELD(ptr, field, event, data) \
-        ptr.field = (typeof(ptr.field)) raw_field_value(event, #field, data)
-
-#define FILL_ARRAY(ptr, array, event, data) \
-do { \
-        void *__array = raw_field_ptr(event, #array, data); \
-        memcpy(ptr.array, __array, sizeof(ptr.array)); \
-} while(0)
-
-#define FILL_COMMON_FIELDS(ptr, event, data) \
-do { \
-        FILL_FIELD(ptr, common_type, event, data); \
-        FILL_FIELD(ptr, common_flags, event, data); \
-        FILL_FIELD(ptr, common_preempt_count, event, data); \
-        FILL_FIELD(ptr, common_pid, event, data); \
-        FILL_FIELD(ptr, common_tgid, event, data); \
-} while (0)
-
 static int
 replay_wakeup_event(struct perf_sched *sched,
                     struct trace_wakeup_event *wakeup_event,
                     struct machine *machine __maybe_unused,
-                    struct event_format *event, struct perf_sample *sample)
+                    struct perf_evsel *evsel, struct perf_sample *sample)
 {
         struct task_desc *waker, *wakee;
 
         if (verbose) {
-                printf("sched_wakeup event %p\n", event);
+                printf("sched_wakeup event %p\n", evsel);
 
                 printf(" ... pid %d woke up %s/%d\n",
-                       wakeup_event->common_pid, wakeup_event->comm, wakeup_event->pid);
+                       sample->tid, wakeup_event->comm, wakeup_event->pid);
         }
 
-        waker = register_pid(sched, wakeup_event->common_pid, "<unknown>");
+        waker = register_pid(sched, sample->tid, "<unknown>");
         wakee = register_pid(sched, wakeup_event->pid, wakeup_event->comm);
 
         add_sched_event_wakeup(sched, waker, sample->time, wakee);
@@ -784,7 +724,7 @@ static int
 replay_switch_event(struct perf_sched *sched,
                     struct trace_switch_event *switch_event,
                     struct machine *machine __maybe_unused,
-                    struct event_format *event,
+                    struct perf_evsel *evsel,
                     struct perf_sample *sample)
 {
         struct task_desc *prev, __maybe_unused *next;
@@ -793,7 +733,7 @@ replay_switch_event(struct perf_sched *sched,
         s64 delta;
 
         if (verbose)
-                printf("sched_switch event %p\n", event);
+                printf("sched_switch event %p\n", evsel);
 
         if (cpu >= MAX_CPUS || cpu < 0)
                 return 0;
@@ -829,10 +769,10 @@ replay_switch_event(struct perf_sched *sched,
 
 static int
 replay_fork_event(struct perf_sched *sched, struct trace_fork_event *fork_event,
-                  struct event_format *event)
+                  struct perf_evsel *evsel)
 {
         if (verbose) {
-                printf("sched_fork event %p\n", event);
+                printf("sched_fork event %p\n", evsel);
                 printf("... parent: %s/%d\n", fork_event->parent_comm, fork_event->parent_pid);
                 printf("... child: %s/%d\n", fork_event->child_comm, fork_event->child_pid);
         }
@@ -931,7 +871,7 @@ static int thread_atoms_insert(struct perf_sched *sched, struct thread *thread)
 
 static int latency_fork_event(struct perf_sched *sched __maybe_unused,
                               struct trace_fork_event *fork_event __maybe_unused,
-                              struct event_format *event __maybe_unused)
+                              struct perf_evsel *evsel __maybe_unused)
 {
         /* should insert the newcomer */
         return 0;
@@ -1015,7 +955,7 @@ static int
 latency_switch_event(struct perf_sched *sched,
                      struct trace_switch_event *switch_event,
                      struct machine *machine,
-                     struct event_format *event __maybe_unused,
+                     struct perf_evsel *evsel __maybe_unused,
                      struct perf_sample *sample)
 {
         struct work_atoms *out_events, *in_events;
@@ -1106,7 +1046,7 @@ static int
 latency_wakeup_event(struct perf_sched *sched,
                      struct trace_wakeup_event *wakeup_event,
                      struct machine *machine,
-                     struct event_format *event __maybe_unused,
+                     struct perf_evsel *evsel __maybe_unused,
                      struct perf_sample *sample)
 {
         struct work_atoms *atoms;
@@ -1350,34 +1290,32 @@ static void perf_sched__sort_lat(struct perf_sched *sched)
 }
 
 static int process_sched_wakeup_event(struct perf_tool *tool,
-                                      struct event_format *event,
+                                      struct perf_evsel *evsel,
                                       struct perf_sample *sample,
                                       struct machine *machine)
 {
         struct perf_sched *sched = container_of(tool, struct perf_sched, tool);
-        void *data = sample->raw_data;
-        struct trace_wakeup_event wakeup_event;
-        int err = 0;
-
-        FILL_COMMON_FIELDS(wakeup_event, event, data);
 
-        FILL_ARRAY(wakeup_event, comm, event, data);
-        FILL_FIELD(wakeup_event, pid, event, data);
-        FILL_FIELD(wakeup_event, prio, event, data);
-        FILL_FIELD(wakeup_event, success, event, data);
-        FILL_FIELD(wakeup_event, cpu, event, data);
+        if (sched->tp_handler->wakeup_event) {
+                struct trace_wakeup_event event = {
+                        .comm    = perf_evsel__strval(evsel, sample, "comm"),
+                        .pid     = perf_evsel__intval(evsel, sample, "pid"),
+                        .prio    = perf_evsel__intval(evsel, sample, "prio"),
+                        .success = perf_evsel__intval(evsel, sample, "success"),
+                        .cpu     = perf_evsel__intval(evsel, sample, "cpu"),
+                };
 
-        if (sched->tp_handler->wakeup_event)
-                err = sched->tp_handler->wakeup_event(sched, &wakeup_event, machine, event, sample);
+                return sched->tp_handler->wakeup_event(sched, &event, machine, evsel, sample);
+        }
 
-        return err;
+        return 0;
 }
 
 static int
 map_switch_event(struct perf_sched *sched,
                  struct trace_switch_event *switch_event,
                  struct machine *machine,
-                 struct event_format *event __maybe_unused,
+                 struct perf_evsel *evsel __maybe_unused,
                  struct perf_sample *sample)
 {
         struct thread *sched_out __maybe_unused, *sched_in;
@@ -1455,120 +1393,113 @@ map_switch_event(struct perf_sched *sched,
 }
 
 static int process_sched_switch_event(struct perf_tool *tool,
-                                      struct event_format *event,
+                                      struct perf_evsel *evsel,
                                       struct perf_sample *sample,
                                       struct machine *machine)
 {
         struct perf_sched *sched = container_of(tool, struct perf_sched, tool);
         int this_cpu = sample->cpu, err = 0;
-        void *data = sample->raw_data;
-        struct trace_switch_event switch_event;
-
-        FILL_COMMON_FIELDS(switch_event, event, data);
-
-        FILL_ARRAY(switch_event, prev_comm, event, data);
-        FILL_FIELD(switch_event, prev_pid, event, data);
-        FILL_FIELD(switch_event, prev_prio, event, data);
-        FILL_FIELD(switch_event, prev_state, event, data);
-        FILL_ARRAY(switch_event, next_comm, event, data);
-        FILL_FIELD(switch_event, next_pid, event, data);
-        FILL_FIELD(switch_event, next_prio, event, data);
+        u32 prev_pid = perf_evsel__intval(evsel, sample, "prev_pid"),
+            next_pid = perf_evsel__intval(evsel, sample, "next_pid");
 
         if (sched->curr_pid[this_cpu] != (u32)-1) {
                 /*
                  * Are we trying to switch away a PID that is
                  * not current?
                  */
-                if (sched->curr_pid[this_cpu] != switch_event.prev_pid)
+                if (sched->curr_pid[this_cpu] != prev_pid)
                         sched->nr_context_switch_bugs++;
         }
-        if (sched->tp_handler->switch_event)
-                err = sched->tp_handler->switch_event(sched, &switch_event, machine, event, sample);
 
-        sched->curr_pid[this_cpu] = switch_event.next_pid;
+        if (sched->tp_handler->switch_event) {
+                struct trace_switch_event event = {
+                        .prev_comm  = perf_evsel__strval(evsel, sample, "prev_comm"),
+                        .prev_pid   = prev_pid,
+                        .prev_prio  = perf_evsel__intval(evsel, sample, "prev_prio"),
+                        .prev_state = perf_evsel__intval(evsel, sample, "prev_state"),
+                        .next_comm  = perf_evsel__strval(evsel, sample, "next_comm"),
+                        .next_pid   = next_pid,
+                        .next_prio  = perf_evsel__intval(evsel, sample, "next_prio"),
+                };
+
+                err = sched->tp_handler->switch_event(sched, &event, machine, evsel, sample);
+        }
+
+        sched->curr_pid[this_cpu] = next_pid;
         return err;
 }
 
 static int process_sched_runtime_event(struct perf_tool *tool,
-                                       struct event_format *event,
+                                       struct perf_evsel *evsel,
                                        struct perf_sample *sample,
                                        struct machine *machine)
 {
         struct perf_sched *sched = container_of(tool, struct perf_sched, tool);
-        void *data = sample->raw_data;
-        struct trace_runtime_event runtime_event;
-        int err = 0;
 
-        FILL_ARRAY(runtime_event, comm, event, data);
-        FILL_FIELD(runtime_event, pid, event, data);
-        FILL_FIELD(runtime_event, runtime, event, data);
-        FILL_FIELD(runtime_event, vruntime, event, data);
-
-        if (sched->tp_handler->runtime_event)
-                err = sched->tp_handler->runtime_event(sched, &runtime_event, machine, sample);
+        if (sched->tp_handler->runtime_event) {
+                struct trace_runtime_event event = {
+                        .comm     = perf_evsel__strval(evsel, sample, "comm"),
+                        .pid      = perf_evsel__intval(evsel, sample, "pid"),
+                        .runtime  = perf_evsel__intval(evsel, sample, "runtime"),
+                        .vruntime = perf_evsel__intval(evsel, sample, "vruntime"),
+                };
+                return sched->tp_handler->runtime_event(sched, &event, machine, sample);
+        }
 
-        return err;
+        return 0;
 }
 
 static int process_sched_fork_event(struct perf_tool *tool,
-                                    struct event_format *event,
+                                    struct perf_evsel *evsel,
                                     struct perf_sample *sample,
                                     struct machine *machine __maybe_unused)
 {
         struct perf_sched *sched = container_of(tool, struct perf_sched, tool);
-        void *data = sample->raw_data;
-        struct trace_fork_event fork_event;
-        int err = 0;
-
-        FILL_COMMON_FIELDS(fork_event, event, data);
 
-        FILL_ARRAY(fork_event, parent_comm, event, data);
-        FILL_FIELD(fork_event, parent_pid, event, data);
-        FILL_ARRAY(fork_event, child_comm, event, data);
-        FILL_FIELD(fork_event, child_pid, event, data);
-
-        if (sched->tp_handler->fork_event)
-                err = sched->tp_handler->fork_event(sched, &fork_event, event);
+        if (sched->tp_handler->fork_event) {
+                struct trace_fork_event event = {
+                        .parent_comm = perf_evsel__strval(evsel, sample, "parent_comm"),
+                        .child_comm  = perf_evsel__strval(evsel, sample, "child_comm"),
+                        .parent_pid  = perf_evsel__intval(evsel, sample, "parent_pid"),
+                        .child_pid   = perf_evsel__intval(evsel, sample, "child_pid"),
+                };
+                return sched->tp_handler->fork_event(sched, &event, evsel);
+        }
 
-        return err;
+        return 0;
 }
 
 static int process_sched_exit_event(struct perf_tool *tool __maybe_unused,
-                                    struct event_format *event,
+                                    struct perf_evsel *evsel,
                                     struct perf_sample *sample __maybe_unused,
                                     struct machine *machine __maybe_unused)
 {
-        if (verbose)
-                printf("sched_exit event %p\n", event);
-
+        pr_debug("sched_exit event %p\n", evsel);
         return 0;
 }
 
 static int process_sched_migrate_task_event(struct perf_tool *tool,
-                                            struct event_format *event,
+                                            struct perf_evsel *evsel,
                                             struct perf_sample *sample,
                                             struct machine *machine)
 {
         struct perf_sched *sched = container_of(tool, struct perf_sched, tool);
-        void *data = sample->raw_data;
-        struct trace_migrate_task_event migrate_task_event;
-        int err = 0;
 
-        FILL_COMMON_FIELDS(migrate_task_event, event, data);
-
-        FILL_ARRAY(migrate_task_event, comm, event, data);
-        FILL_FIELD(migrate_task_event, pid, event, data);
-        FILL_FIELD(migrate_task_event, prio, event, data);
-        FILL_FIELD(migrate_task_event, cpu, event, data);
-
-        if (sched->tp_handler->migrate_task_event)
-                err = sched->tp_handler->migrate_task_event(sched, &migrate_task_event, machine, sample);
+        if (sched->tp_handler->migrate_task_event) {
+                struct trace_migrate_task_event event = {
+                        .comm = perf_evsel__strval(evsel, sample, "comm"),
+                        .pid  = perf_evsel__intval(evsel, sample, "pid"),
+                        .prio = perf_evsel__intval(evsel, sample, "prio"),
+                        .cpu  = perf_evsel__intval(evsel, sample, "cpu"),
+                };
+                return sched->tp_handler->migrate_task_event(sched, &event, machine, sample);
+        }
 
-        return err;
+        return 0;
 }
 
 typedef int (*tracepoint_handler)(struct perf_tool *tool,
-                                  struct event_format *tp_format,
+                                  struct perf_evsel *evsel,
                                   struct perf_sample *sample,
                                   struct machine *machine);
 
@@ -1592,7 +1523,7 @@ static int perf_sched__process_tracepoint_sample(struct perf_tool *tool __maybe_
 
         if (evsel->handler.func != NULL) {
                 tracepoint_handler f = evsel->handler.func;
-                err = f(tool, evsel->tp_format, sample, machine);
+                err = f(tool, evsel, sample, machine);
         }
 
         return err;