author    Arnaldo Carvalho de Melo <acme@redhat.com>  2011-11-28 04:56:39 -0500
committer Arnaldo Carvalho de Melo <acme@redhat.com>  2011-11-28 07:39:12 -0500
commit    743eb868657bdb1b26c7b24077ca21c67c82c777 (patch)
tree      4803b557725213043ccd5d3f83d2eec796a49f69 /tools/perf/builtin-sched.c
parent    d20deb64e0490ee9442b5181bc08a62d2cadcb90 (diff)
perf tools: Resolve machine earlier and pass it to perf_event_ops
Reducing the exposure of perf_session further, so that we can use the
classes in cases where no perf.data file is created.

Cc: David Ahern <dsahern@gmail.com>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Stephane Eranian <eranian@google.com>
Link: http://lkml.kernel.org/n/tip-stua66dcscsezzrcdugvbmvd@git.kernel.org
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
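The change below repeats one pattern for every tracepoint handler: the caller resolves the struct machine once and passes it down, so handlers call machine__findnew_thread() directly instead of going through perf_session__findnew(). The following is a minimal, self-contained sketch of that shape only; struct machine and struct thread here are toy stand-ins for illustration, not the real definitions in tools/perf/util/.

/*
 * Toy stand-ins to illustrate the shape of the change; the real
 * struct machine / struct thread and machine__findnew_thread() live
 * in tools/perf/util/ and are considerably richer.
 */
#include <stdio.h>

struct thread  { int pid; };
struct machine { struct thread last; };	/* not the real rb-tree of threads */

static struct thread *machine__findnew_thread(struct machine *machine, int pid)
{
	machine->last.pid = pid;	/* stand-in for lookup-or-create */
	return &machine->last;
}

/*
 * New-style handler: it receives the already-resolved machine, not the
 * perf_session, so it can run even when no perf.data file exists.
 */
static void handle_wakeup(struct machine *machine, int pid)
{
	struct thread *wakee = machine__findnew_thread(machine, pid);
	printf("woke up pid %d\n", wakee->pid);
}

int main(void)
{
	struct machine m = { { 0 } };
	handle_wakeup(&m, 42);
	return 0;
}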
Diffstat (limited to 'tools/perf/builtin-sched.c')
-rw-r--r--  tools/perf/builtin-sched.c  70
1 file changed, 35 insertions(+), 35 deletions(-)
diff --git a/tools/perf/builtin-sched.c b/tools/perf/builtin-sched.c
index b11d6283fedf..6a771f822e5d 100644
--- a/tools/perf/builtin-sched.c
+++ b/tools/perf/builtin-sched.c
@@ -724,21 +724,21 @@ struct trace_migrate_task_event {
 
 struct trace_sched_handler {
 	void (*switch_event)(struct trace_switch_event *,
-			     struct perf_session *,
+			     struct machine *,
 			     struct event *,
 			     int cpu,
 			     u64 timestamp,
 			     struct thread *thread);
 
 	void (*runtime_event)(struct trace_runtime_event *,
-			      struct perf_session *,
+			      struct machine *,
 			      struct event *,
 			      int cpu,
 			      u64 timestamp,
 			      struct thread *thread);
 
 	void (*wakeup_event)(struct trace_wakeup_event *,
-			     struct perf_session *,
+			     struct machine *,
 			     struct event *,
 			     int cpu,
 			     u64 timestamp,
@@ -751,7 +751,7 @@ struct trace_sched_handler {
 			     struct thread *thread);
 
 	void (*migrate_task_event)(struct trace_migrate_task_event *,
-				    struct perf_session *session,
+				    struct machine *machine,
 				    struct event *,
 				    int cpu,
 				    u64 timestamp,
@@ -761,7 +761,7 @@ struct trace_sched_handler {
 
 static void
 replay_wakeup_event(struct trace_wakeup_event *wakeup_event,
-		    struct perf_session *session __used,
+		    struct machine *machine __used,
 		    struct event *event,
 		    int cpu __used,
 		    u64 timestamp __used,
@@ -788,7 +788,7 @@ static u64 cpu_last_switched[MAX_CPUS];
 
 static void
 replay_switch_event(struct trace_switch_event *switch_event,
-		    struct perf_session *session __used,
+		    struct machine *machine __used,
 		    struct event *event,
 		    int cpu,
 		    u64 timestamp,
@@ -1022,7 +1022,7 @@ add_sched_in_event(struct work_atoms *atoms, u64 timestamp)
 
 static void
 latency_switch_event(struct trace_switch_event *switch_event,
-		     struct perf_session *session,
+		     struct machine *machine,
 		     struct event *event __used,
 		     int cpu,
 		     u64 timestamp,
@@ -1046,8 +1046,8 @@ latency_switch_event(struct trace_switch_event *switch_event,
 		die("hm, delta: %" PRIu64 " < 0 ?\n", delta);
 
 
-	sched_out = perf_session__findnew(session, switch_event->prev_pid);
-	sched_in = perf_session__findnew(session, switch_event->next_pid);
+	sched_out = machine__findnew_thread(machine, switch_event->prev_pid);
+	sched_in = machine__findnew_thread(machine, switch_event->next_pid);
 
 	out_events = thread_atoms_search(&atom_root, sched_out, &cmp_pid);
 	if (!out_events) {
@@ -1075,13 +1075,13 @@ latency_switch_event(struct trace_switch_event *switch_event,
 
 static void
 latency_runtime_event(struct trace_runtime_event *runtime_event,
-		      struct perf_session *session,
+		      struct machine *machine,
 		      struct event *event __used,
 		      int cpu,
 		      u64 timestamp,
 		      struct thread *this_thread __used)
 {
-	struct thread *thread = perf_session__findnew(session, runtime_event->pid);
+	struct thread *thread = machine__findnew_thread(machine, runtime_event->pid);
 	struct work_atoms *atoms = thread_atoms_search(&atom_root, thread, &cmp_pid);
 
 	BUG_ON(cpu >= MAX_CPUS || cpu < 0);
@@ -1098,7 +1098,7 @@ latency_runtime_event(struct trace_runtime_event *runtime_event,
 
 static void
 latency_wakeup_event(struct trace_wakeup_event *wakeup_event,
-		     struct perf_session *session,
+		     struct machine *machine,
 		     struct event *__event __used,
 		     int cpu __used,
 		     u64 timestamp,
@@ -1112,7 +1112,7 @@ latency_wakeup_event(struct trace_wakeup_event *wakeup_event,
 	if (!wakeup_event->success)
 		return;
 
-	wakee = perf_session__findnew(session, wakeup_event->pid);
+	wakee = machine__findnew_thread(machine, wakeup_event->pid);
 	atoms = thread_atoms_search(&atom_root, wakee, &cmp_pid);
 	if (!atoms) {
 		thread_atoms_insert(wakee);
@@ -1146,7 +1146,7 @@ latency_wakeup_event(struct trace_wakeup_event *wakeup_event,
 
 static void
 latency_migrate_task_event(struct trace_migrate_task_event *migrate_task_event,
-			   struct perf_session *session,
+			   struct machine *machine,
 			   struct event *__event __used,
 			   int cpu __used,
 			   u64 timestamp,
@@ -1162,7 +1162,7 @@ latency_migrate_task_event(struct trace_migrate_task_event *migrate_task_event,
 	if (profile_cpu == -1)
 		return;
 
-	migrant = perf_session__findnew(session, migrate_task_event->pid);
+	migrant = machine__findnew_thread(machine, migrate_task_event->pid);
 	atoms = thread_atoms_search(&atom_root, migrant, &cmp_pid);
 	if (!atoms) {
 		thread_atoms_insert(migrant);
@@ -1357,7 +1357,7 @@ static void sort_lat(void)
 static struct trace_sched_handler *trace_handler;
 
 static void
-process_sched_wakeup_event(void *data, struct perf_session *session,
+process_sched_wakeup_event(void *data, struct machine *machine,
 			   struct event *event,
 			   int cpu __used,
 			   u64 timestamp __used,
@@ -1374,7 +1374,7 @@ process_sched_wakeup_event(void *data, struct perf_session *session,
 	FILL_FIELD(wakeup_event, cpu, event, data);
 
 	if (trace_handler->wakeup_event)
-		trace_handler->wakeup_event(&wakeup_event, session, event,
+		trace_handler->wakeup_event(&wakeup_event, machine, event,
 					    cpu, timestamp, thread);
 }
 
@@ -1393,7 +1393,7 @@ static char next_shortname2 = '0';
 
 static void
 map_switch_event(struct trace_switch_event *switch_event,
-		 struct perf_session *session,
+		 struct machine *machine,
 		 struct event *event __used,
 		 int this_cpu,
 		 u64 timestamp,
@@ -1421,8 +1421,8 @@ map_switch_event(struct trace_switch_event *switch_event,
 		die("hm, delta: %" PRIu64 " < 0 ?\n", delta);
 
 
-	sched_out = perf_session__findnew(session, switch_event->prev_pid);
-	sched_in = perf_session__findnew(session, switch_event->next_pid);
+	sched_out = machine__findnew_thread(machine, switch_event->prev_pid);
+	sched_in = machine__findnew_thread(machine, switch_event->next_pid);
 
 	curr_thread[this_cpu] = sched_in;
 
@@ -1472,7 +1472,7 @@ map_switch_event(struct trace_switch_event *switch_event,
 
 
 static void
-process_sched_switch_event(void *data, struct perf_session *session,
+process_sched_switch_event(void *data, struct machine *machine,
 			   struct event *event,
 			   int this_cpu,
 			   u64 timestamp __used,
@@ -1499,14 +1499,14 @@ process_sched_switch_event(void *data, struct perf_session *session,
 			nr_context_switch_bugs++;
 	}
 	if (trace_handler->switch_event)
-		trace_handler->switch_event(&switch_event, session, event,
+		trace_handler->switch_event(&switch_event, machine, event,
 					    this_cpu, timestamp, thread);
 
 	curr_pid[this_cpu] = switch_event.next_pid;
 }
 
 static void
-process_sched_runtime_event(void *data, struct perf_session *session,
+process_sched_runtime_event(void *data, struct machine *machine,
 			    struct event *event,
 			    int cpu __used,
 			    u64 timestamp __used,
@@ -1520,7 +1520,7 @@ process_sched_runtime_event(void *data, struct perf_session *session,
 	FILL_FIELD(runtime_event, vruntime, event, data);
 
 	if (trace_handler->runtime_event)
-		trace_handler->runtime_event(&runtime_event, session, event, cpu, timestamp, thread);
+		trace_handler->runtime_event(&runtime_event, machine, event, cpu, timestamp, thread);
 }
 
 static void
@@ -1555,7 +1555,7 @@ process_sched_exit_event(struct event *event,
 }
 
 static void
-process_sched_migrate_task_event(void *data, struct perf_session *session,
+process_sched_migrate_task_event(void *data, struct machine *machine,
 				 struct event *event,
 				 int cpu __used,
 				 u64 timestamp __used,
@@ -1571,12 +1571,12 @@ process_sched_migrate_task_event(void *data, struct perf_session *session,
 	FILL_FIELD(migrate_task_event, cpu, event, data);
 
 	if (trace_handler->migrate_task_event)
-		trace_handler->migrate_task_event(&migrate_task_event, session,
+		trace_handler->migrate_task_event(&migrate_task_event, machine,
 						  event, cpu, timestamp, thread);
 }
 
 static void process_raw_event(union perf_event *raw_event __used,
-			      struct perf_session *session, void *data, int cpu,
+			      struct machine *machine, void *data, int cpu,
 			      u64 timestamp, struct thread *thread)
 {
 	struct event *event;
@@ -1587,33 +1587,33 @@ static void process_raw_event(union perf_event *raw_event __used,
 	event = trace_find_event(type);
 
 	if (!strcmp(event->name, "sched_switch"))
-		process_sched_switch_event(data, session, event, cpu, timestamp, thread);
+		process_sched_switch_event(data, machine, event, cpu, timestamp, thread);
 	if (!strcmp(event->name, "sched_stat_runtime"))
-		process_sched_runtime_event(data, session, event, cpu, timestamp, thread);
+		process_sched_runtime_event(data, machine, event, cpu, timestamp, thread);
 	if (!strcmp(event->name, "sched_wakeup"))
-		process_sched_wakeup_event(data, session, event, cpu, timestamp, thread);
+		process_sched_wakeup_event(data, machine, event, cpu, timestamp, thread);
 	if (!strcmp(event->name, "sched_wakeup_new"))
-		process_sched_wakeup_event(data, session, event, cpu, timestamp, thread);
+		process_sched_wakeup_event(data, machine, event, cpu, timestamp, thread);
 	if (!strcmp(event->name, "sched_process_fork"))
 		process_sched_fork_event(data, event, cpu, timestamp, thread);
 	if (!strcmp(event->name, "sched_process_exit"))
 		process_sched_exit_event(event, cpu, timestamp, thread);
 	if (!strcmp(event->name, "sched_migrate_task"))
-		process_sched_migrate_task_event(data, session, event, cpu, timestamp, thread);
+		process_sched_migrate_task_event(data, machine, event, cpu, timestamp, thread);
 }
 
 static int process_sample_event(struct perf_event_ops *ops __used,
 				union perf_event *event,
 				struct perf_sample *sample,
 				struct perf_evsel *evsel,
-				struct perf_session *session)
+				struct machine *machine)
 {
 	struct thread *thread;
 
 	if (!(evsel->attr.sample_type & PERF_SAMPLE_RAW))
 		return 0;
 
-	thread = perf_session__findnew(session, sample->pid);
+	thread = machine__findnew_thread(machine, sample->pid);
 	if (thread == NULL) {
 		pr_debug("problem processing %d event, skipping it.\n",
 			 event->header.type);
@@ -1625,7 +1625,7 @@ static int process_sample_event(struct perf_event_ops *ops __used,
 	if (profile_cpu != -1 && profile_cpu != (int)sample->cpu)
 		return 0;
 
-	process_raw_event(event, session, sample->raw_data, sample->cpu,
+	process_raw_event(event, machine, sample->raw_data, sample->cpu,
 			  sample->time, thread);
 
 	return 0;