author    Arnaldo Carvalho de Melo <acme@redhat.com>  2009-12-13 16:50:28 -0500
committer Ingo Molnar <mingo@elte.hu>                 2009-12-14 10:57:16 -0500
commit    b3165f414416a717f72a376720564012af5a2e01 (patch)
tree      b066e4ae00b7d4bdb7386f4054e6e3ace0b976c3 /tools/perf/builtin-sched.c
parent    ec913369733923dbfd6bdff5953a918107059701 (diff)
perf session: Move the global threads list to perf_session
So that we can process two perf.data files. We still need to add an
O_MMAP mode for perf_session so that we can do all the mmap stuff in it.

Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
Cc: Frédéric Weisbecker <fweisbec@gmail.com>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Paul Mackerras <paulus@samba.org>
LKML-Reference: <1260741029-4430-5-git-send-email-acme@infradead.org>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
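For readers skimming the diff below, here is a minimal stand-alone sketch of the
pattern the patch establishes: each perf_session owns its thread table, and
lookups go through perf_session__findnew(session, pid) instead of the old global
threads__findnew(pid), so two perf.data files can be processed in one run without
their thread lists colliding. The struct layouts and the list-based lookup here
are simplified assumptions, not the real perf internals; only the function name
perf_session__findnew mirrors the patch.

/*
 * Hypothetical, simplified sketch -- not the real perf code.  The linked
 * list stands in for perf's rbtree + list bookkeeping; the point is only
 * that the thread table hangs off the session, not off a global.
 */
#include <stdio.h>
#include <stdlib.h>
#include <sys/types.h>

struct thread {
	pid_t		 pid;
	struct thread	*next;
};

struct perf_session {
	const char	*filename;	/* e.g. "perf.data" */
	struct thread	*threads;	/* per-session, no longer a global */
};

/* Look pid up in *this* session's table; create the thread if it is new. */
static struct thread *perf_session__findnew(struct perf_session *self, pid_t pid)
{
	struct thread *th;

	for (th = self->threads; th != NULL; th = th->next)
		if (th->pid == pid)
			return th;

	th = malloc(sizeof(*th));
	if (th == NULL)
		return NULL;

	th->pid	      = pid;
	th->next      = self->threads;
	self->threads = th;
	return th;
}

int main(void)
{
	/* Two sessions in one run, e.g. a baseline and a new perf.data. */
	struct perf_session old_session = { .filename = "perf.data.old", .threads = NULL };
	struct perf_session new_session = { .filename = "perf.data",     .threads = NULL };

	/* The same pid resolves to a distinct thread object per session. */
	struct thread *a = perf_session__findnew(&old_session, 1234);
	struct thread *b = perf_session__findnew(&new_session, 1234);

	printf("pid 1234 -> %p in %s, %p in %s\n",
	       (void *)a, old_session.filename, (void *)b, new_session.filename);
	return 0;
}

With a global list, the second lookup would have returned the same object; keeping
the table inside the session is what makes processing two files, as the commit
message describes, possible.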
Diffstat (limited to 'tools/perf/builtin-sched.c')
-rw-r--r--  tools/perf/builtin-sched.c  68
1 file changed, 41 insertions(+), 27 deletions(-)
diff --git a/tools/perf/builtin-sched.c b/tools/perf/builtin-sched.c
index 1e4e508339a8..8d58d9e07a7b 100644
--- a/tools/perf/builtin-sched.c
+++ b/tools/perf/builtin-sched.c
@@ -730,18 +730,21 @@ struct trace_migrate_task_event {
 
 struct trace_sched_handler {
 	void (*switch_event)(struct trace_switch_event *,
+			     struct perf_session *,
 			     struct event *,
 			     int cpu,
 			     u64 timestamp,
 			     struct thread *thread);
 
 	void (*runtime_event)(struct trace_runtime_event *,
+			      struct perf_session *,
 			      struct event *,
 			      int cpu,
 			      u64 timestamp,
 			      struct thread *thread);
 
 	void (*wakeup_event)(struct trace_wakeup_event *,
+			     struct perf_session *,
 			     struct event *,
 			     int cpu,
 			     u64 timestamp,
@@ -754,6 +757,7 @@ struct trace_sched_handler {
 			   struct thread *thread);
 
 	void (*migrate_task_event)(struct trace_migrate_task_event *,
+				    struct perf_session *session,
 				    struct event *,
 				    int cpu,
 				    u64 timestamp,
@@ -763,6 +767,7 @@ struct trace_sched_handler {
 
 static void
 replay_wakeup_event(struct trace_wakeup_event *wakeup_event,
+		    struct perf_session *session __used,
 		    struct event *event,
 		    int cpu __used,
 		    u64 timestamp __used,
@@ -789,6 +794,7 @@ static u64 cpu_last_switched[MAX_CPUS];
 
 static void
 replay_switch_event(struct trace_switch_event *switch_event,
+		    struct perf_session *session __used,
 		    struct event *event,
 		    int cpu,
 		    u64 timestamp,
@@ -1022,6 +1028,7 @@ add_sched_in_event(struct work_atoms *atoms, u64 timestamp)
 
 static void
 latency_switch_event(struct trace_switch_event *switch_event,
+		     struct perf_session *session,
 		     struct event *event __used,
 		     int cpu,
 		     u64 timestamp,
@@ -1045,8 +1052,8 @@ latency_switch_event(struct trace_switch_event *switch_event,
 		die("hm, delta: %Ld < 0 ?\n", delta);
 
 
-	sched_out = threads__findnew(switch_event->prev_pid);
-	sched_in = threads__findnew(switch_event->next_pid);
+	sched_out = perf_session__findnew(session, switch_event->prev_pid);
+	sched_in = perf_session__findnew(session, switch_event->next_pid);
 
 	out_events = thread_atoms_search(&atom_root, sched_out, &cmp_pid);
 	if (!out_events) {
@@ -1074,12 +1081,13 @@ latency_switch_event(struct trace_switch_event *switch_event,
 
 static void
 latency_runtime_event(struct trace_runtime_event *runtime_event,
+		      struct perf_session *session,
 		      struct event *event __used,
 		      int cpu,
 		      u64 timestamp,
 		      struct thread *this_thread __used)
 {
-	struct thread *thread = threads__findnew(runtime_event->pid);
+	struct thread *thread = perf_session__findnew(session, runtime_event->pid);
 	struct work_atoms *atoms = thread_atoms_search(&atom_root, thread, &cmp_pid);
 
 	BUG_ON(cpu >= MAX_CPUS || cpu < 0);
@@ -1096,6 +1104,7 @@ latency_runtime_event(struct trace_runtime_event *runtime_event,
 
 static void
 latency_wakeup_event(struct trace_wakeup_event *wakeup_event,
+		     struct perf_session *session,
 		     struct event *__event __used,
 		     int cpu __used,
 		     u64 timestamp,
@@ -1109,7 +1118,7 @@ latency_wakeup_event(struct trace_wakeup_event *wakeup_event,
 	if (!wakeup_event->success)
 		return;
 
-	wakee = threads__findnew(wakeup_event->pid);
+	wakee = perf_session__findnew(session, wakeup_event->pid);
 	atoms = thread_atoms_search(&atom_root, wakee, &cmp_pid);
 	if (!atoms) {
 		thread_atoms_insert(wakee);
@@ -1143,6 +1152,7 @@ latency_wakeup_event(struct trace_wakeup_event *wakeup_event,
 
 static void
 latency_migrate_task_event(struct trace_migrate_task_event *migrate_task_event,
+			   struct perf_session *session,
 			   struct event *__event __used,
 			   int cpu __used,
 			   u64 timestamp,
@@ -1158,7 +1168,7 @@ latency_migrate_task_event(struct trace_migrate_task_event *migrate_task_event,
 	if (profile_cpu == -1)
 		return;
 
-	migrant = threads__findnew(migrate_task_event->pid);
+	migrant = perf_session__findnew(session, migrate_task_event->pid);
 	atoms = thread_atoms_search(&atom_root, migrant, &cmp_pid);
 	if (!atoms) {
 		thread_atoms_insert(migrant);
@@ -1353,7 +1363,7 @@ static void sort_lat(void)
 static struct trace_sched_handler *trace_handler;
 
 static void
-process_sched_wakeup_event(void *data,
+process_sched_wakeup_event(void *data, struct perf_session *session,
 			   struct event *event,
 			   int cpu __used,
 			   u64 timestamp __used,
@@ -1370,7 +1380,8 @@ process_sched_wakeup_event(void *data,
 	FILL_FIELD(wakeup_event, cpu, event, data);
 
 	if (trace_handler->wakeup_event)
-		trace_handler->wakeup_event(&wakeup_event, event, cpu, timestamp, thread);
+		trace_handler->wakeup_event(&wakeup_event, session, event,
+					    cpu, timestamp, thread);
 }
 
 /*
@@ -1388,6 +1399,7 @@ static char next_shortname2 = '0';
 
 static void
 map_switch_event(struct trace_switch_event *switch_event,
+		 struct perf_session *session,
 		 struct event *event __used,
 		 int this_cpu,
 		 u64 timestamp,
@@ -1415,8 +1427,8 @@ map_switch_event(struct trace_switch_event *switch_event,
 		die("hm, delta: %Ld < 0 ?\n", delta);
 
 
-	sched_out = threads__findnew(switch_event->prev_pid);
-	sched_in = threads__findnew(switch_event->next_pid);
+	sched_out = perf_session__findnew(session, switch_event->prev_pid);
+	sched_in = perf_session__findnew(session, switch_event->next_pid);
 
 	curr_thread[this_cpu] = sched_in;
 
@@ -1466,7 +1478,7 @@ map_switch_event(struct trace_switch_event *switch_event,
 
 
 static void
-process_sched_switch_event(void *data,
+process_sched_switch_event(void *data, struct perf_session *session,
 			   struct event *event,
 			   int this_cpu,
 			   u64 timestamp __used,
@@ -1493,13 +1505,14 @@ process_sched_switch_event(void *data,
 			nr_context_switch_bugs++;
 	}
 	if (trace_handler->switch_event)
-		trace_handler->switch_event(&switch_event, event, this_cpu, timestamp, thread);
+		trace_handler->switch_event(&switch_event, session, event,
+					    this_cpu, timestamp, thread);
 
 	curr_pid[this_cpu] = switch_event.next_pid;
 }
 
 static void
-process_sched_runtime_event(void *data,
+process_sched_runtime_event(void *data, struct perf_session *session,
 			    struct event *event,
 			    int cpu __used,
 			    u64 timestamp __used,
@@ -1513,7 +1526,7 @@ process_sched_runtime_event(void *data,
 	FILL_FIELD(runtime_event, vruntime, event, data);
 
 	if (trace_handler->runtime_event)
-		trace_handler->runtime_event(&runtime_event, event, cpu, timestamp, thread);
+		trace_handler->runtime_event(&runtime_event, session, event, cpu, timestamp, thread);
 }
 
 static void
@@ -1533,7 +1546,8 @@ process_sched_fork_event(void *data,
 	FILL_FIELD(fork_event, child_pid, event, data);
 
 	if (trace_handler->fork_event)
-		trace_handler->fork_event(&fork_event, event, cpu, timestamp, thread);
+		trace_handler->fork_event(&fork_event, event,
+					  cpu, timestamp, thread);
 }
 
 static void
@@ -1547,7 +1561,7 @@ process_sched_exit_event(struct event *event,
 }
 
 static void
-process_sched_migrate_task_event(void *data,
+process_sched_migrate_task_event(void *data, struct perf_session *session,
 				 struct event *event,
 				 int cpu __used,
 				 u64 timestamp __used,
@@ -1563,12 +1577,13 @@ process_sched_migrate_task_event(void *data,
 	FILL_FIELD(migrate_task_event, cpu, event, data);
 
 	if (trace_handler->migrate_task_event)
-		trace_handler->migrate_task_event(&migrate_task_event, event, cpu, timestamp, thread);
+		trace_handler->migrate_task_event(&migrate_task_event, session,
+						  event, cpu, timestamp, thread);
 }
 
 static void
-process_raw_event(event_t *raw_event __used, void *data,
-		  int cpu, u64 timestamp, struct thread *thread)
+process_raw_event(event_t *raw_event __used, struct perf_session *session,
+		  void *data, int cpu, u64 timestamp, struct thread *thread)
 {
 	struct event *event;
 	int type;
@@ -1578,23 +1593,22 @@ process_raw_event(event_t *raw_event __used, void *data,
 	event = trace_find_event(type);
 
 	if (!strcmp(event->name, "sched_switch"))
-		process_sched_switch_event(data, event, cpu, timestamp, thread);
+		process_sched_switch_event(data, session, event, cpu, timestamp, thread);
 	if (!strcmp(event->name, "sched_stat_runtime"))
-		process_sched_runtime_event(data, event, cpu, timestamp, thread);
+		process_sched_runtime_event(data, session, event, cpu, timestamp, thread);
 	if (!strcmp(event->name, "sched_wakeup"))
-		process_sched_wakeup_event(data, event, cpu, timestamp, thread);
+		process_sched_wakeup_event(data, session, event, cpu, timestamp, thread);
 	if (!strcmp(event->name, "sched_wakeup_new"))
-		process_sched_wakeup_event(data, event, cpu, timestamp, thread);
+		process_sched_wakeup_event(data, session, event, cpu, timestamp, thread);
 	if (!strcmp(event->name, "sched_process_fork"))
 		process_sched_fork_event(data, event, cpu, timestamp, thread);
 	if (!strcmp(event->name, "sched_process_exit"))
 		process_sched_exit_event(event, cpu, timestamp, thread);
 	if (!strcmp(event->name, "sched_migrate_task"))
-		process_sched_migrate_task_event(data, event, cpu, timestamp, thread);
+		process_sched_migrate_task_event(data, session, event, cpu, timestamp, thread);
 }
 
-static int process_sample_event(event_t *event,
-				struct perf_session *session __used)
+static int process_sample_event(event_t *event, struct perf_session *session)
 {
 	struct sample_data data;
 	struct thread *thread;
@@ -1615,7 +1629,7 @@ static int process_sample_event(event_t *event,
 		    (void *)(long)data.ip,
 		    (long long)data.period);
 
-	thread = threads__findnew(data.pid);
+	thread = perf_session__findnew(session, data.pid);
 	if (thread == NULL) {
 		pr_debug("problem processing %d event, skipping it.\n",
 			 event->header.type);
@@ -1627,7 +1641,7 @@ static int process_sample_event(event_t *event,
 	if (profile_cpu != -1 && profile_cpu != (int)data.cpu)
 		return 0;
 
-	process_raw_event(event, data.raw_data, data.cpu, data.time, thread);
+	process_raw_event(event, session, data.raw_data, data.cpu, data.time, thread);
 
 	return 0;
 }