 tools/perf/builtin-annotate.c |  6
 tools/perf/builtin-kmem.c     |  4
 tools/perf/builtin-report.c   | 10
 tools/perf/builtin-sched.c    | 68
 tools/perf/builtin-top.c      | 19
 tools/perf/builtin-trace.c    |  4
 tools/perf/util/data_map.c    |  4
 tools/perf/util/event.c       | 18
 tools/perf/util/event.h       |  4
 tools/perf/util/session.c     |  4
 tools/perf/util/session.h     |  5
 tools/perf/util/thread.c      | 22
 tools/perf/util/thread.h      |  4
13 files changed, 98 insertions, 74 deletions
diff --git a/tools/perf/builtin-annotate.c b/tools/perf/builtin-annotate.c
index a931b133f3ac..795f865c1366 100644
--- a/tools/perf/builtin-annotate.c
+++ b/tools/perf/builtin-annotate.c
@@ -131,14 +131,14 @@ static int hist_entry__add(struct addr_location *al, u64 count)
 	return 0;
 }
 
-static int process_sample_event(event_t *event, struct perf_session *session __used)
+static int process_sample_event(event_t *event, struct perf_session *session)
 {
 	struct addr_location al;
 
 	dump_printf("(IP, %d): %d: %p\n", event->header.misc,
 		    event->ip.pid, (void *)(long)event->ip.ip);
 
-	if (event__preprocess_sample(event, &al, symbol_filter) < 0) {
+	if (event__preprocess_sample(event, session, &al, symbol_filter) < 0) {
 		fprintf(stderr, "problem processing %d event, skipping it.\n",
 			event->header.type);
 		return -1;
@@ -479,7 +479,7 @@ static int __cmd_annotate(void)
 	}
 
 	if (verbose > 3)
-		threads__fprintf(stdout);
+		perf_session__fprintf(session, stdout);
 
 	if (verbose > 2)
 		dsos__fprintf(stdout);
diff --git a/tools/perf/builtin-kmem.c b/tools/perf/builtin-kmem.c
index 237155fa756b..de194958fe6e 100644
--- a/tools/perf/builtin-kmem.c
+++ b/tools/perf/builtin-kmem.c
@@ -311,7 +311,7 @@ process_raw_event(event_t *raw_event __used, void *data,
 	}
 }
 
-static int process_sample_event(event_t *event, struct perf_session *session __used)
+static int process_sample_event(event_t *event, struct perf_session *session)
 {
 	struct sample_data data;
 	struct thread *thread;
@@ -329,7 +329,7 @@ static int process_sample_event(event_t *event, struct perf_session *session __u
 		(void *)(long)data.ip,
 		(long long)data.period);
 
-	thread = threads__findnew(event->ip.pid);
+	thread = perf_session__findnew(session, event->ip.pid);
 	if (thread == NULL) {
 		pr_debug("problem processing %d event, skipping it.\n",
 			 event->header.type);
diff --git a/tools/perf/builtin-report.c b/tools/perf/builtin-report.c
index 26b947860948..efa8147b8991 100644
--- a/tools/perf/builtin-report.c
+++ b/tools/perf/builtin-report.c
@@ -600,7 +600,7 @@ static int validate_chain(struct ip_callchain *chain, event_t *event)
 	return 0;
 }
 
-static int process_sample_event(event_t *event, struct perf_session *session __used)
+static int process_sample_event(event_t *event, struct perf_session *session)
 {
 	struct sample_data data;
 	int cpumode;
@@ -636,7 +636,7 @@ static int process_sample_event(event_t *event, struct perf_session *session __u
 		}
 	}
 
-	thread = threads__findnew(data.pid);
+	thread = perf_session__findnew(session, data.pid);
 	if (thread == NULL) {
 		pr_debug("problem processing %d event, skipping it.\n",
 			 event->header.type);
@@ -679,9 +679,9 @@ static int process_sample_event(event_t *event, struct perf_session *session __u
 	return 0;
 }
 
-static int process_comm_event(event_t *event, struct perf_session *session __used)
+static int process_comm_event(event_t *event, struct perf_session *session)
 {
-	struct thread *thread = threads__findnew(event->comm.pid);
+	struct thread *thread = perf_session__findnew(session, event->comm.pid);
 
 	dump_printf(": %s:%d\n", event->comm.comm, event->comm.pid);
 
@@ -780,7 +780,7 @@ static int __cmd_report(void)
 	}
 
 	if (verbose > 3)
-		threads__fprintf(stdout);
+		perf_session__fprintf(session, stdout);
 
 	if (verbose > 2)
 		dsos__fprintf(stdout);
diff --git a/tools/perf/builtin-sched.c b/tools/perf/builtin-sched.c
index 1e4e508339a8..8d58d9e07a7b 100644
--- a/tools/perf/builtin-sched.c
+++ b/tools/perf/builtin-sched.c
@@ -730,18 +730,21 @@ struct trace_migrate_task_event {
 
 struct trace_sched_handler {
 	void (*switch_event)(struct trace_switch_event *,
+			     struct perf_session *,
 			     struct event *,
 			     int cpu,
 			     u64 timestamp,
 			     struct thread *thread);
 
 	void (*runtime_event)(struct trace_runtime_event *,
+			      struct perf_session *,
 			      struct event *,
 			      int cpu,
 			      u64 timestamp,
 			      struct thread *thread);
 
 	void (*wakeup_event)(struct trace_wakeup_event *,
+			     struct perf_session *,
 			     struct event *,
 			     int cpu,
 			     u64 timestamp,
@@ -754,6 +757,7 @@ struct trace_sched_handler {
 			   struct thread *thread);
 
 	void (*migrate_task_event)(struct trace_migrate_task_event *,
+				    struct perf_session *session,
 				    struct event *,
 				    int cpu,
 				    u64 timestamp,
@@ -763,6 +767,7 @@ struct trace_sched_handler {
 
 static void
 replay_wakeup_event(struct trace_wakeup_event *wakeup_event,
+		    struct perf_session *session __used,
 		    struct event *event,
 		    int cpu __used,
 		    u64 timestamp __used,
@@ -789,6 +794,7 @@ static u64 cpu_last_switched[MAX_CPUS];
 
 static void
 replay_switch_event(struct trace_switch_event *switch_event,
+		    struct perf_session *session __used,
 		    struct event *event,
 		    int cpu,
 		    u64 timestamp,
@@ -1022,6 +1028,7 @@ add_sched_in_event(struct work_atoms *atoms, u64 timestamp)
 
 static void
 latency_switch_event(struct trace_switch_event *switch_event,
+		     struct perf_session *session,
 		     struct event *event __used,
 		     int cpu,
 		     u64 timestamp,
@@ -1045,8 +1052,8 @@ latency_switch_event(struct trace_switch_event *switch_event,
 		die("hm, delta: %Ld < 0 ?\n", delta);
 
 
-	sched_out = threads__findnew(switch_event->prev_pid);
-	sched_in = threads__findnew(switch_event->next_pid);
+	sched_out = perf_session__findnew(session, switch_event->prev_pid);
+	sched_in = perf_session__findnew(session, switch_event->next_pid);
 
 	out_events = thread_atoms_search(&atom_root, sched_out, &cmp_pid);
 	if (!out_events) {
@@ -1074,12 +1081,13 @@ latency_switch_event(struct trace_switch_event *switch_event,
 
 static void
 latency_runtime_event(struct trace_runtime_event *runtime_event,
+		      struct perf_session *session,
 		      struct event *event __used,
 		      int cpu,
 		      u64 timestamp,
 		      struct thread *this_thread __used)
 {
-	struct thread *thread = threads__findnew(runtime_event->pid);
+	struct thread *thread = perf_session__findnew(session, runtime_event->pid);
 	struct work_atoms *atoms = thread_atoms_search(&atom_root, thread, &cmp_pid);
 
 	BUG_ON(cpu >= MAX_CPUS || cpu < 0);
@@ -1096,6 +1104,7 @@ latency_runtime_event(struct trace_runtime_event *runtime_event,
 
 static void
 latency_wakeup_event(struct trace_wakeup_event *wakeup_event,
+		     struct perf_session *session,
 		     struct event *__event __used,
 		     int cpu __used,
 		     u64 timestamp,
@@ -1109,7 +1118,7 @@ latency_wakeup_event(struct trace_wakeup_event *wakeup_event,
 	if (!wakeup_event->success)
 		return;
 
-	wakee = threads__findnew(wakeup_event->pid);
+	wakee = perf_session__findnew(session, wakeup_event->pid);
 	atoms = thread_atoms_search(&atom_root, wakee, &cmp_pid);
 	if (!atoms) {
 		thread_atoms_insert(wakee);
@@ -1143,6 +1152,7 @@ latency_wakeup_event(struct trace_wakeup_event *wakeup_event,
 
 static void
 latency_migrate_task_event(struct trace_migrate_task_event *migrate_task_event,
+			   struct perf_session *session,
 			   struct event *__event __used,
 			   int cpu __used,
 			   u64 timestamp,
@@ -1158,7 +1168,7 @@ latency_migrate_task_event(struct trace_migrate_task_event *migrate_task_event,
 	if (profile_cpu == -1)
 		return;
 
-	migrant = threads__findnew(migrate_task_event->pid);
+	migrant = perf_session__findnew(session, migrate_task_event->pid);
 	atoms = thread_atoms_search(&atom_root, migrant, &cmp_pid);
 	if (!atoms) {
 		thread_atoms_insert(migrant);
@@ -1353,7 +1363,7 @@ static void sort_lat(void)
 static struct trace_sched_handler *trace_handler;
 
 static void
-process_sched_wakeup_event(void *data,
+process_sched_wakeup_event(void *data, struct perf_session *session,
 			   struct event *event,
 			   int cpu __used,
 			   u64 timestamp __used,
@@ -1370,7 +1380,8 @@ process_sched_wakeup_event(void *data,
 	FILL_FIELD(wakeup_event, cpu, event, data);
 
 	if (trace_handler->wakeup_event)
-		trace_handler->wakeup_event(&wakeup_event, event, cpu, timestamp, thread);
+		trace_handler->wakeup_event(&wakeup_event, session, event,
+					    cpu, timestamp, thread);
 }
 
 /*
@@ -1388,6 +1399,7 @@ static char next_shortname2 = '0';
 
 static void
 map_switch_event(struct trace_switch_event *switch_event,
+		 struct perf_session *session,
 		 struct event *event __used,
 		 int this_cpu,
 		 u64 timestamp,
@@ -1415,8 +1427,8 @@ map_switch_event(struct trace_switch_event *switch_event,
 		die("hm, delta: %Ld < 0 ?\n", delta);
 
 
-	sched_out = threads__findnew(switch_event->prev_pid);
-	sched_in = threads__findnew(switch_event->next_pid);
+	sched_out = perf_session__findnew(session, switch_event->prev_pid);
+	sched_in = perf_session__findnew(session, switch_event->next_pid);
 
 	curr_thread[this_cpu] = sched_in;
 
@@ -1466,7 +1478,7 @@ map_switch_event(struct trace_switch_event *switch_event,
 
 
 static void
-process_sched_switch_event(void *data,
+process_sched_switch_event(void *data, struct perf_session *session,
 			   struct event *event,
 			   int this_cpu,
 			   u64 timestamp __used,
@@ -1493,13 +1505,14 @@ process_sched_switch_event(void *data,
 			nr_context_switch_bugs++;
 	}
 	if (trace_handler->switch_event)
-		trace_handler->switch_event(&switch_event, event, this_cpu, timestamp, thread);
+		trace_handler->switch_event(&switch_event, session, event,
+					    this_cpu, timestamp, thread);
 
 	curr_pid[this_cpu] = switch_event.next_pid;
 }
 
 static void
-process_sched_runtime_event(void *data,
+process_sched_runtime_event(void *data, struct perf_session *session,
 			    struct event *event,
 			    int cpu __used,
 			    u64 timestamp __used,
@@ -1513,7 +1526,7 @@ process_sched_runtime_event(void *data,
 	FILL_FIELD(runtime_event, vruntime, event, data);
 
 	if (trace_handler->runtime_event)
-		trace_handler->runtime_event(&runtime_event, event, cpu, timestamp, thread);
+		trace_handler->runtime_event(&runtime_event, session, event, cpu, timestamp, thread);
 }
 
 static void
@@ -1533,7 +1546,8 @@ process_sched_fork_event(void *data,
 	FILL_FIELD(fork_event, child_pid, event, data);
 
 	if (trace_handler->fork_event)
-		trace_handler->fork_event(&fork_event, event, cpu, timestamp, thread);
+		trace_handler->fork_event(&fork_event, event,
+					  cpu, timestamp, thread);
 }
 
 static void
@@ -1547,7 +1561,7 @@ process_sched_exit_event(struct event *event,
 }
 
 static void
-process_sched_migrate_task_event(void *data,
+process_sched_migrate_task_event(void *data, struct perf_session *session,
 				 struct event *event,
 				 int cpu __used,
 				 u64 timestamp __used,
@@ -1563,12 +1577,13 @@ process_sched_migrate_task_event(void *data,
 	FILL_FIELD(migrate_task_event, cpu, event, data);
 
 	if (trace_handler->migrate_task_event)
-		trace_handler->migrate_task_event(&migrate_task_event, event, cpu, timestamp, thread);
+		trace_handler->migrate_task_event(&migrate_task_event, session,
+						  event, cpu, timestamp, thread);
 }
 
 static void
-process_raw_event(event_t *raw_event __used, void *data,
-		  int cpu, u64 timestamp, struct thread *thread)
+process_raw_event(event_t *raw_event __used, struct perf_session *session,
+		  void *data, int cpu, u64 timestamp, struct thread *thread)
 {
 	struct event *event;
 	int type;
@@ -1578,23 +1593,22 @@ process_raw_event(event_t *raw_event __used, void *data,
 	event = trace_find_event(type);
 
 	if (!strcmp(event->name, "sched_switch"))
-		process_sched_switch_event(data, event, cpu, timestamp, thread);
+		process_sched_switch_event(data, session, event, cpu, timestamp, thread);
 	if (!strcmp(event->name, "sched_stat_runtime"))
-		process_sched_runtime_event(data, event, cpu, timestamp, thread);
+		process_sched_runtime_event(data, session, event, cpu, timestamp, thread);
 	if (!strcmp(event->name, "sched_wakeup"))
-		process_sched_wakeup_event(data, event, cpu, timestamp, thread);
+		process_sched_wakeup_event(data, session, event, cpu, timestamp, thread);
 	if (!strcmp(event->name, "sched_wakeup_new"))
-		process_sched_wakeup_event(data, event, cpu, timestamp, thread);
+		process_sched_wakeup_event(data, session, event, cpu, timestamp, thread);
 	if (!strcmp(event->name, "sched_process_fork"))
 		process_sched_fork_event(data, event, cpu, timestamp, thread);
 	if (!strcmp(event->name, "sched_process_exit"))
 		process_sched_exit_event(event, cpu, timestamp, thread);
 	if (!strcmp(event->name, "sched_migrate_task"))
-		process_sched_migrate_task_event(data, event, cpu, timestamp, thread);
+		process_sched_migrate_task_event(data, session, event, cpu, timestamp, thread);
 }
 
-static int process_sample_event(event_t *event,
-				struct perf_session *session __used)
+static int process_sample_event(event_t *event, struct perf_session *session)
 {
 	struct sample_data data;
 	struct thread *thread;
@@ -1615,7 +1629,7 @@ static int process_sample_event(event_t *event,
 		(void *)(long)data.ip,
 		(long long)data.period);
 
-	thread = threads__findnew(data.pid);
+	thread = perf_session__findnew(session, data.pid);
 	if (thread == NULL) {
 		pr_debug("problem processing %d event, skipping it.\n",
 			 event->header.type);
@@ -1627,7 +1641,7 @@ static int process_sample_event(event_t *event,
 	if (profile_cpu != -1 && profile_cpu != (int)data.cpu)
 		return 0;
 
-	process_raw_event(event, data.raw_data, data.cpu, data.time, thread);
+	process_raw_event(event, session, data.raw_data, data.cpu, data.time, thread);
 
 	return 0;
 }
diff --git a/tools/perf/builtin-top.c b/tools/perf/builtin-top.c
index b13f42625549..0f7a4da2924c 100644
--- a/tools/perf/builtin-top.c
+++ b/tools/perf/builtin-top.c
@@ -20,8 +20,9 @@
 
 #include "perf.h"
 
-#include "util/symbol.h"
 #include "util/color.h"
+#include "util/session.h"
+#include "util/symbol.h"
 #include "util/thread.h"
 #include "util/util.h"
 #include <linux/rbtree.h>
@@ -926,7 +927,8 @@ static int symbol_filter(struct map *map, struct symbol *sym)
 	return 0;
 }
 
-static void event__process_sample(const event_t *self, int counter)
+static void event__process_sample(const event_t *self,
+				  struct perf_session *session, int counter)
 {
 	u64 ip = self->ip.ip;
 	struct sym_entry *syme;
@@ -946,7 +948,7 @@ static void event__process_sample(const event_t *self, int counter)
 		return;
 	}
 
-	if (event__preprocess_sample(self, &al, symbol_filter) < 0 ||
+	if (event__preprocess_sample(self, session, &al, symbol_filter) < 0 ||
 	    al.sym == NULL)
 		return;
 
@@ -1053,7 +1055,7 @@ static void perf_session__mmap_read_counter(struct perf_session *self,
 		}
 
 		if (event->header.type == PERF_RECORD_SAMPLE)
-			event__process_sample(event, md->counter);
+			event__process_sample(event, self, md->counter);
 		else
 			event__process(event, self);
 		old += size;
@@ -1157,10 +1159,13 @@ static int __cmd_top(void)
 	int i, counter;
 	int ret;
 	/*
-	 * XXX perf_session__new should allow passing a O_MMAP, so that all this
-	 * mmap reading, etc is encapsulated in it.
+	 * FIXME: perf_session__new should allow passing a O_MMAP, so that all this
+	 * mmap reading, etc is encapsulated in it. Use O_WRONLY for now.
 	 */
-	struct perf_session *session = NULL;
+	struct perf_session *session = perf_session__new(NULL, O_WRONLY, false);
+
+	if (session == NULL)
+		return -ENOMEM;
 
 	if (target_pid != -1)
 		event__synthesize_thread(target_pid, event__process, session);
diff --git a/tools/perf/builtin-trace.c b/tools/perf/builtin-trace.c
index b7eb3fcc224e..d76532375054 100644
--- a/tools/perf/builtin-trace.c
+++ b/tools/perf/builtin-trace.c
@@ -63,7 +63,7 @@ static char const *input_name = "perf.data";
 
 static u64 sample_type;
 
-static int process_sample_event(event_t *event, struct perf_session *session __used)
+static int process_sample_event(event_t *event, struct perf_session *session)
 {
 	struct sample_data data;
 	struct thread *thread;
@@ -81,7 +81,7 @@ static int process_sample_event(event_t *event, struct perf_session *session __u
 		(void *)(long)data.ip,
 		(long long)data.period);
 
-	thread = threads__findnew(event->ip.pid);
+	thread = perf_session__findnew(session, event->ip.pid);
 	if (thread == NULL) {
 		pr_debug("problem processing %d event, skipping it.\n",
 			 event->header.type);
diff --git a/tools/perf/util/data_map.c b/tools/perf/util/data_map.c
index ba2eb2ce018a..44dea211cc65 100644
--- a/tools/perf/util/data_map.c
+++ b/tools/perf/util/data_map.c
@@ -125,9 +125,9 @@ out:
 	return err;
 }
 
-static struct thread *perf_session__register_idle_thread(struct perf_session *self __used)
+static struct thread *perf_session__register_idle_thread(struct perf_session *self)
 {
-	struct thread *thread = threads__findnew(0);
+	struct thread *thread = perf_session__findnew(self, 0);
 
 	if (!thread || thread__set_comm(thread, "swapper")) {
 		pr_err("problem inserting idle task.\n");
diff --git a/tools/perf/util/event.c b/tools/perf/util/event.c
index 40d8d842a21f..2d09c29b3a6c 100644
--- a/tools/perf/util/event.c
+++ b/tools/perf/util/event.c
@@ -189,9 +189,9 @@ void event__synthesize_threads(int (*process)(event_t *event,
 
 struct events_stats event__stats;
 
-int event__process_comm(event_t *self, struct perf_session *session __used)
+int event__process_comm(event_t *self, struct perf_session *session)
 {
-	struct thread *thread = threads__findnew(self->comm.pid);
+	struct thread *thread = perf_session__findnew(session, self->comm.pid);
 
 	dump_printf(": %s:%d\n", self->comm.comm, self->comm.pid);
 
@@ -212,7 +212,7 @@ int event__process_lost(event_t *self, struct perf_session *session __used)
 
 int event__process_mmap(event_t *self, struct perf_session *session)
 {
-	struct thread *thread = threads__findnew(self->mmap.pid);
+	struct thread *thread = perf_session__findnew(session, self->mmap.pid);
 	struct map *map = map__new(&self->mmap, MAP__FUNCTION,
 				   session->cwd, session->cwdlen);
 
@@ -231,10 +231,10 @@ int event__process_mmap(event_t *self, struct perf_session *session)
 	return 0;
 }
 
-int event__process_task(event_t *self, struct perf_session *session __used)
+int event__process_task(event_t *self, struct perf_session *session)
 {
-	struct thread *thread = threads__findnew(self->fork.pid);
-	struct thread *parent = threads__findnew(self->fork.ppid);
+	struct thread *thread = perf_session__findnew(session, self->fork.pid);
+	struct thread *parent = perf_session__findnew(session, self->fork.ppid);
 
 	dump_printf("(%d:%d):(%d:%d)\n", self->fork.pid, self->fork.tid,
 		    self->fork.ppid, self->fork.ptid);
@@ -300,11 +300,11 @@ try_again:
 	}
 }
 
-int event__preprocess_sample(const event_t *self, struct addr_location *al,
-			     symbol_filter_t filter)
+int event__preprocess_sample(const event_t *self, struct perf_session *session,
+			     struct addr_location *al, symbol_filter_t filter)
 {
 	u8 cpumode = self->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;
-	struct thread *thread = threads__findnew(self->ip.pid);
+	struct thread *thread = perf_session__findnew(session, self->ip.pid);
 
 	if (thread == NULL)
 		return -1;
diff --git a/tools/perf/util/event.h b/tools/perf/util/event.h
index 6b6429b63da3..bb090257570e 100644
--- a/tools/perf/util/event.h
+++ b/tools/perf/util/event.h
@@ -177,8 +177,8 @@ int event__process_mmap(event_t *self, struct perf_session *session);
 int event__process_task(event_t *self, struct perf_session *session);
 
 struct addr_location;
-int event__preprocess_sample(const event_t *self, struct addr_location *al,
-			     symbol_filter_t filter);
+int event__preprocess_sample(const event_t *self, struct perf_session *session,
+			     struct addr_location *al, symbol_filter_t filter);
 int event__parse_sample(event_t *event, u64 type, struct sample_data *data);
 
 #endif /* __PERF_RECORD_H */
diff --git a/tools/perf/util/session.c b/tools/perf/util/session.c
index 534a8770ee7f..09836a537fc5 100644
--- a/tools/perf/util/session.c
+++ b/tools/perf/util/session.c
@@ -51,7 +51,7 @@ out_close:
 struct perf_session *perf_session__new(const char *filename, int mode,
 				       bool force)
 {
-	size_t len = strlen(filename) + 1;
+	size_t len = filename ? strlen(filename) + 1 : 0;
 	struct perf_session *self = zalloc(sizeof(*self) + len);
 
 	if (self == NULL)
@@ -61,6 +61,8 @@ struct perf_session *perf_session__new(const char *filename, int mode,
 		goto out_delete;
 
 	memcpy(self->filename, filename, len);
+	self->threads = RB_ROOT;
+	self->last_match = NULL;
 	self->mmap_window = 32;
 	self->cwd = NULL;
 	self->cwdlen = 0;
diff --git a/tools/perf/util/session.h b/tools/perf/util/session.h
index 1e0da9ca31aa..1dbef7cdd489 100644
--- a/tools/perf/util/session.h
+++ b/tools/perf/util/session.h
@@ -3,11 +3,16 @@
 
 #include "event.h"
 #include "header.h"
+#include <linux/rbtree.h>
+
+struct thread;
 
 struct perf_session {
 	struct perf_header header;
 	unsigned long size;
 	unsigned long mmap_window;
+	struct rb_root threads;
+	struct thread *last_match;
 	int fd;
 	int cwdlen;
 	char *cwd;
diff --git a/tools/perf/util/thread.c b/tools/perf/util/thread.c
index 5c0ab14f3dba..634b7f7140d5 100644
--- a/tools/perf/util/thread.c
+++ b/tools/perf/util/thread.c
@@ -2,13 +2,11 @@
 #include <stdlib.h>
 #include <stdio.h>
 #include <string.h>
+#include "session.h"
 #include "thread.h"
 #include "util.h"
 #include "debug.h"
 
-static struct rb_root threads;
-static struct thread *last_match;
-
 void map_groups__init(struct map_groups *self)
 {
 	int i;
@@ -122,9 +120,9 @@ static size_t thread__fprintf(struct thread *self, FILE *fp)
 	       map_groups__fprintf(&self->mg, fp);
 }
 
-struct thread *threads__findnew(pid_t pid)
+struct thread *perf_session__findnew(struct perf_session *self, pid_t pid)
 {
-	struct rb_node **p = &threads.rb_node;
+	struct rb_node **p = &self->threads.rb_node;
 	struct rb_node *parent = NULL;
 	struct thread *th;
 
@@ -133,15 +131,15 @@ struct thread *threads__findnew(pid_t pid)
 	 * so most of the time we dont have to look up
	 * the full rbtree:
	 */
-	if (last_match && last_match->pid == pid)
-		return last_match;
+	if (self->last_match && self->last_match->pid == pid)
+		return self->last_match;
 
 	while (*p != NULL) {
 		parent = *p;
 		th = rb_entry(parent, struct thread, rb_node);
 
 		if (th->pid == pid) {
-			last_match = th;
+			self->last_match = th;
 			return th;
 		}
 
@@ -154,8 +152,8 @@ struct thread *threads__findnew(pid_t pid)
 	th = thread__new(pid);
 	if (th != NULL) {
 		rb_link_node(&th->rb_node, parent, p);
-		rb_insert_color(&th->rb_node, &threads);
-		last_match = th;
+		rb_insert_color(&th->rb_node, &self->threads);
+		self->last_match = th;
 	}
 
 	return th;
@@ -269,12 +267,12 @@ int thread__fork(struct thread *self, struct thread *parent)
 	return 0;
 }
 
-size_t threads__fprintf(FILE *fp)
+size_t perf_session__fprintf(struct perf_session *self, FILE *fp)
 {
 	size_t ret = 0;
 	struct rb_node *nd;
 
-	for (nd = rb_first(&threads); nd; nd = rb_next(nd)) {
+	for (nd = rb_first(&self->threads); nd; nd = rb_next(nd)) {
 		struct thread *pos = rb_entry(nd, struct thread, rb_node);
 
 		ret += thread__fprintf(pos, fp);
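
The thread.c hunks above keep the same find-or-create strategy the old globals used (consult the one-entry last_match cache, then search the tree, inserting a new thread on a miss); only the tree root and the cache now belong to the perf_session that is passed in. A minimal self-contained sketch of that pattern follows, with a plain singly linked list and hypothetical names (struct session, session__findnew) standing in for the kernel rb-tree and the real perf types, so it builds outside the perf tree:

#include <stdio.h>
#include <stdlib.h>
#include <sys/types.h>

/* Stand-ins for the perf structures; the real ones use an rb_root. */
struct thread {
	pid_t pid;
	struct thread *next;
};

struct session {
	struct thread *threads;		/* per-session store, was a global */
	struct thread *last_match;	/* one-entry lookup cache, was a global */
};

/* Find the thread for pid, creating it on a miss -- the same
 * find-or-create contract as perf_session__findnew(). */
static struct thread *session__findnew(struct session *self, pid_t pid)
{
	struct thread *th;

	/* Fast path: consecutive events usually come from the same thread. */
	if (self->last_match && self->last_match->pid == pid)
		return self->last_match;

	for (th = self->threads; th != NULL; th = th->next) {
		if (th->pid == pid) {
			self->last_match = th;
			return th;
		}
	}

	/* Miss: create the thread and remember it. */
	th = calloc(1, sizeof(*th));
	if (th != NULL) {
		th->pid = pid;
		th->next = self->threads;
		self->threads = th;
		self->last_match = th;
	}
	return th;
}

int main(void)
{
	struct session s = { NULL, NULL };
	struct thread *first = session__findnew(&s, 42);	/* miss: creates pid 42 */
	struct thread *again = session__findnew(&s, 42);	/* hit: served from last_match */

	printf("same object: %s\n", first == again ? "yes" : "no");
	return 0;
}

The point of moving these fields into struct perf_session is ownership: each session carries its own thread table and cache instead of sharing process-wide globals.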
diff --git a/tools/perf/util/thread.h b/tools/perf/util/thread.h
index 2e35e1f6bb43..e93abf2d9cb6 100644
--- a/tools/perf/util/thread.h
+++ b/tools/perf/util/thread.h
@@ -23,11 +23,11 @@ struct thread {
 void map_groups__init(struct map_groups *self);
 int thread__set_comm(struct thread *self, const char *comm);
 int thread__comm_len(struct thread *self);
-struct thread *threads__findnew(pid_t pid);
+struct thread *perf_session__findnew(struct perf_session *self, pid_t pid);
 void thread__insert_map(struct thread *self, struct map *map);
 int thread__fork(struct thread *self, struct thread *parent);
 size_t map_groups__fprintf_maps(struct map_groups *self, FILE *fp);
-size_t threads__fprintf(FILE *fp);
+size_t perf_session__fprintf(struct perf_session *self, FILE *fp);
 
 void maps__insert(struct rb_root *maps, struct map *map);
 struct map *maps__find(struct rb_root *maps, u64 addr);
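
Seen from the tools' side, the commit simply threads the perf_session pointer through every handler so thread lookup no longer relies on file-scope state in thread.c. A rough, compilable sketch of the resulting calling convention; only the two signatures mirror the diff, while the struct layouts and the findnew body here are stand-in stubs:

#include <stdio.h>
#include <sys/types.h>

/* Hypothetical stubs; the real declarations live in util/session.h,
 * util/thread.h and util/event.h. */
struct thread { pid_t pid; };
struct perf_session { struct thread idle; };

typedef struct {
	struct { int type; } header;
	struct { pid_t pid; unsigned long long ip; } ip;
} event_t;

static struct thread *perf_session__findnew(struct perf_session *self, pid_t pid)
{
	self->idle.pid = pid;	/* toy body; the real one searches self->threads */
	return &self->idle;
}

/* After the change, a builtin's sample handler resolves the thread
 * through the session it was handed, not through a global table. */
static int process_sample_event(event_t *event, struct perf_session *session)
{
	struct thread *thread = perf_session__findnew(session, event->ip.pid);

	if (thread == NULL) {
		fprintf(stderr, "problem processing %d event, skipping it.\n",
			event->header.type);
		return -1;
	}
	return 0;
}

int main(void)
{
	struct perf_session session = { { 0 } };
	event_t ev = { { 1 }, { 1234, 0 } };

	return process_sample_event(&ev, &session);
}

From a caller's point of view the whole change is the extra session argument; the lookup semantics stay as they were.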