Diffstat (limited to 'tools/perf')
-rw-r--r--  tools/perf/builtin-annotate.c                           |   4
-rw-r--r--  tools/perf/builtin-diff.c                               |   9
-rw-r--r--  tools/perf/builtin-inject.c                             |  70
-rw-r--r--  tools/perf/builtin-kmem.c                               |   4
-rw-r--r--  tools/perf/builtin-lock.c                               |   4
-rw-r--r--  tools/perf/builtin-record.c                             |  38
-rw-r--r--  tools/perf/builtin-report.c                             |  24
-rw-r--r--  tools/perf/builtin-sched.c                              |  70
-rw-r--r--  tools/perf/builtin-script.c                             |  26
-rw-r--r--  tools/perf/builtin-timechart.c                          |   8
-rw-r--r--  tools/perf/builtin-top.c                                |  99
-rw-r--r--  tools/perf/util/build-id.c                              |  16
-rw-r--r--  tools/perf/util/event.c                                 | 151
-rw-r--r--  tools/perf/util/event.h                                 |  21
-rw-r--r--  tools/perf/util/header.c                                |  28
-rw-r--r--  tools/perf/util/header.h                                |  16
-rw-r--r--  tools/perf/util/map.h                                   |  10
-rw-r--r--  tools/perf/util/scripting-engines/trace-event-perl.c    |   4
-rw-r--r--  tools/perf/util/scripting-engines/trace-event-python.c  |   4
-rw-r--r--  tools/perf/util/session.c                               |  91
-rw-r--r--  tools/perf/util/session.h                               |  30
-rw-r--r--  tools/perf/util/thread.h                                |  14
-rw-r--r--  tools/perf/util/trace-event-scripting.c                 |   2
-rw-r--r--  tools/perf/util/trace-event.h                           |   8
24 files changed, 376 insertions(+), 375 deletions(-)
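
The change applied throughout the hunks below follows one pattern: perf_event_ops callbacks and the perf_event__synthesize_*() helpers stop taking a struct perf_session and instead receive the struct machine the event was already resolved to, so per-event thread lookup becomes machine__findnew_thread() instead of perf_session__findnew(). A minimal sketch of the resulting callback shape, using stand-in type declarations rather than the real perf headers (only the parameter list and the machine__findnew_thread() call mirror the diff; the rest is illustrative):

    /* Stand-in declarations; the real definitions live under tools/perf/util/. */
    struct perf_event_ops;
    struct perf_evsel;
    struct perf_sample;
    struct machine;
    struct thread;
    union perf_event { struct { int pid; } ip; };   /* only the field used below */

    /* Same shape as the helper the diff switches to. */
    struct thread *machine__findnew_thread(struct machine *machine, int pid);

    /* New-style sample handler: the session layer has already resolved the
     * machine (host or guest), so the handler never sees the perf_session. */
    static int process_sample_event(struct perf_event_ops *ops,
                                    union perf_event *event,
                                    struct perf_sample *sample,
                                    struct perf_evsel *evsel,
                                    struct machine *machine)
    {
            struct thread *thread = machine__findnew_thread(machine, event->ip.pid);

            return thread != NULL ? 0 : -1;
    }
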
diff --git a/tools/perf/builtin-annotate.c b/tools/perf/builtin-annotate.c
index 483cb9466444..dff081a388bb 100644
--- a/tools/perf/builtin-annotate.c
+++ b/tools/perf/builtin-annotate.c
@@ -83,12 +83,12 @@ static int process_sample_event(struct perf_event_ops *ops,
 				union perf_event *event,
 				struct perf_sample *sample,
 				struct perf_evsel *evsel,
-				struct perf_session *session)
+				struct machine *machine)
 {
 	struct perf_annotate *ann = container_of(ops, struct perf_annotate, ops);
 	struct addr_location al;
 
-	if (perf_event__preprocess_sample(event, session, &al, sample,
+	if (perf_event__preprocess_sample(event, machine, &al, sample,
 					  symbol__annotate_init) < 0) {
 		pr_warning("problem processing %d event, skipping it.\n",
 			   event->header.type);
diff --git a/tools/perf/builtin-diff.c b/tools/perf/builtin-diff.c
index 9a0872f9e837..478b0aeb2a62 100644
--- a/tools/perf/builtin-diff.c
+++ b/tools/perf/builtin-diff.c
@@ -9,6 +9,7 @@
 #include "util/debug.h"
 #include "util/event.h"
 #include "util/hist.h"
+#include "util/evsel.h"
 #include "util/session.h"
 #include "util/sort.h"
 #include "util/symbol.h"
@@ -34,11 +35,11 @@ static int diff__process_sample_event(struct perf_event_ops *ops __used,
 				      union perf_event *event,
 				      struct perf_sample *sample,
 				      struct perf_evsel *evsel __used,
-				      struct perf_session *session)
+				      struct machine *machine)
 {
 	struct addr_location al;
 
-	if (perf_event__preprocess_sample(event, session, &al, sample, NULL) < 0) {
+	if (perf_event__preprocess_sample(event, machine, &al, sample, NULL) < 0) {
 		pr_warning("problem processing %d event, skipping it.\n",
 			   event->header.type);
 		return -1;
@@ -47,12 +48,12 @@ static int diff__process_sample_event(struct perf_event_ops *ops __used,
 	if (al.filtered || al.sym == NULL)
 		return 0;
 
-	if (hists__add_entry(&session->hists, &al, sample->period)) {
+	if (hists__add_entry(&evsel->hists, &al, sample->period)) {
 		pr_warning("problem incrementing symbol period, skipping event\n");
 		return -1;
 	}
 
-	session->hists.stats.total_period += sample->period;
+	evsel->hists.stats.total_period += sample->period;
 	return 0;
 }
 
diff --git a/tools/perf/builtin-inject.c b/tools/perf/builtin-inject.c
index 6ce6d80b59db..a5bcf81776fc 100644
--- a/tools/perf/builtin-inject.c
+++ b/tools/perf/builtin-inject.c
@@ -18,7 +18,7 @@ static bool inject_build_ids;
 
 static int perf_event__repipe_synth(struct perf_event_ops *ops __used,
 				    union perf_event *event,
-				    struct perf_session *session __used)
+				    struct machine *machine __used)
 {
 	uint32_t size;
 	void *buf = event;
@@ -37,10 +37,23 @@ static int perf_event__repipe_synth(struct perf_event_ops *ops __used,
 	return 0;
 }
 
+static int perf_event__repipe_op2_synth(struct perf_event_ops *ops,
+					union perf_event *event,
+					struct perf_session *session __used)
+{
+	return perf_event__repipe_synth(ops, event, NULL);
+}
+
+static int perf_event__repipe_event_type_synth(struct perf_event_ops *ops,
+					       union perf_event *event)
+{
+	return perf_event__repipe_synth(ops, event, NULL);
+}
+
 static int perf_event__repipe_tracing_data_synth(union perf_event *event,
-						 struct perf_session *session)
+						 struct perf_session *session __used)
 {
-	return perf_event__repipe_synth(NULL, event, session);
+	return perf_event__repipe_synth(NULL, event, NULL);
 }
 
 static int perf_event__repipe_attr(union perf_event *event,
@@ -52,29 +65,29 @@ static int perf_event__repipe_attr(union perf_event *event,
 static int perf_event__repipe(struct perf_event_ops *ops,
 			      union perf_event *event,
 			      struct perf_sample *sample __used,
-			      struct perf_session *session)
+			      struct machine *machine)
 {
-	return perf_event__repipe_synth(ops, event, session);
+	return perf_event__repipe_synth(ops, event, machine);
 }
 
 static int perf_event__repipe_sample(struct perf_event_ops *ops,
 				     union perf_event *event,
 				     struct perf_sample *sample __used,
 				     struct perf_evsel *evsel __used,
-				     struct perf_session *session)
+				     struct machine *machine)
 {
-	return perf_event__repipe_synth(ops, event, session);
+	return perf_event__repipe_synth(ops, event, machine);
 }
 
 static int perf_event__repipe_mmap(struct perf_event_ops *ops,
 				   union perf_event *event,
 				   struct perf_sample *sample,
-				   struct perf_session *session)
+				   struct machine *machine)
 {
 	int err;
 
-	err = perf_event__process_mmap(ops, event, sample, session);
-	perf_event__repipe(ops, event, sample, session);
+	err = perf_event__process_mmap(ops, event, sample, machine);
+	perf_event__repipe(ops, event, sample, machine);
 
 	return err;
 }
@@ -82,12 +95,12 @@ static int perf_event__repipe_mmap(struct perf_event_ops *ops,
 static int perf_event__repipe_task(struct perf_event_ops *ops,
 				   union perf_event *event,
 				   struct perf_sample *sample,
-				   struct perf_session *session)
+				   struct machine *machine)
 {
 	int err;
 
-	err = perf_event__process_task(ops, event, sample, session);
-	perf_event__repipe(ops, event, sample, session);
+	err = perf_event__process_task(ops, event, sample, machine);
+	perf_event__repipe(ops, event, sample, machine);
 
 	return err;
 }
@@ -97,7 +110,7 @@ static int perf_event__repipe_tracing_data(union perf_event *event,
 {
 	int err;
 
-	perf_event__repipe_synth(NULL, event, session);
+	perf_event__repipe_synth(NULL, event, NULL);
 	err = perf_event__process_tracing_data(event, session);
 
 	return err;
@@ -118,10 +131,9 @@ static int dso__read_build_id(struct dso *self)
 }
 
 static int dso__inject_build_id(struct dso *self, struct perf_event_ops *ops,
-				struct perf_session *session)
+				struct machine *machine)
 {
 	u16 misc = PERF_RECORD_MISC_USER;
-	struct machine *machine;
 	int err;
 
 	if (dso__read_build_id(self) < 0) {
@@ -129,17 +141,11 @@ static int dso__inject_build_id(struct dso *self, struct perf_event_ops *ops,
 		return -1;
 	}
 
-	machine = perf_session__find_host_machine(session);
-	if (machine == NULL) {
-		pr_err("Can't find machine for session\n");
-		return -1;
-	}
-
 	if (self->kernel)
 		misc = PERF_RECORD_MISC_KERNEL;
 
 	err = perf_event__synthesize_build_id(ops, self, misc, perf_event__repipe,
-					      machine, session);
+					      machine);
 	if (err) {
 		pr_err("Can't synthesize build_id event for %s\n", self->long_name);
 		return -1;
@@ -152,7 +158,7 @@ static int perf_event__inject_buildid(struct perf_event_ops *ops,
 				      union perf_event *event,
 				      struct perf_sample *sample,
 				      struct perf_evsel *evsel __used,
-				      struct perf_session *session)
+				      struct machine *machine)
 {
 	struct addr_location al;
 	struct thread *thread;
@@ -160,21 +166,21 @@ static int perf_event__inject_buildid(struct perf_event_ops *ops,
 
 	cpumode = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;
 
-	thread = perf_session__findnew(session, event->ip.pid);
+	thread = machine__findnew_thread(machine, event->ip.pid);
 	if (thread == NULL) {
 		pr_err("problem processing %d event, skipping it.\n",
 		       event->header.type);
 		goto repipe;
 	}
 
-	thread__find_addr_map(thread, session, cpumode, MAP__FUNCTION,
-			      event->ip.pid, event->ip.ip, &al);
+	thread__find_addr_map(thread, machine, cpumode, MAP__FUNCTION,
+			      event->ip.ip, &al);
 
 	if (al.map != NULL) {
 		if (!al.map->dso->hit) {
 			al.map->dso->hit = 1;
 			if (map__load(al.map, NULL) >= 0) {
-				dso__inject_build_id(al.map->dso, ops, session);
+				dso__inject_build_id(al.map->dso, ops, machine);
 				/*
 				 * If this fails, too bad, let the other side
 				 * account this as unresolved.
@@ -187,7 +193,7 @@ static int perf_event__inject_buildid(struct perf_event_ops *ops,
 	}
 
 repipe:
-	perf_event__repipe(ops, event, sample, session);
+	perf_event__repipe(ops, event, sample, machine);
 	return 0;
 }
 
@@ -198,13 +204,13 @@ struct perf_event_ops inject_ops = {
 	.fork		= perf_event__repipe,
 	.exit		= perf_event__repipe,
 	.lost		= perf_event__repipe,
-	.read		= perf_event__repipe,
+	.read		= perf_event__repipe_sample,
 	.throttle	= perf_event__repipe,
 	.unthrottle	= perf_event__repipe,
 	.attr		= perf_event__repipe_attr,
-	.event_type	= perf_event__repipe_synth,
+	.event_type	= perf_event__repipe_event_type_synth,
 	.tracing_data	= perf_event__repipe_tracing_data_synth,
-	.build_id	= perf_event__repipe_synth,
+	.build_id	= perf_event__repipe_op2_synth,
 };
 
 extern volatile int session_done;
diff --git a/tools/perf/builtin-kmem.c b/tools/perf/builtin-kmem.c
index 5d01218e50e0..27b2a15dc7b2 100644
--- a/tools/perf/builtin-kmem.c
+++ b/tools/perf/builtin-kmem.c
@@ -307,9 +307,9 @@ static int process_sample_event(struct perf_event_ops *ops __used,
 				union perf_event *event,
 				struct perf_sample *sample,
 				struct perf_evsel *evsel __used,
-				struct perf_session *session)
+				struct machine *machine)
 {
-	struct thread *thread = perf_session__findnew(session, event->ip.pid);
+	struct thread *thread = machine__findnew_thread(machine, event->ip.pid);
 
 	if (thread == NULL) {
 		pr_debug("problem processing %d event, skipping it.\n",
diff --git a/tools/perf/builtin-lock.c b/tools/perf/builtin-lock.c
index f06b0a44c7cb..99b032adb83e 100644
--- a/tools/perf/builtin-lock.c
+++ b/tools/perf/builtin-lock.c
@@ -849,9 +849,9 @@ static int process_sample_event(struct perf_event_ops *ops __used,
 				union perf_event *event,
 				struct perf_sample *sample,
 				struct perf_evsel *evsel __used,
-				struct perf_session *s)
+				struct machine *machine)
 {
-	struct thread *thread = perf_session__findnew(s, sample->tid);
+	struct thread *thread = machine__findnew_thread(machine, sample->tid);
 
 	if (thread == NULL) {
 		pr_debug("problem processing %d event, skipping it.\n",
diff --git a/tools/perf/builtin-record.c b/tools/perf/builtin-record.c
index 4642d38b8d19..0af598a1059f 100644
--- a/tools/perf/builtin-record.c
+++ b/tools/perf/builtin-record.c
@@ -79,7 +79,7 @@ static void write_output(struct perf_record *rec, void *buf, size_t size)
 static int process_synthesized_event(struct perf_event_ops *ops,
 				     union perf_event *event,
 				     struct perf_sample *sample __used,
-				     struct perf_session *self __used)
+				     struct machine *machine __used)
 {
 	struct perf_record *rec = container_of(ops, struct perf_record, ops);
 	write_output(rec, event, event->header.size);
@@ -320,8 +320,6 @@ static void perf_event__synthesize_guest_os(struct machine *machine, void *data)
 {
 	int err;
 	struct perf_event_ops *ops = data;
-	struct perf_record *rec = container_of(ops, struct perf_record, ops);
-	struct perf_session *psession = rec->session;
 
 	if (machine__is_host(machine))
 		return;
@@ -335,7 +333,7 @@ static void perf_event__synthesize_guest_os(struct machine *machine, void *data)
 	 *in module instead of in guest kernel.
 	 */
 	err = perf_event__synthesize_modules(ops, process_synthesized_event,
-					     psession, machine);
+					     machine);
 	if (err < 0)
 		pr_err("Couldn't record guest kernel [%d]'s reference"
 		       " relocation symbol.\n", machine->pid);
@@ -345,11 +343,10 @@ static void perf_event__synthesize_guest_os(struct machine *machine, void *data)
 	 * have no _text sometimes.
 	 */
 	err = perf_event__synthesize_kernel_mmap(ops, process_synthesized_event,
-						 psession, machine, "_text");
+						 machine, "_text");
 	if (err < 0)
 		err = perf_event__synthesize_kernel_mmap(ops, process_synthesized_event,
-							 psession, machine,
-							 "_stext");
+							 machine, "_stext");
 	if (err < 0)
 		pr_err("Couldn't record guest kernel [%d]'s reference"
 		       " relocation symbol.\n", machine->pid);
@@ -497,6 +494,12 @@ static int __cmd_record(struct perf_record *rec, int argc, const char **argv)
 
 	rec->post_processing_offset = lseek(output, 0, SEEK_CUR);
 
+	machine = perf_session__find_host_machine(session);
+	if (!machine) {
+		pr_err("Couldn't find native kernel information.\n");
+		return -1;
+	}
+
 	if (opts->pipe_output) {
 		err = perf_event__synthesize_attrs(ops, session,
 						   process_synthesized_event);
@@ -506,7 +509,7 @@ static int __cmd_record(struct perf_record *rec, int argc, const char **argv)
 	}
 
 	err = perf_event__synthesize_event_types(ops, process_synthesized_event,
-						 session);
+						 machine);
 	if (err < 0) {
 		pr_err("Couldn't synthesize event_types.\n");
 		return err;
@@ -522,8 +525,7 @@ static int __cmd_record(struct perf_record *rec, int argc, const char **argv)
 		 * propagate errors that now are calling die()
 		 */
 		err = perf_event__synthesize_tracing_data(ops, output, evsel_list,
-							  process_synthesized_event,
-							  session);
+							  process_synthesized_event);
 		if (err <= 0) {
 			pr_err("Couldn't record tracing data.\n");
 			return err;
@@ -532,24 +534,18 @@ static int __cmd_record(struct perf_record *rec, int argc, const char **argv)
 		}
 	}
 
-	machine = perf_session__find_host_machine(session);
-	if (!machine) {
-		pr_err("Couldn't find native kernel information.\n");
-		return -1;
-	}
-
 	err = perf_event__synthesize_kernel_mmap(ops, process_synthesized_event,
-						 session, machine, "_text");
+						 machine, "_text");
 	if (err < 0)
 		err = perf_event__synthesize_kernel_mmap(ops, process_synthesized_event,
-							 session, machine, "_stext");
+							 machine, "_stext");
 	if (err < 0)
 		pr_err("Couldn't record kernel reference relocation symbol\n"
 		       "Symbol resolution may be skewed if relocation was used (e.g. kexec).\n"
 		       "Check /proc/kallsyms permission or run as root.\n");
 
 	err = perf_event__synthesize_modules(ops, process_synthesized_event,
-					     session, machine);
+					     machine);
 	if (err < 0)
 		pr_err("Couldn't record kernel module information.\n"
 		       "Symbol resolution may be skewed if relocation was used (e.g. kexec).\n"
@@ -562,10 +558,10 @@ static int __cmd_record(struct perf_record *rec, int argc, const char **argv)
 	if (!opts->system_wide)
 		perf_event__synthesize_thread_map(ops, evsel_list->threads,
 						  process_synthesized_event,
-						  session);
+						  machine);
 	else
 		perf_event__synthesize_threads(ops, process_synthesized_event,
-					       session);
+					       machine);
 
 	if (rec->realtime_prio) {
 		struct sched_param param;
diff --git a/tools/perf/builtin-report.c b/tools/perf/builtin-report.c
index 8795520f6e1d..ea64fbbdff43 100644
--- a/tools/perf/builtin-report.c
+++ b/tools/perf/builtin-report.c
@@ -52,18 +52,18 @@ struct perf_report {
 	DECLARE_BITMAP(cpu_bitmap, MAX_NR_CPUS);
 };
 
-static int perf_session__add_hist_entry(struct perf_session *session,
+static int perf_evsel__add_hist_entry(struct perf_evsel *evsel,
 					struct addr_location *al,
 					struct perf_sample *sample,
-					struct perf_evsel *evsel)
+					struct machine *machine)
 {
 	struct symbol *parent = NULL;
 	int err = 0;
 	struct hist_entry *he;
 
 	if ((sort__has_parent || symbol_conf.use_callchain) && sample->callchain) {
-		err = perf_session__resolve_callchain(session, evsel, al->thread,
+		err = machine__resolve_callchain(machine, evsel, al->thread,
 						      sample->callchain, &parent);
 		if (err)
 			return err;
 	}
@@ -107,12 +107,12 @@ static int process_sample_event(struct perf_event_ops *ops,
 				union perf_event *event,
 				struct perf_sample *sample,
 				struct perf_evsel *evsel,
-				struct perf_session *session)
+				struct machine *machine)
 {
 	struct perf_report *rep = container_of(ops, struct perf_report, ops);
 	struct addr_location al;
 
-	if (perf_event__preprocess_sample(event, session, &al, sample,
+	if (perf_event__preprocess_sample(event, machine, &al, sample,
 					  rep->annotate_init) < 0) {
 		fprintf(stderr, "problem processing %d event, skipping it.\n",
 			event->header.type);
@@ -128,7 +128,7 @@ static int process_sample_event(struct perf_event_ops *ops,
 	if (al.map != NULL)
 		al.map->dso->hit = 1;
 
-	if (perf_session__add_hist_entry(session, &al, sample, evsel)) {
+	if (perf_evsel__add_hist_entry(evsel, &al, sample, machine)) {
 		pr_debug("problem incrementing symbol period, skipping event\n");
 		return -1;
 	}
@@ -139,11 +139,11 @@ static int process_sample_event(struct perf_event_ops *ops,
 static int process_read_event(struct perf_event_ops *ops,
 			      union perf_event *event,
 			      struct perf_sample *sample __used,
-			      struct perf_session *session)
+			      struct perf_evsel *evsel,
+			      struct machine *machine __used)
 {
 	struct perf_report *rep = container_of(ops, struct perf_report, ops);
-	struct perf_evsel *evsel = perf_evlist__id2evsel(session->evlist,
-							 event->read.id);
+
 	if (rep->show_threads) {
 		const char *name = evsel ? event_name(evsel) : "unknown";
 		perf_read_values_add_value(&rep->show_threads_values,
diff --git a/tools/perf/builtin-sched.c b/tools/perf/builtin-sched.c
index b11d6283fedf..6a771f822e5d 100644
--- a/tools/perf/builtin-sched.c
+++ b/tools/perf/builtin-sched.c
@@ -724,21 +724,21 @@ struct trace_migrate_task_event {
 
 struct trace_sched_handler {
 	void (*switch_event)(struct trace_switch_event *,
-			     struct perf_session *,
+			     struct machine *,
 			     struct event *,
 			     int cpu,
 			     u64 timestamp,
 			     struct thread *thread);
 
 	void (*runtime_event)(struct trace_runtime_event *,
-			      struct perf_session *,
+			      struct machine *,
 			      struct event *,
 			      int cpu,
 			      u64 timestamp,
 			      struct thread *thread);
 
 	void (*wakeup_event)(struct trace_wakeup_event *,
-			     struct perf_session *,
+			     struct machine *,
 			     struct event *,
 			     int cpu,
 			     u64 timestamp,
@@ -751,7 +751,7 @@ struct trace_sched_handler {
 			     struct thread *thread);
 
 	void (*migrate_task_event)(struct trace_migrate_task_event *,
-				   struct perf_session *session,
+				   struct machine *machine,
 				   struct event *,
 				   int cpu,
 				   u64 timestamp,
@@ -761,7 +761,7 @@ struct trace_sched_handler {
 
 static void
 replay_wakeup_event(struct trace_wakeup_event *wakeup_event,
-		    struct perf_session *session __used,
+		    struct machine *machine __used,
 		    struct event *event,
 		    int cpu __used,
 		    u64 timestamp __used,
@@ -788,7 +788,7 @@ static u64 cpu_last_switched[MAX_CPUS];
 
 static void
 replay_switch_event(struct trace_switch_event *switch_event,
-		    struct perf_session *session __used,
+		    struct machine *machine __used,
 		    struct event *event,
 		    int cpu,
 		    u64 timestamp,
@@ -1022,7 +1022,7 @@ add_sched_in_event(struct work_atoms *atoms, u64 timestamp)
 
 static void
 latency_switch_event(struct trace_switch_event *switch_event,
-		     struct perf_session *session,
+		     struct machine *machine,
 		     struct event *event __used,
 		     int cpu,
 		     u64 timestamp,
@@ -1046,8 +1046,8 @@ latency_switch_event(struct trace_switch_event *switch_event,
 		die("hm, delta: %" PRIu64 " < 0 ?\n", delta);
 
 
-	sched_out = perf_session__findnew(session, switch_event->prev_pid);
-	sched_in = perf_session__findnew(session, switch_event->next_pid);
+	sched_out = machine__findnew_thread(machine, switch_event->prev_pid);
+	sched_in = machine__findnew_thread(machine, switch_event->next_pid);
 
 	out_events = thread_atoms_search(&atom_root, sched_out, &cmp_pid);
 	if (!out_events) {
@@ -1075,13 +1075,13 @@ latency_switch_event(struct trace_switch_event *switch_event,
 
 static void
 latency_runtime_event(struct trace_runtime_event *runtime_event,
-		      struct perf_session *session,
+		      struct machine *machine,
 		      struct event *event __used,
 		      int cpu,
 		      u64 timestamp,
 		      struct thread *this_thread __used)
 {
-	struct thread *thread = perf_session__findnew(session, runtime_event->pid);
+	struct thread *thread = machine__findnew_thread(machine, runtime_event->pid);
 	struct work_atoms *atoms = thread_atoms_search(&atom_root, thread, &cmp_pid);
 
 	BUG_ON(cpu >= MAX_CPUS || cpu < 0);
@@ -1098,7 +1098,7 @@ latency_runtime_event(struct trace_runtime_event *runtime_event,
 
 static void
 latency_wakeup_event(struct trace_wakeup_event *wakeup_event,
-		     struct perf_session *session,
+		     struct machine *machine,
 		     struct event *__event __used,
 		     int cpu __used,
 		     u64 timestamp,
@@ -1112,7 +1112,7 @@ latency_wakeup_event(struct trace_wakeup_event *wakeup_event,
 	if (!wakeup_event->success)
 		return;
 
-	wakee = perf_session__findnew(session, wakeup_event->pid);
+	wakee = machine__findnew_thread(machine, wakeup_event->pid);
 	atoms = thread_atoms_search(&atom_root, wakee, &cmp_pid);
 	if (!atoms) {
 		thread_atoms_insert(wakee);
@@ -1146,7 +1146,7 @@ latency_wakeup_event(struct trace_wakeup_event *wakeup_event,
 
 static void
 latency_migrate_task_event(struct trace_migrate_task_event *migrate_task_event,
-			   struct perf_session *session,
+			   struct machine *machine,
 			   struct event *__event __used,
 			   int cpu __used,
 			   u64 timestamp,
@@ -1162,7 +1162,7 @@ latency_migrate_task_event(struct trace_migrate_task_event *migrate_task_event,
 	if (profile_cpu == -1)
 		return;
 
-	migrant = perf_session__findnew(session, migrate_task_event->pid);
+	migrant = machine__findnew_thread(machine, migrate_task_event->pid);
 	atoms = thread_atoms_search(&atom_root, migrant, &cmp_pid);
 	if (!atoms) {
 		thread_atoms_insert(migrant);
@@ -1357,7 +1357,7 @@ static void sort_lat(void)
 static struct trace_sched_handler *trace_handler;
 
 static void
-process_sched_wakeup_event(void *data, struct perf_session *session,
+process_sched_wakeup_event(void *data, struct machine *machine,
 			   struct event *event,
 			   int cpu __used,
 			   u64 timestamp __used,
@@ -1374,7 +1374,7 @@ process_sched_wakeup_event(void *data, struct perf_session *session,
 	FILL_FIELD(wakeup_event, cpu, event, data);
 
 	if (trace_handler->wakeup_event)
-		trace_handler->wakeup_event(&wakeup_event, session, event,
+		trace_handler->wakeup_event(&wakeup_event, machine, event,
 					    cpu, timestamp, thread);
 }
 
@@ -1393,7 +1393,7 @@ static char next_shortname2 = '0';
 
 static void
 map_switch_event(struct trace_switch_event *switch_event,
-		 struct perf_session *session,
+		 struct machine *machine,
 		 struct event *event __used,
 		 int this_cpu,
 		 u64 timestamp,
@@ -1421,8 +1421,8 @@ map_switch_event(struct trace_switch_event *switch_event,
 		die("hm, delta: %" PRIu64 " < 0 ?\n", delta);
 
 
-	sched_out = perf_session__findnew(session, switch_event->prev_pid);
-	sched_in = perf_session__findnew(session, switch_event->next_pid);
+	sched_out = machine__findnew_thread(machine, switch_event->prev_pid);
+	sched_in = machine__findnew_thread(machine, switch_event->next_pid);
 
 	curr_thread[this_cpu] = sched_in;
 
@@ -1472,7 +1472,7 @@ map_switch_event(struct trace_switch_event *switch_event,
 
 
 static void
-process_sched_switch_event(void *data, struct perf_session *session,
+process_sched_switch_event(void *data, struct machine *machine,
 			   struct event *event,
 			   int this_cpu,
 			   u64 timestamp __used,
@@ -1499,14 +1499,14 @@ process_sched_switch_event(void *data, struct perf_session *session,
 			nr_context_switch_bugs++;
 	}
 	if (trace_handler->switch_event)
-		trace_handler->switch_event(&switch_event, session, event,
+		trace_handler->switch_event(&switch_event, machine, event,
 					    this_cpu, timestamp, thread);
 
 	curr_pid[this_cpu] = switch_event.next_pid;
 }
 
 static void
-process_sched_runtime_event(void *data, struct perf_session *session,
+process_sched_runtime_event(void *data, struct machine *machine,
 			    struct event *event,
 			    int cpu __used,
 			    u64 timestamp __used,
@@ -1520,7 +1520,7 @@ process_sched_runtime_event(void *data, struct perf_session *session,
 	FILL_FIELD(runtime_event, vruntime, event, data);
 
 	if (trace_handler->runtime_event)
-		trace_handler->runtime_event(&runtime_event, session, event, cpu, timestamp, thread);
+		trace_handler->runtime_event(&runtime_event, machine, event, cpu, timestamp, thread);
 }
 
 static void
@@ -1555,7 +1555,7 @@ process_sched_exit_event(struct event *event,
 }
 
 static void
-process_sched_migrate_task_event(void *data, struct perf_session *session,
+process_sched_migrate_task_event(void *data, struct machine *machine,
 				 struct event *event,
 				 int cpu __used,
 				 u64 timestamp __used,
@@ -1571,12 +1571,12 @@ process_sched_migrate_task_event(void *data, struct perf_session *session,
 	FILL_FIELD(migrate_task_event, cpu, event, data);
 
 	if (trace_handler->migrate_task_event)
-		trace_handler->migrate_task_event(&migrate_task_event, session,
+		trace_handler->migrate_task_event(&migrate_task_event, machine,
 						  event, cpu, timestamp, thread);
 }
 
 static void process_raw_event(union perf_event *raw_event __used,
-			      struct perf_session *session, void *data, int cpu,
+			      struct machine *machine, void *data, int cpu,
 			      u64 timestamp, struct thread *thread)
 {
 	struct event *event;
@@ -1587,33 +1587,33 @@ static void process_raw_event(union perf_event *raw_event __used,
 	event = trace_find_event(type);
 
 	if (!strcmp(event->name, "sched_switch"))
-		process_sched_switch_event(data, session, event, cpu, timestamp, thread);
+		process_sched_switch_event(data, machine, event, cpu, timestamp, thread);
 	if (!strcmp(event->name, "sched_stat_runtime"))
-		process_sched_runtime_event(data, session, event, cpu, timestamp, thread);
+		process_sched_runtime_event(data, machine, event, cpu, timestamp, thread);
 	if (!strcmp(event->name, "sched_wakeup"))
-		process_sched_wakeup_event(data, session, event, cpu, timestamp, thread);
+		process_sched_wakeup_event(data, machine, event, cpu, timestamp, thread);
 	if (!strcmp(event->name, "sched_wakeup_new"))
-		process_sched_wakeup_event(data, session, event, cpu, timestamp, thread);
+		process_sched_wakeup_event(data, machine, event, cpu, timestamp, thread);
 	if (!strcmp(event->name, "sched_process_fork"))
 		process_sched_fork_event(data, event, cpu, timestamp, thread);
 	if (!strcmp(event->name, "sched_process_exit"))
 		process_sched_exit_event(event, cpu, timestamp, thread);
 	if (!strcmp(event->name, "sched_migrate_task"))
-		process_sched_migrate_task_event(data, session, event, cpu, timestamp, thread);
+		process_sched_migrate_task_event(data, machine, event, cpu, timestamp, thread);
 }
 
 static int process_sample_event(struct perf_event_ops *ops __used,
 				union perf_event *event,
 				struct perf_sample *sample,
 				struct perf_evsel *evsel,
-				struct perf_session *session)
+				struct machine *machine)
 {
 	struct thread *thread;
 
 	if (!(evsel->attr.sample_type & PERF_SAMPLE_RAW))
 		return 0;
 
-	thread = perf_session__findnew(session, sample->pid);
+	thread = machine__findnew_thread(machine, sample->pid);
 	if (thread == NULL) {
 		pr_debug("problem processing %d event, skipping it.\n",
 			 event->header.type);
@@ -1625,7 +1625,7 @@ static int process_sample_event(struct perf_event_ops *ops __used,
 	if (profile_cpu != -1 && profile_cpu != (int)sample->cpu)
 		return 0;
 
-	process_raw_event(event, session, sample->raw_data, sample->cpu,
+	process_raw_event(event, machine, sample->raw_data, sample->cpu,
 			  sample->time, thread);
 
 	return 0;
diff --git a/tools/perf/builtin-script.c b/tools/perf/builtin-script.c
index 3b7820612ebf..31a8d14e5fb7 100644
--- a/tools/perf/builtin-script.c
+++ b/tools/perf/builtin-script.c
@@ -315,7 +315,7 @@ static bool sample_addr_correlates_sym(struct perf_event_attr *attr)
 
 static void print_sample_addr(union perf_event *event,
 			      struct perf_sample *sample,
-			      struct perf_session *session,
+			      struct machine *machine,
 			      struct thread *thread,
 			      struct perf_event_attr *attr)
 {
@@ -328,11 +328,11 @@ static void print_sample_addr(union perf_event *event,
 	if (!sample_addr_correlates_sym(attr))
 		return;
 
-	thread__find_addr_map(thread, session, cpumode, MAP__FUNCTION,
-			      event->ip.pid, sample->addr, &al);
+	thread__find_addr_map(thread, machine, cpumode, MAP__FUNCTION,
+			      sample->addr, &al);
 	if (!al.map)
-		thread__find_addr_map(thread, session, cpumode, MAP__VARIABLE,
-				      event->ip.pid, sample->addr, &al);
+		thread__find_addr_map(thread, machine, cpumode, MAP__VARIABLE,
+				      sample->addr, &al);
 
 	al.cpu = sample->cpu;
 	al.sym = NULL;
@@ -362,7 +362,7 @@ static void print_sample_addr(union perf_event *event,
 static void process_event(union perf_event *event __unused,
 			  struct perf_sample *sample,
 			  struct perf_evsel *evsel,
-			  struct perf_session *session,
+			  struct machine *machine,
 			  struct thread *thread)
 {
 	struct perf_event_attr *attr = &evsel->attr;
@@ -377,15 +377,15 @@ static void process_event(union perf_event *event __unused,
 				  sample->raw_size);
 
 	if (PRINT_FIELD(ADDR))
-		print_sample_addr(event, sample, session, thread, attr);
+		print_sample_addr(event, sample, machine, thread, attr);
 
 	if (PRINT_FIELD(IP)) {
 		if (!symbol_conf.use_callchain)
 			printf(" ");
 		else
 			printf("\n");
-		perf_session__print_ip(event, evsel, sample, session,
+		perf_event__print_ip(event, sample, machine, evsel,
 				     PRINT_FIELD(SYM), PRINT_FIELD(DSO));
 	}
 
 	printf("\n");
@@ -438,9 +438,9 @@ static int process_sample_event(struct perf_event_ops *ops __used,
 				union perf_event *event,
 				struct perf_sample *sample,
 				struct perf_evsel *evsel,
-				struct perf_session *session)
+				struct machine *machine)
 {
-	struct thread *thread = perf_session__findnew(session, event->ip.pid);
+	struct thread *thread = machine__findnew_thread(machine, event->ip.pid);
 
 	if (thread == NULL) {
 		pr_debug("problem processing %d event, skipping it.\n",
@@ -462,9 +462,9 @@ static int process_sample_event(struct perf_event_ops *ops __used,
 	if (cpu_list && !test_bit(sample->cpu, cpu_bitmap))
 		return 0;
 
-	scripting_ops->process_event(event, sample, evsel, session, thread);
+	scripting_ops->process_event(event, sample, evsel, machine, thread);
 
-	session->hists.stats.total_period += sample->period;
+	evsel->hists.stats.total_period += sample->period;
 	return 0;
 }
 
diff --git a/tools/perf/builtin-timechart.c b/tools/perf/builtin-timechart.c
index 62298a0d7dc9..8e6539625bc1 100644
--- a/tools/perf/builtin-timechart.c
+++ b/tools/perf/builtin-timechart.c
@@ -277,7 +277,7 @@ static u64 cpus_pstate_state[MAX_CPUS];
 static int process_comm_event(struct perf_event_ops *ops __used,
 			      union perf_event *event,
 			      struct perf_sample *sample __used,
-			      struct perf_session *session __used)
+			      struct machine *machine __used)
 {
 	pid_set_comm(event->comm.tid, event->comm.comm);
 	return 0;
@@ -286,7 +286,7 @@ static int process_comm_event(struct perf_event_ops *ops __used,
 static int process_fork_event(struct perf_event_ops *ops __used,
 			      union perf_event *event,
 			      struct perf_sample *sample __used,
-			      struct perf_session *session __used)
+			      struct machine *machine __used)
 {
 	pid_fork(event->fork.pid, event->fork.ppid, event->fork.time);
 	return 0;
@@ -295,7 +295,7 @@ static int process_fork_event(struct perf_event_ops *ops __used,
 static int process_exit_event(struct perf_event_ops *ops __used,
 			      union perf_event *event,
 			      struct perf_sample *sample __used,
-			      struct perf_session *session __used)
+			      struct machine *machine __used)
 {
 	pid_exit(event->fork.pid, event->fork.time);
 	return 0;
@@ -494,7 +494,7 @@ static int process_sample_event(struct perf_event_ops *ops __used,
 				union perf_event *event __used,
 				struct perf_sample *sample,
 				struct perf_evsel *evsel,
-				struct perf_session *session __used)
+				struct machine *machine __used)
 {
 	struct trace_entry *te;
 
diff --git a/tools/perf/builtin-top.c b/tools/perf/builtin-top.c
index e8e3320602bd..31d497368ccf 100644
--- a/tools/perf/builtin-top.c
+++ b/tools/perf/builtin-top.c
@@ -258,11 +258,9 @@ out_unlock:
 
 static const char CONSOLE_CLEAR[] = "";
 
-static struct hist_entry *
-	perf_session__add_hist_entry(struct perf_session *session,
-				     struct addr_location *al,
-				     struct perf_sample *sample,
-				     struct perf_evsel *evsel)
+static struct hist_entry *perf_evsel__add_hist_entry(struct perf_evsel *evsel,
+						     struct addr_location *al,
+						     struct perf_sample *sample)
 {
 	struct hist_entry *he;
 
@@ -270,7 +268,7 @@ static struct hist_entry *
 	if (he == NULL)
 		return NULL;
 
-	session->hists.stats.total_period += sample->period;
+	evsel->hists.stats.total_period += sample->period;
 	hists__inc_nr_events(&evsel->hists, PERF_RECORD_SAMPLE);
 	return he;
 }
@@ -675,44 +673,12 @@ static int symbol_filter(struct map *map __used, struct symbol *sym)
 static void perf_event__process_sample(const union perf_event *event,
 				       struct perf_evsel *evsel,
 				       struct perf_sample *sample,
-				       struct perf_session *session)
+				       struct machine *machine)
 {
 	struct symbol *parent = NULL;
 	u64 ip = event->ip.ip;
 	struct addr_location al;
-	struct machine *machine;
 	int err;
-	u8 origin = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;
-
-	++top.samples;
-
-	switch (origin) {
-	case PERF_RECORD_MISC_USER:
-		++top.us_samples;
-		if (top.hide_user_symbols)
-			return;
-		machine = perf_session__find_host_machine(session);
-		break;
-	case PERF_RECORD_MISC_KERNEL:
-		++top.kernel_samples;
-		if (top.hide_kernel_symbols)
-			return;
-		machine = perf_session__find_host_machine(session);
-		break;
-	case PERF_RECORD_MISC_GUEST_KERNEL:
-		++top.guest_kernel_samples;
-		machine = perf_session__find_machine(session, event->ip.pid);
-		break;
-	case PERF_RECORD_MISC_GUEST_USER:
-		++top.guest_us_samples;
-		/*
-		 * TODO: we don't process guest user from host side
-		 * except simple counting.
-		 */
-		return;
-	default:
-		return;
-	}
 
 	if (!machine && perf_guest) {
 		pr_err("Can't find guest [%d]'s kernel information\n",
@@ -723,7 +689,7 @@ static void perf_event__process_sample(const union perf_event *event,
 	if (event->header.misc & PERF_RECORD_MISC_EXACT_IP)
 		top.exact_samples++;
 
-	if (perf_event__preprocess_sample(event, session, &al, sample,
+	if (perf_event__preprocess_sample(event, machine, &al, sample,
 					  symbol_filter) < 0 ||
 	    al.filtered)
 		return;
@@ -777,13 +743,13 @@ static void perf_event__process_sample(const union perf_event *event,
 
 	if ((sort__has_parent || symbol_conf.use_callchain) &&
 	    sample->callchain) {
-		err = perf_session__resolve_callchain(session, evsel, al.thread,
+		err = machine__resolve_callchain(machine, evsel, al.thread,
 						      sample->callchain, &parent);
 		if (err)
 			return;
 	}
 
-	he = perf_session__add_hist_entry(session, &al, sample, evsel);
+	he = perf_evsel__add_hist_entry(evsel, &al, sample);
 	if (he == NULL) {
 		pr_err("Problem incrementing symbol period, skipping event\n");
 		return;
@@ -808,6 +774,8 @@ static void perf_session__mmap_read_idx(struct perf_session *self, int idx)
 	struct perf_sample sample;
 	struct perf_evsel *evsel;
 	union perf_event *event;
+	struct machine *machine;
+	u8 origin;
 	int ret;
 
 	while ((event = perf_evlist__mmap_read(top.evlist, idx)) != NULL) {
@@ -820,11 +788,45 @@ static void perf_session__mmap_read_idx(struct perf_session *self, int idx)
 		evsel = perf_evlist__id2evsel(self->evlist, sample.id);
 		assert(evsel != NULL);
 
+		origin = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;
+
 		if (event->header.type == PERF_RECORD_SAMPLE)
-			perf_event__process_sample(event, evsel, &sample, self);
+			++top.samples;
+
+		switch (origin) {
+		case PERF_RECORD_MISC_USER:
+			++top.us_samples;
+			if (top.hide_user_symbols)
+				continue;
+			machine = perf_session__find_host_machine(self);
+			break;
+		case PERF_RECORD_MISC_KERNEL:
+			++top.kernel_samples;
+			if (top.hide_kernel_symbols)
+				continue;
+			machine = perf_session__find_host_machine(self);
+			break;
+		case PERF_RECORD_MISC_GUEST_KERNEL:
+			++top.guest_kernel_samples;
+			machine = perf_session__find_machine(self, event->ip.pid);
+			break;
+		case PERF_RECORD_MISC_GUEST_USER:
+			++top.guest_us_samples;
+			/*
+			 * TODO: we don't process guest user from host side
+			 * except simple counting.
+			 */
+			/* Fall thru */
+		default:
+			continue;
+		}
+
+
+		if (event->header.type == PERF_RECORD_SAMPLE)
+			perf_event__process_sample(event, evsel, &sample, machine);
 		else if (event->header.type < PERF_RECORD_MAX) {
 			hists__inc_nr_events(&evsel->hists, event->header.type);
-			perf_event__process(&top.ops, event, &sample, self);
+			perf_event__process(&top.ops, event, &sample, machine);
 		} else
 			++self->hists.stats.nr_unknown_events;
 	}
@@ -967,10 +969,11 @@ static int __cmd_top(void)
 
 	if (top.target_tid != -1)
 		perf_event__synthesize_thread_map(&top.ops, top.evlist->threads,
-						  perf_event__process, top.session);
+						  perf_event__process,
+						  &top.session->host_machine);
 	else
-		perf_event__synthesize_threads(&top.ops, perf_event__process, top.session);
-
+		perf_event__synthesize_threads(&top.ops, perf_event__process,
+					       &top.session->host_machine);
 	start_counters(top.evlist);
 	top.session->evlist = top.evlist;
 	perf_session__update_sample_type(top.session);
diff --git a/tools/perf/util/build-id.c b/tools/perf/util/build-id.c
index 0e4de1865013..2f84c4802aca 100644
--- a/tools/perf/util/build-id.c
+++ b/tools/perf/util/build-id.c
@@ -19,11 +19,11 @@ static int build_id__mark_dso_hit(struct perf_event_ops *ops __used,
 				   union perf_event *event,
 				   struct perf_sample *sample __used,
 				   struct perf_evsel *evsel __used,
-				   struct perf_session *session)
+				   struct machine *machine)
 {
 	struct addr_location al;
 	u8 cpumode = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;
-	struct thread *thread = perf_session__findnew(session, event->ip.pid);
+	struct thread *thread = machine__findnew_thread(machine, event->ip.pid);
 
 	if (thread == NULL) {
 		pr_err("problem processing %d event, skipping it.\n",
@@ -31,8 +31,8 @@ static int build_id__mark_dso_hit(struct perf_event_ops *ops __used,
 		return -1;
 	}
 
-	thread__find_addr_map(thread, session, cpumode, MAP__FUNCTION,
-			      event->ip.pid, event->ip.ip, &al);
+	thread__find_addr_map(thread, machine, cpumode, MAP__FUNCTION,
+			      event->ip.ip, &al);
 
 	if (al.map != NULL)
 		al.map->dso->hit = 1;
@@ -43,16 +43,16 @@ static int build_id__mark_dso_hit(struct perf_event_ops *ops __used,
 static int perf_event__exit_del_thread(struct perf_event_ops *ops __used,
 				       union perf_event *event,
 				       struct perf_sample *sample __used,
-				       struct perf_session *session)
+				       struct machine *machine)
 {
-	struct thread *thread = perf_session__findnew(session, event->fork.tid);
+	struct thread *thread = machine__findnew_thread(machine, event->fork.tid);
 
 	dump_printf("(%d:%d):(%d:%d)\n", event->fork.pid, event->fork.tid,
 		    event->fork.ppid, event->fork.ptid);
 
 	if (thread) {
-		rb_erase(&thread->rb_node, &session->host_machine.threads);
-		session->host_machine.last_match = NULL;
+		rb_erase(&thread->rb_node, &machine->threads);
+		machine->last_match = NULL;
 		thread__delete(thread);
 	}
 
diff --git a/tools/perf/util/event.c b/tools/perf/util/event.c
index 4800f38c7277..0cdc811c48e2 100644
--- a/tools/perf/util/event.c
+++ b/tools/perf/util/event.c
@@ -1,7 +1,6 @@
1#include <linux/types.h> 1#include <linux/types.h>
2#include "event.h" 2#include "event.h"
3#include "debug.h" 3#include "debug.h"
4#include "session.h"
5#include "sort.h" 4#include "sort.h"
6#include "string.h" 5#include "string.h"
7#include "strlist.h" 6#include "strlist.h"
@@ -47,7 +46,7 @@ static struct perf_sample synth_sample = {
47static pid_t perf_event__synthesize_comm(struct perf_event_ops *ops, 46static pid_t perf_event__synthesize_comm(struct perf_event_ops *ops,
48 union perf_event *event, pid_t pid, 47 union perf_event *event, pid_t pid,
49 int full, perf_event__handler_t process, 48 int full, perf_event__handler_t process,
50 struct perf_session *session) 49 struct machine *machine)
51{ 50{
52 char filename[PATH_MAX]; 51 char filename[PATH_MAX];
53 char bf[BUFSIZ]; 52 char bf[BUFSIZ];
@@ -93,14 +92,14 @@ out_race:
93 92
94 event->comm.header.type = PERF_RECORD_COMM; 93 event->comm.header.type = PERF_RECORD_COMM;
95 size = ALIGN(size, sizeof(u64)); 94 size = ALIGN(size, sizeof(u64));
96 memset(event->comm.comm + size, 0, session->id_hdr_size); 95 memset(event->comm.comm + size, 0, machine->id_hdr_size);
97 event->comm.header.size = (sizeof(event->comm) - 96 event->comm.header.size = (sizeof(event->comm) -
98 (sizeof(event->comm.comm) - size) + 97 (sizeof(event->comm.comm) - size) +
99 session->id_hdr_size); 98 machine->id_hdr_size);
100 if (!full) { 99 if (!full) {
101 event->comm.tid = pid; 100 event->comm.tid = pid;
102 101
103 process(ops, event, &synth_sample, session); 102 process(ops, event, &synth_sample, machine);
104 goto out; 103 goto out;
105 } 104 }
106 105
@@ -118,7 +117,7 @@ out_race:
118 117
119 event->comm.tid = pid; 118 event->comm.tid = pid;
120 119
121 process(ops, event, &synth_sample, session); 120 process(ops, event, &synth_sample, machine);
122 } 121 }
123 122
124 closedir(tasks); 123 closedir(tasks);
@@ -132,7 +131,7 @@ static int perf_event__synthesize_mmap_events(struct perf_event_ops *ops,
132 union perf_event *event, 131 union perf_event *event,
133 pid_t pid, pid_t tgid, 132 pid_t pid, pid_t tgid,
134 perf_event__handler_t process, 133 perf_event__handler_t process,
135 struct perf_session *session) 134 struct machine *machine)
136{ 135{
137 char filename[PATH_MAX]; 136 char filename[PATH_MAX];
138 FILE *fp; 137 FILE *fp;
@@ -195,12 +194,12 @@ static int perf_event__synthesize_mmap_events(struct perf_event_ops *ops,
195 event->mmap.len -= event->mmap.start; 194 event->mmap.len -= event->mmap.start;
196 event->mmap.header.size = (sizeof(event->mmap) - 195 event->mmap.header.size = (sizeof(event->mmap) -
197 (sizeof(event->mmap.filename) - size)); 196 (sizeof(event->mmap.filename) - size));
198 memset(event->mmap.filename + size, 0, session->id_hdr_size); 197 memset(event->mmap.filename + size, 0, machine->id_hdr_size);
199 event->mmap.header.size += session->id_hdr_size; 198 event->mmap.header.size += machine->id_hdr_size;
200 event->mmap.pid = tgid; 199 event->mmap.pid = tgid;
201 event->mmap.tid = pid; 200 event->mmap.tid = pid;
202 201
203 process(ops, event, &synth_sample, session); 202 process(ops, event, &synth_sample, machine);
204 } 203 }
205 } 204 }
206 205
@@ -210,13 +209,12 @@ static int perf_event__synthesize_mmap_events(struct perf_event_ops *ops,
210 209
211int perf_event__synthesize_modules(struct perf_event_ops *ops, 210int perf_event__synthesize_modules(struct perf_event_ops *ops,
212 perf_event__handler_t process, 211 perf_event__handler_t process,
213 struct perf_session *session,
214 struct machine *machine) 212 struct machine *machine)
215{ 213{
216 struct rb_node *nd; 214 struct rb_node *nd;
217 struct map_groups *kmaps = &machine->kmaps; 215 struct map_groups *kmaps = &machine->kmaps;
218 union perf_event *event = zalloc((sizeof(event->mmap) + 216 union perf_event *event = zalloc((sizeof(event->mmap) +
219 session->id_hdr_size)); 217 machine->id_hdr_size));
220 if (event == NULL) { 218 if (event == NULL) {
221 pr_debug("Not enough memory synthesizing mmap event " 219 pr_debug("Not enough memory synthesizing mmap event "
222 "for kernel modules\n"); 220 "for kernel modules\n");
@@ -246,15 +244,15 @@ int perf_event__synthesize_modules(struct perf_event_ops *ops,
246 event->mmap.header.type = PERF_RECORD_MMAP; 244 event->mmap.header.type = PERF_RECORD_MMAP;
247 event->mmap.header.size = (sizeof(event->mmap) - 245 event->mmap.header.size = (sizeof(event->mmap) -
248 (sizeof(event->mmap.filename) - size)); 246 (sizeof(event->mmap.filename) - size));
249 memset(event->mmap.filename + size, 0, session->id_hdr_size); 247 memset(event->mmap.filename + size, 0, machine->id_hdr_size);
250 event->mmap.header.size += session->id_hdr_size; 248 event->mmap.header.size += machine->id_hdr_size;
251 event->mmap.start = pos->start; 249 event->mmap.start = pos->start;
252 event->mmap.len = pos->end - pos->start; 250 event->mmap.len = pos->end - pos->start;
253 event->mmap.pid = machine->pid; 251 event->mmap.pid = machine->pid;
254 252
255 memcpy(event->mmap.filename, pos->dso->long_name, 253 memcpy(event->mmap.filename, pos->dso->long_name,
256 pos->dso->long_name_len + 1); 254 pos->dso->long_name_len + 1);
257 process(ops, event, &synth_sample, session); 255 process(ops, event, &synth_sample, machine);
258 } 256 }
259 257
260 free(event); 258 free(event);
@@ -265,29 +263,29 @@ static int __event__synthesize_thread(union perf_event *comm_event,
265 union perf_event *mmap_event, 263 union perf_event *mmap_event,
266 pid_t pid, perf_event__handler_t process, 264 pid_t pid, perf_event__handler_t process,
267 struct perf_event_ops *ops, 265 struct perf_event_ops *ops,
268 struct perf_session *session) 266 struct machine *machine)
269{ 267{
270 pid_t tgid = perf_event__synthesize_comm(ops, comm_event, pid, 1, process, 268 pid_t tgid = perf_event__synthesize_comm(ops, comm_event, pid, 1,
271 session); 269 process, machine);
272 if (tgid == -1) 270 if (tgid == -1)
273 return -1; 271 return -1;
274 return perf_event__synthesize_mmap_events(ops, mmap_event, pid, tgid, 272 return perf_event__synthesize_mmap_events(ops, mmap_event, pid, tgid,
275 process, session); 273 process, machine);
276} 274}
277 275
278int perf_event__synthesize_thread_map(struct perf_event_ops *ops, 276int perf_event__synthesize_thread_map(struct perf_event_ops *ops,
279 struct thread_map *threads, 277 struct thread_map *threads,
280 perf_event__handler_t process, 278 perf_event__handler_t process,
281 struct perf_session *session) 279 struct machine *machine)
282{ 280{
283 union perf_event *comm_event, *mmap_event; 281 union perf_event *comm_event, *mmap_event;
284 int err = -1, thread; 282 int err = -1, thread;
285 283
286 comm_event = malloc(sizeof(comm_event->comm) + session->id_hdr_size); 284 comm_event = malloc(sizeof(comm_event->comm) + machine->id_hdr_size);
287 if (comm_event == NULL) 285 if (comm_event == NULL)
288 goto out; 286 goto out;
289 287
290 mmap_event = malloc(sizeof(mmap_event->mmap) + session->id_hdr_size); 288 mmap_event = malloc(sizeof(mmap_event->mmap) + machine->id_hdr_size);
291 if (mmap_event == NULL) 289 if (mmap_event == NULL)
292 goto out_free_comm; 290 goto out_free_comm;
293 291
@@ -295,7 +293,7 @@ int perf_event__synthesize_thread_map(struct perf_event_ops *ops,
295 for (thread = 0; thread < threads->nr; ++thread) { 293 for (thread = 0; thread < threads->nr; ++thread) {
296 if (__event__synthesize_thread(comm_event, mmap_event, 294 if (__event__synthesize_thread(comm_event, mmap_event,
297 threads->map[thread], 295 threads->map[thread],
298 process, ops, session)) { 296 process, ops, machine)) {
299 err = -1; 297 err = -1;
300 break; 298 break;
301 } 299 }
@@ -309,18 +307,18 @@ out:
309 307
310int perf_event__synthesize_threads(struct perf_event_ops *ops, 308int perf_event__synthesize_threads(struct perf_event_ops *ops,
311 perf_event__handler_t process, 309 perf_event__handler_t process,
312 struct perf_session *session) 310 struct machine *machine)
313{ 311{
314 DIR *proc; 312 DIR *proc;
315 struct dirent dirent, *next; 313 struct dirent dirent, *next;
316 union perf_event *comm_event, *mmap_event; 314 union perf_event *comm_event, *mmap_event;
317 int err = -1; 315 int err = -1;
318 316
319 comm_event = malloc(sizeof(comm_event->comm) + session->id_hdr_size); 317 comm_event = malloc(sizeof(comm_event->comm) + machine->id_hdr_size);
320 if (comm_event == NULL) 318 if (comm_event == NULL)
321 goto out; 319 goto out;
322 320
323 mmap_event = malloc(sizeof(mmap_event->mmap) + session->id_hdr_size); 321 mmap_event = malloc(sizeof(mmap_event->mmap) + machine->id_hdr_size);
324 if (mmap_event == NULL) 322 if (mmap_event == NULL)
325 goto out_free_comm; 323 goto out_free_comm;
326 324
@@ -336,7 +334,7 @@ int perf_event__synthesize_threads(struct perf_event_ops *ops,
336 continue; 334 continue;
337 335
338 __event__synthesize_thread(comm_event, mmap_event, pid, 336 __event__synthesize_thread(comm_event, mmap_event, pid,
339 process, ops, session); 337 process, ops, machine);
340 } 338 }
341 339
342 closedir(proc); 340 closedir(proc);
@@ -373,7 +371,6 @@ static int find_symbol_cb(void *arg, const char *name, char type,
373 371
374int perf_event__synthesize_kernel_mmap(struct perf_event_ops *ops, 372int perf_event__synthesize_kernel_mmap(struct perf_event_ops *ops,
375 perf_event__handler_t process, 373 perf_event__handler_t process,
376 struct perf_session *session,
377 struct machine *machine, 374 struct machine *machine,
378 const char *symbol_name) 375 const char *symbol_name)
379{ 376{
@@ -390,7 +387,7 @@ int perf_event__synthesize_kernel_mmap(struct perf_event_ops *ops,
390 */ 387 */
391 struct process_symbol_args args = { .name = symbol_name, }; 388 struct process_symbol_args args = { .name = symbol_name, };
392 union perf_event *event = zalloc((sizeof(event->mmap) + 389 union perf_event *event = zalloc((sizeof(event->mmap) +
393 session->id_hdr_size)); 390 machine->id_hdr_size));
394 if (event == NULL) { 391 if (event == NULL) {
395 pr_debug("Not enough memory synthesizing mmap event " 392 pr_debug("Not enough memory synthesizing mmap event "
396 "for kernel modules\n"); 393 "for kernel modules\n");
@@ -424,13 +421,13 @@ int perf_event__synthesize_kernel_mmap(struct perf_event_ops *ops,
424 size = ALIGN(size, sizeof(u64)); 421 size = ALIGN(size, sizeof(u64));
425 event->mmap.header.type = PERF_RECORD_MMAP; 422 event->mmap.header.type = PERF_RECORD_MMAP;
426 event->mmap.header.size = (sizeof(event->mmap) - 423 event->mmap.header.size = (sizeof(event->mmap) -
427 (sizeof(event->mmap.filename) - size) + session->id_hdr_size); 424 (sizeof(event->mmap.filename) - size) + machine->id_hdr_size);
428 event->mmap.pgoff = args.start; 425 event->mmap.pgoff = args.start;
429 event->mmap.start = map->start; 426 event->mmap.start = map->start;
430 event->mmap.len = map->end - event->mmap.start; 427 event->mmap.len = map->end - event->mmap.start;
431 event->mmap.pid = machine->pid; 428 event->mmap.pid = machine->pid;
432 429
433 err = process(ops, event, &synth_sample, session); 430 err = process(ops, event, &synth_sample, machine);
434 free(event); 431 free(event);
435 432
436 return err; 433 return err;
@@ -439,9 +436,9 @@ int perf_event__synthesize_kernel_mmap(struct perf_event_ops *ops,
439int perf_event__process_comm(struct perf_event_ops *ops __used, 436int perf_event__process_comm(struct perf_event_ops *ops __used,
440 union perf_event *event, 437 union perf_event *event,
441 struct perf_sample *sample __used, 438 struct perf_sample *sample __used,
442 struct perf_session *session) 439 struct machine *machine)
443{ 440{
444 struct thread *thread = perf_session__findnew(session, event->comm.tid); 441 struct thread *thread = machine__findnew_thread(machine, event->comm.tid);
445 442
446 dump_printf(": %s:%d\n", event->comm.comm, event->comm.tid); 443 dump_printf(": %s:%d\n", event->comm.comm, event->comm.tid);
447 444
@@ -456,11 +453,10 @@ int perf_event__process_comm(struct perf_event_ops *ops __used,
456int perf_event__process_lost(struct perf_event_ops *ops __used, 453int perf_event__process_lost(struct perf_event_ops *ops __used,
457 union perf_event *event, 454 union perf_event *event,
458 struct perf_sample *sample __used, 455 struct perf_sample *sample __used,
459 struct perf_session *session) 456 struct machine *machine __used)
460{ 457{
461 dump_printf(": id:%" PRIu64 ": lost:%" PRIu64 "\n", 458 dump_printf(": id:%" PRIu64 ": lost:%" PRIu64 "\n",
462 event->lost.id, event->lost.lost); 459 event->lost.id, event->lost.lost);
463 session->hists.stats.total_lost += event->lost.lost;
464 return 0; 460 return 0;
465} 461}
466 462
@@ -479,20 +475,13 @@ static void perf_event__set_kernel_mmap_len(union perf_event *event,
479 475
480static int perf_event__process_kernel_mmap(struct perf_event_ops *ops __used, 476static int perf_event__process_kernel_mmap(struct perf_event_ops *ops __used,
481 union perf_event *event, 477 union perf_event *event,
482 struct perf_session *session) 478 struct machine *machine)
483{ 479{
484 struct map *map; 480 struct map *map;
485 char kmmap_prefix[PATH_MAX]; 481 char kmmap_prefix[PATH_MAX];
486 struct machine *machine;
487 enum dso_kernel_type kernel_type; 482 enum dso_kernel_type kernel_type;
488 bool is_kernel_mmap; 483 bool is_kernel_mmap;
489 484
490 machine = perf_session__findnew_machine(session, event->mmap.pid);
491 if (!machine) {
492 pr_err("Can't find id %d's machine\n", event->mmap.pid);
493 goto out_problem;
494 }
495
496 machine__mmap_name(machine, kmmap_prefix, sizeof(kmmap_prefix)); 485 machine__mmap_name(machine, kmmap_prefix, sizeof(kmmap_prefix));
497 if (machine__is_host(machine)) 486 if (machine__is_host(machine))
498 kernel_type = DSO_TYPE_KERNEL; 487 kernel_type = DSO_TYPE_KERNEL;
@@ -559,9 +548,9 @@ static int perf_event__process_kernel_mmap(struct perf_event_ops *ops __used,
559 * time /proc/sys/kernel/kptr_restrict was non zero. 548 * time /proc/sys/kernel/kptr_restrict was non zero.
560 */ 549 */
561 if (event->mmap.pgoff != 0) { 550 if (event->mmap.pgoff != 0) {
562 perf_session__set_kallsyms_ref_reloc_sym(machine->vmlinux_maps, 551 maps__set_kallsyms_ref_reloc_sym(machine->vmlinux_maps,
563 symbol_name, 552 symbol_name,
564 event->mmap.pgoff); 553 event->mmap.pgoff);
565 } 554 }
566 555
567 if (machine__is_default_guest(machine)) { 556 if (machine__is_default_guest(machine)) {
@@ -580,9 +569,8 @@ out_problem:
580int perf_event__process_mmap(struct perf_event_ops *ops, 569int perf_event__process_mmap(struct perf_event_ops *ops,
581 union perf_event *event, 570 union perf_event *event,
582 struct perf_sample *sample __used, 571 struct perf_sample *sample __used,
583 struct perf_session *session) 572 struct machine *machine)
584{ 573{
585 struct machine *machine;
586 struct thread *thread; 574 struct thread *thread;
587 struct map *map; 575 struct map *map;
588 u8 cpumode = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK; 576 u8 cpumode = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;
@@ -594,16 +582,13 @@ int perf_event__process_mmap(struct perf_event_ops *ops,
594 582
595 if (cpumode == PERF_RECORD_MISC_GUEST_KERNEL || 583 if (cpumode == PERF_RECORD_MISC_GUEST_KERNEL ||
596 cpumode == PERF_RECORD_MISC_KERNEL) { 584 cpumode == PERF_RECORD_MISC_KERNEL) {
597 ret = perf_event__process_kernel_mmap(ops, event, session); 585 ret = perf_event__process_kernel_mmap(ops, event, machine);
598 if (ret < 0) 586 if (ret < 0)
599 goto out_problem; 587 goto out_problem;
600 return 0; 588 return 0;
601 } 589 }
602 590
603 machine = perf_session__find_host_machine(session); 591 thread = machine__findnew_thread(machine, event->mmap.pid);
604 if (machine == NULL)
605 goto out_problem;
606 thread = perf_session__findnew(session, event->mmap.pid);
607 if (thread == NULL) 592 if (thread == NULL)
608 goto out_problem; 593 goto out_problem;
609 map = map__new(&machine->user_dsos, event->mmap.start, 594 map = map__new(&machine->user_dsos, event->mmap.start,
@@ -624,16 +609,16 @@ out_problem:
624int perf_event__process_task(struct perf_event_ops *ops __used, 609int perf_event__process_task(struct perf_event_ops *ops __used,
625 union perf_event *event, 610 union perf_event *event,
626 struct perf_sample *sample __used, 611 struct perf_sample *sample __used,
627 struct perf_session *session) 612 struct machine *machine)
628{ 613{
629 struct thread *thread = perf_session__findnew(session, event->fork.tid); 614 struct thread *thread = machine__findnew_thread(machine, event->fork.tid);
630 struct thread *parent = perf_session__findnew(session, event->fork.ptid); 615 struct thread *parent = machine__findnew_thread(machine, event->fork.ptid);
631 616
632 dump_printf("(%d:%d):(%d:%d)\n", event->fork.pid, event->fork.tid, 617 dump_printf("(%d:%d):(%d:%d)\n", event->fork.pid, event->fork.tid,
633 event->fork.ppid, event->fork.ptid); 618 event->fork.ppid, event->fork.ptid);
634 619
635 if (event->header.type == PERF_RECORD_EXIT) { 620 if (event->header.type == PERF_RECORD_EXIT) {
636 perf_session__remove_thread(session, thread); 621 machine__remove_thread(machine, thread);
637 return 0; 622 return 0;
638 } 623 }
639 624
@@ -647,21 +632,21 @@ int perf_event__process_task(struct perf_event_ops *ops __used,
647} 632}
648 633
649int perf_event__process(struct perf_event_ops *ops, union perf_event *event, 634int perf_event__process(struct perf_event_ops *ops, union perf_event *event,
650 struct perf_sample *sample, struct perf_session *session) 635 struct perf_sample *sample, struct machine *machine)
651{ 636{
652 switch (event->header.type) { 637 switch (event->header.type) {
653 case PERF_RECORD_COMM: 638 case PERF_RECORD_COMM:
654 perf_event__process_comm(ops, event, sample, session); 639 perf_event__process_comm(ops, event, sample, machine);
655 break; 640 break;
656 case PERF_RECORD_MMAP: 641 case PERF_RECORD_MMAP:
657 perf_event__process_mmap(ops, event, sample, session); 642 perf_event__process_mmap(ops, event, sample, machine);
658 break; 643 break;
659 case PERF_RECORD_FORK: 644 case PERF_RECORD_FORK:
660 case PERF_RECORD_EXIT: 645 case PERF_RECORD_EXIT:
661 perf_event__process_task(ops, event, sample, session); 646 perf_event__process_task(ops, event, sample, machine);
662 break; 647 break;
663 case PERF_RECORD_LOST: 648 case PERF_RECORD_LOST:
664 perf_event__process_lost(ops, event, sample, session); 649 perf_event__process_lost(ops, event, sample, machine);
665 default: 650 default:
666 break; 651 break;
667 } 652 }
@@ -670,36 +655,29 @@ int perf_event__process(struct perf_event_ops *ops, union perf_event *event,
670} 655}
671 656
672void thread__find_addr_map(struct thread *self, 657void thread__find_addr_map(struct thread *self,
673 struct perf_session *session, u8 cpumode, 658 struct machine *machine, u8 cpumode,
674 enum map_type type, pid_t pid, u64 addr, 659 enum map_type type, u64 addr,
675 struct addr_location *al) 660 struct addr_location *al)
676{ 661{
677 struct map_groups *mg = &self->mg; 662 struct map_groups *mg = &self->mg;
678 struct machine *machine = NULL;
679 663
680 al->thread = self; 664 al->thread = self;
681 al->addr = addr; 665 al->addr = addr;
682 al->cpumode = cpumode; 666 al->cpumode = cpumode;
683 al->filtered = false; 667 al->filtered = false;
684 668
669 if (machine == NULL) {
670 al->map = NULL;
671 return;
672 }
673
685 if (cpumode == PERF_RECORD_MISC_KERNEL && perf_host) { 674 if (cpumode == PERF_RECORD_MISC_KERNEL && perf_host) {
686 al->level = 'k'; 675 al->level = 'k';
687 machine = perf_session__find_host_machine(session);
688 if (machine == NULL) {
689 al->map = NULL;
690 return;
691 }
692 mg = &machine->kmaps; 676 mg = &machine->kmaps;
693 } else if (cpumode == PERF_RECORD_MISC_USER && perf_host) { 677 } else if (cpumode == PERF_RECORD_MISC_USER && perf_host) {
694 al->level = '.'; 678 al->level = '.';
695 machine = perf_session__find_host_machine(session);
696 } else if (cpumode == PERF_RECORD_MISC_GUEST_KERNEL && perf_guest) { 679 } else if (cpumode == PERF_RECORD_MISC_GUEST_KERNEL && perf_guest) {
697 al->level = 'g'; 680 al->level = 'g';
698 machine = perf_session__find_machine(session, pid);
699 if (machine == NULL) {
700 al->map = NULL;
701 return;
702 }
703 mg = &machine->kmaps; 681 mg = &machine->kmaps;
704 } else { 682 } else {
705 /* 683 /*
@@ -745,13 +723,12 @@ try_again:
745 al->addr = al->map->map_ip(al->map, al->addr); 723 al->addr = al->map->map_ip(al->map, al->addr);
746} 724}
747 725
748void thread__find_addr_location(struct thread *self, 726void thread__find_addr_location(struct thread *thread, struct machine *machine,
749 struct perf_session *session, u8 cpumode, 727 u8 cpumode, enum map_type type, u64 addr,
750 enum map_type type, pid_t pid, u64 addr,
751 struct addr_location *al, 728 struct addr_location *al,
752 symbol_filter_t filter) 729 symbol_filter_t filter)
753{ 730{
754 thread__find_addr_map(self, session, cpumode, type, pid, addr, al); 731 thread__find_addr_map(thread, machine, cpumode, type, addr, al);
755 if (al->map != NULL) 732 if (al->map != NULL)
756 al->sym = map__find_symbol(al->map, al->addr, filter); 733 al->sym = map__find_symbol(al->map, al->addr, filter);
757 else 734 else
@@ -759,13 +736,13 @@ void thread__find_addr_location(struct thread *self,
759} 736}
760 737
761int perf_event__preprocess_sample(const union perf_event *event, 738int perf_event__preprocess_sample(const union perf_event *event,
762 struct perf_session *session, 739 struct machine *machine,
763 struct addr_location *al, 740 struct addr_location *al,
764 struct perf_sample *sample, 741 struct perf_sample *sample,
765 symbol_filter_t filter) 742 symbol_filter_t filter)
766{ 743{
767 u8 cpumode = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK; 744 u8 cpumode = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;
768 struct thread *thread = perf_session__findnew(session, event->ip.pid); 745 struct thread *thread = machine__findnew_thread(machine, event->ip.pid);
769 746
770 if (thread == NULL) 747 if (thread == NULL)
771 return -1; 748 return -1;
@@ -776,18 +753,18 @@ int perf_event__preprocess_sample(const union perf_event *event,
776 753
777 dump_printf(" ... thread: %s:%d\n", thread->comm, thread->pid); 754 dump_printf(" ... thread: %s:%d\n", thread->comm, thread->pid);
778 /* 755 /*
779 * Have we already created the kernel maps for the host machine? 756 * Have we already created the kernel maps for this machine?
780 * 757 *
781 * This should have happened earlier, when we processed the kernel MMAP 758 * This should have happened earlier, when we processed the kernel MMAP
782 * events, but for older perf.data files there was no such thing, so do 759 * events, but for older perf.data files there was no such thing, so do
783 * it now. 760 * it now.
784 */ 761 */
785 if (cpumode == PERF_RECORD_MISC_KERNEL && 762 if (cpumode == PERF_RECORD_MISC_KERNEL &&
786 session->host_machine.vmlinux_maps[MAP__FUNCTION] == NULL) 763 machine->vmlinux_maps[MAP__FUNCTION] == NULL)
787 machine__create_kernel_maps(&session->host_machine); 764 machine__create_kernel_maps(machine);
788 765
789 thread__find_addr_map(thread, session, cpumode, MAP__FUNCTION, 766 thread__find_addr_map(thread, machine, cpumode, MAP__FUNCTION,
790 event->ip.pid, event->ip.ip, al); 767 event->ip.ip, al);
791 dump_printf(" ...... dso: %s\n", 768 dump_printf(" ...... dso: %s\n",
792 al->map ? al->map->dso->long_name : 769 al->map ? al->map->dso->long_name :
793 al->level == 'H' ? "[hypervisor]" : "<not found>"); 770 al->level == 'H' ? "[hypervisor]" : "<not found>");
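Illustration only: with the machine passed in directly, thread__find_addr_map() in event.c no longer has to look a machine up through the session; what remains is the choice between the machine's kernel map groups and the thread's own user maps, driven by cpumode. A compilable sketch of that selection follows; the constants and struct layouts are simplified stand-ins.

#include <stdio.h>

#define RECORD_MISC_KERNEL	1
#define RECORD_MISC_USER	2

struct map_groups { const char *name; };
struct machine { struct map_groups kmaps; };
struct thread  { struct map_groups mg; };

static struct map_groups *pick_map_groups(struct thread *thread,
					  struct machine *machine,
					  unsigned char cpumode, char *level)
{
	if (cpumode == RECORD_MISC_KERNEL) {
		*level = 'k';
		return &machine->kmaps;	/* kernel addresses: machine-wide maps */
	}
	*level = '.';
	return &thread->mg;		/* user addresses: this thread's maps */
}

int main(void)
{
	struct machine host = { .kmaps = { "kernel maps" } };
	struct thread  t    = { .mg    = { "user maps"   } };
	char level;
	struct map_groups *mg = pick_map_groups(&t, &host,
						RECORD_MISC_KERNEL, &level);

	printf("level=%c maps=%s\n", level, mg->name);
	return 0;
}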
diff --git a/tools/perf/util/event.h b/tools/perf/util/event.h
index 669409d35710..1564877e8703 100644
--- a/tools/perf/util/event.h
+++ b/tools/perf/util/event.h
@@ -142,56 +142,53 @@ union perf_event {
142void perf_event__print_totals(void); 142void perf_event__print_totals(void);
143 143
144struct perf_event_ops; 144struct perf_event_ops;
145struct perf_session;
146struct thread_map; 145struct thread_map;
147 146
148typedef int (*perf_event__handler_t)(struct perf_event_ops *ops, 147typedef int (*perf_event__handler_t)(struct perf_event_ops *ops,
149 union perf_event *event, 148 union perf_event *event,
150 struct perf_sample *sample, 149 struct perf_sample *sample,
151 struct perf_session *session); 150 struct machine *machine);
152 151
153int perf_event__synthesize_thread_map(struct perf_event_ops *ops, 152int perf_event__synthesize_thread_map(struct perf_event_ops *ops,
154 struct thread_map *threads, 153 struct thread_map *threads,
155 perf_event__handler_t process, 154 perf_event__handler_t process,
156 struct perf_session *session); 155 struct machine *machine);
157int perf_event__synthesize_threads(struct perf_event_ops *ops, 156int perf_event__synthesize_threads(struct perf_event_ops *ops,
158 perf_event__handler_t process, 157 perf_event__handler_t process,
159 struct perf_session *session); 158 struct machine *machine);
160int perf_event__synthesize_kernel_mmap(struct perf_event_ops *ops, 159int perf_event__synthesize_kernel_mmap(struct perf_event_ops *ops,
161 perf_event__handler_t process, 160 perf_event__handler_t process,
162 struct perf_session *session,
163 struct machine *machine, 161 struct machine *machine,
164 const char *symbol_name); 162 const char *symbol_name);
165 163
166int perf_event__synthesize_modules(struct perf_event_ops *ops, 164int perf_event__synthesize_modules(struct perf_event_ops *ops,
167 perf_event__handler_t process, 165 perf_event__handler_t process,
168 struct perf_session *session,
169 struct machine *machine); 166 struct machine *machine);
170 167
171int perf_event__process_comm(struct perf_event_ops *ops, 168int perf_event__process_comm(struct perf_event_ops *ops,
172 union perf_event *event, 169 union perf_event *event,
173 struct perf_sample *sample, 170 struct perf_sample *sample,
174 struct perf_session *session); 171 struct machine *machine);
175int perf_event__process_lost(struct perf_event_ops *ops, 172int perf_event__process_lost(struct perf_event_ops *ops,
176 union perf_event *event, 173 union perf_event *event,
177 struct perf_sample *sample, 174 struct perf_sample *sample,
178 struct perf_session *session); 175 struct machine *machine);
179int perf_event__process_mmap(struct perf_event_ops *ops, 176int perf_event__process_mmap(struct perf_event_ops *ops,
180 union perf_event *event, 177 union perf_event *event,
181 struct perf_sample *sample, 178 struct perf_sample *sample,
182 struct perf_session *session); 179 struct machine *machine);
183int perf_event__process_task(struct perf_event_ops *ops, 180int perf_event__process_task(struct perf_event_ops *ops,
184 union perf_event *event, 181 union perf_event *event,
185 struct perf_sample *sample, 182 struct perf_sample *sample,
186 struct perf_session *session); 183 struct machine *machine);
187int perf_event__process(struct perf_event_ops *ops, 184int perf_event__process(struct perf_event_ops *ops,
188 union perf_event *event, 185 union perf_event *event,
189 struct perf_sample *sample, 186 struct perf_sample *sample,
190 struct perf_session *session); 187 struct machine *machine);
191 188
192struct addr_location; 189struct addr_location;
193int perf_event__preprocess_sample(const union perf_event *self, 190int perf_event__preprocess_sample(const union perf_event *self,
194 struct perf_session *session, 191 struct machine *machine,
195 struct addr_location *al, 192 struct addr_location *al,
196 struct perf_sample *sample, 193 struct perf_sample *sample,
197 symbol_filter_t filter); 194 symbol_filter_t filter);
diff --git a/tools/perf/util/header.c b/tools/perf/util/header.c
index ab3a2b0e8f06..db280d6ca898 100644
--- a/tools/perf/util/header.c
+++ b/tools/perf/util/header.c
@@ -2072,8 +2072,7 @@ out_delete_evlist:
2072 2072
2073int perf_event__synthesize_attr(struct perf_event_ops *ops, 2073int perf_event__synthesize_attr(struct perf_event_ops *ops,
2074 struct perf_event_attr *attr, u16 ids, u64 *id, 2074 struct perf_event_attr *attr, u16 ids, u64 *id,
2075 perf_event__handler_t process, 2075 perf_event__handler_t process)
2076 struct perf_session *session)
2077{ 2076{
2078 union perf_event *ev; 2077 union perf_event *ev;
2079 size_t size; 2078 size_t size;
@@ -2095,7 +2094,7 @@ int perf_event__synthesize_attr(struct perf_event_ops *ops,
2095 ev->attr.header.type = PERF_RECORD_HEADER_ATTR; 2094 ev->attr.header.type = PERF_RECORD_HEADER_ATTR;
2096 ev->attr.header.size = size; 2095 ev->attr.header.size = size;
2097 2096
2098 err = process(ops, ev, NULL, session); 2097 err = process(ops, ev, NULL, NULL);
2099 2098
2100 free(ev); 2099 free(ev);
2101 2100
@@ -2111,7 +2110,7 @@ int perf_event__synthesize_attrs(struct perf_event_ops *ops,
2111 2110
2112 list_for_each_entry(attr, &session->evlist->entries, node) { 2111 list_for_each_entry(attr, &session->evlist->entries, node) {
2113 err = perf_event__synthesize_attr(ops, &attr->attr, attr->ids, 2112 err = perf_event__synthesize_attr(ops, &attr->attr, attr->ids,
2114 attr->id, process, session); 2113 attr->id, process);
2115 if (err) { 2114 if (err) {
2116 pr_debug("failed to create perf header attribute\n"); 2115 pr_debug("failed to create perf header attribute\n");
2117 return err; 2116 return err;
@@ -2161,7 +2160,7 @@ int perf_event__process_attr(union perf_event *event,
2161int perf_event__synthesize_event_type(struct perf_event_ops *ops, 2160int perf_event__synthesize_event_type(struct perf_event_ops *ops,
2162 u64 event_id, char *name, 2161 u64 event_id, char *name,
2163 perf_event__handler_t process, 2162 perf_event__handler_t process,
2164 struct perf_session *session) 2163 struct machine *machine)
2165{ 2164{
2166 union perf_event ev; 2165 union perf_event ev;
2167 size_t size = 0; 2166 size_t size = 0;
@@ -2179,14 +2178,14 @@ int perf_event__synthesize_event_type(struct perf_event_ops *ops,
2179 ev.event_type.header.size = sizeof(ev.event_type) - 2178 ev.event_type.header.size = sizeof(ev.event_type) -
2180 (sizeof(ev.event_type.event_type.name) - size); 2179 (sizeof(ev.event_type.event_type.name) - size);
2181 2180
2182 err = process(ops, &ev, NULL, session); 2181 err = process(ops, &ev, NULL, machine);
2183 2182
2184 return err; 2183 return err;
2185} 2184}
2186 2185
2187int perf_event__synthesize_event_types(struct perf_event_ops *ops, 2186int perf_event__synthesize_event_types(struct perf_event_ops *ops,
2188 perf_event__handler_t process, 2187 perf_event__handler_t process,
2189 struct perf_session *session) 2188 struct machine *machine)
2190{ 2189{
2191 struct perf_trace_event_type *type; 2190 struct perf_trace_event_type *type;
2192 int i, err = 0; 2191 int i, err = 0;
@@ -2196,7 +2195,7 @@ int perf_event__synthesize_event_types(struct perf_event_ops *ops,
2196 2195
2197 err = perf_event__synthesize_event_type(ops, type->event_id, 2196 err = perf_event__synthesize_event_type(ops, type->event_id,
2198 type->name, process, 2197 type->name, process,
2199 session); 2198 machine);
2200 if (err) { 2199 if (err) {
2201 pr_debug("failed to create perf header event type\n"); 2200 pr_debug("failed to create perf header event type\n");
2202 return err; 2201 return err;
@@ -2207,8 +2206,7 @@ int perf_event__synthesize_event_types(struct perf_event_ops *ops,
2207} 2206}
2208 2207
2209int perf_event__process_event_type(struct perf_event_ops *ops __unused, 2208int perf_event__process_event_type(struct perf_event_ops *ops __unused,
2210 union perf_event *event, 2209 union perf_event *event)
2211 struct perf_session *session __unused)
2212{ 2210{
2213 if (perf_header__push_event(event->event_type.event_type.event_id, 2211 if (perf_header__push_event(event->event_type.event_type.event_id,
2214 event->event_type.event_type.name) < 0) 2212 event->event_type.event_type.name) < 0)
@@ -2219,8 +2217,7 @@ int perf_event__process_event_type(struct perf_event_ops *ops __unused,
2219 2217
2220int perf_event__synthesize_tracing_data(struct perf_event_ops *ops, int fd, 2218int perf_event__synthesize_tracing_data(struct perf_event_ops *ops, int fd,
2221 struct perf_evlist *evlist, 2219 struct perf_evlist *evlist,
2222 perf_event__handler_t process, 2220 perf_event__handler_t process)
2223 struct perf_session *session __unused)
2224{ 2221{
2225 union perf_event ev; 2222 union perf_event ev;
2226 struct tracing_data *tdata; 2223 struct tracing_data *tdata;
@@ -2251,7 +2248,7 @@ int perf_event__synthesize_tracing_data(struct perf_event_ops *ops, int fd,
2251 ev.tracing_data.header.size = sizeof(ev.tracing_data); 2248 ev.tracing_data.header.size = sizeof(ev.tracing_data);
2252 ev.tracing_data.size = aligned_size; 2249 ev.tracing_data.size = aligned_size;
2253 2250
2254 process(ops, &ev, NULL, session); 2251 process(ops, &ev, NULL, NULL);
2255 2252
2256 /* 2253 /*
2257 * The put function will copy all the tracing data 2254 * The put function will copy all the tracing data
@@ -2296,8 +2293,7 @@ int perf_event__process_tracing_data(union perf_event *event,
2296int perf_event__synthesize_build_id(struct perf_event_ops *ops, 2293int perf_event__synthesize_build_id(struct perf_event_ops *ops,
2297 struct dso *pos, u16 misc, 2294 struct dso *pos, u16 misc,
2298 perf_event__handler_t process, 2295 perf_event__handler_t process,
2299 struct machine *machine, 2296 struct machine *machine)
2300 struct perf_session *session)
2301{ 2297{
2302 union perf_event ev; 2298 union perf_event ev;
2303 size_t len; 2299 size_t len;
@@ -2317,7 +2313,7 @@ int perf_event__synthesize_build_id(struct perf_event_ops *ops,
2317 ev.build_id.header.size = sizeof(ev.build_id) + len; 2313 ev.build_id.header.size = sizeof(ev.build_id) + len;
2318 memcpy(&ev.build_id.filename, pos->long_name, pos->long_name_len); 2314 memcpy(&ev.build_id.filename, pos->long_name, pos->long_name_len);
2319 2315
2320 err = process(ops, &ev, NULL, session); 2316 err = process(ops, &ev, NULL, machine);
2321 2317
2322 return err; 2318 return err;
2323} 2319}
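Illustration only: the header.c hunks show that synthetic header records (attr, tracing data) are per-session rather than per-machine, so process() is now invoked with a NULL machine for them. The sketch below, with simplified stand-in types, shows the resulting expectation on such handlers: they must not dereference the machine argument.

#include <stdio.h>

struct machine { int pid; };

static int process_attr(void *ops, void *event, void *sample,
			struct machine *machine)
{
	(void)ops; (void)event; (void)sample;
	/* attr/tracing-data records carry no machine context */
	printf("attr record, machine=%s\n", machine ? "set" : "NULL (ok)");
	return 0;
}

int main(void)
{
	/* was: process(ops, ev, NULL, session); the slot is now simply NULL */
	return process_attr(NULL, NULL, NULL, NULL);
}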
diff --git a/tools/perf/util/header.h b/tools/perf/util/header.h
index 54dae5f09556..a604962fc431 100644
--- a/tools/perf/util/header.h
+++ b/tools/perf/util/header.h
@@ -99,8 +99,7 @@ int build_id_cache__remove_s(const char *sbuild_id, const char *debugdir);
99 99
100int perf_event__synthesize_attr(struct perf_event_ops *ops, 100int perf_event__synthesize_attr(struct perf_event_ops *ops,
101 struct perf_event_attr *attr, u16 ids, u64 *id, 101 struct perf_event_attr *attr, u16 ids, u64 *id,
102 perf_event__handler_t process, 102 perf_event__handler_t process);
103 struct perf_session *session);
104int perf_event__synthesize_attrs(struct perf_event_ops *ops, 103int perf_event__synthesize_attrs(struct perf_event_ops *ops,
105 struct perf_session *session, 104 struct perf_session *session,
106 perf_event__handler_t process); 105 perf_event__handler_t process);
@@ -109,26 +108,23 @@ int perf_event__process_attr(union perf_event *event, struct perf_evlist **pevli
109int perf_event__synthesize_event_type(struct perf_event_ops *ops, 108int perf_event__synthesize_event_type(struct perf_event_ops *ops,
110 u64 event_id, char *name, 109 u64 event_id, char *name,
111 perf_event__handler_t process, 110 perf_event__handler_t process,
112 struct perf_session *session); 111 struct machine *machine);
113int perf_event__synthesize_event_types(struct perf_event_ops *ops, 112int perf_event__synthesize_event_types(struct perf_event_ops *ops,
114 perf_event__handler_t process, 113 perf_event__handler_t process,
115 struct perf_session *session); 114 struct machine *machine);
116int perf_event__process_event_type(struct perf_event_ops *ops, 115int perf_event__process_event_type(struct perf_event_ops *ops,
117 union perf_event *event, 116 union perf_event *event);
118 struct perf_session *session);
119 117
120int perf_event__synthesize_tracing_data(struct perf_event_ops *ops, 118int perf_event__synthesize_tracing_data(struct perf_event_ops *ops,
121 int fd, struct perf_evlist *evlist, 119 int fd, struct perf_evlist *evlist,
122 perf_event__handler_t process, 120 perf_event__handler_t process);
123 struct perf_session *session);
124int perf_event__process_tracing_data(union perf_event *event, 121int perf_event__process_tracing_data(union perf_event *event,
125 struct perf_session *session); 122 struct perf_session *session);
126 123
127int perf_event__synthesize_build_id(struct perf_event_ops *ops, 124int perf_event__synthesize_build_id(struct perf_event_ops *ops,
128 struct dso *pos, u16 misc, 125 struct dso *pos, u16 misc,
129 perf_event__handler_t process, 126 perf_event__handler_t process,
130 struct machine *machine, 127 struct machine *machine);
131 struct perf_session *session);
132int perf_event__process_build_id(struct perf_event_ops *ops, 128int perf_event__process_build_id(struct perf_event_ops *ops,
133 union perf_event *event, 129 union perf_event *event,
134 struct perf_session *session); 130 struct perf_session *session);
diff --git a/tools/perf/util/map.h b/tools/perf/util/map.h
index bde6835ee257..2b8017f8a930 100644
--- a/tools/perf/util/map.h
+++ b/tools/perf/util/map.h
@@ -18,9 +18,11 @@ enum map_type {
18extern const char *map_type__name[MAP__NR_TYPES]; 18extern const char *map_type__name[MAP__NR_TYPES];
19 19
20struct dso; 20struct dso;
21struct ip_callchain;
21struct ref_reloc_sym; 22struct ref_reloc_sym;
22struct map_groups; 23struct map_groups;
23struct machine; 24struct machine;
25struct perf_evsel;
24 26
25struct map { 27struct map {
26 union { 28 union {
@@ -61,6 +63,7 @@ struct map_groups {
61struct machine { 63struct machine {
62 struct rb_node rb_node; 64 struct rb_node rb_node;
63 pid_t pid; 65 pid_t pid;
66 u16 id_hdr_size;
64 char *root_dir; 67 char *root_dir;
65 struct rb_root threads; 68 struct rb_root threads;
66 struct list_head dead_threads; 69 struct list_head dead_threads;
@@ -151,6 +154,13 @@ int machine__init(struct machine *self, const char *root_dir, pid_t pid);
151void machine__exit(struct machine *self); 154void machine__exit(struct machine *self);
152void machine__delete(struct machine *self); 155void machine__delete(struct machine *self);
153 156
157int machine__resolve_callchain(struct machine *machine,
158 struct perf_evsel *evsel, struct thread *thread,
159 struct ip_callchain *chain,
160 struct symbol **parent);
161int maps__set_kallsyms_ref_reloc_sym(struct map **maps, const char *symbol_name,
162 u64 addr);
163
154/* 164/*
155 * Default guest kernel is defined by parameter --guestkallsyms 165 * Default guest kernel is defined by parameter --guestkallsyms
156 * and --guestmodules 166 * and --guestmodules
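Illustration only: the map.h hunk adds id_hdr_size to struct machine so that event synthesis can size its records from the machine alone. The compilable sketch below mirrors the COMM sizing arithmetic seen in the event.c hunks (align the name, drop the unused name space, append the per-sample id header); the struct layout and constants are simplified stand-ins.

#include <stdio.h>
#include <string.h>

#define ALIGN(x, a)	(((x) + (a) - 1) & ~((a) - 1))
#define COMM_NAME_MAX	64

struct machine { unsigned short id_hdr_size; };

struct comm_event {
	unsigned short header_size;
	char comm[COMM_NAME_MAX];
};

static size_t comm_event_size(const char *comm, struct machine *machine)
{
	size_t size = ALIGN(strlen(comm) + 1, sizeof(unsigned long long));

	/* fixed part minus unused name space, plus per-sample id header */
	return sizeof(struct comm_event) - (COMM_NAME_MAX - size)
	       + machine->id_hdr_size;
}

int main(void)
{
	struct machine host = { .id_hdr_size = 16 };

	printf("synthesized COMM size: %zu bytes\n",
	       comm_event_size("perf", &host));
	return 0;
}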
diff --git a/tools/perf/util/scripting-engines/trace-event-perl.c b/tools/perf/util/scripting-engines/trace-event-perl.c
index 74350ffb57fe..a82ce4303ff5 100644
--- a/tools/perf/util/scripting-engines/trace-event-perl.c
+++ b/tools/perf/util/scripting-engines/trace-event-perl.c
@@ -27,6 +27,8 @@
27 27
28#include "../../perf.h" 28#include "../../perf.h"
29#include "../util.h" 29#include "../util.h"
30#include "../thread.h"
31#include "../event.h"
30#include "../trace-event.h" 32#include "../trace-event.h"
31 33
32#include <EXTERN.h> 34#include <EXTERN.h>
@@ -248,7 +250,7 @@ static inline struct event *find_cache_event(int type)
248static void perl_process_event(union perf_event *pevent __unused, 250static void perl_process_event(union perf_event *pevent __unused,
249 struct perf_sample *sample, 251 struct perf_sample *sample,
250 struct perf_evsel *evsel, 252 struct perf_evsel *evsel,
251 struct perf_session *session __unused, 253 struct machine *machine __unused,
252 struct thread *thread) 254 struct thread *thread)
253{ 255{
254 struct format_field *field; 256 struct format_field *field;
diff --git a/tools/perf/util/scripting-engines/trace-event-python.c b/tools/perf/util/scripting-engines/trace-event-python.c
index 6ccf70e8d8f2..0b2a48783172 100644
--- a/tools/perf/util/scripting-engines/trace-event-python.c
+++ b/tools/perf/util/scripting-engines/trace-event-python.c
@@ -29,6 +29,8 @@
29 29
30#include "../../perf.h" 30#include "../../perf.h"
31#include "../util.h" 31#include "../util.h"
32#include "../event.h"
33#include "../thread.h"
32#include "../trace-event.h" 34#include "../trace-event.h"
33 35
34PyMODINIT_FUNC initperf_trace_context(void); 36PyMODINIT_FUNC initperf_trace_context(void);
@@ -207,7 +209,7 @@ static inline struct event *find_cache_event(int type)
207static void python_process_event(union perf_event *pevent __unused, 209static void python_process_event(union perf_event *pevent __unused,
208 struct perf_sample *sample, 210 struct perf_sample *sample,
209 struct perf_evsel *evsel __unused, 211 struct perf_evsel *evsel __unused,
210 struct perf_session *session __unused, 212 struct machine *machine __unused,
211 struct thread *thread) 213 struct thread *thread)
212{ 214{
213 PyObject *handler, *retval, *context, *t, *obj, *dict = NULL; 215 PyObject *handler, *retval, *context, *t, *obj, *dict = NULL;
diff --git a/tools/perf/util/session.c b/tools/perf/util/session.c
index a36023a66779..be33606386bf 100644
--- a/tools/perf/util/session.c
+++ b/tools/perf/util/session.c
@@ -84,6 +84,7 @@ void perf_session__update_sample_type(struct perf_session *self)
84 self->sample_size = __perf_evsel__sample_size(self->sample_type); 84 self->sample_size = __perf_evsel__sample_size(self->sample_type);
85 self->sample_id_all = perf_evlist__sample_id_all(self->evlist); 85 self->sample_id_all = perf_evlist__sample_id_all(self->evlist);
86 self->id_hdr_size = perf_evlist__id_hdr_size(self->evlist); 86 self->id_hdr_size = perf_evlist__id_hdr_size(self->evlist);
87 self->host_machine.id_hdr_size = self->id_hdr_size;
87} 88}
88 89
89int perf_session__create_kernel_maps(struct perf_session *self) 90int perf_session__create_kernel_maps(struct perf_session *self)
@@ -216,10 +217,10 @@ static bool symbol__match_parent_regex(struct symbol *sym)
216 return 0; 217 return 0;
217} 218}
218 219
219int perf_session__resolve_callchain(struct perf_session *self, struct perf_evsel *evsel, 220int machine__resolve_callchain(struct machine *self, struct perf_evsel *evsel,
220 struct thread *thread, 221 struct thread *thread,
221 struct ip_callchain *chain, 222 struct ip_callchain *chain,
222 struct symbol **parent) 223 struct symbol **parent)
223{ 224{
224 u8 cpumode = PERF_RECORD_MISC_USER; 225 u8 cpumode = PERF_RECORD_MISC_USER;
225 unsigned int i; 226 unsigned int i;
@@ -252,7 +253,7 @@ int perf_session__resolve_callchain(struct perf_session *self, struct perf_evsel
252 253
253 al.filtered = false; 254 al.filtered = false;
254 thread__find_addr_location(thread, self, cpumode, 255 thread__find_addr_location(thread, self, cpumode,
255 MAP__FUNCTION, thread->pid, ip, &al, NULL); 256 MAP__FUNCTION, ip, &al, NULL);
256 if (al.sym != NULL) { 257 if (al.sym != NULL) {
257 if (sort__has_parent && !*parent && 258 if (sort__has_parent && !*parent &&
258 symbol__match_parent_regex(al.sym)) 259 symbol__match_parent_regex(al.sym))
@@ -270,14 +271,6 @@ int perf_session__resolve_callchain(struct perf_session *self, struct perf_evsel
270 return 0; 271 return 0;
271} 272}
272 273
273static int process_event_synth_stub(struct perf_event_ops *ops __used,
274 union perf_event *event __used,
275 struct perf_session *session __used)
276{
277 dump_printf(": unhandled!\n");
278 return 0;
279}
280
281static int process_event_synth_tracing_data_stub(union perf_event *event __used, 274static int process_event_synth_tracing_data_stub(union perf_event *event __used,
282 struct perf_session *session __used) 275 struct perf_session *session __used)
283{ 276{
@@ -296,7 +289,7 @@ static int process_event_sample_stub(struct perf_event_ops *ops __used,
296 union perf_event *event __used, 289 union perf_event *event __used,
297 struct perf_sample *sample __used, 290 struct perf_sample *sample __used,
298 struct perf_evsel *evsel __used, 291 struct perf_evsel *evsel __used,
299 struct perf_session *session __used) 292 struct machine *machine __used)
300{ 293{
301 dump_printf(": unhandled!\n"); 294 dump_printf(": unhandled!\n");
302 return 0; 295 return 0;
@@ -305,7 +298,7 @@ static int process_event_sample_stub(struct perf_event_ops *ops __used,
305static int process_event_stub(struct perf_event_ops *ops __used, 298static int process_event_stub(struct perf_event_ops *ops __used,
306 union perf_event *event __used, 299 union perf_event *event __used,
307 struct perf_sample *sample __used, 300 struct perf_sample *sample __used,
308 struct perf_session *session __used) 301 struct machine *machine __used)
309{ 302{
310 dump_printf(": unhandled!\n"); 303 dump_printf(": unhandled!\n");
311 return 0; 304 return 0;
@@ -313,7 +306,14 @@ static int process_event_stub(struct perf_event_ops *ops __used,
313 306
314static int process_finished_round_stub(struct perf_event_ops *ops __used, 307static int process_finished_round_stub(struct perf_event_ops *ops __used,
315 union perf_event *event __used, 308 union perf_event *event __used,
316 struct perf_session *session __used) 309 struct perf_session *perf_session __used)
310{
311 dump_printf(": unhandled!\n");
312 return 0;
313}
314
315static int process_event_type_stub(struct perf_event_ops *ops __used,
316 union perf_event *event __used)
317{ 317{
318 dump_printf(": unhandled!\n"); 318 dump_printf(": unhandled!\n");
319 return 0; 319 return 0;
@@ -338,7 +338,7 @@ static void perf_event_ops__fill_defaults(struct perf_event_ops *handler)
338 if (handler->lost == NULL) 338 if (handler->lost == NULL)
339 handler->lost = perf_event__process_lost; 339 handler->lost = perf_event__process_lost;
340 if (handler->read == NULL) 340 if (handler->read == NULL)
341 handler->read = process_event_stub; 341 handler->read = process_event_sample_stub;
342 if (handler->throttle == NULL) 342 if (handler->throttle == NULL)
343 handler->throttle = process_event_stub; 343 handler->throttle = process_event_stub;
344 if (handler->unthrottle == NULL) 344 if (handler->unthrottle == NULL)
@@ -346,11 +346,11 @@ static void perf_event_ops__fill_defaults(struct perf_event_ops *handler)
346 if (handler->attr == NULL) 346 if (handler->attr == NULL)
347 handler->attr = process_event_synth_attr_stub; 347 handler->attr = process_event_synth_attr_stub;
348 if (handler->event_type == NULL) 348 if (handler->event_type == NULL)
349 handler->event_type = process_event_synth_stub; 349 handler->event_type = process_event_type_stub;
350 if (handler->tracing_data == NULL) 350 if (handler->tracing_data == NULL)
351 handler->tracing_data = process_event_synth_tracing_data_stub; 351 handler->tracing_data = process_event_synth_tracing_data_stub;
352 if (handler->build_id == NULL) 352 if (handler->build_id == NULL)
353 handler->build_id = process_event_synth_stub; 353 handler->build_id = process_finished_round_stub;
354 if (handler->finished_round == NULL) { 354 if (handler->finished_round == NULL) {
355 if (handler->ordered_samples) 355 if (handler->ordered_samples)
356 handler->finished_round = process_finished_round; 356 handler->finished_round = process_finished_round;
@@ -734,6 +734,18 @@ static void dump_sample(struct perf_session *session, union perf_event *event,
734 callchain__printf(sample); 734 callchain__printf(sample);
735} 735}
736 736
737static struct machine *
738 perf_session__find_machine_for_cpumode(struct perf_session *session,
739 union perf_event *event)
740{
741 const u8 cpumode = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;
742
743 if (cpumode == PERF_RECORD_MISC_GUEST_KERNEL && perf_guest)
744 return perf_session__find_machine(session, event->ip.pid);
745
746 return perf_session__find_host_machine(session);
747}
748
737static int perf_session_deliver_event(struct perf_session *session, 749static int perf_session_deliver_event(struct perf_session *session,
738 union perf_event *event, 750 union perf_event *event,
739 struct perf_sample *sample, 751 struct perf_sample *sample,
@@ -741,6 +753,7 @@ static int perf_session_deliver_event(struct perf_session *session,
741 u64 file_offset) 753 u64 file_offset)
742{ 754{
743 struct perf_evsel *evsel; 755 struct perf_evsel *evsel;
756 struct machine *machine;
744 757
745 dump_event(session, event, file_offset, sample); 758 dump_event(session, event, file_offset, sample);
746 759
@@ -762,6 +775,8 @@ static int perf_session_deliver_event(struct perf_session *session,
762 hists__inc_nr_events(&evsel->hists, event->header.type); 775 hists__inc_nr_events(&evsel->hists, event->header.type);
763 } 776 }
764 777
778 machine = perf_session__find_machine_for_cpumode(session, event);
779
765 switch (event->header.type) { 780 switch (event->header.type) {
766 case PERF_RECORD_SAMPLE: 781 case PERF_RECORD_SAMPLE:
767 dump_sample(session, event, sample); 782 dump_sample(session, event, sample);
@@ -769,23 +784,25 @@ static int perf_session_deliver_event(struct perf_session *session,
769 ++session->hists.stats.nr_unknown_id; 784 ++session->hists.stats.nr_unknown_id;
770 return -1; 785 return -1;
771 } 786 }
772 return ops->sample(ops, event, sample, evsel, session); 787 return ops->sample(ops, event, sample, evsel, machine);
773 case PERF_RECORD_MMAP: 788 case PERF_RECORD_MMAP:
774 return ops->mmap(ops, event, sample, session); 789 return ops->mmap(ops, event, sample, machine);
775 case PERF_RECORD_COMM: 790 case PERF_RECORD_COMM:
776 return ops->comm(ops, event, sample, session); 791 return ops->comm(ops, event, sample, machine);
777 case PERF_RECORD_FORK: 792 case PERF_RECORD_FORK:
778 return ops->fork(ops, event, sample, session); 793 return ops->fork(ops, event, sample, machine);
779 case PERF_RECORD_EXIT: 794 case PERF_RECORD_EXIT:
780 return ops->exit(ops, event, sample, session); 795 return ops->exit(ops, event, sample, machine);
781 case PERF_RECORD_LOST: 796 case PERF_RECORD_LOST:
782 return ops->lost(ops, event, sample, session); 797 if (ops->lost == perf_event__process_lost)
798 session->hists.stats.total_lost += event->lost.lost;
799 return ops->lost(ops, event, sample, machine);
783 case PERF_RECORD_READ: 800 case PERF_RECORD_READ:
784 return ops->read(ops, event, sample, session); 801 return ops->read(ops, event, sample, evsel, machine);
785 case PERF_RECORD_THROTTLE: 802 case PERF_RECORD_THROTTLE:
786 return ops->throttle(ops, event, sample, session); 803 return ops->throttle(ops, event, sample, machine);
787 case PERF_RECORD_UNTHROTTLE: 804 case PERF_RECORD_UNTHROTTLE:
788 return ops->unthrottle(ops, event, sample, session); 805 return ops->unthrottle(ops, event, sample, machine);
789 default: 806 default:
790 ++session->hists.stats.nr_unknown_events; 807 ++session->hists.stats.nr_unknown_events;
791 return -1; 808 return -1;
@@ -823,7 +840,7 @@ static int perf_session__process_user_event(struct perf_session *session, union
823 perf_session__update_sample_type(session); 840 perf_session__update_sample_type(session);
824 return err; 841 return err;
825 case PERF_RECORD_HEADER_EVENT_TYPE: 842 case PERF_RECORD_HEADER_EVENT_TYPE:
826 return ops->event_type(ops, event, session); 843 return ops->event_type(ops, event);
827 case PERF_RECORD_HEADER_TRACING_DATA: 844 case PERF_RECORD_HEADER_TRACING_DATA:
828 /* setup for reading amidst mmap */ 845 /* setup for reading amidst mmap */
829 lseek(session->fd, file_offset, SEEK_SET); 846 lseek(session->fd, file_offset, SEEK_SET);
@@ -1170,9 +1187,8 @@ bool perf_session__has_traces(struct perf_session *self, const char *msg)
1170 return true; 1187 return true;
1171} 1188}
1172 1189
1173int perf_session__set_kallsyms_ref_reloc_sym(struct map **maps, 1190int maps__set_kallsyms_ref_reloc_sym(struct map **maps,
1174 const char *symbol_name, 1191 const char *symbol_name, u64 addr)
1175 u64 addr)
1176{ 1192{
1177 char *bracket; 1193 char *bracket;
1178 enum map_type i; 1194 enum map_type i;
@@ -1264,17 +1280,16 @@ struct perf_evsel *perf_session__find_first_evtype(struct perf_session *session,
1264 return NULL; 1280 return NULL;
1265} 1281}
1266 1282
1267void perf_session__print_ip(union perf_event *event, struct perf_evsel *evsel, 1283void perf_event__print_ip(union perf_event *event, struct perf_sample *sample,
1268 struct perf_sample *sample, 1284 struct machine *machine, struct perf_evsel *evsel,
1269 struct perf_session *session, 1285 int print_sym, int print_dso)
1270 int print_sym, int print_dso)
1271{ 1286{
1272 struct addr_location al; 1287 struct addr_location al;
1273 const char *symname, *dsoname; 1288 const char *symname, *dsoname;
1274 struct callchain_cursor *cursor = &evsel->hists.callchain_cursor; 1289 struct callchain_cursor *cursor = &evsel->hists.callchain_cursor;
1275 struct callchain_cursor_node *node; 1290 struct callchain_cursor_node *node;
1276 1291
1277 if (perf_event__preprocess_sample(event, session, &al, sample, 1292 if (perf_event__preprocess_sample(event, machine, &al, sample,
1278 NULL) < 0) { 1293 NULL) < 0) {
1279 error("problem processing %d event, skipping it.\n", 1294 error("problem processing %d event, skipping it.\n",
1280 event->header.type); 1295 event->header.type);
@@ -1283,7 +1298,7 @@ void perf_session__print_ip(union perf_event *event, struct perf_evsel *evsel,
1283 1298
1284 if (symbol_conf.use_callchain && sample->callchain) { 1299 if (symbol_conf.use_callchain && sample->callchain) {
1285 1300
1286 if (perf_session__resolve_callchain(session, evsel, al.thread, 1301 if (machine__resolve_callchain(machine, evsel, al.thread,
1287 sample->callchain, NULL) != 0) { 1302 sample->callchain, NULL) != 0) {
1288 if (verbose) 1303 if (verbose)
1289 error("Failed to resolve callchain. Skipping\n"); 1304 error("Failed to resolve callchain. Skipping\n");
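Illustration only: the session.c hunks add perf_session__find_machine_for_cpumode(), so the machine is resolved once before dispatch: guest-kernel records go to the guest machine keyed by pid, everything else to the host. A compilable sketch of that routing follows; the session holding a single guest slot (instead of an rb-tree of guests) and the other types are simplified stand-ins.

#include <stdio.h>

#define RECORD_MISC_GUEST_KERNEL	4

struct machine { int pid; const char *name; };

struct session {
	struct machine host;
	struct machine guest;	/* real perf keeps an rb-tree of guest machines */
	int perf_guest;		/* mirrors the global 'perf_guest' flag */
};

static struct machine *find_machine_for_cpumode(struct session *s,
						unsigned char cpumode, int pid)
{
	if (cpumode == RECORD_MISC_GUEST_KERNEL && s->perf_guest &&
	    pid == s->guest.pid)
		return &s->guest;
	return &s->host;
}

int main(void)
{
	struct session s = {
		.host  = { .pid = 0,    .name = "host"  },
		.guest = { .pid = 4242, .name = "guest" },
		.perf_guest = 1,
	};

	printf("guest-kernel sample -> %s machine\n",
	       find_machine_for_cpumode(&s, RECORD_MISC_GUEST_KERNEL, 4242)->name);
	printf("user sample         -> %s machine\n",
	       find_machine_for_cpumode(&s, 0, 4242)->name);
	return 0;
}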
diff --git a/tools/perf/util/session.h b/tools/perf/util/session.h
index 6de3d1368900..1c5823c7d6dc 100644
--- a/tools/perf/util/session.h
+++ b/tools/perf/util/session.h
@@ -58,32 +58,34 @@ struct perf_event_ops;
58 58
59typedef int (*event_sample)(struct perf_event_ops *ops, 59typedef int (*event_sample)(struct perf_event_ops *ops,
60 union perf_event *event, struct perf_sample *sample, 60 union perf_event *event, struct perf_sample *sample,
61 struct perf_evsel *evsel, struct perf_session *session); 61 struct perf_evsel *evsel, struct machine *machine);
62typedef int (*event_op)(struct perf_event_ops *ops, union perf_event *event, 62typedef int (*event_op)(struct perf_event_ops *ops, union perf_event *event,
63 struct perf_sample *sample, 63 struct perf_sample *sample,
64 struct perf_session *session); 64 struct machine *machine);
65typedef int (*event_synth_op)(union perf_event *self, 65typedef int (*event_synth_op)(union perf_event *self,
66 struct perf_session *session); 66 struct perf_session *session);
67typedef int (*event_attr_op)(union perf_event *event, 67typedef int (*event_attr_op)(union perf_event *event,
68 struct perf_evlist **pevlist); 68 struct perf_evlist **pevlist);
69typedef int (*event_simple_op)(struct perf_event_ops *ops,
70 union perf_event *event);
69typedef int (*event_op2)(struct perf_event_ops *ops, union perf_event *event, 71typedef int (*event_op2)(struct perf_event_ops *ops, union perf_event *event,
70 struct perf_session *session); 72 struct perf_session *session);
71 73
72struct perf_event_ops { 74struct perf_event_ops {
73 event_sample sample; 75 event_sample sample,
76 read;
74 event_op mmap, 77 event_op mmap,
75 comm, 78 comm,
76 fork, 79 fork,
77 exit, 80 exit,
78 lost, 81 lost,
79 read,
80 throttle, 82 throttle,
81 unthrottle; 83 unthrottle;
82 event_attr_op attr; 84 event_attr_op attr;
83 event_synth_op tracing_data; 85 event_synth_op tracing_data;
84 event_op2 event_type, 86 event_simple_op event_type;
85 build_id, 87 event_op2 finished_round,
86 finished_round; 88 build_id;
87 bool ordered_samples; 89 bool ordered_samples;
88 bool ordering_requires_timestamps; 90 bool ordering_requires_timestamps;
89}; 91};
@@ -108,10 +110,6 @@ int perf_session__resolve_callchain(struct perf_session *self, struct perf_evsel
108 110
109bool perf_session__has_traces(struct perf_session *self, const char *msg); 111bool perf_session__has_traces(struct perf_session *self, const char *msg);
110 112
111int perf_session__set_kallsyms_ref_reloc_sym(struct map **maps,
112 const char *symbol_name,
113 u64 addr);
114
115void mem_bswap_64(void *src, int byte_size); 113void mem_bswap_64(void *src, int byte_size);
116void perf_event__attr_swap(struct perf_event_attr *attr); 114void perf_event__attr_swap(struct perf_event_attr *attr);
117 115
@@ -151,6 +149,9 @@ void perf_session__process_machines(struct perf_session *self,
151 return machines__process(&self->machines, process, ops); 149 return machines__process(&self->machines, process, ops);
152} 150}
153 151
152struct thread *perf_session__findnew(struct perf_session *self, pid_t pid);
153size_t perf_session__fprintf(struct perf_session *self, FILE *fp);
154
154size_t perf_session__fprintf_dsos(struct perf_session *self, FILE *fp); 155size_t perf_session__fprintf_dsos(struct perf_session *self, FILE *fp);
155 156
156size_t perf_session__fprintf_dsos_buildid(struct perf_session *self, 157size_t perf_session__fprintf_dsos_buildid(struct perf_session *self,
@@ -171,10 +172,9 @@ static inline int perf_session__parse_sample(struct perf_session *session,
171struct perf_evsel *perf_session__find_first_evtype(struct perf_session *session, 172struct perf_evsel *perf_session__find_first_evtype(struct perf_session *session,
172 unsigned int type); 173 unsigned int type);
173 174
174void perf_session__print_ip(union perf_event *event, struct perf_evsel *evsel, 175void perf_event__print_ip(union perf_event *event, struct perf_sample *sample,
175 struct perf_sample *sample, 176 struct machine *machine, struct perf_evsel *evsel,
176 struct perf_session *session, 177 int print_sym, int print_dso);
177 int print_sym, int print_dso);
178 178
179int perf_session__cpu_bitmap(struct perf_session *session, 179int perf_session__cpu_bitmap(struct perf_session *session,
180 const char *cpu_list, unsigned long *cpu_bitmap); 180 const char *cpu_list, unsigned long *cpu_bitmap);
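A minimal sketch (not part of this commit) of a tool written against the reworked perf_event_ops: the sample callback now receives the pre-resolved struct machine, and the renamed perf_event__print_ip() takes the machine and sample directly instead of the session. Everything named "example" below is hypothetical; only the typedefs, struct fields and the print_ip prototype come from the header above.

/*
 * Hypothetical consumer of the new event_sample signature declared above.
 * Assumes the usual perf util headers (session.h, event.h, evsel.h).
 */
static int example__process_sample(struct perf_event_ops *ops __used,
				   union perf_event *event,
				   struct perf_sample *sample,
				   struct perf_evsel *evsel,
				   struct machine *machine)
{
	/* The machine arrives already resolved; no perf_session lookup here. */
	perf_event__print_ip(event, sample, machine, evsel,
			     1 /* print_sym */, 1 /* print_dso */);
	return 0;
}

static struct perf_event_ops example_ops = {
	.sample		 = example__process_sample,
	.ordered_samples = true,
};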
diff --git a/tools/perf/util/thread.h b/tools/perf/util/thread.h
index e5f2401c1b5e..70c2c13ff679 100644
--- a/tools/perf/util/thread.h
+++ b/tools/perf/util/thread.h
@@ -18,16 +18,14 @@ struct thread {
18 int comm_len; 18 int comm_len;
19}; 19};
20 20
21struct perf_session; 21struct machine;
22 22
23void thread__delete(struct thread *self); 23void thread__delete(struct thread *self);
24 24
25int thread__set_comm(struct thread *self, const char *comm); 25int thread__set_comm(struct thread *self, const char *comm);
26int thread__comm_len(struct thread *self); 26int thread__comm_len(struct thread *self);
27struct thread *perf_session__findnew(struct perf_session *self, pid_t pid);
28void thread__insert_map(struct thread *self, struct map *map); 27void thread__insert_map(struct thread *self, struct map *map);
29int thread__fork(struct thread *self, struct thread *parent); 28int thread__fork(struct thread *self, struct thread *parent);
30size_t perf_session__fprintf(struct perf_session *self, FILE *fp);
31 29
32static inline struct map *thread__find_map(struct thread *self, 30static inline struct map *thread__find_map(struct thread *self,
33 enum map_type type, u64 addr) 31 enum map_type type, u64 addr)
@@ -35,14 +33,12 @@ static inline struct map *thread__find_map(struct thread *self,
35 return self ? map_groups__find(&self->mg, type, addr) : NULL; 33 return self ? map_groups__find(&self->mg, type, addr) : NULL;
36} 34}
37 35
38void thread__find_addr_map(struct thread *self, 36void thread__find_addr_map(struct thread *thread, struct machine *machine,
39 struct perf_session *session, u8 cpumode, 37 u8 cpumode, enum map_type type, u64 addr,
40 enum map_type type, pid_t pid, u64 addr,
41 struct addr_location *al); 38 struct addr_location *al);
42 39
43void thread__find_addr_location(struct thread *self, 40void thread__find_addr_location(struct thread *thread, struct machine *machine,
44 struct perf_session *session, u8 cpumode, 41 u8 cpumode, enum map_type type, u64 addr,
45 enum map_type type, pid_t pid, u64 addr,
46 struct addr_location *al, 42 struct addr_location *al,
47 symbol_filter_t filter); 43 symbol_filter_t filter);
48#endif /* __PERF_THREAD_H */ 44#endif /* __PERF_THREAD_H */
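In the same spirit, a hedged illustration of the new thread.h API: the machine replaces the old session/pid pair, since the thread already identifies the task. The wrapper name is made up; the callee and its argument order are exactly the declaration above.

/*
 * Hypothetical wrapper: resolve a userspace address using the
 * machine-based thread__find_addr_location() declared above.
 */
static void resolve_user_addr(struct thread *thread, struct machine *machine,
			      u64 addr, struct addr_location *al)
{
	thread__find_addr_location(thread, machine,
				   PERF_RECORD_MISC_USER,	/* cpumode */
				   MAP__FUNCTION,		/* map type */
				   addr, al,
				   NULL /* no symbol filter */);
}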
diff --git a/tools/perf/util/trace-event-scripting.c b/tools/perf/util/trace-event-scripting.c
index c9dcbec7d800..a3fdf55f317b 100644
--- a/tools/perf/util/trace-event-scripting.c
+++ b/tools/perf/util/trace-event-scripting.c
@@ -39,7 +39,7 @@ static int stop_script_unsupported(void)
39static void process_event_unsupported(union perf_event *event __unused, 39static void process_event_unsupported(union perf_event *event __unused,
40 struct perf_sample *sample __unused, 40 struct perf_sample *sample __unused,
41 struct perf_evsel *evsel __unused, 41 struct perf_evsel *evsel __unused,
42 struct perf_session *session __unused, 42 struct machine *machine __unused,
43 struct thread *thread __unused) 43 struct thread *thread __unused)
44{ 44{
45} 45}
diff --git a/tools/perf/util/trace-event.h b/tools/perf/util/trace-event.h
index a84100817649..58ae14c5baac 100644
--- a/tools/perf/util/trace-event.h
+++ b/tools/perf/util/trace-event.h
@@ -3,7 +3,11 @@
3 3
4#include <stdbool.h> 4#include <stdbool.h>
5#include "parse-events.h" 5#include "parse-events.h"
6#include "session.h" 6
7struct machine;
8struct perf_sample;
9union perf_event;
10struct thread;
7 11
8#define __unused __attribute__((unused)) 12#define __unused __attribute__((unused))
9 13
@@ -292,7 +296,7 @@ struct scripting_ops {
292 void (*process_event) (union perf_event *event, 296 void (*process_event) (union perf_event *event,
293 struct perf_sample *sample, 297 struct perf_sample *sample,
294 struct perf_evsel *evsel, 298 struct perf_evsel *evsel,
295 struct perf_session *session, 299 struct machine *machine,
296 struct thread *thread); 300 struct thread *thread);
297 int (*generate_script) (const char *outfile); 301 int (*generate_script) (const char *outfile);
298}; 302};
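Lastly, a sketch of a scripting backend hook under the new prototype: the machine and thread come in directly, and the forward declarations added above are what let trace-event.h drop its include of session.h. The "example" backend is hypothetical; only the process_event and generate_script member names are taken from struct scripting_ops above.

/*
 * Hypothetical scripting backend matching the new process_event prototype;
 * __unused is the macro defined in trace-event.h above.
 */
static void example_process_event(union perf_event *event __unused,
				  struct perf_sample *sample __unused,
				  struct perf_evsel *evsel __unused,
				  struct machine *machine __unused,
				  struct thread *thread __unused)
{
	/* hand the sample fields off to the script engine here */
}

static struct scripting_ops example_scripting_ops = {
	.process_event	= example_process_event,
	/* .generate_script and the remaining hooks omitted for brevity */
};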