about summary refs log tree commit diff stats
path: root/tools/perf/util/session.c
diff options
context:
space:
mode:
Diffstat (limited to 'tools/perf/util/session.c')
-rw-r--r--  tools/perf/util/session.c  326
1 file changed, 185 insertions(+), 141 deletions(-)
diff --git a/tools/perf/util/session.c b/tools/perf/util/session.c
index 0f4555ce9063..d9318d8a9ba1 100644
--- a/tools/perf/util/session.c
+++ b/tools/perf/util/session.c
@@ -10,6 +10,7 @@
10#include "evlist.h" 10#include "evlist.h"
11#include "evsel.h" 11#include "evsel.h"
12#include "session.h" 12#include "session.h"
13#include "tool.h"
13#include "sort.h" 14#include "sort.h"
14#include "util.h" 15#include "util.h"
15#include "cpumap.h" 16#include "cpumap.h"
@@ -78,39 +79,13 @@ out_close:
78 return -1; 79 return -1;
79} 80}
80 81
81static void perf_session__id_header_size(struct perf_session *session)
82{
83 struct perf_sample *data;
84 u64 sample_type = session->sample_type;
85 u16 size = 0;
86
87 if (!session->sample_id_all)
88 goto out;
89
90 if (sample_type & PERF_SAMPLE_TID)
91 size += sizeof(data->tid) * 2;
92
93 if (sample_type & PERF_SAMPLE_TIME)
94 size += sizeof(data->time);
95
96 if (sample_type & PERF_SAMPLE_ID)
97 size += sizeof(data->id);
98
99 if (sample_type & PERF_SAMPLE_STREAM_ID)
100 size += sizeof(data->stream_id);
101
102 if (sample_type & PERF_SAMPLE_CPU)
103 size += sizeof(data->cpu) * 2;
104out:
105 session->id_hdr_size = size;
106}
107
108void perf_session__update_sample_type(struct perf_session *self) 82void perf_session__update_sample_type(struct perf_session *self)
109{ 83{
110 self->sample_type = perf_evlist__sample_type(self->evlist); 84 self->sample_type = perf_evlist__sample_type(self->evlist);
111 self->sample_size = __perf_evsel__sample_size(self->sample_type); 85 self->sample_size = __perf_evsel__sample_size(self->sample_type);
112 self->sample_id_all = perf_evlist__sample_id_all(self->evlist); 86 self->sample_id_all = perf_evlist__sample_id_all(self->evlist);
113 perf_session__id_header_size(self); 87 self->id_hdr_size = perf_evlist__id_hdr_size(self->evlist);
88 self->host_machine.id_hdr_size = self->id_hdr_size;
114} 89}
115 90
116int perf_session__create_kernel_maps(struct perf_session *self) 91int perf_session__create_kernel_maps(struct perf_session *self)
@@ -130,7 +105,7 @@ static void perf_session__destroy_kernel_maps(struct perf_session *self)
130 105
131struct perf_session *perf_session__new(const char *filename, int mode, 106struct perf_session *perf_session__new(const char *filename, int mode,
132 bool force, bool repipe, 107 bool force, bool repipe,
133 struct perf_event_ops *ops) 108 struct perf_tool *tool)
134{ 109{
135 size_t len = filename ? strlen(filename) + 1 : 0; 110 size_t len = filename ? strlen(filename) + 1 : 0;
136 struct perf_session *self = zalloc(sizeof(*self) + len); 111 struct perf_session *self = zalloc(sizeof(*self) + len);
@@ -139,9 +114,6 @@ struct perf_session *perf_session__new(const char *filename, int mode,
139 goto out; 114 goto out;
140 115
141 memcpy(self->filename, filename, len); 116 memcpy(self->filename, filename, len);
142 self->threads = RB_ROOT;
143 INIT_LIST_HEAD(&self->dead_threads);
144 self->last_match = NULL;
145 /* 117 /*
146 * On 64bit we can mmap the data file in one go. No need for tiny mmap 118 * On 64bit we can mmap the data file in one go. No need for tiny mmap
147 * slices. On 32bit we use 32MB. 119 * slices. On 32bit we use 32MB.
@@ -171,10 +143,10 @@ struct perf_session *perf_session__new(const char *filename, int mode,
171 goto out_delete; 143 goto out_delete;
172 } 144 }
173 145
174 if (ops && ops->ordering_requires_timestamps && 146 if (tool && tool->ordering_requires_timestamps &&
175 ops->ordered_samples && !self->sample_id_all) { 147 tool->ordered_samples && !self->sample_id_all) {
176 dump_printf("WARNING: No sample_id_all support, falling back to unordered processing\n"); 148 dump_printf("WARNING: No sample_id_all support, falling back to unordered processing\n");
177 ops->ordered_samples = false; 149 tool->ordered_samples = false;
178 } 150 }
179 151
180out: 152out:
@@ -184,17 +156,22 @@ out_delete:
184 return NULL; 156 return NULL;
185} 157}
186 158
187static void perf_session__delete_dead_threads(struct perf_session *self) 159static void machine__delete_dead_threads(struct machine *machine)
188{ 160{
189 struct thread *n, *t; 161 struct thread *n, *t;
190 162
191 list_for_each_entry_safe(t, n, &self->dead_threads, node) { 163 list_for_each_entry_safe(t, n, &machine->dead_threads, node) {
192 list_del(&t->node); 164 list_del(&t->node);
193 thread__delete(t); 165 thread__delete(t);
194 } 166 }
195} 167}
196 168
197static void perf_session__delete_threads(struct perf_session *self) 169static void perf_session__delete_dead_threads(struct perf_session *session)
170{
171 machine__delete_dead_threads(&session->host_machine);
172}
173
174static void machine__delete_threads(struct machine *self)
198{ 175{
199 struct rb_node *nd = rb_first(&self->threads); 176 struct rb_node *nd = rb_first(&self->threads);
200 177
@@ -207,6 +184,11 @@ static void perf_session__delete_threads(struct perf_session *self)
207 } 184 }
208} 185}
209 186
187static void perf_session__delete_threads(struct perf_session *session)
188{
189 machine__delete_threads(&session->host_machine);
190}
191
210void perf_session__delete(struct perf_session *self) 192void perf_session__delete(struct perf_session *self)
211{ 193{
212 perf_session__destroy_kernel_maps(self); 194 perf_session__destroy_kernel_maps(self);
@@ -217,7 +199,7 @@ void perf_session__delete(struct perf_session *self)
217 free(self); 199 free(self);
218} 200}
219 201
220void perf_session__remove_thread(struct perf_session *self, struct thread *th) 202void machine__remove_thread(struct machine *self, struct thread *th)
221{ 203{
222 self->last_match = NULL; 204 self->last_match = NULL;
223 rb_erase(&th->rb_node, &self->threads); 205 rb_erase(&th->rb_node, &self->threads);
@@ -236,16 +218,16 @@ static bool symbol__match_parent_regex(struct symbol *sym)
236 return 0; 218 return 0;
237} 219}
238 220
239int perf_session__resolve_callchain(struct perf_session *self, 221int machine__resolve_callchain(struct machine *self, struct perf_evsel *evsel,
240 struct thread *thread, 222 struct thread *thread,
241 struct ip_callchain *chain, 223 struct ip_callchain *chain,
242 struct symbol **parent) 224 struct symbol **parent)
243{ 225{
244 u8 cpumode = PERF_RECORD_MISC_USER; 226 u8 cpumode = PERF_RECORD_MISC_USER;
245 unsigned int i; 227 unsigned int i;
246 int err; 228 int err;
247 229
248 callchain_cursor_reset(&self->callchain_cursor); 230 callchain_cursor_reset(&evsel->hists.callchain_cursor);
249 231
250 for (i = 0; i < chain->nr; i++) { 232 for (i = 0; i < chain->nr; i++) {
251 u64 ip; 233 u64 ip;
@@ -272,7 +254,7 @@ int perf_session__resolve_callchain(struct perf_session *self,
272 254
273 al.filtered = false; 255 al.filtered = false;
274 thread__find_addr_location(thread, self, cpumode, 256 thread__find_addr_location(thread, self, cpumode,
275 MAP__FUNCTION, thread->pid, ip, &al, NULL); 257 MAP__FUNCTION, ip, &al, NULL);
276 if (al.sym != NULL) { 258 if (al.sym != NULL) {
277 if (sort__has_parent && !*parent && 259 if (sort__has_parent && !*parent &&
278 symbol__match_parent_regex(al.sym)) 260 symbol__match_parent_regex(al.sym))
@@ -281,7 +263,7 @@ int perf_session__resolve_callchain(struct perf_session *self,
281 break; 263 break;
282 } 264 }
283 265
284 err = callchain_cursor_append(&self->callchain_cursor, 266 err = callchain_cursor_append(&evsel->hists.callchain_cursor,
285 ip, al.map, al.sym); 267 ip, al.map, al.sym);
286 if (err) 268 if (err)
287 return err; 269 return err;
@@ -290,75 +272,91 @@ int perf_session__resolve_callchain(struct perf_session *self,
290 return 0; 272 return 0;
291} 273}
292 274
293static int process_event_synth_stub(union perf_event *event __used, 275static int process_event_synth_tracing_data_stub(union perf_event *event __used,
294 struct perf_session *session __used) 276 struct perf_session *session __used)
277{
278 dump_printf(": unhandled!\n");
279 return 0;
280}
281
282static int process_event_synth_attr_stub(union perf_event *event __used,
283 struct perf_evlist **pevlist __used)
295{ 284{
296 dump_printf(": unhandled!\n"); 285 dump_printf(": unhandled!\n");
297 return 0; 286 return 0;
298} 287}
299 288
300static int process_event_sample_stub(union perf_event *event __used, 289static int process_event_sample_stub(struct perf_tool *tool __used,
290 union perf_event *event __used,
301 struct perf_sample *sample __used, 291 struct perf_sample *sample __used,
302 struct perf_evsel *evsel __used, 292 struct perf_evsel *evsel __used,
303 struct perf_session *session __used) 293 struct machine *machine __used)
304{ 294{
305 dump_printf(": unhandled!\n"); 295 dump_printf(": unhandled!\n");
306 return 0; 296 return 0;
307} 297}
308 298
309static int process_event_stub(union perf_event *event __used, 299static int process_event_stub(struct perf_tool *tool __used,
300 union perf_event *event __used,
310 struct perf_sample *sample __used, 301 struct perf_sample *sample __used,
311 struct perf_session *session __used) 302 struct machine *machine __used)
303{
304 dump_printf(": unhandled!\n");
305 return 0;
306}
307
308static int process_finished_round_stub(struct perf_tool *tool __used,
309 union perf_event *event __used,
310 struct perf_session *perf_session __used)
312{ 311{
313 dump_printf(": unhandled!\n"); 312 dump_printf(": unhandled!\n");
314 return 0; 313 return 0;
315} 314}
316 315
317static int process_finished_round_stub(union perf_event *event __used, 316static int process_event_type_stub(struct perf_tool *tool __used,
318 struct perf_session *session __used, 317 union perf_event *event __used)
319 struct perf_event_ops *ops __used)
320{ 318{
321 dump_printf(": unhandled!\n"); 319 dump_printf(": unhandled!\n");
322 return 0; 320 return 0;
323} 321}
324 322
325static int process_finished_round(union perf_event *event, 323static int process_finished_round(struct perf_tool *tool,
326 struct perf_session *session, 324 union perf_event *event,
327 struct perf_event_ops *ops); 325 struct perf_session *session);
328 326
329static void perf_event_ops__fill_defaults(struct perf_event_ops *handler) 327static void perf_tool__fill_defaults(struct perf_tool *tool)
330{ 328{
331 if (handler->sample == NULL) 329 if (tool->sample == NULL)
332 handler->sample = process_event_sample_stub; 330 tool->sample = process_event_sample_stub;
333 if (handler->mmap == NULL) 331 if (tool->mmap == NULL)
334 handler->mmap = process_event_stub; 332 tool->mmap = process_event_stub;
335 if (handler->comm == NULL) 333 if (tool->comm == NULL)
336 handler->comm = process_event_stub; 334 tool->comm = process_event_stub;
337 if (handler->fork == NULL) 335 if (tool->fork == NULL)
338 handler->fork = process_event_stub; 336 tool->fork = process_event_stub;
339 if (handler->exit == NULL) 337 if (tool->exit == NULL)
340 handler->exit = process_event_stub; 338 tool->exit = process_event_stub;
341 if (handler->lost == NULL) 339 if (tool->lost == NULL)
342 handler->lost = perf_event__process_lost; 340 tool->lost = perf_event__process_lost;
343 if (handler->read == NULL) 341 if (tool->read == NULL)
344 handler->read = process_event_stub; 342 tool->read = process_event_sample_stub;
345 if (handler->throttle == NULL) 343 if (tool->throttle == NULL)
346 handler->throttle = process_event_stub; 344 tool->throttle = process_event_stub;
347 if (handler->unthrottle == NULL) 345 if (tool->unthrottle == NULL)
348 handler->unthrottle = process_event_stub; 346 tool->unthrottle = process_event_stub;
349 if (handler->attr == NULL) 347 if (tool->attr == NULL)
350 handler->attr = process_event_synth_stub; 348 tool->attr = process_event_synth_attr_stub;
351 if (handler->event_type == NULL) 349 if (tool->event_type == NULL)
352 handler->event_type = process_event_synth_stub; 350 tool->event_type = process_event_type_stub;
353 if (handler->tracing_data == NULL) 351 if (tool->tracing_data == NULL)
354 handler->tracing_data = process_event_synth_stub; 352 tool->tracing_data = process_event_synth_tracing_data_stub;
355 if (handler->build_id == NULL) 353 if (tool->build_id == NULL)
356 handler->build_id = process_event_synth_stub; 354 tool->build_id = process_finished_round_stub;
357 if (handler->finished_round == NULL) { 355 if (tool->finished_round == NULL) {
358 if (handler->ordered_samples) 356 if (tool->ordered_samples)
359 handler->finished_round = process_finished_round; 357 tool->finished_round = process_finished_round;
360 else 358 else
361 handler->finished_round = process_finished_round_stub; 359 tool->finished_round = process_finished_round_stub;
362 } 360 }
363} 361}
364 362
@@ -490,11 +488,11 @@ static void perf_session_free_sample_buffers(struct perf_session *session)
490static int perf_session_deliver_event(struct perf_session *session, 488static int perf_session_deliver_event(struct perf_session *session,
491 union perf_event *event, 489 union perf_event *event,
492 struct perf_sample *sample, 490 struct perf_sample *sample,
493 struct perf_event_ops *ops, 491 struct perf_tool *tool,
494 u64 file_offset); 492 u64 file_offset);
495 493
496static void flush_sample_queue(struct perf_session *s, 494static void flush_sample_queue(struct perf_session *s,
497 struct perf_event_ops *ops) 495 struct perf_tool *tool)
498{ 496{
499 struct ordered_samples *os = &s->ordered_samples; 497 struct ordered_samples *os = &s->ordered_samples;
500 struct list_head *head = &os->samples; 498 struct list_head *head = &os->samples;
@@ -505,7 +503,7 @@ static void flush_sample_queue(struct perf_session *s,
505 unsigned idx = 0, progress_next = os->nr_samples / 16; 503 unsigned idx = 0, progress_next = os->nr_samples / 16;
506 int ret; 504 int ret;
507 505
508 if (!ops->ordered_samples || !limit) 506 if (!tool->ordered_samples || !limit)
509 return; 507 return;
510 508
511 list_for_each_entry_safe(iter, tmp, head, list) { 509 list_for_each_entry_safe(iter, tmp, head, list) {
@@ -516,7 +514,7 @@ static void flush_sample_queue(struct perf_session *s,
516 if (ret) 514 if (ret)
517 pr_err("Can't parse sample, err = %d\n", ret); 515 pr_err("Can't parse sample, err = %d\n", ret);
518 else 516 else
519 perf_session_deliver_event(s, iter->event, &sample, ops, 517 perf_session_deliver_event(s, iter->event, &sample, tool,
520 iter->file_offset); 518 iter->file_offset);
521 519
522 os->last_flush = iter->timestamp; 520 os->last_flush = iter->timestamp;
@@ -578,11 +576,11 @@ static void flush_sample_queue(struct perf_session *s,
578 * Flush every events below timestamp 7 576 * Flush every events below timestamp 7
579 * etc... 577 * etc...
580 */ 578 */
581static int process_finished_round(union perf_event *event __used, 579static int process_finished_round(struct perf_tool *tool,
582 struct perf_session *session, 580 union perf_event *event __used,
583 struct perf_event_ops *ops) 581 struct perf_session *session)
584{ 582{
585 flush_sample_queue(session, ops); 583 flush_sample_queue(session, tool);
586 session->ordered_samples.next_flush = session->ordered_samples.max_timestamp; 584 session->ordered_samples.next_flush = session->ordered_samples.max_timestamp;
587 585
588 return 0; 586 return 0;
@@ -737,13 +735,26 @@ static void dump_sample(struct perf_session *session, union perf_event *event,
737 callchain__printf(sample); 735 callchain__printf(sample);
738} 736}
739 737
738static struct machine *
739 perf_session__find_machine_for_cpumode(struct perf_session *session,
740 union perf_event *event)
741{
742 const u8 cpumode = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;
743
744 if (cpumode == PERF_RECORD_MISC_GUEST_KERNEL && perf_guest)
745 return perf_session__find_machine(session, event->ip.pid);
746
747 return perf_session__find_host_machine(session);
748}
749
740static int perf_session_deliver_event(struct perf_session *session, 750static int perf_session_deliver_event(struct perf_session *session,
741 union perf_event *event, 751 union perf_event *event,
742 struct perf_sample *sample, 752 struct perf_sample *sample,
743 struct perf_event_ops *ops, 753 struct perf_tool *tool,
744 u64 file_offset) 754 u64 file_offset)
745{ 755{
746 struct perf_evsel *evsel; 756 struct perf_evsel *evsel;
757 struct machine *machine;
747 758
748 dump_event(session, event, file_offset, sample); 759 dump_event(session, event, file_offset, sample);
749 760
@@ -765,6 +776,8 @@ static int perf_session_deliver_event(struct perf_session *session,
765 hists__inc_nr_events(&evsel->hists, event->header.type); 776 hists__inc_nr_events(&evsel->hists, event->header.type);
766 } 777 }
767 778
779 machine = perf_session__find_machine_for_cpumode(session, event);
780
768 switch (event->header.type) { 781 switch (event->header.type) {
769 case PERF_RECORD_SAMPLE: 782 case PERF_RECORD_SAMPLE:
770 dump_sample(session, event, sample); 783 dump_sample(session, event, sample);
@@ -772,23 +785,25 @@ static int perf_session_deliver_event(struct perf_session *session,
772 ++session->hists.stats.nr_unknown_id; 785 ++session->hists.stats.nr_unknown_id;
773 return -1; 786 return -1;
774 } 787 }
775 return ops->sample(event, sample, evsel, session); 788 return tool->sample(tool, event, sample, evsel, machine);
776 case PERF_RECORD_MMAP: 789 case PERF_RECORD_MMAP:
777 return ops->mmap(event, sample, session); 790 return tool->mmap(tool, event, sample, machine);
778 case PERF_RECORD_COMM: 791 case PERF_RECORD_COMM:
779 return ops->comm(event, sample, session); 792 return tool->comm(tool, event, sample, machine);
780 case PERF_RECORD_FORK: 793 case PERF_RECORD_FORK:
781 return ops->fork(event, sample, session); 794 return tool->fork(tool, event, sample, machine);
782 case PERF_RECORD_EXIT: 795 case PERF_RECORD_EXIT:
783 return ops->exit(event, sample, session); 796 return tool->exit(tool, event, sample, machine);
784 case PERF_RECORD_LOST: 797 case PERF_RECORD_LOST:
785 return ops->lost(event, sample, session); 798 if (tool->lost == perf_event__process_lost)
799 session->hists.stats.total_lost += event->lost.lost;
800 return tool->lost(tool, event, sample, machine);
786 case PERF_RECORD_READ: 801 case PERF_RECORD_READ:
787 return ops->read(event, sample, session); 802 return tool->read(tool, event, sample, evsel, machine);
788 case PERF_RECORD_THROTTLE: 803 case PERF_RECORD_THROTTLE:
789 return ops->throttle(event, sample, session); 804 return tool->throttle(tool, event, sample, machine);
790 case PERF_RECORD_UNTHROTTLE: 805 case PERF_RECORD_UNTHROTTLE:
791 return ops->unthrottle(event, sample, session); 806 return tool->unthrottle(tool, event, sample, machine);
792 default: 807 default:
793 ++session->hists.stats.nr_unknown_events; 808 ++session->hists.stats.nr_unknown_events;
794 return -1; 809 return -1;
@@ -812,24 +827,29 @@ static int perf_session__preprocess_sample(struct perf_session *session,
812} 827}
813 828
814static int perf_session__process_user_event(struct perf_session *session, union perf_event *event, 829static int perf_session__process_user_event(struct perf_session *session, union perf_event *event,
815 struct perf_event_ops *ops, u64 file_offset) 830 struct perf_tool *tool, u64 file_offset)
816{ 831{
832 int err;
833
817 dump_event(session, event, file_offset, NULL); 834 dump_event(session, event, file_offset, NULL);
818 835
819 /* These events are processed right away */ 836 /* These events are processed right away */
820 switch (event->header.type) { 837 switch (event->header.type) {
821 case PERF_RECORD_HEADER_ATTR: 838 case PERF_RECORD_HEADER_ATTR:
822 return ops->attr(event, session); 839 err = tool->attr(event, &session->evlist);
840 if (err == 0)
841 perf_session__update_sample_type(session);
842 return err;
823 case PERF_RECORD_HEADER_EVENT_TYPE: 843 case PERF_RECORD_HEADER_EVENT_TYPE:
824 return ops->event_type(event, session); 844 return tool->event_type(tool, event);
825 case PERF_RECORD_HEADER_TRACING_DATA: 845 case PERF_RECORD_HEADER_TRACING_DATA:
826 /* setup for reading amidst mmap */ 846 /* setup for reading amidst mmap */
827 lseek(session->fd, file_offset, SEEK_SET); 847 lseek(session->fd, file_offset, SEEK_SET);
828 return ops->tracing_data(event, session); 848 return tool->tracing_data(event, session);
829 case PERF_RECORD_HEADER_BUILD_ID: 849 case PERF_RECORD_HEADER_BUILD_ID:
830 return ops->build_id(event, session); 850 return tool->build_id(tool, event, session);
831 case PERF_RECORD_FINISHED_ROUND: 851 case PERF_RECORD_FINISHED_ROUND:
832 return ops->finished_round(event, session, ops); 852 return tool->finished_round(tool, event, session);
833 default: 853 default:
834 return -EINVAL; 854 return -EINVAL;
835 } 855 }
@@ -837,7 +857,7 @@ static int perf_session__process_user_event(struct perf_session *session, union
837 857
838static int perf_session__process_event(struct perf_session *session, 858static int perf_session__process_event(struct perf_session *session,
839 union perf_event *event, 859 union perf_event *event,
840 struct perf_event_ops *ops, 860 struct perf_tool *tool,
841 u64 file_offset) 861 u64 file_offset)
842{ 862{
843 struct perf_sample sample; 863 struct perf_sample sample;
@@ -853,7 +873,7 @@ static int perf_session__process_event(struct perf_session *session,
853 hists__inc_nr_events(&session->hists, event->header.type); 873 hists__inc_nr_events(&session->hists, event->header.type);
854 874
855 if (event->header.type >= PERF_RECORD_USER_TYPE_START) 875 if (event->header.type >= PERF_RECORD_USER_TYPE_START)
856 return perf_session__process_user_event(session, event, ops, file_offset); 876 return perf_session__process_user_event(session, event, tool, file_offset);
857 877
858 /* 878 /*
859 * For all kernel events we get the sample data 879 * For all kernel events we get the sample data
@@ -866,14 +886,14 @@ static int perf_session__process_event(struct perf_session *session,
866 if (perf_session__preprocess_sample(session, event, &sample)) 886 if (perf_session__preprocess_sample(session, event, &sample))
867 return 0; 887 return 0;
868 888
869 if (ops->ordered_samples) { 889 if (tool->ordered_samples) {
870 ret = perf_session_queue_event(session, event, &sample, 890 ret = perf_session_queue_event(session, event, &sample,
871 file_offset); 891 file_offset);
872 if (ret != -ETIME) 892 if (ret != -ETIME)
873 return ret; 893 return ret;
874 } 894 }
875 895
876 return perf_session_deliver_event(session, event, &sample, ops, 896 return perf_session_deliver_event(session, event, &sample, tool,
877 file_offset); 897 file_offset);
878} 898}
879 899
@@ -884,6 +904,11 @@ void perf_event_header__bswap(struct perf_event_header *self)
884 self->size = bswap_16(self->size); 904 self->size = bswap_16(self->size);
885} 905}
886 906
907struct thread *perf_session__findnew(struct perf_session *session, pid_t pid)
908{
909 return machine__findnew_thread(&session->host_machine, pid);
910}
911
887static struct thread *perf_session__register_idle_thread(struct perf_session *self) 912static struct thread *perf_session__register_idle_thread(struct perf_session *self)
888{ 913{
889 struct thread *thread = perf_session__findnew(self, 0); 914 struct thread *thread = perf_session__findnew(self, 0);
@@ -897,9 +922,9 @@ static struct thread *perf_session__register_idle_thread(struct perf_session *se
897} 922}
898 923
899static void perf_session__warn_about_errors(const struct perf_session *session, 924static void perf_session__warn_about_errors(const struct perf_session *session,
900 const struct perf_event_ops *ops) 925 const struct perf_tool *tool)
901{ 926{
902 if (ops->lost == perf_event__process_lost && 927 if (tool->lost == perf_event__process_lost &&
903 session->hists.stats.nr_events[PERF_RECORD_LOST] != 0) { 928 session->hists.stats.nr_events[PERF_RECORD_LOST] != 0) {
904 ui__warning("Processed %d events and lost %d chunks!\n\n" 929 ui__warning("Processed %d events and lost %d chunks!\n\n"
905 "Check IO/CPU overload!\n\n", 930 "Check IO/CPU overload!\n\n",
@@ -934,7 +959,7 @@ static void perf_session__warn_about_errors(const struct perf_session *session,
934volatile int session_done; 959volatile int session_done;
935 960
936static int __perf_session__process_pipe_events(struct perf_session *self, 961static int __perf_session__process_pipe_events(struct perf_session *self,
937 struct perf_event_ops *ops) 962 struct perf_tool *tool)
938{ 963{
939 union perf_event event; 964 union perf_event event;
940 uint32_t size; 965 uint32_t size;
@@ -943,7 +968,7 @@ static int __perf_session__process_pipe_events(struct perf_session *self,
943 int err; 968 int err;
944 void *p; 969 void *p;
945 970
946 perf_event_ops__fill_defaults(ops); 971 perf_tool__fill_defaults(tool);
947 972
948 head = 0; 973 head = 0;
949more: 974more:
@@ -980,7 +1005,7 @@ more:
980 } 1005 }
981 1006
982 if (size == 0 || 1007 if (size == 0 ||
983 (skip = perf_session__process_event(self, &event, ops, head)) < 0) { 1008 (skip = perf_session__process_event(self, &event, tool, head)) < 0) {
984 dump_printf("%#" PRIx64 " [%#x]: skipping unknown header type: %d\n", 1009 dump_printf("%#" PRIx64 " [%#x]: skipping unknown header type: %d\n",
985 head, event.header.size, event.header.type); 1010 head, event.header.size, event.header.type);
986 /* 1011 /*
@@ -1003,7 +1028,7 @@ more:
1003done: 1028done:
1004 err = 0; 1029 err = 0;
1005out_err: 1030out_err:
1006 perf_session__warn_about_errors(self, ops); 1031 perf_session__warn_about_errors(self, tool);
1007 perf_session_free_sample_buffers(self); 1032 perf_session_free_sample_buffers(self);
1008 return err; 1033 return err;
1009} 1034}
@@ -1034,7 +1059,7 @@ fetch_mmaped_event(struct perf_session *session,
1034 1059
1035int __perf_session__process_events(struct perf_session *session, 1060int __perf_session__process_events(struct perf_session *session,
1036 u64 data_offset, u64 data_size, 1061 u64 data_offset, u64 data_size,
1037 u64 file_size, struct perf_event_ops *ops) 1062 u64 file_size, struct perf_tool *tool)
1038{ 1063{
1039 u64 head, page_offset, file_offset, file_pos, progress_next; 1064 u64 head, page_offset, file_offset, file_pos, progress_next;
1040 int err, mmap_prot, mmap_flags, map_idx = 0; 1065 int err, mmap_prot, mmap_flags, map_idx = 0;
@@ -1043,7 +1068,7 @@ int __perf_session__process_events(struct perf_session *session,
1043 union perf_event *event; 1068 union perf_event *event;
1044 uint32_t size; 1069 uint32_t size;
1045 1070
1046 perf_event_ops__fill_defaults(ops); 1071 perf_tool__fill_defaults(tool);
1047 1072
1048 page_size = sysconf(_SC_PAGESIZE); 1073 page_size = sysconf(_SC_PAGESIZE);
1049 1074
@@ -1098,7 +1123,7 @@ more:
1098 size = event->header.size; 1123 size = event->header.size;
1099 1124
1100 if (size == 0 || 1125 if (size == 0 ||
1101 perf_session__process_event(session, event, ops, file_pos) < 0) { 1126 perf_session__process_event(session, event, tool, file_pos) < 0) {
1102 dump_printf("%#" PRIx64 " [%#x]: skipping unknown header type: %d\n", 1127 dump_printf("%#" PRIx64 " [%#x]: skipping unknown header type: %d\n",
1103 file_offset + head, event->header.size, 1128 file_offset + head, event->header.size,
1104 event->header.type); 1129 event->header.type);
@@ -1127,15 +1152,15 @@ more:
1127 err = 0; 1152 err = 0;
1128 /* do the final flush for ordered samples */ 1153 /* do the final flush for ordered samples */
1129 session->ordered_samples.next_flush = ULLONG_MAX; 1154 session->ordered_samples.next_flush = ULLONG_MAX;
1130 flush_sample_queue(session, ops); 1155 flush_sample_queue(session, tool);
1131out_err: 1156out_err:
1132 perf_session__warn_about_errors(session, ops); 1157 perf_session__warn_about_errors(session, tool);
1133 perf_session_free_sample_buffers(session); 1158 perf_session_free_sample_buffers(session);
1134 return err; 1159 return err;
1135} 1160}
1136 1161
1137int perf_session__process_events(struct perf_session *self, 1162int perf_session__process_events(struct perf_session *self,
1138 struct perf_event_ops *ops) 1163 struct perf_tool *tool)
1139{ 1164{
1140 int err; 1165 int err;
1141 1166
@@ -1146,9 +1171,9 @@ int perf_session__process_events(struct perf_session *self,
1146 err = __perf_session__process_events(self, 1171 err = __perf_session__process_events(self,
1147 self->header.data_offset, 1172 self->header.data_offset,
1148 self->header.data_size, 1173 self->header.data_size,
1149 self->size, ops); 1174 self->size, tool);
1150 else 1175 else
1151 err = __perf_session__process_pipe_events(self, ops); 1176 err = __perf_session__process_pipe_events(self, tool);
1152 1177
1153 return err; 1178 return err;
1154} 1179}
@@ -1163,9 +1188,8 @@ bool perf_session__has_traces(struct perf_session *self, const char *msg)
1163 return true; 1188 return true;
1164} 1189}
1165 1190
1166int perf_session__set_kallsyms_ref_reloc_sym(struct map **maps, 1191int maps__set_kallsyms_ref_reloc_sym(struct map **maps,
1167 const char *symbol_name, 1192 const char *symbol_name, u64 addr)
1168 u64 addr)
1169{ 1193{
1170 char *bracket; 1194 char *bracket;
1171 enum map_type i; 1195 enum map_type i;
@@ -1224,6 +1248,27 @@ size_t perf_session__fprintf_nr_events(struct perf_session *session, FILE *fp)
1224 return ret; 1248 return ret;
1225} 1249}
1226 1250
1251size_t perf_session__fprintf(struct perf_session *session, FILE *fp)
1252{
1253 /*
1254 * FIXME: Here we have to actually print all the machines in this
1255 * session, not just the host...
1256 */
1257 return machine__fprintf(&session->host_machine, fp);
1258}
1259
1260void perf_session__remove_thread(struct perf_session *session,
1261 struct thread *th)
1262{
1263 /*
1264 * FIXME: This one makes no sense, we need to remove the thread from
1265 * the machine it belongs to, perf_session can have many machines, so
1266 * doing it always on ->host_machine is wrong. Fix when auditing all
1267 * the 'perf kvm' code.
1268 */
1269 machine__remove_thread(&session->host_machine, th);
1270}
1271
1227struct perf_evsel *perf_session__find_first_evtype(struct perf_session *session, 1272struct perf_evsel *perf_session__find_first_evtype(struct perf_session *session,
1228 unsigned int type) 1273 unsigned int type)
1229{ 1274{
@@ -1236,17 +1281,16 @@ struct perf_evsel *perf_session__find_first_evtype(struct perf_session *session,
1236 return NULL; 1281 return NULL;
1237} 1282}
1238 1283
1239void perf_session__print_ip(union perf_event *event, 1284void perf_event__print_ip(union perf_event *event, struct perf_sample *sample,
1240 struct perf_sample *sample, 1285 struct machine *machine, struct perf_evsel *evsel,
1241 struct perf_session *session, 1286 int print_sym, int print_dso)
1242 int print_sym, int print_dso)
1243{ 1287{
1244 struct addr_location al; 1288 struct addr_location al;
1245 const char *symname, *dsoname; 1289 const char *symname, *dsoname;
1246 struct callchain_cursor *cursor = &session->callchain_cursor; 1290 struct callchain_cursor *cursor = &evsel->hists.callchain_cursor;
1247 struct callchain_cursor_node *node; 1291 struct callchain_cursor_node *node;
1248 1292
1249 if (perf_event__preprocess_sample(event, session, &al, sample, 1293 if (perf_event__preprocess_sample(event, machine, &al, sample,
1250 NULL) < 0) { 1294 NULL) < 0) {
1251 error("problem processing %d event, skipping it.\n", 1295 error("problem processing %d event, skipping it.\n",
1252 event->header.type); 1296 event->header.type);
@@ -1255,7 +1299,7 @@ void perf_session__print_ip(union perf_event *event,
1255 1299
1256 if (symbol_conf.use_callchain && sample->callchain) { 1300 if (symbol_conf.use_callchain && sample->callchain) {
1257 1301
1258 if (perf_session__resolve_callchain(session, al.thread, 1302 if (machine__resolve_callchain(machine, evsel, al.thread,
1259 sample->callchain, NULL) != 0) { 1303 sample->callchain, NULL) != 0) {
1260 if (verbose) 1304 if (verbose)
1261 error("Failed to resolve callchain. Skipping\n"); 1305 error("Failed to resolve callchain. Skipping\n");