Diffstat (limited to 'tools/perf/util/session.c')
-rw-r--r--  tools/perf/util/session.c  266
1 file changed, 133 insertions(+), 133 deletions(-)
diff --git a/tools/perf/util/session.c b/tools/perf/util/session.c
index 105f00bfd555..f26639fa0fb3 100644
--- a/tools/perf/util/session.c
+++ b/tools/perf/util/session.c
@@ -7,6 +7,8 @@
 #include <sys/types.h>
 #include <sys/mman.h>
 
+#include "evlist.h"
+#include "evsel.h"
 #include "session.h"
 #include "sort.h"
 #include "util.h"
@@ -19,7 +21,7 @@ static int perf_session__open(struct perf_session *self, bool force)
 		self->fd_pipe = true;
 		self->fd = STDIN_FILENO;
 
-		if (perf_header__read(self, self->fd) < 0)
+		if (perf_session__read_header(self, self->fd) < 0)
 			pr_err("incompatible file format");
 
 		return 0;
@@ -51,7 +53,7 @@ static int perf_session__open(struct perf_session *self, bool force)
 		goto out_close;
 	}
 
-	if (perf_header__read(self, self->fd) < 0) {
+	if (perf_session__read_header(self, self->fd) < 0) {
 		pr_err("incompatible file format");
 		goto out_close;
 	}
@@ -67,7 +69,7 @@ out_close:
 
 static void perf_session__id_header_size(struct perf_session *session)
 {
-	struct sample_data *data;
+	struct perf_sample *data;
 	u64 sample_type = session->sample_type;
 	u16 size = 0;
 
@@ -92,21 +94,10 @@ out:
 	session->id_hdr_size = size;
 }
 
-void perf_session__set_sample_id_all(struct perf_session *session, bool value)
-{
-	session->sample_id_all = value;
-	perf_session__id_header_size(session);
-}
-
-void perf_session__set_sample_type(struct perf_session *session, u64 type)
-{
-	session->sample_type = type;
-}
-
 void perf_session__update_sample_type(struct perf_session *self)
 {
-	self->sample_type = perf_header__sample_type(&self->header);
-	self->sample_id_all = perf_header__sample_id_all(&self->header);
+	self->sample_type = perf_evlist__sample_type(self->evlist);
+	self->sample_id_all = perf_evlist__sample_id_all(self->evlist);
 	perf_session__id_header_size(self);
 }
 
@@ -135,13 +126,9 @@ struct perf_session *perf_session__new(const char *filename, int mode,
 	if (self == NULL)
 		goto out;
 
-	if (perf_header__init(&self->header) < 0)
-		goto out_free;
-
 	memcpy(self->filename, filename, len);
 	self->threads = RB_ROOT;
 	INIT_LIST_HEAD(&self->dead_threads);
-	self->hists_tree = RB_ROOT;
 	self->last_match = NULL;
 	/*
 	 * On 64bit we can mmap the data file in one go. No need for tiny mmap
@@ -162,17 +149,16 @@ struct perf_session *perf_session__new(const char *filename, int mode,
 	if (mode == O_RDONLY) {
 		if (perf_session__open(self, force) < 0)
 			goto out_delete;
+		perf_session__update_sample_type(self);
 	} else if (mode == O_WRONLY) {
 		/*
 		 * In O_RDONLY mode this will be performed when reading the
-		 * kernel MMAP event, in event__process_mmap().
+		 * kernel MMAP event, in perf_event__process_mmap().
 		 */
 		if (perf_session__create_kernel_maps(self) < 0)
 			goto out_delete;
 	}
 
-	perf_session__update_sample_type(self);
-
 	if (ops && ops->ordering_requires_timestamps &&
 	    ops->ordered_samples && !self->sample_id_all) {
 		dump_printf("WARNING: No sample_id_all support, falling back to unordered processing\n");
@@ -181,9 +167,6 @@ struct perf_session *perf_session__new(const char *filename, int mode,
 
 out:
 	return self;
-out_free:
-	free(self);
-	return NULL;
 out_delete:
 	perf_session__delete(self);
 	return NULL;
@@ -214,7 +197,6 @@ static void perf_session__delete_threads(struct perf_session *self)
 
 void perf_session__delete(struct perf_session *self)
 {
-	perf_header__exit(&self->header);
 	perf_session__destroy_kernel_maps(self);
 	perf_session__delete_dead_threads(self);
 	perf_session__delete_threads(self);
@@ -242,17 +224,16 @@ static bool symbol__match_parent_regex(struct symbol *sym)
 	return 0;
 }
 
-struct map_symbol *perf_session__resolve_callchain(struct perf_session *self,
-						   struct thread *thread,
-						   struct ip_callchain *chain,
-						   struct symbol **parent)
+int perf_session__resolve_callchain(struct perf_session *self,
+				    struct thread *thread,
+				    struct ip_callchain *chain,
+				    struct symbol **parent)
 {
 	u8 cpumode = PERF_RECORD_MISC_USER;
 	unsigned int i;
-	struct map_symbol *syms = calloc(chain->nr, sizeof(*syms));
+	int err;
 
-	if (!syms)
-		return NULL;
+	callchain_cursor_reset(&self->callchain_cursor);
 
 	for (i = 0; i < chain->nr; i++) {
 		u64 ip = chain->ips[i];
@@ -281,30 +262,33 @@ struct map_symbol *perf_session__resolve_callchain(struct perf_session *self,
 				*parent = al.sym;
 			if (!symbol_conf.use_callchain)
 				break;
-			syms[i].map = al.map;
-			syms[i].sym = al.sym;
 		}
+
+		err = callchain_cursor_append(&self->callchain_cursor,
+					      ip, al.map, al.sym);
+		if (err)
+			return err;
 	}
 
-	return syms;
+	return 0;
 }
 
-static int process_event_synth_stub(event_t *event __used,
+static int process_event_synth_stub(union perf_event *event __used,
 				    struct perf_session *session __used)
 {
 	dump_printf(": unhandled!\n");
 	return 0;
 }
 
-static int process_event_stub(event_t *event __used,
-			      struct sample_data *sample __used,
+static int process_event_stub(union perf_event *event __used,
+			      struct perf_sample *sample __used,
 			      struct perf_session *session __used)
 {
 	dump_printf(": unhandled!\n");
 	return 0;
 }
 
-static int process_finished_round_stub(event_t *event __used,
+static int process_finished_round_stub(union perf_event *event __used,
 				       struct perf_session *session __used,
 				       struct perf_event_ops *ops __used)
 {
@@ -312,7 +296,7 @@ static int process_finished_round_stub(event_t *event __used,
 	return 0;
 }
 
-static int process_finished_round(event_t *event,
+static int process_finished_round(union perf_event *event,
 				  struct perf_session *session,
 				  struct perf_event_ops *ops);
 
@@ -329,7 +313,7 @@ static void perf_event_ops__fill_defaults(struct perf_event_ops *handler)
 	if (handler->exit == NULL)
 		handler->exit = process_event_stub;
 	if (handler->lost == NULL)
-		handler->lost = event__process_lost;
+		handler->lost = perf_event__process_lost;
 	if (handler->read == NULL)
 		handler->read = process_event_stub;
 	if (handler->throttle == NULL)
@@ -363,98 +347,98 @@ void mem_bswap_64(void *src, int byte_size)
 	}
 }
 
-static void event__all64_swap(event_t *self)
+static void perf_event__all64_swap(union perf_event *event)
 {
-	struct perf_event_header *hdr = &self->header;
-	mem_bswap_64(hdr + 1, self->header.size - sizeof(*hdr));
+	struct perf_event_header *hdr = &event->header;
+	mem_bswap_64(hdr + 1, event->header.size - sizeof(*hdr));
 }
 
-static void event__comm_swap(event_t *self)
+static void perf_event__comm_swap(union perf_event *event)
 {
-	self->comm.pid = bswap_32(self->comm.pid);
-	self->comm.tid = bswap_32(self->comm.tid);
+	event->comm.pid = bswap_32(event->comm.pid);
+	event->comm.tid = bswap_32(event->comm.tid);
 }
 
-static void event__mmap_swap(event_t *self)
+static void perf_event__mmap_swap(union perf_event *event)
 {
-	self->mmap.pid = bswap_32(self->mmap.pid);
-	self->mmap.tid = bswap_32(self->mmap.tid);
-	self->mmap.start = bswap_64(self->mmap.start);
-	self->mmap.len = bswap_64(self->mmap.len);
-	self->mmap.pgoff = bswap_64(self->mmap.pgoff);
+	event->mmap.pid = bswap_32(event->mmap.pid);
+	event->mmap.tid = bswap_32(event->mmap.tid);
+	event->mmap.start = bswap_64(event->mmap.start);
+	event->mmap.len = bswap_64(event->mmap.len);
+	event->mmap.pgoff = bswap_64(event->mmap.pgoff);
 }
 
-static void event__task_swap(event_t *self)
+static void perf_event__task_swap(union perf_event *event)
 {
-	self->fork.pid = bswap_32(self->fork.pid);
-	self->fork.tid = bswap_32(self->fork.tid);
-	self->fork.ppid = bswap_32(self->fork.ppid);
-	self->fork.ptid = bswap_32(self->fork.ptid);
-	self->fork.time = bswap_64(self->fork.time);
+	event->fork.pid = bswap_32(event->fork.pid);
+	event->fork.tid = bswap_32(event->fork.tid);
+	event->fork.ppid = bswap_32(event->fork.ppid);
+	event->fork.ptid = bswap_32(event->fork.ptid);
+	event->fork.time = bswap_64(event->fork.time);
 }
 
-static void event__read_swap(event_t *self)
+static void perf_event__read_swap(union perf_event *event)
 {
-	self->read.pid = bswap_32(self->read.pid);
-	self->read.tid = bswap_32(self->read.tid);
-	self->read.value = bswap_64(self->read.value);
-	self->read.time_enabled = bswap_64(self->read.time_enabled);
-	self->read.time_running = bswap_64(self->read.time_running);
-	self->read.id = bswap_64(self->read.id);
+	event->read.pid = bswap_32(event->read.pid);
+	event->read.tid = bswap_32(event->read.tid);
+	event->read.value = bswap_64(event->read.value);
+	event->read.time_enabled = bswap_64(event->read.time_enabled);
+	event->read.time_running = bswap_64(event->read.time_running);
+	event->read.id = bswap_64(event->read.id);
 }
 
-static void event__attr_swap(event_t *self)
+static void perf_event__attr_swap(union perf_event *event)
 {
 	size_t size;
 
-	self->attr.attr.type = bswap_32(self->attr.attr.type);
-	self->attr.attr.size = bswap_32(self->attr.attr.size);
-	self->attr.attr.config = bswap_64(self->attr.attr.config);
-	self->attr.attr.sample_period = bswap_64(self->attr.attr.sample_period);
-	self->attr.attr.sample_type = bswap_64(self->attr.attr.sample_type);
-	self->attr.attr.read_format = bswap_64(self->attr.attr.read_format);
-	self->attr.attr.wakeup_events = bswap_32(self->attr.attr.wakeup_events);
-	self->attr.attr.bp_type = bswap_32(self->attr.attr.bp_type);
-	self->attr.attr.bp_addr = bswap_64(self->attr.attr.bp_addr);
-	self->attr.attr.bp_len = bswap_64(self->attr.attr.bp_len);
+	event->attr.attr.type = bswap_32(event->attr.attr.type);
+	event->attr.attr.size = bswap_32(event->attr.attr.size);
+	event->attr.attr.config = bswap_64(event->attr.attr.config);
+	event->attr.attr.sample_period = bswap_64(event->attr.attr.sample_period);
+	event->attr.attr.sample_type = bswap_64(event->attr.attr.sample_type);
+	event->attr.attr.read_format = bswap_64(event->attr.attr.read_format);
+	event->attr.attr.wakeup_events = bswap_32(event->attr.attr.wakeup_events);
+	event->attr.attr.bp_type = bswap_32(event->attr.attr.bp_type);
+	event->attr.attr.bp_addr = bswap_64(event->attr.attr.bp_addr);
+	event->attr.attr.bp_len = bswap_64(event->attr.attr.bp_len);
 
-	size = self->header.size;
-	size -= (void *)&self->attr.id - (void *)self;
-	mem_bswap_64(self->attr.id, size);
+	size = event->header.size;
+	size -= (void *)&event->attr.id - (void *)event;
+	mem_bswap_64(event->attr.id, size);
 }
 
-static void event__event_type_swap(event_t *self)
+static void perf_event__event_type_swap(union perf_event *event)
 {
-	self->event_type.event_type.event_id =
-		bswap_64(self->event_type.event_type.event_id);
+	event->event_type.event_type.event_id =
+		bswap_64(event->event_type.event_type.event_id);
 }
 
-static void event__tracing_data_swap(event_t *self)
+static void perf_event__tracing_data_swap(union perf_event *event)
 {
-	self->tracing_data.size = bswap_32(self->tracing_data.size);
+	event->tracing_data.size = bswap_32(event->tracing_data.size);
 }
 
-typedef void (*event__swap_op)(event_t *self);
+typedef void (*perf_event__swap_op)(union perf_event *event);
 
-static event__swap_op event__swap_ops[] = {
-	[PERF_RECORD_MMAP] = event__mmap_swap,
-	[PERF_RECORD_COMM] = event__comm_swap,
-	[PERF_RECORD_FORK] = event__task_swap,
-	[PERF_RECORD_EXIT] = event__task_swap,
-	[PERF_RECORD_LOST] = event__all64_swap,
-	[PERF_RECORD_READ] = event__read_swap,
-	[PERF_RECORD_SAMPLE] = event__all64_swap,
-	[PERF_RECORD_HEADER_ATTR] = event__attr_swap,
-	[PERF_RECORD_HEADER_EVENT_TYPE] = event__event_type_swap,
-	[PERF_RECORD_HEADER_TRACING_DATA] = event__tracing_data_swap,
+static perf_event__swap_op perf_event__swap_ops[] = {
+	[PERF_RECORD_MMAP] = perf_event__mmap_swap,
+	[PERF_RECORD_COMM] = perf_event__comm_swap,
+	[PERF_RECORD_FORK] = perf_event__task_swap,
+	[PERF_RECORD_EXIT] = perf_event__task_swap,
+	[PERF_RECORD_LOST] = perf_event__all64_swap,
+	[PERF_RECORD_READ] = perf_event__read_swap,
+	[PERF_RECORD_SAMPLE] = perf_event__all64_swap,
+	[PERF_RECORD_HEADER_ATTR] = perf_event__attr_swap,
+	[PERF_RECORD_HEADER_EVENT_TYPE] = perf_event__event_type_swap,
+	[PERF_RECORD_HEADER_TRACING_DATA] = perf_event__tracing_data_swap,
 	[PERF_RECORD_HEADER_BUILD_ID] = NULL,
 	[PERF_RECORD_HEADER_MAX] = NULL,
 };
 
 struct sample_queue {
 	u64 timestamp;
 	u64 file_offset;
-	event_t *event;
+	union perf_event *event;
 	struct list_head list;
 };
 
@@ -472,8 +456,8 @@ static void perf_session_free_sample_buffers(struct perf_session *session)
 }
 
 static int perf_session_deliver_event(struct perf_session *session,
-				      event_t *event,
-				      struct sample_data *sample,
+				      union perf_event *event,
+				      struct perf_sample *sample,
 				      struct perf_event_ops *ops,
 				      u64 file_offset);
 
@@ -483,7 +467,7 @@ static void flush_sample_queue(struct perf_session *s,
 	struct ordered_samples *os = &s->ordered_samples;
 	struct list_head *head = &os->samples;
 	struct sample_queue *tmp, *iter;
-	struct sample_data sample;
+	struct perf_sample sample;
 	u64 limit = os->next_flush;
 	u64 last_ts = os->last_sample ? os->last_sample->timestamp : 0ULL;
 
@@ -494,7 +478,7 @@ static void flush_sample_queue(struct perf_session *s,
 		if (iter->timestamp > limit)
 			break;
 
-		event__parse_sample(iter->event, s, &sample);
+		perf_session__parse_sample(s, iter->event, &sample);
 		perf_session_deliver_event(s, iter->event, &sample, ops,
 					   iter->file_offset);
 
@@ -550,7 +534,7 @@ static void flush_sample_queue(struct perf_session *s,
  * Flush every events below timestamp 7
  * etc...
  */
-static int process_finished_round(event_t *event __used,
+static int process_finished_round(union perf_event *event __used,
 				  struct perf_session *session,
 				  struct perf_event_ops *ops)
 {
@@ -607,12 +591,12 @@ static void __queue_event(struct sample_queue *new, struct perf_session *s)
 
 #define MAX_SAMPLE_BUFFER	(64 * 1024 / sizeof(struct sample_queue))
 
-static int perf_session_queue_event(struct perf_session *s, event_t *event,
-				    struct sample_data *data, u64 file_offset)
+static int perf_session_queue_event(struct perf_session *s, union perf_event *event,
+				    struct perf_sample *sample, u64 file_offset)
 {
 	struct ordered_samples *os = &s->ordered_samples;
 	struct list_head *sc = &os->sample_cache;
-	u64 timestamp = data->time;
+	u64 timestamp = sample->time;
 	struct sample_queue *new;
 
 	if (!timestamp || timestamp == ~0ULL)
@@ -648,7 +632,7 @@ static int perf_session_queue_event(struct perf_session *s, event_t *event,
 	return 0;
 }
 
-static void callchain__printf(struct sample_data *sample)
+static void callchain__printf(struct perf_sample *sample)
 {
 	unsigned int i;
 
@@ -660,8 +644,8 @@ static void callchain__printf(struct sample_data *sample)
 }
 
 static void perf_session__print_tstamp(struct perf_session *session,
-				       event_t *event,
-				       struct sample_data *sample)
+				       union perf_event *event,
+				       struct perf_sample *sample)
 {
 	if (event->header.type != PERF_RECORD_SAMPLE &&
 	    !session->sample_id_all) {
@@ -676,8 +660,8 @@ static void perf_session__print_tstamp(struct perf_session *session,
 		printf("%" PRIu64 " ", sample->time);
 }
 
-static void dump_event(struct perf_session *session, event_t *event,
-		       u64 file_offset, struct sample_data *sample)
+static void dump_event(struct perf_session *session, union perf_event *event,
+		       u64 file_offset, struct perf_sample *sample)
 {
 	if (!dump_trace)
 		return;
@@ -691,11 +675,11 @@ static void dump_event(struct perf_session *session, event_t *event,
 	perf_session__print_tstamp(session, event, sample);
 
 	printf("%#" PRIx64 " [%#x]: PERF_RECORD_%s", file_offset,
-	       event->header.size, event__get_event_name(event->header.type));
+	       event->header.size, perf_event__name(event->header.type));
 }
 
-static void dump_sample(struct perf_session *session, event_t *event,
-			struct sample_data *sample)
+static void dump_sample(struct perf_session *session, union perf_event *event,
+			struct perf_sample *sample)
 {
 	if (!dump_trace)
 		return;
@@ -709,8 +693,8 @@ static void dump_sample(struct perf_session *session, event_t *event,
 }
 
 static int perf_session_deliver_event(struct perf_session *session,
-				      event_t *event,
-				      struct sample_data *sample,
+				      union perf_event *event,
+				      struct perf_sample *sample,
 				      struct perf_event_ops *ops,
 				      u64 file_offset)
 {
@@ -743,7 +727,7 @@ static int perf_session_deliver_event(struct perf_session *session,
 }
 
 static int perf_session__preprocess_sample(struct perf_session *session,
-					   event_t *event, struct sample_data *sample)
+					   union perf_event *event, struct perf_sample *sample)
 {
 	if (event->header.type != PERF_RECORD_SAMPLE ||
 	    !(session->sample_type & PERF_SAMPLE_CALLCHAIN))
@@ -758,7 +742,7 @@ static int perf_session__preprocess_sample(struct perf_session *session,
 	return 0;
 }
 
-static int perf_session__process_user_event(struct perf_session *session, event_t *event,
+static int perf_session__process_user_event(struct perf_session *session, union perf_event *event,
 					    struct perf_event_ops *ops, u64 file_offset)
 {
 	dump_event(session, event, file_offset, NULL);
@@ -783,15 +767,16 @@ static int perf_session__process_user_event(struct perf_session *session, event_
 }
 
 static int perf_session__process_event(struct perf_session *session,
-				       event_t *event,
+				       union perf_event *event,
 				       struct perf_event_ops *ops,
 				       u64 file_offset)
 {
-	struct sample_data sample;
+	struct perf_sample sample;
 	int ret;
 
-	if (session->header.needs_swap && event__swap_ops[event->header.type])
-		event__swap_ops[event->header.type](event);
+	if (session->header.needs_swap &&
+	    perf_event__swap_ops[event->header.type])
+		perf_event__swap_ops[event->header.type](event);
 
 	if (event->header.type >= PERF_RECORD_HEADER_MAX)
 		return -EINVAL;
@@ -804,7 +789,7 @@ static int perf_session__process_event(struct perf_session *session,
 	/*
 	 * For all kernel events we get the sample data
 	 */
-	event__parse_sample(event, session, &sample);
+	perf_session__parse_sample(session, event, &sample);
 
 	/* Preprocess sample records - precheck callchains */
 	if (perf_session__preprocess_sample(session, event, &sample))
@@ -843,7 +828,7 @@ static struct thread *perf_session__register_idle_thread(struct perf_session *se
 
 static void perf_session__warn_about_errors(const struct perf_session *session,
 					    const struct perf_event_ops *ops)
 {
-	if (ops->lost == event__process_lost &&
+	if (ops->lost == perf_event__process_lost &&
 	    session->hists.stats.total_lost != 0) {
 		ui__warning("Processed %" PRIu64 " events and LOST %" PRIu64
@@ -875,7 +860,7 @@ volatile int session_done;
 static int __perf_session__process_pipe_events(struct perf_session *self,
 					       struct perf_event_ops *ops)
 {
-	event_t event;
+	union perf_event event;
 	uint32_t size;
 	int skip = 0;
 	u64 head;
@@ -956,7 +941,7 @@ int __perf_session__process_events(struct perf_session *session,
 	struct ui_progress *progress;
 	size_t page_size, mmap_size;
 	char *buf, *mmaps[8];
-	event_t *event;
+	union perf_event *event;
 	uint32_t size;
 
 	perf_event_ops__fill_defaults(ops);
@@ -1001,7 +986,7 @@ remap:
 	file_pos = file_offset + head;
 
 more:
-	event = (event_t *)(buf + head);
+	event = (union perf_event *)(buf + head);
 
 	if (session->header.needs_swap)
 		perf_event_header__bswap(&event->header);
@@ -1134,3 +1119,18 @@ size_t perf_session__fprintf_dsos_buildid(struct perf_session *self, FILE *fp,
 	size_t ret = machine__fprintf_dsos_buildid(&self->host_machine, fp, with_hits);
 	return ret + machines__fprintf_dsos_buildid(&self->machines, fp, with_hits);
 }
+
+size_t perf_session__fprintf_nr_events(struct perf_session *session, FILE *fp)
+{
+	struct perf_evsel *pos;
+	size_t ret = fprintf(fp, "Aggregated stats:\n");
+
+	ret += hists__fprintf_nr_events(&session->hists, fp);
+
+	list_for_each_entry(pos, &session->evlist->entries, node) {
+		ret += fprintf(fp, "%s stats:\n", event_name(pos));
+		ret += hists__fprintf_nr_events(&pos->hists, fp);
+	}
+
+	return ret;
+}