Diffstat (limited to 'tools/perf')
-rw-r--r--   tools/perf/Makefile.perf             2
-rw-r--r--   tools/perf/util/ordered-events.c   196
-rw-r--r--   tools/perf/util/ordered-events.h    41
-rw-r--r--   tools/perf/util/session.c          206
-rw-r--r--   tools/perf/util/session.h           17
5 files changed, 240 insertions, 222 deletions
diff --git a/tools/perf/Makefile.perf b/tools/perf/Makefile.perf
index 2240974b7745..1ea31e275b4d 100644
--- a/tools/perf/Makefile.perf
+++ b/tools/perf/Makefile.perf
@@ -263,6 +263,7 @@ LIB_H += util/xyarray.h
 LIB_H += util/header.h
 LIB_H += util/help.h
 LIB_H += util/session.h
+LIB_H += util/ordered-events.h
 LIB_H += util/strbuf.h
 LIB_H += util/strlist.h
 LIB_H += util/strfilter.h
@@ -347,6 +348,7 @@ LIB_OBJS += $(OUTPUT)util/machine.o
 LIB_OBJS += $(OUTPUT)util/map.o
 LIB_OBJS += $(OUTPUT)util/pstack.o
 LIB_OBJS += $(OUTPUT)util/session.o
+LIB_OBJS += $(OUTPUT)util/ordered-events.o
 LIB_OBJS += $(OUTPUT)util/comm.o
 LIB_OBJS += $(OUTPUT)util/thread.o
 LIB_OBJS += $(OUTPUT)util/thread_map.o
diff --git a/tools/perf/util/ordered-events.c b/tools/perf/util/ordered-events.c
new file mode 100644
index 000000000000..95f8211ccdde
--- /dev/null
+++ b/tools/perf/util/ordered-events.c
@@ -0,0 +1,196 @@
+#include <linux/list.h>
+#include "ordered-events.h"
+#include "evlist.h"
+#include "session.h"
+#include "asm/bug.h"
+#include "debug.h"
+
+static void queue_event(struct ordered_events *oe, struct ordered_event *new)
+{
+	struct ordered_event *last = oe->last;
+	u64 timestamp = new->timestamp;
+	struct list_head *p;
+
+	++oe->nr_events;
+	oe->last = new;
+
+	if (!last) {
+		list_add(&new->list, &oe->events);
+		oe->max_timestamp = timestamp;
+		return;
+	}
+
+	/*
+	 * last event might point to some random place in the list as it's
+	 * the last queued event. We expect that the new event is close to
+	 * this.
+	 */
+	if (last->timestamp <= timestamp) {
+		while (last->timestamp <= timestamp) {
+			p = last->list.next;
+			if (p == &oe->events) {
+				list_add_tail(&new->list, &oe->events);
+				oe->max_timestamp = timestamp;
+				return;
+			}
+			last = list_entry(p, struct ordered_event, list);
+		}
+		list_add_tail(&new->list, &last->list);
+	} else {
+		while (last->timestamp > timestamp) {
+			p = last->list.prev;
+			if (p == &oe->events) {
+				list_add(&new->list, &oe->events);
+				return;
+			}
+			last = list_entry(p, struct ordered_event, list);
+		}
+		list_add(&new->list, &last->list);
+	}
+}
+
+#define MAX_SAMPLE_BUFFER	(64 * 1024 / sizeof(struct ordered_event))
+static struct ordered_event *alloc_event(struct ordered_events *oe)
+{
+	struct list_head *cache = &oe->cache;
+	struct ordered_event *new = NULL;
+
+	if (!list_empty(cache)) {
+		new = list_entry(cache->next, struct ordered_event, list);
+		list_del(&new->list);
+	} else if (oe->buffer) {
+		new = oe->buffer + oe->buffer_idx;
+		if (++oe->buffer_idx == MAX_SAMPLE_BUFFER)
+			oe->buffer = NULL;
+	} else if (oe->cur_alloc_size < oe->max_alloc_size) {
+		size_t size = MAX_SAMPLE_BUFFER * sizeof(*new);
+
+		oe->buffer = malloc(size);
+		if (!oe->buffer)
+			return NULL;
+
+		oe->cur_alloc_size += size;
+		list_add(&oe->buffer->list, &oe->to_free);
+
+		/* First entry is abused to maintain the to_free list. */
+		oe->buffer_idx = 2;
+		new = oe->buffer + 1;
+	}
+
+	return new;
+}
+
+struct ordered_event *
+ordered_events__new(struct ordered_events *oe, u64 timestamp)
+{
+	struct ordered_event *new;
+
+	new = alloc_event(oe);
+	if (new) {
+		new->timestamp = timestamp;
+		queue_event(oe, new);
+	}
+
+	return new;
+}
+
+void ordered_events__delete(struct ordered_events *oe, struct ordered_event *event)
+{
+	list_del(&event->list);
+	list_add(&event->list, &oe->cache);
+	oe->nr_events--;
+}
+
+static int __ordered_events__flush(struct perf_session *s,
+				   struct perf_tool *tool)
+{
+	struct ordered_events *oe = &s->ordered_events;
+	struct list_head *head = &oe->events;
+	struct ordered_event *tmp, *iter;
+	struct perf_sample sample;
+	u64 limit = oe->next_flush;
+	u64 last_ts = oe->last ? oe->last->timestamp : 0ULL;
+	bool show_progress = limit == ULLONG_MAX;
+	struct ui_progress prog;
+	int ret;
+
+	if (!tool->ordered_events || !limit)
+		return 0;
+
+	if (show_progress)
+		ui_progress__init(&prog, oe->nr_events, "Processing time ordered events...");
+
+	list_for_each_entry_safe(iter, tmp, head, list) {
+		if (session_done())
+			return 0;
+
+		if (iter->timestamp > limit)
+			break;
+
+		ret = perf_evlist__parse_sample(s->evlist, iter->event, &sample);
+		if (ret)
+			pr_err("Can't parse sample, err = %d\n", ret);
+		else {
+			ret = perf_session__deliver_event(s, iter->event, &sample, tool,
+							  iter->file_offset);
+			if (ret)
+				return ret;
+		}
+
+		ordered_events__delete(oe, iter);
+		oe->last_flush = iter->timestamp;
+
+		if (show_progress)
+			ui_progress__update(&prog, 1);
+	}
+
+	if (list_empty(head))
+		oe->last = NULL;
+	else if (last_ts <= limit)
+		oe->last = list_entry(head->prev, struct ordered_event, list);
+
+	return 0;
+}
+
+int ordered_events__flush(struct perf_session *s, struct perf_tool *tool,
+			  enum oe_flush how)
+{
+	struct ordered_events *oe = &s->ordered_events;
+	int err;
+
+	switch (how) {
+	case OE_FLUSH__FINAL:
+		oe->next_flush = ULLONG_MAX;
+		break;
+
+	case OE_FLUSH__HALF:
+	{
+		struct ordered_event *first, *last;
+		struct list_head *head = &oe->events;
+
+		first = list_entry(head->next, struct ordered_event, list);
+		last = oe->last;
+
+		/* Warn if we are called before any event got allocated. */
+		if (WARN_ONCE(!last || list_empty(head), "empty queue"))
+			return 0;
+
+		oe->next_flush = first->timestamp;
+		oe->next_flush += (last->timestamp - first->timestamp) / 2;
+		break;
+	}
+
+	case OE_FLUSH__ROUND:
+	default:
+		break;
+	};
+
+	err = __ordered_events__flush(s, tool);
+
+	if (!err) {
+		if (how == OE_FLUSH__ROUND)
+			oe->next_flush = oe->max_timestamp;
+	}
+
+	return err;
+}
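
The queue built by queue_event() stays sorted by timestamp without scanning from the list head on every insert: because perf events arrive roughly in time order, the walk starts at the most recently queued event (oe->last) and moves forward or backward only as far as needed. Below is a minimal, self-contained model of that insertion strategy in plain C with a hand-rolled doubly linked list; it is an illustrative sketch only (the struct and function names are made up for the demo) and is not part of the patch.

/* Standalone model of the queue_event() walk above; not part of the patch. */
#include <stdio.h>

struct node {
	unsigned long long timestamp;
	struct node *prev, *next;
};

struct queue {
	struct node head;	/* sentinel; head.next = oldest, head.prev = newest */
	struct node *last;	/* most recently inserted node */
};

static void queue_init(struct queue *q)
{
	q->head.prev = q->head.next = &q->head;
	q->last = NULL;
}

static void insert_between(struct node *n, struct node *prev, struct node *next)
{
	n->prev = prev;
	n->next = next;
	prev->next = n;
	next->prev = n;
}

/* Insert 'new' keeping the list time-ordered, starting the walk at q->last. */
static void model_queue_event(struct queue *q, struct node *new)
{
	struct node *last = q->last;

	q->last = new;

	if (!last) {
		insert_between(new, &q->head, q->head.next);
		return;
	}

	if (last->timestamp <= new->timestamp) {
		/* walk forward to the first node newer than 'new' */
		while (last->timestamp <= new->timestamp) {
			if (last->next == &q->head) {
				insert_between(new, q->head.prev, &q->head);
				return;
			}
			last = last->next;
		}
		insert_between(new, last->prev, last);
	} else {
		/* walk backward to the last node not newer than 'new' */
		while (last->timestamp > new->timestamp) {
			if (last->prev == &q->head) {
				insert_between(new, &q->head, q->head.next);
				return;
			}
			last = last->prev;
		}
		insert_between(new, last, last->next);
	}
}

int main(void)
{
	unsigned long long stamps[] = { 40, 10, 30, 20, 50 };
	struct node nodes[5];
	struct queue q;
	struct node *n;
	int i;

	queue_init(&q);
	for (i = 0; i < 5; i++) {
		nodes[i].timestamp = stamps[i];
		model_queue_event(&q, &nodes[i]);
	}
	for (n = q.head.next; n != &q.head; n = n->next)
		printf("%llu\n", n->timestamp);	/* prints 10 20 30 40 50 */
	return 0;
}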
diff --git a/tools/perf/util/ordered-events.h b/tools/perf/util/ordered-events.h
new file mode 100644
index 000000000000..8309983bdd70
--- /dev/null
+++ b/tools/perf/util/ordered-events.h
@@ -0,0 +1,41 @@
+#ifndef __ORDERED_EVENTS_H
+#define __ORDERED_EVENTS_H
+
+#include <linux/types.h>
+#include "tool.h"
+
+struct perf_session;
+
+struct ordered_event {
+	u64 timestamp;
+	u64 file_offset;
+	union perf_event *event;
+	struct list_head list;
+};
+
+enum oe_flush {
+	OE_FLUSH__FINAL,
+	OE_FLUSH__ROUND,
+	OE_FLUSH__HALF,
+};
+
+struct ordered_events {
+	u64 last_flush;
+	u64 next_flush;
+	u64 max_timestamp;
+	u64 max_alloc_size;
+	u64 cur_alloc_size;
+	struct list_head events;
+	struct list_head cache;
+	struct list_head to_free;
+	struct ordered_event *buffer;
+	struct ordered_event *last;
+	int buffer_idx;
+	unsigned int nr_events;
+};
+
+struct ordered_event *ordered_events__new(struct ordered_events *oe, u64 timestamp);
+void ordered_events__delete(struct ordered_events *oe, struct ordered_event *event);
+int ordered_events__flush(struct perf_session *s, struct perf_tool *tool,
+			  enum oe_flush how);
+#endif /* __ORDERED_EVENTS_H */
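
The header exposes the queue state directly instead of providing init/exit helpers, so the embedding structure (struct perf_session after this patch) has to prepare the list heads and the allocation limit before the first ordered_events__new() call, and release the chunks on the to_free list at teardown. That setup code is not part of this diff; the sketch below only illustrates what a caller plausibly has to do, based on the fields declared above. The helper name and its argument are hypothetical.

/* Hypothetical caller-side setup; the real code lives in util/session.c. */
#include <linux/list.h>
#include "util/ordered-events.h"

static void ordered_events_setup(struct ordered_events *oe, u64 max_alloc_size)
{
	INIT_LIST_HEAD(&oe->events);	/* time-ordered queue of ordered_event */
	INIT_LIST_HEAD(&oe->cache);	/* recycled ordered_event slots */
	INIT_LIST_HEAD(&oe->to_free);	/* malloc'ed chunks, freed at teardown */
	oe->max_alloc_size = max_alloc_size;
	oe->cur_alloc_size = 0;
	oe->buffer = NULL;
	oe->buffer_idx = 0;
	oe->last = NULL;
	oe->nr_events = 0;
	oe->last_flush = 0;
	oe->next_flush = 0;
	oe->max_timestamp = 0;
}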
diff --git a/tools/perf/util/session.c b/tools/perf/util/session.c
index ed6b7f14631f..0ccf051247f6 100644
--- a/tools/perf/util/session.c
+++ b/tools/perf/util/session.c
@@ -14,7 +14,6 @@
 #include "util.h"
 #include "cpumap.h"
 #include "perf_regs.h"
-#include "asm/bug.h"
 
 static int perf_session__open(struct perf_session *session)
 {
@@ -447,19 +446,6 @@ static perf_event__swap_op perf_event__swap_ops[] = {
 	[PERF_RECORD_HEADER_MAX] = NULL,
 };
 
-struct ordered_event {
-	u64 timestamp;
-	u64 file_offset;
-	union perf_event *event;
-	struct list_head list;
-};
-
-enum oe_flush {
-	OE_FLUSH__FINAL,
-	OE_FLUSH__ROUND,
-	OE_FLUSH__HALF,
-};
-
 static void perf_session_free_sample_buffers(struct perf_session *session)
 {
 	struct ordered_events *oe = &session->ordered_events;
@@ -473,198 +459,6 @@ static void perf_session_free_sample_buffers(struct perf_session *session)
 	}
 }
 
-/* The queue is ordered by time */
-static void queue_event(struct ordered_events *oe, struct ordered_event *new)
-{
-	struct ordered_event *last = oe->last;
-	u64 timestamp = new->timestamp;
-	struct list_head *p;
-
-	++oe->nr_events;
-	oe->last = new;
-
-	if (!last) {
-		list_add(&new->list, &oe->events);
-		oe->max_timestamp = timestamp;
-		return;
-	}
-
-	/*
-	 * last event might point to some random place in the list as it's
-	 * the last queued event. We expect that the new event is close to
-	 * this.
-	 */
-	if (last->timestamp <= timestamp) {
-		while (last->timestamp <= timestamp) {
-			p = last->list.next;
-			if (p == &oe->events) {
-				list_add_tail(&new->list, &oe->events);
-				oe->max_timestamp = timestamp;
-				return;
-			}
-			last = list_entry(p, struct ordered_event, list);
-		}
-		list_add_tail(&new->list, &last->list);
-	} else {
-		while (last->timestamp > timestamp) {
-			p = last->list.prev;
-			if (p == &oe->events) {
-				list_add(&new->list, &oe->events);
-				return;
-			}
-			last = list_entry(p, struct ordered_event, list);
-		}
-		list_add(&new->list, &last->list);
-	}
-}
-
-#define MAX_SAMPLE_BUFFER	(64 * 1024 / sizeof(struct ordered_event))
-static struct ordered_event *alloc_event(struct ordered_events *oe)
-{
-	struct list_head *cache = &oe->cache;
-	struct ordered_event *new = NULL;
-
-	if (!list_empty(cache)) {
-		new = list_entry(cache->next, struct ordered_event, list);
-		list_del(&new->list);
-	} else if (oe->buffer) {
-		new = oe->buffer + oe->buffer_idx;
-		if (++oe->buffer_idx == MAX_SAMPLE_BUFFER)
-			oe->buffer = NULL;
-	} else if (oe->cur_alloc_size < oe->max_alloc_size) {
-		size_t size = MAX_SAMPLE_BUFFER * sizeof(*new);
-
-		oe->buffer = malloc(size);
-		if (!oe->buffer)
-			return NULL;
-
-		oe->cur_alloc_size += size;
-		list_add(&oe->buffer->list, &oe->to_free);
-
-		/* First entry is abused to maintain the to_free list. */
-		oe->buffer_idx = 2;
-		new = oe->buffer + 1;
-	}
-
-	return new;
-}
-
-static struct ordered_event *
-ordered_events__new(struct ordered_events *oe, u64 timestamp)
-{
-	struct ordered_event *new;
-
-	new = alloc_event(oe);
-	if (new) {
-		new->timestamp = timestamp;
-		queue_event(oe, new);
-	}
-
-	return new;
-}
-
-static void
-ordered_events__delete(struct ordered_events *oe, struct ordered_event *event)
-{
-	list_del(&event->list);
-	list_add(&event->list, &oe->cache);
-	oe->nr_events--;
-}
-
-static int __ordered_events__flush(struct perf_session *s,
-				   struct perf_tool *tool)
-{
-	struct ordered_events *oe = &s->ordered_events;
-	struct list_head *head = &oe->events;
-	struct ordered_event *tmp, *iter;
-	struct perf_sample sample;
-	u64 limit = oe->next_flush;
-	u64 last_ts = oe->last ? oe->last->timestamp : 0ULL;
-	bool show_progress = limit == ULLONG_MAX;
-	struct ui_progress prog;
-	int ret;
-
-	if (!tool->ordered_events || !limit)
-		return 0;
-
-	if (show_progress)
-		ui_progress__init(&prog, oe->nr_events, "Processing time ordered events...");
-
-	list_for_each_entry_safe(iter, tmp, head, list) {
-		if (session_done())
-			return 0;
-
-		if (iter->timestamp > limit)
-			break;
-
-		ret = perf_evlist__parse_sample(s->evlist, iter->event, &sample);
-		if (ret)
-			pr_err("Can't parse sample, err = %d\n", ret);
-		else {
-			ret = perf_session__deliver_event(s, iter->event, &sample, tool,
-							  iter->file_offset);
-			if (ret)
-				return ret;
-		}
-
-		ordered_events__delete(oe, iter);
-		oe->last_flush = iter->timestamp;
-
-		if (show_progress)
-			ui_progress__update(&prog, 1);
-	}
-
-	if (list_empty(head))
-		oe->last = NULL;
-	else if (last_ts <= limit)
-		oe->last = list_entry(head->prev, struct ordered_event, list);
-
-	return 0;
-}
-
-static int ordered_events__flush(struct perf_session *s, struct perf_tool *tool,
-				 enum oe_flush how)
-{
-	struct ordered_events *oe = &s->ordered_events;
-	int err;
-
-	switch (how) {
-	case OE_FLUSH__FINAL:
-		oe->next_flush = ULLONG_MAX;
-		break;
-
-	case OE_FLUSH__HALF:
-	{
-		struct ordered_event *first, *last;
-		struct list_head *head = &oe->events;
-
-		first = list_entry(head->next, struct ordered_event, list);
-		last = oe->last;
-
-		/* Warn if we are called before any event got allocated. */
-		if (WARN_ONCE(!last || list_empty(head), "empty queue"))
-			return 0;
-
-		oe->next_flush = first->timestamp;
-		oe->next_flush += (last->timestamp - first->timestamp) / 2;
-		break;
-	}
-
-	case OE_FLUSH__ROUND:
-	default:
-		break;
-	};
-
-	err = __ordered_events__flush(s, tool);
-
-	if (!err) {
-		if (how == OE_FLUSH__ROUND)
-			oe->next_flush = oe->max_timestamp;
-	}
-
-	return err;
-}
-
 /*
  * When perf record finishes a pass on every buffers, it records this pseudo
  * event.
diff --git a/tools/perf/util/session.h b/tools/perf/util/session.h
index 03da1cb14dc1..0630e658f8be 100644
--- a/tools/perf/util/session.h
+++ b/tools/perf/util/session.h
@@ -9,28 +9,13 @@
 #include "symbol.h"
 #include "thread.h"
 #include "data.h"
+#include "ordered-events.h"
 #include <linux/rbtree.h>
 #include <linux/perf_event.h>
 
-struct ordered_event;
 struct ip_callchain;
 struct thread;
 
-struct ordered_events {
-	u64 last_flush;
-	u64 next_flush;
-	u64 max_timestamp;
-	u64 max_alloc_size;
-	u64 cur_alloc_size;
-	struct list_head events;
-	struct list_head cache;
-	struct list_head to_free;
-	struct ordered_event *buffer;
-	struct ordered_event *last;
-	int buffer_idx;
-	unsigned int nr_events;
-};
-
 struct perf_session {
 	struct perf_header header;
 	struct machines machines;
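
A note on the three flush modes now declared in ordered-events.h: OE_FLUSH__FINAL drains the whole queue by raising the limit to ULLONG_MAX; OE_FLUSH__ROUND flushes up to the limit recorded at the previous round and, on success, advances next_flush to the current max_timestamp; OE_FLUSH__HALF sets the limit to the midpoint between the oldest and the newest queued timestamps (for example, with first->timestamp = 1000 and last->timestamp = 5000 the limit becomes 1000 + (5000 - 1000) / 2 = 3000), so roughly the older half of the queue is delivered. The half flush is not exercised by the hunks shown here; a plausible use, sketched below under that assumption, is to make room when ordered_events__new() fails because cur_alloc_size has reached max_alloc_size. The helper name and error handling are hypothetical.

/* Hypothetical queueing helper showing where OE_FLUSH__HALF fits in. */
#include <errno.h>
#include "util/session.h"
#include "util/tool.h"
#include "util/ordered-events.h"

static int queue_sample(struct perf_session *s, struct perf_tool *tool,
			union perf_event *event, u64 timestamp, u64 file_offset)
{
	struct ordered_event *new;

	new = ordered_events__new(&s->ordered_events, timestamp);
	if (!new) {
		/* Allocation limit reached: drain the older half and retry once. */
		ordered_events__flush(s, tool, OE_FLUSH__HALF);
		new = ordered_events__new(&s->ordered_events, timestamp);
		if (!new)
			return -ENOMEM;
	}

	new->event = event;
	new->file_offset = file_offset;
	return 0;
}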