diff options
author | Jiri Olsa <jolsa@kernel.org> | 2014-08-01 12:02:58 -0400 |
---|---|---|
committer | Arnaldo Carvalho de Melo <acme@redhat.com> | 2014-08-12 11:02:58 -0400 |
commit | 5f86b80b85f0dcd05fd1471eac6984181a707c4f (patch) | |
tree | 4894e3ca1638822b97d3f9bc4a41f5f56e932260 /tools/perf/util/ordered-events.c | |
parent | 79a30fe4f3758c98e1b7a474952b9701d513e580 (diff) |
perf tools: Create ordered-events object
Move the ordered-events code into a separate object, ordered-events.[ch].
No functional change was intended.
Signed-off-by: Jiri Olsa <jolsa@kernel.org>
Acked-by: David Ahern <dsahern@gmail.com>
Cc: Corey Ashford <cjashfor@linux.vnet.ibm.com>
Cc: David Ahern <dsahern@gmail.com>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Ingo Molnar <mingo@kernel.org>
Cc: Jean Pihet <jean.pihet@linaro.org>
Cc: Namhyung Kim <namhyung@kernel.org>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Link: http://lkml.kernel.org/n/tip-1ge3rilgudszbl87cejm1tfg@git.kernel.org
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
Diffstat (limited to 'tools/perf/util/ordered-events.c')
-rw-r--r-- | tools/perf/util/ordered-events.c | 196 |
1 files changed, 196 insertions, 0 deletions
diff --git a/tools/perf/util/ordered-events.c b/tools/perf/util/ordered-events.c new file mode 100644 index 000000000000..95f8211ccdde --- /dev/null +++ b/tools/perf/util/ordered-events.c | |||
@@ -0,0 +1,196 @@ | |||
1 | #include <linux/list.h> | ||
2 | #include "ordered-events.h" | ||
3 | #include "evlist.h" | ||
4 | #include "session.h" | ||
5 | #include "asm/bug.h" | ||
6 | #include "debug.h" | ||
7 | |||
/*
 * Insert @new into the time-ordered @oe->events list.
 *
 * Events usually arrive in roughly increasing time order, so rather than
 * scanning from one end of the list we start from oe->last (the most
 * recently queued event) and walk forward or backward to the insertion
 * point.  oe->max_timestamp tracks the largest timestamp queued so far.
 */
static void queue_event(struct ordered_events *oe, struct ordered_event *new)
{
	struct ordered_event *last = oe->last;
	u64 timestamp = new->timestamp;
	struct list_head *p;

	++oe->nr_events;
	oe->last = new;

	/* First event ever queued: it is trivially both head and maximum. */
	if (!last) {
		list_add(&new->list, &oe->events);
		oe->max_timestamp = timestamp;
		return;
	}

	/*
	 * last event might point to some random place in the list as it's
	 * the last queued event. We expect that the new event is close to
	 * this.
	 */
	if (last->timestamp <= timestamp) {
		/* Walk forward until we find an event newer than @new. */
		while (last->timestamp <= timestamp) {
			p = last->list.next;
			if (p == &oe->events) {
				/* Hit the tail: @new is the newest event. */
				list_add_tail(&new->list, &oe->events);
				oe->max_timestamp = timestamp;
				return;
			}
			last = list_entry(p, struct ordered_event, list);
		}
		/* Insert @new just before the first newer event. */
		list_add_tail(&new->list, &last->list);
	} else {
		/* Walk backward until we find an event older than @new. */
		while (last->timestamp > timestamp) {
			p = last->list.prev;
			if (p == &oe->events) {
				/* Hit the head: @new is the oldest event. */
				list_add(&new->list, &oe->events);
				return;
			}
			last = list_entry(p, struct ordered_event, list);
		}
		/* Insert @new just after the last older event. */
		list_add(&new->list, &last->list);
	}
}
51 | |||
/* Events allocated per backing buffer (~64KB worth of ordered_event). */
#define MAX_SAMPLE_BUFFER	(64 * 1024 / sizeof(struct ordered_event))

/*
 * Get a free ordered_event, in order of preference:
 *  1. recycle one from the free-event cache (oe->cache),
 *  2. carve the next slot out of the current backing buffer,
 *  3. malloc a fresh buffer of MAX_SAMPLE_BUFFER events, but only while
 *     oe->cur_alloc_size is still under the oe->max_alloc_size budget.
 *
 * Returns NULL on malloc failure, or when the budget is exhausted and
 * no cached event is available — presumably so the caller flushes and
 * retries (TODO confirm against callers).
 */
static struct ordered_event *alloc_event(struct ordered_events *oe)
{
	struct list_head *cache = &oe->cache;
	struct ordered_event *new = NULL;

	if (!list_empty(cache)) {
		new = list_entry(cache->next, struct ordered_event, list);
		list_del(&new->list);
	} else if (oe->buffer) {
		new = oe->buffer + oe->buffer_idx;
		/* Buffer exhausted: force a fresh allocation next time. */
		if (++oe->buffer_idx == MAX_SAMPLE_BUFFER)
			oe->buffer = NULL;
	} else if (oe->cur_alloc_size < oe->max_alloc_size) {
		size_t size = MAX_SAMPLE_BUFFER * sizeof(*new);

		oe->buffer = malloc(size);
		if (!oe->buffer)
			return NULL;

		oe->cur_alloc_size += size;
		list_add(&oe->buffer->list, &oe->to_free);

		/* First entry is abused to maintain the to_free list. */
		oe->buffer_idx = 2;
		new = oe->buffer + 1;
	}

	return new;
}
82 | |||
83 | struct ordered_event * | ||
84 | ordered_events__new(struct ordered_events *oe, u64 timestamp) | ||
85 | { | ||
86 | struct ordered_event *new; | ||
87 | |||
88 | new = alloc_event(oe); | ||
89 | if (new) { | ||
90 | new->timestamp = timestamp; | ||
91 | queue_event(oe, new); | ||
92 | } | ||
93 | |||
94 | return new; | ||
95 | } | ||
96 | |||
97 | void ordered_events__delete(struct ordered_events *oe, struct ordered_event *event) | ||
98 | { | ||
99 | list_del(&event->list); | ||
100 | list_add(&event->list, &oe->cache); | ||
101 | oe->nr_events--; | ||
102 | } | ||
103 | |||
/*
 * Deliver all queued events with timestamp <= oe->next_flush, oldest
 * first, recycling each delivered event back into the cache and
 * advancing oe->last_flush as we go.
 *
 * An event that fails to parse is reported and skipped (still deleted);
 * a delivery error aborts the flush and is returned to the caller.
 * Returns 0 otherwise, including when interrupted by session_done().
 */
static int __ordered_events__flush(struct perf_session *s,
				   struct perf_tool *tool)
{
	struct ordered_events *oe = &s->ordered_events;
	struct list_head *head = &oe->events;
	struct ordered_event *tmp, *iter;
	struct perf_sample sample;
	u64 limit = oe->next_flush;
	/* Remember the newest queued timestamp before we start recycling. */
	u64 last_ts = oe->last ? oe->last->timestamp : 0ULL;
	/* ULLONG_MAX limit == final flush of everything: show progress. */
	bool show_progress = limit == ULLONG_MAX;
	struct ui_progress prog;
	int ret;

	/* Nothing to do unless the tool wants ordering and a limit is set. */
	if (!tool->ordered_events || !limit)
		return 0;

	if (show_progress)
		ui_progress__init(&prog, oe->nr_events, "Processing time ordered events...");

	list_for_each_entry_safe(iter, tmp, head, list) {
		if (session_done())
			return 0;

		/* The list is time-ordered: stop at the first event past the limit. */
		if (iter->timestamp > limit)
			break;

		ret = perf_evlist__parse_sample(s->evlist, iter->event, &sample);
		if (ret)
			pr_err("Can't parse sample, err = %d\n", ret);
		else {
			ret = perf_session__deliver_event(s, iter->event, &sample, tool,
							  iter->file_offset);
			if (ret)
				return ret;
		}

		/* Delivered (or unparseable): recycle and advance the mark. */
		ordered_events__delete(oe, iter);
		oe->last_flush = iter->timestamp;

		if (show_progress)
			ui_progress__update(&prog, 1);
	}

	/*
	 * oe->last may now point at a recycled event: NULL it if the queue
	 * drained, else repoint it at the tail when the old last was flushed.
	 */
	if (list_empty(head))
		oe->last = NULL;
	else if (last_ts <= limit)
		oe->last = list_entry(head->prev, struct ordered_event, list);

	return 0;
}
154 | |||
155 | int ordered_events__flush(struct perf_session *s, struct perf_tool *tool, | ||
156 | enum oe_flush how) | ||
157 | { | ||
158 | struct ordered_events *oe = &s->ordered_events; | ||
159 | int err; | ||
160 | |||
161 | switch (how) { | ||
162 | case OE_FLUSH__FINAL: | ||
163 | oe->next_flush = ULLONG_MAX; | ||
164 | break; | ||
165 | |||
166 | case OE_FLUSH__HALF: | ||
167 | { | ||
168 | struct ordered_event *first, *last; | ||
169 | struct list_head *head = &oe->events; | ||
170 | |||
171 | first = list_entry(head->next, struct ordered_event, list); | ||
172 | last = oe->last; | ||
173 | |||
174 | /* Warn if we are called before any event got allocated. */ | ||
175 | if (WARN_ONCE(!last || list_empty(head), "empty queue")) | ||
176 | return 0; | ||
177 | |||
178 | oe->next_flush = first->timestamp; | ||
179 | oe->next_flush += (last->timestamp - first->timestamp) / 2; | ||
180 | break; | ||
181 | } | ||
182 | |||
183 | case OE_FLUSH__ROUND: | ||
184 | default: | ||
185 | break; | ||
186 | }; | ||
187 | |||
188 | err = __ordered_events__flush(s, tool); | ||
189 | |||
190 | if (!err) { | ||
191 | if (how == OE_FLUSH__ROUND) | ||
192 | oe->next_flush = oe->max_timestamp; | ||
193 | } | ||
194 | |||
195 | return err; | ||
196 | } | ||