author    Jiri Olsa <jolsa@redhat.com>                2012-11-09 19:46:45 -0500
committer Arnaldo Carvalho de Melo <acme@redhat.com>  2012-11-14 14:49:43 -0500
commit    16d00fee703866c61c9006eff097952289335479 (patch)
tree      a9923a2095d37ea1a7f236ee33f22eb1d390ab75 /tools/perf/tests/builtin-test.c
parent    a65b9c62be044b7956022e2823c5f079cf35b069 (diff)
perf tests: Move test__PERF_RECORD into separate object

Separate the test__PERF_RECORD test out of builtin-test and into its own perf-record object.

Signed-off-by: Jiri Olsa <jolsa@redhat.com>
Cc: Corey Ashford <cjashfor@linux.vnet.ibm.com>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Ingo Molnar <mingo@elte.hu>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Link: http://lkml.kernel.org/r/1352508412-16914-6-git-send-email-jolsa@redhat.com
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
Diffstat (limited to 'tools/perf/tests/builtin-test.c')
-rw-r--r--  tools/perf/tests/builtin-test.c  307
1 file changed, 0 insertions(+), 307 deletions(-)
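Only the 307 lines removed from builtin-test.c appear below, since the diffstat is limited to that file. For orientation, here is a minimal sketch of how a moved test in this series is typically wired up afterwards; the header and file names (tests.h, tests/perf-record.c) and the description string are assumptions based on the conventions of the perf test suite, not part of this diff:

	/* tools/perf/tests/tests.h (sketch): the moved test body lives in
	 * tests/perf-record.c and is exported through the shared header. */
	int test__PERF_RECORD(void);

	/* tools/perf/tests/builtin-test.c (sketch): the test stays registered
	 * in the static test table; only its implementation moves out. */
	static struct test {
		const char *desc;
		int (*func)(void);
	} tests[] = {
		{
			.desc = "Validate PERF_RECORD_* events & perf_sample fields",
			.func = test__PERF_RECORD,
		},
		/* ... remaining tests, NULL-terminated ... */
		{ .func = NULL, },
	};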
diff --git a/tools/perf/tests/builtin-test.c b/tools/perf/tests/builtin-test.c
index 609f59295326..7cb3928d896a 100644
--- a/tools/perf/tests/builtin-test.c
+++ b/tools/perf/tests/builtin-test.c
@@ -31,313 +31,6 @@
 
 
 
-static int sched__get_first_possible_cpu(pid_t pid, cpu_set_t *maskp)
-{
-	int i, cpu = -1, nrcpus = 1024;
-realloc:
-	CPU_ZERO(maskp);
-
-	if (sched_getaffinity(pid, sizeof(*maskp), maskp) == -1) {
-		if (errno == EINVAL && nrcpus < (1024 << 8)) {
-			nrcpus = nrcpus << 2;
-			goto realloc;
-		}
-		perror("sched_getaffinity");
-		return -1;
-	}
-
-	for (i = 0; i < nrcpus; i++) {
-		if (CPU_ISSET(i, maskp)) {
-			if (cpu == -1)
-				cpu = i;
-			else
-				CPU_CLR(i, maskp);
-		}
-	}
-
-	return cpu;
-}
-
-static int test__PERF_RECORD(void)
-{
-	struct perf_record_opts opts = {
-		.target = {
-			.uid = UINT_MAX,
-			.uses_mmap = true,
-		},
-		.no_delay   = true,
-		.freq	    = 10,
-		.mmap_pages = 256,
-	};
-	cpu_set_t cpu_mask;
-	size_t cpu_mask_size = sizeof(cpu_mask);
-	struct perf_evlist *evlist = perf_evlist__new(NULL, NULL);
-	struct perf_evsel *evsel;
-	struct perf_sample sample;
-	const char *cmd = "sleep";
-	const char *argv[] = { cmd, "1", NULL, };
-	char *bname;
-	u64 prev_time = 0;
-	bool found_cmd_mmap = false,
-	     found_libc_mmap = false,
-	     found_vdso_mmap = false,
-	     found_ld_mmap = false;
-	int err = -1, errs = 0, i, wakeups = 0;
-	u32 cpu;
-	int total_events = 0, nr_events[PERF_RECORD_MAX] = { 0, };
-
-	if (evlist == NULL || argv == NULL) {
-		pr_debug("Not enough memory to create evlist\n");
-		goto out;
-	}
-
-	/*
-	 * We need at least one evsel in the evlist, use the default
-	 * one: "cycles".
-	 */
-	err = perf_evlist__add_default(evlist);
-	if (err < 0) {
-		pr_debug("Not enough memory to create evsel\n");
-		goto out_delete_evlist;
-	}
-
-	/*
-	 * Create maps of threads and cpus to monitor. In this case
-	 * we start with all threads and cpus (-1, -1) but then in
-	 * perf_evlist__prepare_workload we'll fill in the only thread
-	 * we're monitoring, the one forked there.
-	 */
-	err = perf_evlist__create_maps(evlist, &opts.target);
-	if (err < 0) {
-		pr_debug("Not enough memory to create thread/cpu maps\n");
-		goto out_delete_evlist;
-	}
-
-	/*
-	 * Prepare the workload in argv[] to run, it'll fork it, and then wait
-	 * for perf_evlist__start_workload() to exec it. This is done this way
-	 * so that we have time to open the evlist (calling sys_perf_event_open
-	 * on all the fds) and then mmap them.
-	 */
-	err = perf_evlist__prepare_workload(evlist, &opts, argv);
-	if (err < 0) {
-		pr_debug("Couldn't run the workload!\n");
-		goto out_delete_evlist;
-	}
-
-	/*
-	 * Config the evsels, setting attr->comm on the first one, etc.
-	 */
-	evsel = perf_evlist__first(evlist);
-	evsel->attr.sample_type |= PERF_SAMPLE_CPU;
-	evsel->attr.sample_type |= PERF_SAMPLE_TID;
-	evsel->attr.sample_type |= PERF_SAMPLE_TIME;
-	perf_evlist__config_attrs(evlist, &opts);
-
-	err = sched__get_first_possible_cpu(evlist->workload.pid, &cpu_mask);
-	if (err < 0) {
-		pr_debug("sched__get_first_possible_cpu: %s\n", strerror(errno));
-		goto out_delete_evlist;
-	}
-
-	cpu = err;
-
-	/*
-	 * So that we can check perf_sample.cpu on all the samples.
-	 */
-	if (sched_setaffinity(evlist->workload.pid, cpu_mask_size, &cpu_mask) < 0) {
-		pr_debug("sched_setaffinity: %s\n", strerror(errno));
-		goto out_delete_evlist;
-	}
-
-	/*
-	 * Call sys_perf_event_open on all the fds on all the evsels,
-	 * grouping them if asked to.
-	 */
-	err = perf_evlist__open(evlist);
-	if (err < 0) {
-		pr_debug("perf_evlist__open: %s\n", strerror(errno));
-		goto out_delete_evlist;
-	}
-
-	/*
-	 * mmap the first fd on a given CPU and ask for events for the other
-	 * fds in the same CPU to be injected in the same mmap ring buffer
-	 * (using ioctl(PERF_EVENT_IOC_SET_OUTPUT)).
-	 */
-	err = perf_evlist__mmap(evlist, opts.mmap_pages, false);
-	if (err < 0) {
-		pr_debug("perf_evlist__mmap: %s\n", strerror(errno));
-		goto out_delete_evlist;
-	}
-
-	/*
-	 * Now that all is properly set up, enable the events, they will
-	 * count just on workload.pid, which will start...
-	 */
-	perf_evlist__enable(evlist);
-
-	/*
-	 * Now!
-	 */
-	perf_evlist__start_workload(evlist);
-
-	while (1) {
-		int before = total_events;
-
-		for (i = 0; i < evlist->nr_mmaps; i++) {
-			union perf_event *event;
-
-			while ((event = perf_evlist__mmap_read(evlist, i)) != NULL) {
-				const u32 type = event->header.type;
-				const char *name = perf_event__name(type);
-
-				++total_events;
-				if (type < PERF_RECORD_MAX)
-					nr_events[type]++;
-
-				err = perf_evlist__parse_sample(evlist, event, &sample);
-				if (err < 0) {
-					if (verbose)
-						perf_event__fprintf(event, stderr);
-					pr_debug("Couldn't parse sample\n");
-					goto out_err;
-				}
-
-				if (verbose) {
-					pr_info("%" PRIu64" %d ", sample.time, sample.cpu);
-					perf_event__fprintf(event, stderr);
-				}
-
-				if (prev_time > sample.time) {
-					pr_debug("%s going backwards in time, prev=%" PRIu64 ", curr=%" PRIu64 "\n",
-						 name, prev_time, sample.time);
-					++errs;
-				}
-
-				prev_time = sample.time;
-
-				if (sample.cpu != cpu) {
-					pr_debug("%s with unexpected cpu, expected %d, got %d\n",
-						 name, cpu, sample.cpu);
-					++errs;
-				}
-
-				if ((pid_t)sample.pid != evlist->workload.pid) {
-					pr_debug("%s with unexpected pid, expected %d, got %d\n",
-						 name, evlist->workload.pid, sample.pid);
-					++errs;
-				}
-
-				if ((pid_t)sample.tid != evlist->workload.pid) {
-					pr_debug("%s with unexpected tid, expected %d, got %d\n",
-						 name, evlist->workload.pid, sample.tid);
-					++errs;
-				}
-
-				if ((type == PERF_RECORD_COMM ||
-				     type == PERF_RECORD_MMAP ||
-				     type == PERF_RECORD_FORK ||
-				     type == PERF_RECORD_EXIT) &&
-				    (pid_t)event->comm.pid != evlist->workload.pid) {
-					pr_debug("%s with unexpected pid/tid\n", name);
-					++errs;
-				}
-
-				if ((type == PERF_RECORD_COMM ||
-				     type == PERF_RECORD_MMAP) &&
-				    event->comm.pid != event->comm.tid) {
-					pr_debug("%s with different pid/tid!\n", name);
-					++errs;
-				}
-
-				switch (type) {
-				case PERF_RECORD_COMM:
-					if (strcmp(event->comm.comm, cmd)) {
-						pr_debug("%s with unexpected comm!\n", name);
-						++errs;
-					}
-					break;
-				case PERF_RECORD_EXIT:
-					goto found_exit;
-				case PERF_RECORD_MMAP:
-					bname = strrchr(event->mmap.filename, '/');
-					if (bname != NULL) {
-						if (!found_cmd_mmap)
-							found_cmd_mmap = !strcmp(bname + 1, cmd);
-						if (!found_libc_mmap)
-							found_libc_mmap = !strncmp(bname + 1, "libc", 4);
-						if (!found_ld_mmap)
-							found_ld_mmap = !strncmp(bname + 1, "ld", 2);
-					} else if (!found_vdso_mmap)
-						found_vdso_mmap = !strcmp(event->mmap.filename, "[vdso]");
-					break;
-
-				case PERF_RECORD_SAMPLE:
-					/* Just ignore samples for now */
-					break;
-				default:
-					pr_debug("Unexpected perf_event->header.type %d!\n",
-						 type);
-					++errs;
-				}
-			}
-		}
-
-		/*
-		 * We don't use poll here because at least at 3.1 times the
-		 * PERF_RECORD_{!SAMPLE} events don't honour
-		 * perf_event_attr.wakeup_events, just PERF_EVENT_SAMPLE does.
-		 */
-		if (total_events == before && false)
-			poll(evlist->pollfd, evlist->nr_fds, -1);
-
-		sleep(1);
-		if (++wakeups > 5) {
-			pr_debug("No PERF_RECORD_EXIT event!\n");
-			break;
-		}
-	}
-
-found_exit:
-	if (nr_events[PERF_RECORD_COMM] > 1) {
-		pr_debug("Excessive number of PERF_RECORD_COMM events!\n");
-		++errs;
-	}
-
-	if (nr_events[PERF_RECORD_COMM] == 0) {
-		pr_debug("Missing PERF_RECORD_COMM for %s!\n", cmd);
-		++errs;
-	}
-
-	if (!found_cmd_mmap) {
-		pr_debug("PERF_RECORD_MMAP for %s missing!\n", cmd);
-		++errs;
-	}
-
-	if (!found_libc_mmap) {
-		pr_debug("PERF_RECORD_MMAP for %s missing!\n", "libc");
-		++errs;
-	}
-
-	if (!found_ld_mmap) {
-		pr_debug("PERF_RECORD_MMAP for %s missing!\n", "ld");
-		++errs;
-	}
-
-	if (!found_vdso_mmap) {
-		pr_debug("PERF_RECORD_MMAP for %s missing!\n", "[vdso]");
-		++errs;
-	}
-out_err:
-	perf_evlist__munmap(evlist);
-out_delete_evlist:
-	perf_evlist__delete(evlist);
-out:
-	return (err < 0 || errs > 0) ? -1 : 0;
-}
-
-
 #if defined(__x86_64__) || defined(__i386__)
 
 #define barrier() asm volatile("" ::: "memory")
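The deleted test pins the forked workload to a single CPU so that perf_sample.cpu can be verified on every event. Below is a standalone sketch of that pin-to-first-allowed-CPU pattern using only the glibc scheduler API; it compiles on its own and is an illustration, not the perf code (the fixed CPU_SETSIZE bound replaces the EINVAL retry loop above, which only matters on systems with more CPUs than a cpu_set_t can hold):

	#define _GNU_SOURCE
	#include <sched.h>
	#include <stdio.h>
	#include <sys/types.h>

	/* Reduce a process's affinity mask to its first allowed CPU and
	 * return that CPU, mirroring sched__get_first_possible_cpu() above. */
	static int pin_to_first_cpu(pid_t pid)
	{
		cpu_set_t mask;
		int i, cpu = -1;

		CPU_ZERO(&mask);
		if (sched_getaffinity(pid, sizeof(mask), &mask) == -1) {
			perror("sched_getaffinity");
			return -1;
		}

		/* Keep the first set bit, clear all the others. */
		for (i = 0; i < CPU_SETSIZE; i++) {
			if (CPU_ISSET(i, &mask)) {
				if (cpu == -1)
					cpu = i;
				else
					CPU_CLR(i, &mask);
			}
		}

		if (cpu >= 0 && sched_setaffinity(pid, sizeof(mask), &mask) == -1) {
			perror("sched_setaffinity");
			return -1;
		}

		return cpu;
	}

	int main(void)
	{
		int cpu = pin_to_first_cpu(0);	/* pid 0 == calling process */

		if (cpu >= 0)
			printf("pinned to cpu %d\n", cpu);
		return cpu < 0;
	}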