Diffstat (limited to 'tools/perf/builtin-top.c')
-rw-r--r--   tools/perf/builtin-top.c   736
1 files changed, 736 insertions, 0 deletions
diff --git a/tools/perf/builtin-top.c b/tools/perf/builtin-top.c
new file mode 100644
index 00000000000..5352b5e352e
--- /dev/null
+++ b/tools/perf/builtin-top.c
@@ -0,0 +1,736 @@
/*
 * builtin-top.c
 *
 * Builtin top command: Display a continuously updated profile of
 * any workload, CPU or specific PID.
 *
 * Copyright (C) 2008, Red Hat Inc, Ingo Molnar <mingo@redhat.com>
 *
 * Improvements and fixes by:
 *
 *   Arjan van de Ven <arjan@linux.intel.com>
 *   Yanmin Zhang <yanmin.zhang@intel.com>
 *   Wu Fengguang <fengguang.wu@intel.com>
 *   Mike Galbraith <efault@gmx.de>
 *   Paul Mackerras <paulus@samba.org>
 *
 * Released under the GPL v2. (and only v2, not any later version)
 */
#include "builtin.h"

#include "perf.h"

#include "util/symbol.h"
#include "util/color.h"
#include "util/util.h"
#include "util/rbtree.h"
#include "util/parse-options.h"
#include "util/parse-events.h"

#include <assert.h>
#include <fcntl.h>

#include <stdio.h>

#include <errno.h>
#include <time.h>
#include <sched.h>
#include <pthread.h>

#include <sys/syscall.h>
#include <sys/ioctl.h>
#include <sys/poll.h>
#include <sys/prctl.h>
#include <sys/wait.h>
#include <sys/uio.h>
#include <sys/mman.h>

#include <linux/unistd.h>
#include <linux/types.h>

static int fd[MAX_NR_CPUS][MAX_COUNTERS];

static int system_wide = 0;

static int default_interval = 100000;

static u64 count_filter = 5;
static int print_entries = 15;

static int target_pid = -1;
static int profile_cpu = -1;
static int nr_cpus = 0;
static unsigned int realtime_prio = 0;
static int group = 0;
static unsigned int page_size;
static unsigned int mmap_pages = 16;
static int freq = 0;
static int verbose = 0;

static char *sym_filter;
static unsigned long filter_start;
static unsigned long filter_end;

static int delay_secs = 2;
static int zero;
static int dump_symtab;

/*
 * Symbols
 */

static u64 min_ip;
static u64 max_ip = -1ll;

struct sym_entry {
	struct rb_node rb_node;
	struct list_head node;
	unsigned long count[MAX_COUNTERS];
	unsigned long snap_count;
	double weight;
	int skip;
};

struct sym_entry *sym_filter_entry;

struct dso *kernel_dso;

/*
 * Symbols are added here in record_ip() and removed again once
 * their counts have decayed to zero:
 */
static LIST_HEAD(active_symbols);
static pthread_mutex_t active_symbols_lock = PTHREAD_MUTEX_INITIALIZER;

/*
 * Ordering weight: count-1 * count-2 * ... / count-n
 */
static double sym_weight(const struct sym_entry *sym)
{
	double weight = sym->snap_count;
	int counter;

	for (counter = 1; counter < nr_counters-1; counter++)
		weight *= sym->count[counter];

	weight /= (sym->count[counter] + 1);

	return weight;
}

static long samples;
static long userspace_samples;
static const char CONSOLE_CLEAR[] = "\033[H\033[2J";	/* ANSI: cursor home, clear screen */
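/*
 * The caller must hold active_symbols_lock (see record_ip()):
 */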
static void __list_insert_active_sym(struct sym_entry *syme)
{
	list_add(&syme->node, &active_symbols);
}

static void list_remove_active_sym(struct sym_entry *syme)
{
	pthread_mutex_lock(&active_symbols_lock);
	list_del_init(&syme->node);
	pthread_mutex_unlock(&active_symbols_lock);
}

static void rb_insert_active_sym(struct rb_root *tree, struct sym_entry *se)
{
	struct rb_node **p = &tree->rb_node;
	struct rb_node *parent = NULL;
	struct sym_entry *iter;

	while (*p != NULL) {
		parent = *p;
		iter = rb_entry(parent, struct sym_entry, rb_node);

		if (se->weight > iter->weight)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	rb_link_node(&se->rb_node, parent, p);
	rb_insert_color(&se->rb_node, tree);
}

static void print_sym_table(void)
{
	int printed = 0, j;
	int counter;
	float samples_per_sec = samples/delay_secs;
	float ksamples_per_sec = (samples-userspace_samples)/delay_secs;
	float sum_ksamples = 0.0;
	struct sym_entry *syme, *n;
	struct rb_root tmp = RB_ROOT;
	struct rb_node *nd;

	samples = userspace_samples = 0;

	/* Sort the active symbols */
	pthread_mutex_lock(&active_symbols_lock);
	syme = list_entry(active_symbols.next, struct sym_entry, node);
	pthread_mutex_unlock(&active_symbols_lock);

	list_for_each_entry_safe_from(syme, n, &active_symbols, node) {
		syme->snap_count = syme->count[0];
		if (syme->snap_count != 0) {
			syme->weight = sym_weight(syme);
			rb_insert_active_sym(&tmp, syme);
			sum_ksamples += syme->snap_count;

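			/*
			 * Decay the per-counter counts between refreshes,
			 * or reset them when -z/--zero was given:
			 */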
			for (j = 0; j < nr_counters; j++)
				syme->count[j] = zero ? 0 : syme->count[j] * 7 / 8;
		} else
			list_remove_active_sym(syme);
	}

	puts(CONSOLE_CLEAR);

	printf(
"------------------------------------------------------------------------------\n");
	printf( " PerfTop:%8.0f irqs/sec  kernel:%4.1f%% [",
		samples_per_sec,
		100.0 - (100.0*((samples_per_sec-ksamples_per_sec)/samples_per_sec)));

	if (nr_counters == 1) {
		printf("%Ld", (u64)attrs[0].sample_period);
		if (freq)
			printf("Hz ");
		else
			printf(" ");
	}

	for (counter = 0; counter < nr_counters; counter++) {
		if (counter)
			printf("/");

		printf("%s", event_name(counter));
	}

	printf( "], ");

	if (target_pid != -1)
		printf(" (target_pid: %d", target_pid);
	else
		printf(" (all");

	if (profile_cpu != -1)
		printf(", cpu: %d)\n", profile_cpu);
	else {
		if (target_pid != -1)
			printf(")\n");
		else
			printf(", %d CPUs)\n", nr_cpus);
	}

	printf("------------------------------------------------------------------------------\n\n");

	if (nr_counters == 1)
		printf(" samples    pcnt");
	else
		printf(" weight     samples    pcnt");

	printf("         RIP          kernel function\n"
	       " ______     _______   _____   ________________   _______________\n\n"
	);

	for (nd = rb_first(&tmp); nd; nd = rb_next(nd)) {
		struct sym_entry *syme = rb_entry(nd, struct sym_entry, rb_node);
		struct symbol *sym = (struct symbol *)(syme + 1);
		char *color = PERF_COLOR_NORMAL;
		double pcnt;

		if (++printed > print_entries || syme->snap_count < count_filter)
			continue;

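		/*
		 * This symbol's share of all kernel samples seen in
		 * this refresh interval:
		 */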
		pcnt = 100.0 - (100.0 * ((sum_ksamples - syme->snap_count) /
					 sum_ksamples));

		/*
		 * We color high-overhead entries in red, mid-overhead
		 * entries in green - and keep the low overhead places
		 * normal:
		 */
		if (pcnt >= 5.0) {
			color = PERF_COLOR_RED;
		} else {
			if (pcnt >= 0.5)
				color = PERF_COLOR_GREEN;
		}

		if (nr_counters == 1)
			printf("%20.2f - ", syme->weight);
		else
			printf("%9.1f %10ld - ", syme->weight, syme->snap_count);

		color_fprintf(stdout, color, "%4.1f%%", pcnt);
		printf(" - %016llx : %s\n", sym->start, sym->name);
	}
}

static void *display_thread(void *arg)
{
	struct pollfd stdin_poll = { .fd = 0, .events = POLLIN };
	int delay_msecs = delay_secs * 1000;

	printf("PerfTop refresh period: %d seconds\n", delay_secs);

	do {
		print_sym_table();
	} while (!poll(&stdin_poll, 1, delay_msecs) == 1);

	printf("key pressed - exiting.\n");
	exit(0);

	return NULL;
}

static int symbol_filter(struct dso *self, struct symbol *sym)
{
	static int filter_match;
	struct sym_entry *syme;
	const char *name = sym->name;

	if (!strcmp(name, "_text") ||
	    !strcmp(name, "_etext") ||
	    !strcmp(name, "_sinittext") ||
	    !strncmp("init_module", name, 11) ||
	    !strncmp("cleanup_module", name, 14) ||
	    strstr(name, "_text_start") ||
	    strstr(name, "_text_end"))
		return 1;

	syme = dso__sym_priv(self, sym);
	/* Tag samples to be skipped. */
	if (!strcmp("default_idle", name) ||
	    !strcmp("cpu_idle", name) ||
	    !strcmp("enter_idle", name) ||
	    !strcmp("exit_idle", name) ||
	    !strcmp("mwait_idle", name))
		syme->skip = 1;

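	/*
	 * The previous call matched sym_filter; this symbol's start
	 * address marks the end of the filtered symbol's range:
	 */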
	if (filter_match == 1) {
		filter_end = sym->start;
		filter_match = -1;
		if (filter_end - filter_start > 10000) {
			fprintf(stderr,
				"hm, too large filter symbol <%s> - skipping.\n",
				sym_filter);
			fprintf(stderr, "symbol filter start: %016lx\n",
				filter_start);
			fprintf(stderr, "                end: %016lx\n",
				filter_end);
			filter_end = filter_start = 0;
			sym_filter = NULL;
			sleep(1);
		}
	}

	if (filter_match == 0 && sym_filter && !strcmp(name, sym_filter)) {
		filter_match = 1;
		filter_start = sym->start;
	}


	return 0;
}

static int parse_symbols(void)
{
	struct rb_node *node;
	struct symbol *sym;

	kernel_dso = dso__new("[kernel]", sizeof(struct sym_entry));
	if (kernel_dso == NULL)
		return -1;

	if (dso__load_kernel(kernel_dso, NULL, symbol_filter, 1) != 0)
		goto out_delete_dso;

	node = rb_first(&kernel_dso->syms);
	sym = rb_entry(node, struct symbol, rb_node);
	min_ip = sym->start;

	node = rb_last(&kernel_dso->syms);
	sym = rb_entry(node, struct symbol, rb_node);
	max_ip = sym->end;

	if (dump_symtab)
		dso__fprintf(kernel_dso, stderr);

	return 0;

out_delete_dso:
	dso__delete(kernel_dso);
	kernel_dso = NULL;
	return -1;
}

#define TRACE_COUNT     3

/*
 * Binary search in the histogram table and record the hit:
 */
static void record_ip(u64 ip, int counter)
{
	struct symbol *sym = dso__find_symbol(kernel_dso, ip);

	if (sym != NULL) {
		struct sym_entry *syme = dso__sym_priv(kernel_dso, sym);

		if (!syme->skip) {
			syme->count[counter]++;
			pthread_mutex_lock(&active_symbols_lock);
			if (list_empty(&syme->node) || !syme->node.next)
				__list_insert_active_sym(syme);
			pthread_mutex_unlock(&active_symbols_lock);
			return;
		}
	}

	samples--;
}

static void process_event(u64 ip, int counter)
{
	samples++;

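	/*
	 * Anything outside the kernel text range [min_ip, max_ip]
	 * is accounted as a userspace sample:
	 */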
	if (ip < min_ip || ip > max_ip) {
		userspace_samples++;
		return;
	}

	record_ip(ip, counter);
}

struct mmap_data {
	int counter;
	void *base;
	unsigned int mask;
	unsigned int prev;
};

static unsigned int mmap_read_head(struct mmap_data *md)
{
	struct perf_counter_mmap_page *pc = md->base;
	int head;

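	/*
	 * Read the kernel-updated data_head, then order that read
	 * before any reads of the ring-buffer data itself:
	 */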
	head = pc->data_head;
	rmb();

	return head;
}

struct timeval last_read, this_read;

static void mmap_read_counter(struct mmap_data *md)
{
	unsigned int head = mmap_read_head(md);
	unsigned int old = md->prev;
	unsigned char *data = md->base + page_size;
	int diff;

	gettimeofday(&this_read, NULL);

	/*
	 * If we're further behind than half the buffer, there's a chance
	 * the writer will bite our tail and mess up the samples under us.
	 *
	 * If we somehow ended up ahead of the head, we got messed up.
	 *
	 * In either case, truncate and restart at head.
	 */
	diff = head - old;
	if (diff > md->mask / 2 || diff < 0) {
		struct timeval iv;
		unsigned long msecs;

		timersub(&this_read, &last_read, &iv);
		msecs = iv.tv_sec*1000 + iv.tv_usec/1000;

		fprintf(stderr, "WARNING: failed to keep up with mmap data."
				"  Last read %lu msecs ago.\n", msecs);

		/*
		 * head points to a known good entry, start there.
		 */
		old = head;
	}

	last_read = this_read;

	for (; old != head;) {
		struct ip_event {
			struct perf_event_header header;
			u64 ip;
			u32 pid, target_pid;
		};
		struct mmap_event {
			struct perf_event_header header;
			u32 pid, target_pid;
			u64 start;
			u64 len;
			u64 pgoff;
			char filename[PATH_MAX];
		};

		typedef union event_union {
			struct perf_event_header header;
			struct ip_event ip;
			struct mmap_event mmap;
		} event_t;

		event_t *event = (event_t *)&data[old & md->mask];

		event_t event_copy;

		size_t size = event->header.size;

		/*
		 * Event straddles the mmap boundary -- header should always
		 * be inside due to u64 alignment of output.
		 */
		if ((old & md->mask) + size != ((old + size) & md->mask)) {
			unsigned int offset = old;
			unsigned int len = min(sizeof(*event), size), cpy;
			void *dst = &event_copy;

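			/*
			 * Copy the event piecewise across the ring-buffer
			 * wrap into the bounce buffer:
			 */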
			do {
				cpy = min(md->mask + 1 - (offset & md->mask), len);
				memcpy(dst, &data[offset & md->mask], cpy);
				offset += cpy;
				dst += cpy;
				len -= cpy;
			} while (len);

			event = &event_copy;
		}

		old += size;

		if (event->header.misc & PERF_EVENT_MISC_OVERFLOW) {
			if (event->header.type & PERF_SAMPLE_IP)
				process_event(event->ip.ip, md->counter);
		}
	}

	md->prev = old;
}

static struct pollfd event_array[MAX_NR_CPUS * MAX_COUNTERS];
static struct mmap_data mmap_array[MAX_NR_CPUS][MAX_COUNTERS];

static void mmap_read(void)
{
	int i, counter;

	for (i = 0; i < nr_cpus; i++) {
		for (counter = 0; counter < nr_counters; counter++)
			mmap_read_counter(&mmap_array[i][counter]);
	}
}

int nr_poll;
int group_fd;

static void start_counter(int i, int counter)
{
	struct perf_counter_attr *attr;
	unsigned int cpu;

	cpu = profile_cpu;
	if (target_pid == -1 && profile_cpu == -1)
		cpu = i;

	attr = attrs + counter;

	attr->sample_type = PERF_SAMPLE_IP | PERF_SAMPLE_TID;
	attr->freq = freq;

try_again:
	fd[i][counter] = sys_perf_counter_open(attr, target_pid, cpu, group_fd, 0);

	if (fd[i][counter] < 0) {
		int err = errno;

		if (err == EPERM)
			die("No permission - are you root?\n");
		/*
		 * If it's cycles then fall back to hrtimer
		 * based cpu-clock-tick sw counter, which
		 * is always available even if no PMU support:
		 */
		if (attr->type == PERF_TYPE_HARDWARE
			&& attr->config == PERF_COUNT_HW_CPU_CYCLES) {

			if (verbose)
				warning(" ... trying to fall back to cpu-clock-ticks\n");

			attr->type = PERF_TYPE_SOFTWARE;
			attr->config = PERF_COUNT_SW_CPU_CLOCK;
			goto try_again;
		}
		printf("\n");
		error("perfcounter syscall returned with %d (%s)\n",
			fd[i][counter], strerror(err));
		die("No CONFIG_PERF_COUNTERS=y kernel support configured?\n");
		exit(-1);
	}
	assert(fd[i][counter] >= 0);
	fcntl(fd[i][counter], F_SETFL, O_NONBLOCK);

	/*
	 * First counter acts as the group leader:
	 */
	if (group && group_fd == -1)
		group_fd = fd[i][counter];

	event_array[nr_poll].fd = fd[i][counter];
	event_array[nr_poll].events = POLLIN;
	nr_poll++;

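	/*
	 * Map one meta-data page plus mmap_pages data pages; the mask
	 * relies on mmap_pages*page_size being a power of two:
	 */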
	mmap_array[i][counter].counter = counter;
	mmap_array[i][counter].prev = 0;
	mmap_array[i][counter].mask = mmap_pages*page_size - 1;
	mmap_array[i][counter].base = mmap(NULL, (mmap_pages+1)*page_size,
			PROT_READ, MAP_SHARED, fd[i][counter], 0);
	if (mmap_array[i][counter].base == MAP_FAILED)
		die("failed to mmap with %d (%s)\n", errno, strerror(errno));
}

static int __cmd_top(void)
{
	pthread_t thread;
	int i, counter;
	int ret;

	for (i = 0; i < nr_cpus; i++) {
		group_fd = -1;
		for (counter = 0; counter < nr_counters; counter++)
			start_counter(i, counter);
	}

	/* Wait for a minimal set of events before starting the snapshot */
	poll(event_array, nr_poll, 100);

	mmap_read();

	if (pthread_create(&thread, NULL, display_thread, NULL)) {
		printf("Could not create display thread.\n");
		exit(-1);
	}

	if (realtime_prio) {
		struct sched_param param;

		param.sched_priority = realtime_prio;
		if (sched_setscheduler(0, SCHED_FIFO, &param)) {
			printf("Could not set realtime priority.\n");
			exit(-1);
		}
	}

	while (1) {
		int hits = samples;

		mmap_read();

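		/*
		 * Only block in poll() (for up to 100ms) when the last
		 * mmap_read() produced no new samples:
		 */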
		if (hits == samples)
			ret = poll(event_array, nr_poll, 100);
	}

	return 0;
}

static const char * const top_usage[] = {
	"perf top [<options>]",
	NULL
};

static const struct option options[] = {
	OPT_CALLBACK('e', "event", NULL, "event",
		     "event selector. use 'perf list' to list available events",
		     parse_events),
	OPT_INTEGER('c', "count", &default_interval,
		    "event period to sample"),
	OPT_INTEGER('p', "pid", &target_pid,
		    "profile events on existing pid"),
	OPT_BOOLEAN('a', "all-cpus", &system_wide,
		    "system-wide collection from all CPUs"),
	OPT_INTEGER('C', "CPU", &profile_cpu,
		    "CPU to profile on"),
	OPT_INTEGER('m', "mmap-pages", &mmap_pages,
		    "number of mmap data pages"),
	OPT_INTEGER('r', "realtime", &realtime_prio,
		    "collect data with this RT SCHED_FIFO priority"),
	OPT_INTEGER('d', "delay", &delay_secs,
		    "number of seconds to delay between refreshes"),
	OPT_BOOLEAN('D', "dump-symtab", &dump_symtab,
		    "dump the symbol table used for profiling"),
	OPT_INTEGER('f', "count-filter", &count_filter,
		    "only display functions with more events than this"),
	OPT_BOOLEAN('g', "group", &group,
		    "put the counters into a counter group"),
	OPT_STRING('s', "sym-filter", &sym_filter, "pattern",
		    "only display symbols matching this pattern"),
	OPT_BOOLEAN('z', "zero", &zero,
		    "zero history across updates"),
	OPT_INTEGER('F', "freq", &freq,
		    "profile at this frequency"),
	OPT_INTEGER('E', "entries", &print_entries,
		    "display this many functions"),
	OPT_BOOLEAN('v', "verbose", &verbose,
		    "be more verbose (show counter open errors, etc)"),
	OPT_END()
};

int cmd_top(int argc, const char **argv, const char *prefix)
{
	int counter;

	page_size = sysconf(_SC_PAGE_SIZE);

	argc = parse_options(argc, argv, options, top_usage, 0);
	if (argc)
		usage_with_options(top_usage, options);

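	/*
	 * With -F the requested frequency is carried in sample_period
	 * while attr->freq gets set in start_counter():
	 */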
	if (freq) {
		default_interval = freq;
		freq = 1;
	}

	/* CPU and PID are mutually exclusive */
	if (target_pid != -1 && profile_cpu != -1) {
		printf("WARNING: PID switch overriding CPU\n");
		sleep(1);
		profile_cpu = -1;
	}

	if (!nr_counters)
		nr_counters = 1;

	if (delay_secs < 1)
		delay_secs = 1;

	parse_symbols();

	/*
	 * Fill in the ones not specifically initialized via -c:
	 */
	for (counter = 0; counter < nr_counters; counter++) {
		if (attrs[counter].sample_period)
			continue;

		attrs[counter].sample_period = default_interval;
	}

	nr_cpus = sysconf(_SC_NPROCESSORS_ONLN);
	assert(nr_cpus <= MAX_NR_CPUS);
	assert(nr_cpus >= 0);

	if (target_pid != -1 || profile_cpu != -1)
		nr_cpus = 1;

	return __cmd_top();
}