author     Ingo Molnar <mingo@kernel.org>            2012-06-20 07:41:42 -0400
committer  Ingo Molnar <mingo@kernel.org>            2012-06-20 07:41:53 -0400
commit     32c46e579b68c7ac0cd19d0803898a841d99833d (patch)
tree       64a15c3b6eca5b302ef5aff0e045f52035c33eb7 /tools/perf/util/parse-events.c
parent     2992c542fcd40777ed253f57362c65711fb8acaf (diff)
parent     c0a58fb2bdf033df433cad9009c7dac4c6b872b0 (diff)
Merge tag 'perf-core-for-mingo' of git://git.kernel.org/pub/scm/linux/kernel/git/acme/linux into perf/core
Pull perf improvements from Arnaldo Carvalho de Melo:
* Replace event_name with perf_evsel__name, which handles the event
modifiers and doesn't use static variables (a usage sketch follows the
sign-offs below).
* GTK browser improvements, from Namhyung Kim
* Fix possible NULL pointer deref in the TUI annotate browser, from
Samuel Liao
* Add sort by source file:line number, using addr2line.
* Allow printing histogram text snapshots at any point in top/report.
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
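
For readers following the interface change in the first item, here is a minimal sketch (not part of this merge) of how a caller moves from the removed event_name() to perf_evsel__name(). The three-argument signature and the "evsel.h" header are assumptions taken from the removed wrapper visible in the diff below; the helper name is purely illustrative.

#include <stdio.h>
#include "evsel.h"	/* assumed header for struct perf_evsel and perf_evsel__name() */

/* Hypothetical helper, for illustration only. */
static void print_evsel_name(struct perf_evsel *evsel)
{
	char buf[128];

	/*
	 * Old way: event_name(evsel) returned a pointer into a static buffer.
	 * New way: the caller supplies the storage, so no static state remains
	 * and event modifiers (e.g. ":u", ":k") end up in the printed name.
	 */
	perf_evsel__name(evsel, buf, sizeof(buf));
	printf("%s\n", buf);
}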
Diffstat (limited to 'tools/perf/util/parse-events.c')
-rw-r--r--  tools/perf/util/parse-events.c | 203
1 file changed, 17 insertions(+), 186 deletions(-)
diff --git a/tools/perf/util/parse-events.c b/tools/perf/util/parse-events.c
index 3339424cc421..0cc27da30ddb 100644
--- a/tools/perf/util/parse-events.c
+++ b/tools/perf/util/parse-events.c
@@ -64,63 +64,6 @@ static struct event_symbol event_symbols[] = {
 #define PERF_EVENT_TYPE(config) __PERF_EVENT_FIELD(config, TYPE)
 #define PERF_EVENT_ID(config) __PERF_EVENT_FIELD(config, EVENT)
 
-static const char *sw_event_names[PERF_COUNT_SW_MAX] = {
-	"cpu-clock",
-	"task-clock",
-	"page-faults",
-	"context-switches",
-	"CPU-migrations",
-	"minor-faults",
-	"major-faults",
-	"alignment-faults",
-	"emulation-faults",
-};
-
-#define MAX_ALIASES 8
-
-static const char *hw_cache[PERF_COUNT_HW_CACHE_MAX][MAX_ALIASES] = {
-	{ "L1-dcache", "l1-d", "l1d", "L1-data", },
-	{ "L1-icache", "l1-i", "l1i", "L1-instruction", },
-	{ "LLC", "L2", },
-	{ "dTLB", "d-tlb", "Data-TLB", },
-	{ "iTLB", "i-tlb", "Instruction-TLB", },
-	{ "branch", "branches", "bpu", "btb", "bpc", },
-	{ "node", },
-};
-
-static const char *hw_cache_op[PERF_COUNT_HW_CACHE_OP_MAX][MAX_ALIASES] = {
-	{ "load", "loads", "read", },
-	{ "store", "stores", "write", },
-	{ "prefetch", "prefetches", "speculative-read", "speculative-load", },
-};
-
-static const char *hw_cache_result[PERF_COUNT_HW_CACHE_RESULT_MAX]
-				  [MAX_ALIASES] = {
-	{ "refs", "Reference", "ops", "access", },
-	{ "misses", "miss", },
-};
-
-#define C(x)		PERF_COUNT_HW_CACHE_##x
-#define CACHE_READ	(1 << C(OP_READ))
-#define CACHE_WRITE	(1 << C(OP_WRITE))
-#define CACHE_PREFETCH	(1 << C(OP_PREFETCH))
-#define COP(x)		(1 << x)
-
-/*
- * cache operartion stat
- * L1I : Read and prefetch only
- * ITLB and BPU : Read-only
- */
-static unsigned long hw_cache_stat[C(MAX)] = {
-	[C(L1D)] = (CACHE_READ | CACHE_WRITE | CACHE_PREFETCH),
-	[C(L1I)] = (CACHE_READ | CACHE_PREFETCH),
-	[C(LL)] = (CACHE_READ | CACHE_WRITE | CACHE_PREFETCH),
-	[C(DTLB)] = (CACHE_READ | CACHE_WRITE | CACHE_PREFETCH),
-	[C(ITLB)] = (CACHE_READ),
-	[C(BPU)] = (CACHE_READ),
-	[C(NODE)] = (CACHE_READ | CACHE_WRITE | CACHE_PREFETCH),
-};
-
 #define for_each_subsystem(sys_dir, sys_dirent, sys_next)	\
 	while (!readdir_r(sys_dir, &sys_dirent, &sys_next) && sys_next)	\
 		if (sys_dirent.d_type == DT_DIR &&	\
@@ -220,48 +163,6 @@ struct tracepoint_path *tracepoint_id_to_path(u64 config)
 	return NULL;
 }
 
-#define TP_PATH_LEN (MAX_EVENT_LENGTH * 2 + 1)
-static const char *tracepoint_id_to_name(u64 config)
-{
-	static char buf[TP_PATH_LEN];
-	struct tracepoint_path *path;
-
-	path = tracepoint_id_to_path(config);
-	if (path) {
-		snprintf(buf, TP_PATH_LEN, "%s:%s", path->system, path->name);
-		free(path->name);
-		free(path->system);
-		free(path);
-	} else
-		snprintf(buf, TP_PATH_LEN, "%s:%s", "unknown", "unknown");
-
-	return buf;
-}
-
-static int is_cache_op_valid(u8 cache_type, u8 cache_op)
-{
-	if (hw_cache_stat[cache_type] & COP(cache_op))
-		return 1;	/* valid */
-	else
-		return 0;	/* invalid */
-}
-
-static char *event_cache_name(u8 cache_type, u8 cache_op, u8 cache_result)
-{
-	static char name[50];
-
-	if (cache_result) {
-		sprintf(name, "%s-%s-%s", hw_cache[cache_type][0],
-			hw_cache_op[cache_op][0],
-			hw_cache_result[cache_result][0]);
-	} else {
-		sprintf(name, "%s-%s", hw_cache[cache_type][0],
-			hw_cache_op[cache_op][1]);
-	}
-
-	return name;
-}
-
 const char *event_type(int type)
 {
 	switch (type) {
@@ -284,76 +185,6 @@ const char *event_type(int type)
 	return "unknown";
 }
 
-const char *event_name(struct perf_evsel *evsel)
-{
-	u64 config = evsel->attr.config;
-	int type = evsel->attr.type;
-
-	if (type == PERF_TYPE_RAW || type == PERF_TYPE_HARDWARE) {
-		/*
-		 * XXX minimal fix, see comment on perf_evsen__name, this static buffer
-		 * will go away together with event_name in the next devel cycle.
-		 */
-		static char bf[128];
-		perf_evsel__name(evsel, bf, sizeof(bf));
-		return bf;
-	}
-
-	if (evsel->name)
-		return evsel->name;
-
-	return __event_name(type, config);
-}
-
-const char *__event_name(int type, u64 config)
-{
-	static char buf[32];
-
-	if (type == PERF_TYPE_RAW) {
-		sprintf(buf, "raw 0x%" PRIx64, config);
-		return buf;
-	}
-
-	switch (type) {
-	case PERF_TYPE_HARDWARE:
-		return __perf_evsel__hw_name(config);
-
-	case PERF_TYPE_HW_CACHE: {
-		u8 cache_type, cache_op, cache_result;
-
-		cache_type = (config >> 0) & 0xff;
-		if (cache_type > PERF_COUNT_HW_CACHE_MAX)
-			return "unknown-ext-hardware-cache-type";
-
-		cache_op = (config >> 8) & 0xff;
-		if (cache_op > PERF_COUNT_HW_CACHE_OP_MAX)
-			return "unknown-ext-hardware-cache-op";
-
-		cache_result = (config >> 16) & 0xff;
-		if (cache_result > PERF_COUNT_HW_CACHE_RESULT_MAX)
-			return "unknown-ext-hardware-cache-result";
-
-		if (!is_cache_op_valid(cache_type, cache_op))
-			return "invalid-cache";
-
-		return event_cache_name(cache_type, cache_op, cache_result);
-	}
-
-	case PERF_TYPE_SOFTWARE:
-		if (config < PERF_COUNT_SW_MAX && sw_event_names[config])
-			return sw_event_names[config];
-		return "unknown-software";
-
-	case PERF_TYPE_TRACEPOINT:
-		return tracepoint_id_to_name(config);
-
-	default:
-		break;
-	}
-
-	return "unknown";
-}
-
 static int add_event(struct list_head **_list, int *idx,
 		     struct perf_event_attr *attr, char *name)
 {
@@ -375,19 +206,20 @@ static int add_event(struct list_head **_list, int *idx,
 		return -ENOMEM;
 	}
 
-	evsel->name = strdup(name);
+	if (name)
+		evsel->name = strdup(name);
 	list_add_tail(&evsel->node, list);
 	*_list = list;
 	return 0;
 }
 
-static int parse_aliases(char *str, const char *names[][MAX_ALIASES], int size)
+static int parse_aliases(char *str, const char *names[][PERF_EVSEL__MAX_ALIASES], int size)
 {
 	int i, j;
 	int n, longest = -1;
 
 	for (i = 0; i < size; i++) {
-		for (j = 0; j < MAX_ALIASES && names[i][j]; j++) {
+		for (j = 0; j < PERF_EVSEL__MAX_ALIASES && names[i][j]; j++) {
 			n = strlen(names[i][j]);
 			if (n > longest && !strncasecmp(str, names[i][j], n))
 				longest = n;
@@ -412,7 +244,7 @@ int parse_events_add_cache(struct list_head **list, int *idx,
 	 * No fallback - if we cannot get a clear cache type
 	 * then bail out:
 	 */
-	cache_type = parse_aliases(type, hw_cache,
+	cache_type = parse_aliases(type, perf_evsel__hw_cache,
 				   PERF_COUNT_HW_CACHE_MAX);
 	if (cache_type == -1)
 		return -EINVAL;
@@ -425,18 +257,18 @@ int parse_events_add_cache(struct list_head **list, int *idx,
 		snprintf(name + n, MAX_NAME_LEN - n, "-%s\n", str);
 
 		if (cache_op == -1) {
-			cache_op = parse_aliases(str, hw_cache_op,
+			cache_op = parse_aliases(str, perf_evsel__hw_cache_op,
 						 PERF_COUNT_HW_CACHE_OP_MAX);
 			if (cache_op >= 0) {
-				if (!is_cache_op_valid(cache_type, cache_op))
+				if (!perf_evsel__is_cache_op_valid(cache_type, cache_op))
 					return -EINVAL;
 				continue;
 			}
 		}
 
 		if (cache_result == -1) {
-			cache_result = parse_aliases(str, hw_cache_result,
+			cache_result = parse_aliases(str, perf_evsel__hw_cache_result,
 						    PERF_COUNT_HW_CACHE_RESULT_MAX);
 			if (cache_result >= 0)
 				continue;
 		}
@@ -668,8 +500,7 @@ int parse_events_add_numeric(struct list_head **list, int *idx,
 	    config_attr(&attr, head_config, 1))
 		return -EINVAL;
 
-	return add_event(list, idx, &attr,
-			 (char *) __event_name(type, config));
+	return add_event(list, idx, &attr, NULL);
 }
 
 static int parse_events__is_name_term(struct parse_events__term *term)
@@ -677,8 +508,7 @@ static int parse_events__is_name_term(struct parse_events__term *term)
 	return term->type_term == PARSE_EVENTS__TERM_TYPE_NAME;
 }
 
-static char *pmu_event_name(struct perf_event_attr *attr,
-			    struct list_head *head_terms)
+static char *pmu_event_name(struct list_head *head_terms)
 {
 	struct parse_events__term *term;
 
@@ -686,7 +516,7 @@ static char *pmu_event_name(struct perf_event_attr *attr,
 		if (parse_events__is_name_term(term))
 			return term->val.str;
 
-	return (char *) __event_name(PERF_TYPE_RAW, attr->config);
+	return NULL;
 }
 
 int parse_events_add_pmu(struct list_head **list, int *idx,
@@ -714,7 +544,7 @@ int parse_events_add_pmu(struct list_head **list, int *idx,
 		return -EINVAL;
 
 	return add_event(list, idx, &attr,
-			 pmu_event_name(&attr, head_config));
+			 pmu_event_name(head_config));
 }
 
 void parse_events_update_lists(struct list_head *list_event,
@@ -1010,16 +840,17 @@ void print_events_type(u8 type)
 int print_hwcache_events(const char *event_glob)
 {
 	unsigned int type, op, i, printed = 0;
+	char name[64];
 
 	for (type = 0; type < PERF_COUNT_HW_CACHE_MAX; type++) {
 		for (op = 0; op < PERF_COUNT_HW_CACHE_OP_MAX; op++) {
 			/* skip invalid cache type */
-			if (!is_cache_op_valid(type, op))
+			if (!perf_evsel__is_cache_op_valid(type, op))
 				continue;
 
 			for (i = 0; i < PERF_COUNT_HW_CACHE_RESULT_MAX; i++) {
-				char *name = event_cache_name(type, op, i);
-
+				__perf_evsel__hw_cache_type_op_res_name(type, op, i,
+									name, sizeof(name));
 				if (event_glob != NULL && !strglobmatch(name, event_glob))
 					continue;
 
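
Closing note: the removed __event_name() decoded PERF_TYPE_HW_CACHE configs by hand before the perf_evsel__hw_cache* tables and helpers took over. A minimal standalone sketch of that bit layout, mirroring the removed hunk above (the struct and function names here are illustrative, not from the tree):

#include <stdint.h>

/* Illustrative names; the byte layout matches the removed __event_name(). */
struct hw_cache_cfg {
	uint8_t type;	/* byte 0: cache (L1D, L1I, LL, DTLB, ITLB, BPU, NODE) */
	uint8_t op;	/* byte 1: operation (read/load, write/store, prefetch) */
	uint8_t result;	/* byte 2: result (access/refs, miss) */
};

static struct hw_cache_cfg hw_cache_cfg_decode(uint64_t config)
{
	struct hw_cache_cfg c = {
		.type   = (config >>  0) & 0xff,
		.op     = (config >>  8) & 0xff,
		.result = (config >> 16) & 0xff,
	};
	return c;
}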