Diffstat (limited to 'tools')
165 files changed, 12243 insertions, 4655 deletions
diff --git a/tools/hv/hv_get_dhcp_info.sh b/tools/hv/hv_get_dhcp_info.sh
new file mode 100755
index 000000000000..ccd3e9532764
--- /dev/null
+++ b/tools/hv/hv_get_dhcp_info.sh
@@ -0,0 +1,28 @@ | |||
1 | #!/bin/bash | ||
2 | |||
3 | # This example script retrieves the DHCP state of a given interface. | ||
4 | # In the interest of keeping the KVP daemon code free of distro specific | ||
5 | # information, the kvp daemon code invokes this external script to gather | ||
6 | # the DHCP setting for the specified interface. | ||
7 | # | ||
8 | # Input: Name of the interface | ||
9 | # | ||
10 | # Output: The script prints the string "Enabled" to stdout to indicate | ||
11 | # that DHCP is enabled on the interface. If DHCP is not enabled, | ||
12 | # the script prints the string "Disabled" to stdout. | ||
13 | # | ||
14 | # Each Distro is expected to implement this script in a distro specific | ||
15 | # fashion. For instance on Distros that ship with Network Manager enabled, | ||
16 | # this script can be based on the Network Manager APIs for retrieving DHCP | ||
17 | # information. | ||
18 | |||
19 | if_file="/etc/sysconfig/network-scripts/ifcfg-"$1 | ||
20 | |||
21 | dhcp=$(grep "dhcp" $if_file 2>/dev/null) | ||
22 | |||
23 | if [ "$dhcp" != "" ]; | ||
24 | then | ||
25 | echo "Enabled" | ||
26 | else | ||
27 | echo "Disabled" | ||
28 | fi | ||
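
The daemon's side of this contract appears later in this patch: it runs the script with the interface name as its only argument and reads a single word back over a pipe. A minimal standalone sketch of that consuming side follows; the interface name eth0 and the assumption that the script is reachable on PATH as hv_get_dhcp_info are illustrative only.

    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
            FILE *pipe;
            char line[128];
            int dhcp_enabled = 0;

            /* Run the distro-specific helper for a sample interface. */
            pipe = popen("hv_get_dhcp_info eth0", "r");
            if (!pipe)
                    return 1;

            /* The contract is one word on stdout: "Enabled" or "Disabled". */
            if (fgets(line, sizeof(line), pipe) && !strncmp(line, "Enabled", 7))
                    dhcp_enabled = 1;

            pclose(pipe);
            printf("DHCP %s\n", dhcp_enabled ? "enabled" : "disabled");
            return 0;
    }
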
diff --git a/tools/hv/hv_get_dns_info.sh b/tools/hv/hv_get_dns_info.sh
new file mode 100755
index 000000000000..058c17b46ffc
--- /dev/null
+++ b/tools/hv/hv_get_dns_info.sh
@@ -0,0 +1,13 @@ | |||
1 | #!/bin/bash | ||
2 | |||
3 | # This example script parses /etc/resolv.conf to retrieve DNS information. | ||
4 | # In the interest of keeping the KVP daemon code free of distro specific | ||
5 | # information, the kvp daemon code invokes this external script to gather | ||
6 | # DNS information. | ||
7 | # This script is expected to print the nameserver values to stdout. | ||
8 | # Each Distro is expected to implement this script in a distro specific | ||
9 | # fashion. For instance on Distros that ship with Network Manager enabled, | ||
10 | # this script can be based on the Network Manager APIs for retrieving DNS | ||
11 | # entries. | ||
12 | |||
13 | cat /etc/resolv.conf 2>/dev/null | awk '/^nameserver/ { print $2 }' | ||
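
As a point of comparison only (the daemon deliberately leaves this job to the script above, so distros can substitute something else), the same nameserver extraction could be done directly in C. This sketch assumes a conventional /etc/resolv.conf layout:

    #include <stdio.h>

    int main(void)
    {
            FILE *f = fopen("/etc/resolv.conf", "r");
            char line[256];

            if (!f)
                    return 1;

            /* Print the address field of every "nameserver <addr>" line. */
            while (fgets(line, sizeof(line), f)) {
                    char addr[256];

                    if (sscanf(line, "nameserver %255s", addr) == 1)
                            printf("%s\n", addr);
            }

            fclose(f);
            return 0;
    }
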
diff --git a/tools/hv/hv_kvp_daemon.c b/tools/hv/hv_kvp_daemon.c
index d9834b362943..5959affd8820 100644
--- a/tools/hv/hv_kvp_daemon.c
+++ b/tools/hv/hv_kvp_daemon.c
@@ -31,6 +31,7 @@ | |||
31 | #include <stdlib.h> | 31 | #include <stdlib.h> |
32 | #include <unistd.h> | 32 | #include <unistd.h> |
33 | #include <string.h> | 33 | #include <string.h> |
34 | #include <ctype.h> | ||
34 | #include <errno.h> | 35 | #include <errno.h> |
35 | #include <arpa/inet.h> | 36 | #include <arpa/inet.h> |
36 | #include <linux/connector.h> | 37 | #include <linux/connector.h> |
@@ -41,6 +42,7 @@ | |||
41 | #include <syslog.h> | 42 | #include <syslog.h> |
42 | #include <sys/stat.h> | 43 | #include <sys/stat.h> |
43 | #include <fcntl.h> | 44 | #include <fcntl.h> |
45 | #include <dirent.h> | ||
44 | 46 | ||
45 | /* | 47 | /* |
46 | * KVP protocol: The user mode component first registers with the | 48 | * KVP protocol: The user mode component first registers with the |
@@ -68,25 +70,39 @@ enum key_index { | |||
68 | ProcessorArchitecture | 70 | ProcessorArchitecture |
69 | }; | 71 | }; |
70 | 72 | ||
73 | |||
74 | enum { | ||
75 | IPADDR = 0, | ||
76 | NETMASK, | ||
77 | GATEWAY, | ||
78 | DNS | ||
79 | }; | ||
80 | |||
71 | static char kvp_send_buffer[4096]; | 81 | static char kvp_send_buffer[4096]; |
72 | static char kvp_recv_buffer[4096]; | 82 | static char kvp_recv_buffer[4096 * 2]; |
73 | static struct sockaddr_nl addr; | 83 | static struct sockaddr_nl addr; |
84 | static int in_hand_shake = 1; | ||
74 | 85 | ||
75 | static char *os_name = ""; | 86 | static char *os_name = ""; |
76 | static char *os_major = ""; | 87 | static char *os_major = ""; |
77 | static char *os_minor = ""; | 88 | static char *os_minor = ""; |
78 | static char *processor_arch; | 89 | static char *processor_arch; |
79 | static char *os_build; | 90 | static char *os_build; |
80 | static char *lic_version; | 91 | static char *lic_version = "Unknown version"; |
81 | static struct utsname uts_buf; | 92 | static struct utsname uts_buf; |
82 | 93 | ||
94 | /* | ||
95 | * The location of the interface configuration file. | ||
96 | */ | ||
97 | |||
98 | #define KVP_CONFIG_LOC "/var/opt/" | ||
83 | 99 | ||
84 | #define MAX_FILE_NAME 100 | 100 | #define MAX_FILE_NAME 100 |
85 | #define ENTRIES_PER_BLOCK 50 | 101 | #define ENTRIES_PER_BLOCK 50 |
86 | 102 | ||
87 | struct kvp_record { | 103 | struct kvp_record { |
88 | __u8 key[HV_KVP_EXCHANGE_MAX_KEY_SIZE]; | 104 | char key[HV_KVP_EXCHANGE_MAX_KEY_SIZE]; |
89 | __u8 value[HV_KVP_EXCHANGE_MAX_VALUE_SIZE]; | 105 | char value[HV_KVP_EXCHANGE_MAX_VALUE_SIZE]; |
90 | }; | 106 | }; |
91 | 107 | ||
92 | struct kvp_file_state { | 108 | struct kvp_file_state { |
@@ -94,7 +110,7 @@ struct kvp_file_state { | |||
94 | int num_blocks; | 110 | int num_blocks; |
95 | struct kvp_record *records; | 111 | struct kvp_record *records; |
96 | int num_records; | 112 | int num_records; |
97 | __u8 fname[MAX_FILE_NAME]; | 113 | char fname[MAX_FILE_NAME]; |
98 | }; | 114 | }; |
99 | 115 | ||
100 | static struct kvp_file_state kvp_file_info[KVP_POOL_COUNT]; | 116 | static struct kvp_file_state kvp_file_info[KVP_POOL_COUNT]; |
@@ -106,7 +122,7 @@ static void kvp_acquire_lock(int pool) | |||
106 | 122 | ||
107 | if (fcntl(kvp_file_info[pool].fd, F_SETLKW, &fl) == -1) { | 123 | if (fcntl(kvp_file_info[pool].fd, F_SETLKW, &fl) == -1) { |
108 | syslog(LOG_ERR, "Failed to acquire the lock pool: %d", pool); | 124 | syslog(LOG_ERR, "Failed to acquire the lock pool: %d", pool); |
109 | exit(-1); | 125 | exit(EXIT_FAILURE); |
110 | } | 126 | } |
111 | } | 127 | } |
112 | 128 | ||
@@ -118,7 +134,7 @@ static void kvp_release_lock(int pool) | |||
118 | if (fcntl(kvp_file_info[pool].fd, F_SETLK, &fl) == -1) { | 134 | if (fcntl(kvp_file_info[pool].fd, F_SETLK, &fl) == -1) { |
119 | perror("fcntl"); | 135 | perror("fcntl"); |
120 | syslog(LOG_ERR, "Failed to release the lock pool: %d", pool); | 136 | syslog(LOG_ERR, "Failed to release the lock pool: %d", pool); |
121 | exit(-1); | 137 | exit(EXIT_FAILURE); |
122 | } | 138 | } |
123 | } | 139 | } |
124 | 140 | ||
@@ -137,14 +153,19 @@ static void kvp_update_file(int pool) | |||
137 | if (!filep) { | 153 | if (!filep) { |
138 | kvp_release_lock(pool); | 154 | kvp_release_lock(pool); |
139 | syslog(LOG_ERR, "Failed to open file, pool: %d", pool); | 155 | syslog(LOG_ERR, "Failed to open file, pool: %d", pool); |
140 | exit(-1); | 156 | exit(EXIT_FAILURE); |
141 | } | 157 | } |
142 | 158 | ||
143 | bytes_written = fwrite(kvp_file_info[pool].records, | 159 | bytes_written = fwrite(kvp_file_info[pool].records, |
144 | sizeof(struct kvp_record), | 160 | sizeof(struct kvp_record), |
145 | kvp_file_info[pool].num_records, filep); | 161 | kvp_file_info[pool].num_records, filep); |
146 | 162 | ||
147 | fflush(filep); | 163 | if (ferror(filep) || fclose(filep)) { |
164 | kvp_release_lock(pool); | ||
165 | syslog(LOG_ERR, "Failed to write file, pool: %d", pool); | ||
166 | exit(EXIT_FAILURE); | ||
167 | } | ||
168 | |||
148 | kvp_release_lock(pool); | 169 | kvp_release_lock(pool); |
149 | } | 170 | } |
150 | 171 | ||
@@ -163,14 +184,19 @@ static void kvp_update_mem_state(int pool) | |||
163 | if (!filep) { | 184 | if (!filep) { |
164 | kvp_release_lock(pool); | 185 | kvp_release_lock(pool); |
165 | syslog(LOG_ERR, "Failed to open file, pool: %d", pool); | 186 | syslog(LOG_ERR, "Failed to open file, pool: %d", pool); |
166 | exit(-1); | 187 | exit(EXIT_FAILURE); |
167 | } | 188 | } |
168 | while (!feof(filep)) { | 189 | for (;;) { |
169 | readp = &record[records_read]; | 190 | readp = &record[records_read]; |
170 | records_read += fread(readp, sizeof(struct kvp_record), | 191 | records_read += fread(readp, sizeof(struct kvp_record), |
171 | ENTRIES_PER_BLOCK * num_blocks, | 192 | ENTRIES_PER_BLOCK * num_blocks, |
172 | filep); | 193 | filep); |
173 | 194 | ||
195 | if (ferror(filep)) { | ||
196 | syslog(LOG_ERR, "Failed to read file, pool: %d", pool); | ||
197 | exit(EXIT_FAILURE); | ||
198 | } | ||
199 | |||
174 | if (!feof(filep)) { | 200 | if (!feof(filep)) { |
175 | /* | 201 | /* |
176 | * We have more data to read. | 202 | * We have more data to read. |
@@ -180,7 +206,7 @@ static void kvp_update_mem_state(int pool) | |||
180 | 206 | ||
181 | if (record == NULL) { | 207 | if (record == NULL) { |
182 | syslog(LOG_ERR, "malloc failed"); | 208 | syslog(LOG_ERR, "malloc failed"); |
183 | exit(-1); | 209 | exit(EXIT_FAILURE); |
184 | } | 210 | } |
185 | continue; | 211 | continue; |
186 | } | 212 | } |
@@ -191,14 +217,15 @@ static void kvp_update_mem_state(int pool) | |||
191 | kvp_file_info[pool].records = record; | 217 | kvp_file_info[pool].records = record; |
192 | kvp_file_info[pool].num_records = records_read; | 218 | kvp_file_info[pool].num_records = records_read; |
193 | 219 | ||
220 | fclose(filep); | ||
194 | kvp_release_lock(pool); | 221 | kvp_release_lock(pool); |
195 | } | 222 | } |
196 | static int kvp_file_init(void) | 223 | static int kvp_file_init(void) |
197 | { | 224 | { |
198 | int ret, fd; | 225 | int fd; |
199 | FILE *filep; | 226 | FILE *filep; |
200 | size_t records_read; | 227 | size_t records_read; |
201 | __u8 *fname; | 228 | char *fname; |
202 | struct kvp_record *record; | 229 | struct kvp_record *record; |
203 | struct kvp_record *readp; | 230 | struct kvp_record *readp; |
204 | int num_blocks; | 231 | int num_blocks; |
@@ -208,7 +235,7 @@ static int kvp_file_init(void) | |||
208 | if (access("/var/opt/hyperv", F_OK)) { | 235 | if (access("/var/opt/hyperv", F_OK)) { |
209 | if (mkdir("/var/opt/hyperv", S_IRUSR | S_IWUSR | S_IROTH)) { | 236 | if (mkdir("/var/opt/hyperv", S_IRUSR | S_IWUSR | S_IROTH)) { |
210 | syslog(LOG_ERR, " Failed to create /var/opt/hyperv"); | 237 | syslog(LOG_ERR, " Failed to create /var/opt/hyperv"); |
211 | exit(-1); | 238 | exit(EXIT_FAILURE); |
212 | } | 239 | } |
213 | } | 240 | } |
214 | 241 | ||
@@ -232,12 +259,18 @@ static int kvp_file_init(void) | |||
232 | fclose(filep); | 259 | fclose(filep); |
233 | return 1; | 260 | return 1; |
234 | } | 261 | } |
235 | while (!feof(filep)) { | 262 | for (;;) { |
236 | readp = &record[records_read]; | 263 | readp = &record[records_read]; |
237 | records_read += fread(readp, sizeof(struct kvp_record), | 264 | records_read += fread(readp, sizeof(struct kvp_record), |
238 | ENTRIES_PER_BLOCK, | 265 | ENTRIES_PER_BLOCK, |
239 | filep); | 266 | filep); |
240 | 267 | ||
268 | if (ferror(filep)) { | ||
269 | syslog(LOG_ERR, "Failed to read file, pool: %d", | ||
270 | i); | ||
271 | exit(EXIT_FAILURE); | ||
272 | } | ||
273 | |||
241 | if (!feof(filep)) { | 274 | if (!feof(filep)) { |
242 | /* | 275 | /* |
243 | * We have more data to read. | 276 | * We have more data to read. |
@@ -311,7 +344,6 @@ static int kvp_key_add_or_modify(int pool, __u8 *key, int key_size, __u8 *value, | |||
311 | int value_size) | 344 | int value_size) |
312 | { | 345 | { |
313 | int i; | 346 | int i; |
314 | int j, k; | ||
315 | int num_records; | 347 | int num_records; |
316 | struct kvp_record *record; | 348 | struct kvp_record *record; |
317 | int num_blocks; | 349 | int num_blocks; |
@@ -394,7 +426,7 @@ static int kvp_get_value(int pool, __u8 *key, int key_size, __u8 *value, | |||
394 | return 1; | 426 | return 1; |
395 | } | 427 | } |
396 | 428 | ||
397 | static void kvp_pool_enumerate(int pool, int index, __u8 *key, int key_size, | 429 | static int kvp_pool_enumerate(int pool, int index, __u8 *key, int key_size, |
398 | __u8 *value, int value_size) | 430 | __u8 *value, int value_size) |
399 | { | 431 | { |
400 | struct kvp_record *record; | 432 | struct kvp_record *record; |
@@ -406,16 +438,12 @@ static void kvp_pool_enumerate(int pool, int index, __u8 *key, int key_size, | |||
406 | record = kvp_file_info[pool].records; | 438 | record = kvp_file_info[pool].records; |
407 | 439 | ||
408 | if (index >= kvp_file_info[pool].num_records) { | 440 | if (index >= kvp_file_info[pool].num_records) { |
409 | /* | 441 | return 1; |
410 | * This is an invalid index; terminate enumeration; | ||
411 | * - a NULL value will do the trick. | ||
412 | */ | ||
413 | strcpy(value, ""); | ||
414 | return; | ||
415 | } | 442 | } |
416 | 443 | ||
417 | memcpy(key, record[index].key, key_size); | 444 | memcpy(key, record[index].key, key_size); |
418 | memcpy(value, record[index].value, value_size); | 445 | memcpy(value, record[index].value, value_size); |
446 | return 0; | ||
419 | } | 447 | } |
420 | 448 | ||
421 | 449 | ||
@@ -426,6 +454,7 @@ void kvp_get_os_info(void) | |||
426 | 454 | ||
427 | uname(&uts_buf); | 455 | uname(&uts_buf); |
428 | os_build = uts_buf.release; | 456 | os_build = uts_buf.release; |
457 | os_name = uts_buf.sysname; | ||
429 | processor_arch = uts_buf.machine; | 458 | processor_arch = uts_buf.machine; |
430 | 459 | ||
431 | /* | 460 | /* |
@@ -437,20 +466,70 @@ void kvp_get_os_info(void) | |||
437 | if (p) | 466 | if (p) |
438 | *p = '\0'; | 467 | *p = '\0'; |
439 | 468 | ||
469 | /* | ||
470 | * Parse the /etc/os-release file if present: | ||
471 | * http://www.freedesktop.org/software/systemd/man/os-release.html | ||
472 | */ | ||
473 | file = fopen("/etc/os-release", "r"); | ||
474 | if (file != NULL) { | ||
475 | while (fgets(buf, sizeof(buf), file)) { | ||
476 | char *value, *q; | ||
477 | |||
478 | /* Ignore comments */ | ||
479 | if (buf[0] == '#') | ||
480 | continue; | ||
481 | |||
482 | /* Split into name=value */ | ||
483 | p = strchr(buf, '='); | ||
484 | if (!p) | ||
485 | continue; | ||
486 | *p++ = 0; | ||
487 | |||
488 | /* Remove quotes and newline; un-escape */ | ||
489 | value = p; | ||
490 | q = p; | ||
491 | while (*p) { | ||
492 | if (*p == '\\') { | ||
493 | ++p; | ||
494 | if (!*p) | ||
495 | break; | ||
496 | *q++ = *p++; | ||
497 | } else if (*p == '\'' || *p == '"' || | ||
498 | *p == '\n') { | ||
499 | ++p; | ||
500 | } else { | ||
501 | *q++ = *p++; | ||
502 | } | ||
503 | } | ||
504 | *q = 0; | ||
505 | |||
506 | if (!strcmp(buf, "NAME")) { | ||
507 | p = strdup(value); | ||
508 | if (!p) | ||
509 | break; | ||
510 | os_name = p; | ||
511 | } else if (!strcmp(buf, "VERSION_ID")) { | ||
512 | p = strdup(value); | ||
513 | if (!p) | ||
514 | break; | ||
515 | os_major = p; | ||
516 | } | ||
517 | } | ||
518 | fclose(file); | ||
519 | return; | ||
520 | } | ||
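
For illustration, a hypothetical /etc/os-release containing only:

    NAME="Debian GNU/Linux"
    VERSION_ID="7"

would leave os_name set to Debian GNU/Linux and os_major set to 7 after the parsing loop above; the clean-up pass strips the surrounding quotes, any backslash escapes, and the trailing newline.
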
521 | |||
522 | /* Fallback for older RH/SUSE releases */ | ||
440 | file = fopen("/etc/SuSE-release", "r"); | 523 | file = fopen("/etc/SuSE-release", "r"); |
441 | if (file != NULL) | 524 | if (file != NULL) |
442 | goto kvp_osinfo_found; | 525 | goto kvp_osinfo_found; |
443 | file = fopen("/etc/redhat-release", "r"); | 526 | file = fopen("/etc/redhat-release", "r"); |
444 | if (file != NULL) | 527 | if (file != NULL) |
445 | goto kvp_osinfo_found; | 528 | goto kvp_osinfo_found; |
446 | /* | ||
447 | * Add code for other supported platforms. | ||
448 | */ | ||
449 | 529 | ||
450 | /* | 530 | /* |
451 | * We don't have information about the os. | 531 | * We don't have information about the os. |
452 | */ | 532 | */ |
453 | os_name = uts_buf.sysname; | ||
454 | return; | 533 | return; |
455 | 534 | ||
456 | kvp_osinfo_found: | 535 | kvp_osinfo_found: |
@@ -494,82 +573,458 @@ done: | |||
494 | return; | 573 | return; |
495 | } | 574 | } |
496 | 575 | ||
576 | |||
577 | |||
578 | /* | ||
579 | * Retrieve an interface name corresponding to the specified guid. | ||
580 | * If there is a match, the function returns a pointer | ||
581 | * to the interface name and if not, a NULL is returned. | ||
582 | * If a match is found, the caller is responsible for | ||
583 | * freeing the memory. | ||
584 | */ | ||
585 | |||
586 | static char *kvp_get_if_name(char *guid) | ||
587 | { | ||
588 | DIR *dir; | ||
589 | struct dirent *entry; | ||
590 | FILE *file; | ||
591 | char *p, *q, *x; | ||
592 | char *if_name = NULL; | ||
593 | char buf[256]; | ||
594 | char *kvp_net_dir = "/sys/class/net/"; | ||
595 | char dev_id[256]; | ||
596 | |||
597 | dir = opendir(kvp_net_dir); | ||
598 | if (dir == NULL) | ||
599 | return NULL; | ||
600 | |||
601 | snprintf(dev_id, sizeof(dev_id), "%s", kvp_net_dir); | ||
602 | q = dev_id + strlen(kvp_net_dir); | ||
603 | |||
604 | while ((entry = readdir(dir)) != NULL) { | ||
605 | /* | ||
606 | * Set the state for the next pass. | ||
607 | */ | ||
608 | *q = '\0'; | ||
609 | strcat(dev_id, entry->d_name); | ||
610 | strcat(dev_id, "/device/device_id"); | ||
611 | |||
612 | file = fopen(dev_id, "r"); | ||
613 | if (file == NULL) | ||
614 | continue; | ||
615 | |||
616 | p = fgets(buf, sizeof(buf), file); | ||
617 | if (p) { | ||
618 | x = strchr(p, '\n'); | ||
619 | if (x) | ||
620 | *x = '\0'; | ||
621 | |||
622 | if (!strcmp(p, guid)) { | ||
623 | /* | ||
624 | * Found the guid match; return the interface | ||
625 | * name. The caller will free the memory. | ||
626 | */ | ||
627 | if_name = strdup(entry->d_name); | ||
628 | fclose(file); | ||
629 | break; | ||
630 | } | ||
631 | } | ||
632 | fclose(file); | ||
633 | } | ||
634 | |||
635 | closedir(dir); | ||
636 | return if_name; | ||
637 | } | ||
638 | |||
639 | /* | ||
640 | * Retrieve the MAC address given the interface name. | ||
641 | */ | ||
642 | |||
643 | static char *kvp_if_name_to_mac(char *if_name) | ||
644 | { | ||
645 | FILE *file; | ||
646 | char *p, *x; | ||
647 | char buf[256]; | ||
648 | char addr_file[256]; | ||
649 | int i; | ||
650 | char *mac_addr = NULL; | ||
651 | |||
652 | snprintf(addr_file, sizeof(addr_file), "%s%s%s", "/sys/class/net/", | ||
653 | if_name, "/address"); | ||
654 | |||
655 | file = fopen(addr_file, "r"); | ||
656 | if (file == NULL) | ||
657 | return NULL; | ||
658 | |||
659 | p = fgets(buf, sizeof(buf), file); | ||
660 | if (p) { | ||
661 | x = strchr(p, '\n'); | ||
662 | if (x) | ||
663 | *x = '\0'; | ||
664 | for (i = 0; i < strlen(p); i++) | ||
665 | p[i] = toupper(p[i]); | ||
666 | mac_addr = strdup(p); | ||
667 | } | ||
668 | |||
669 | fclose(file); | ||
670 | return mac_addr; | ||
671 | } | ||
672 | |||
673 | |||
674 | /* | ||
675 | * Retrieve the interface name given the MAC address. | ||
676 | */ | ||
677 | |||
678 | static char *kvp_mac_to_if_name(char *mac) | ||
679 | { | ||
680 | DIR *dir; | ||
681 | struct dirent *entry; | ||
682 | FILE *file; | ||
683 | char *p, *q, *x; | ||
684 | char *if_name = NULL; | ||
685 | char buf[256]; | ||
686 | char *kvp_net_dir = "/sys/class/net/"; | ||
687 | char dev_id[256]; | ||
688 | int i; | ||
689 | |||
690 | dir = opendir(kvp_net_dir); | ||
691 | if (dir == NULL) | ||
692 | return NULL; | ||
693 | |||
694 | snprintf(dev_id, sizeof(dev_id), "%s", kvp_net_dir); | ||
695 | q = dev_id + strlen(kvp_net_dir); | ||
696 | |||
697 | while ((entry = readdir(dir)) != NULL) { | ||
698 | /* | ||
699 | * Set the state for the next pass. | ||
700 | */ | ||
701 | *q = '\0'; | ||
702 | |||
703 | strcat(dev_id, entry->d_name); | ||
704 | strcat(dev_id, "/address"); | ||
705 | |||
706 | file = fopen(dev_id, "r"); | ||
707 | if (file == NULL) | ||
708 | continue; | ||
709 | |||
710 | p = fgets(buf, sizeof(buf), file); | ||
711 | if (p) { | ||
712 | x = strchr(p, '\n'); | ||
713 | if (x) | ||
714 | *x = '\0'; | ||
715 | |||
716 | for (i = 0; i < strlen(p); i++) | ||
717 | p[i] = toupper(p[i]); | ||
718 | |||
719 | if (!strcmp(p, mac)) { | ||
720 | /* | ||
721 | * Found the MAC match; return the interface | ||
722 | * name. The caller will free the memory. | ||
723 | */ | ||
724 | if_name = strdup(entry->d_name); | ||
725 | fclose(file); | ||
726 | break; | ||
727 | } | ||
728 | } | ||
729 | fclose(file); | ||
730 | } | ||
731 | |||
732 | closedir(dir); | ||
733 | return if_name; | ||
734 | } | ||
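
A small usage sketch for the two helpers above (hypothetical fragment, not part of the patch); both return strdup()'d strings that the caller must free, and the interface name eth0 is only an example:

    static void demo_mac_roundtrip(void)
    {
            char *mac = kvp_if_name_to_mac("eth0");   /* e.g. "00:15:5D:AA:BB:CC" */
            char *name = mac ? kvp_mac_to_if_name(mac) : NULL;   /* back to "eth0" */

            free(mac);
            free(name);
    }
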
735 | |||
736 | |||
737 | static void kvp_process_ipconfig_file(char *cmd, | ||
738 | char *config_buf, int len, | ||
739 | int element_size, int offset) | ||
740 | { | ||
741 | char buf[256]; | ||
742 | char *p; | ||
743 | char *x; | ||
744 | FILE *file; | ||
745 | |||
746 | /* | ||
747 | * First execute the command. | ||
748 | */ | ||
749 | file = popen(cmd, "r"); | ||
750 | if (file == NULL) | ||
751 | return; | ||
752 | |||
753 | if (offset == 0) | ||
754 | memset(config_buf, 0, len); | ||
755 | while ((p = fgets(buf, sizeof(buf), file)) != NULL) { | ||
756 | if ((len - strlen(config_buf)) < (element_size + 1)) | ||
757 | break; | ||
758 | |||
759 | x = strchr(p, '\n'); | ||
760 | *x = '\0'; | ||
761 | strcat(config_buf, p); | ||
762 | strcat(config_buf, ";"); | ||
763 | } | ||
764 | pclose(file); | ||
765 | } | ||
766 | |||
767 | static void kvp_get_ipconfig_info(char *if_name, | ||
768 | struct hv_kvp_ipaddr_value *buffer) | ||
769 | { | ||
770 | char cmd[512]; | ||
771 | char dhcp_info[128]; | ||
772 | char *p; | ||
773 | FILE *file; | ||
774 | |||
775 | /* | ||
776 | * Get the address of default gateway (ipv4). | ||
777 | */ | ||
778 | sprintf(cmd, "%s %s", "ip route show dev", if_name); | ||
779 | strcat(cmd, " | awk '/default/ {print $3 }'"); | ||
780 | |||
781 | /* | ||
782 | * Execute the command to gather gateway info. | ||
783 | */ | ||
784 | kvp_process_ipconfig_file(cmd, (char *)buffer->gate_way, | ||
785 | (MAX_GATEWAY_SIZE * 2), INET_ADDRSTRLEN, 0); | ||
786 | |||
787 | /* | ||
788 | * Get the address of default gateway (ipv6). | ||
789 | */ | ||
790 | sprintf(cmd, "%s %s", "ip -f inet6 route show dev", if_name); | ||
791 | strcat(cmd, " | awk '/default/ {print $3 }'"); | ||
792 | |||
793 | /* | ||
794 | * Execute the command to gather gateway info (ipv6). | ||
795 | */ | ||
796 | kvp_process_ipconfig_file(cmd, (char *)buffer->gate_way, | ||
797 | (MAX_GATEWAY_SIZE * 2), INET6_ADDRSTRLEN, 1); | ||
798 | |||
799 | |||
800 | /* | ||
801 | * Gather the DNS state. | ||
802 | * Since there is no standard way to get this information | ||
803 | * across various distributions of interest, we just invoke | ||
804 | * an external script that needs to be ported across distros | ||
805 | * of interest. | ||
806 | * | ||
807 | * Following is the expected format of the information from the script: | ||
808 | * | ||
809 | * ipaddr1 (nameserver1) | ||
810 | * ipaddr2 (nameserver2) | ||
811 | * . | ||
812 | * . | ||
813 | */ | ||
814 | |||
815 | sprintf(cmd, "%s", "hv_get_dns_info"); | ||
816 | |||
817 | /* | ||
818 | * Execute the command to gather DNS info. | ||
819 | */ | ||
820 | kvp_process_ipconfig_file(cmd, (char *)buffer->dns_addr, | ||
821 | (MAX_IP_ADDR_SIZE * 2), INET_ADDRSTRLEN, 0); | ||
822 | |||
823 | /* | ||
824 | * Gather the DHCP state. | ||
825 | * We will gather this state by invoking an external script. | ||
826 | * The parameter to the script is the interface name. | ||
827 | * Here is the expected output: | ||
828 | * | ||
829 | * Enabled: DHCP enabled. | ||
830 | */ | ||
831 | |||
832 | sprintf(cmd, "%s %s", "hv_get_dhcp_info", if_name); | ||
833 | |||
834 | file = popen(cmd, "r"); | ||
835 | if (file == NULL) | ||
836 | return; | ||
837 | |||
838 | p = fgets(dhcp_info, sizeof(dhcp_info), file); | ||
839 | if (p == NULL) { | ||
840 | pclose(file); | ||
841 | return; | ||
842 | } | ||
843 | |||
844 | if (!strncmp(p, "Enabled", 7)) | ||
845 | buffer->dhcp_enabled = 1; | ||
846 | else | ||
847 | buffer->dhcp_enabled = 0; | ||
848 | |||
849 | pclose(file); | ||
850 | } | ||
851 | |||
852 | |||
853 | static unsigned int hweight32(unsigned int *w) | ||
854 | { | ||
855 | unsigned int res = *w - ((*w >> 1) & 0x55555555); | ||
856 | res = (res & 0x33333333) + ((res >> 2) & 0x33333333); | ||
857 | res = (res + (res >> 4)) & 0x0F0F0F0F; | ||
858 | res = res + (res >> 8); | ||
859 | return (res + (res >> 16)) & 0x000000FF; | ||
860 | } | ||
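
hweight32() is a plain population count; the IPv6 branch of kvp_get_ip_info() below uses it to turn a netmask into a CIDR prefix length by summing the set bits of the mask's four 32-bit words. The following standalone sketch shows the same calculation, counting bits byte by byte so it does not depend on the s6_addr32 extension; the /64 mask is only an example:

    #include <stdio.h>
    #include <arpa/inet.h>

    int main(void)
    {
            struct in6_addr mask;
            int i, prefix = 0;

            if (inet_pton(AF_INET6, "ffff:ffff:ffff:ffff::", &mask) != 1)
                    return 1;

            /* Count the set bits across all 16 bytes of the mask. */
            for (i = 0; i < 16; i++) {
                    unsigned char b = mask.s6_addr[i];

                    while (b) {
                            prefix += b & 1;
                            b >>= 1;
                    }
            }

            printf("/%d\n", prefix);        /* prints "/64" for this mask */
            return 0;
    }
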
861 | |||
862 | static int kvp_process_ip_address(void *addrp, | ||
863 | int family, char *buffer, | ||
864 | int length, int *offset) | ||
865 | { | ||
866 | struct sockaddr_in *addr; | ||
867 | struct sockaddr_in6 *addr6; | ||
868 | int addr_length; | ||
869 | char tmp[50]; | ||
870 | const char *str; | ||
871 | |||
872 | if (family == AF_INET) { | ||
873 | addr = (struct sockaddr_in *)addrp; | ||
874 | str = inet_ntop(family, &addr->sin_addr, tmp, 50); | ||
875 | addr_length = INET_ADDRSTRLEN; | ||
876 | } else { | ||
877 | addr6 = (struct sockaddr_in6 *)addrp; | ||
878 | str = inet_ntop(family, &addr6->sin6_addr.s6_addr, tmp, 50); | ||
879 | addr_length = INET6_ADDRSTRLEN; | ||
880 | } | ||
881 | |||
882 | if ((length - *offset) < addr_length + 1) | ||
883 | return HV_E_FAIL; | ||
884 | if (str == NULL) { | ||
885 | strcpy(buffer, "inet_ntop failed\n"); | ||
886 | return HV_E_FAIL; | ||
887 | } | ||
888 | if (*offset == 0) | ||
889 | strcpy(buffer, tmp); | ||
890 | else | ||
891 | strcat(buffer, tmp); | ||
892 | strcat(buffer, ";"); | ||
893 | |||
894 | *offset += strlen(str) + 1; | ||
895 | return 0; | ||
896 | } | ||
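
A usage sketch for the helper above (hypothetical fragment, not part of the patch), appending one IPv4 address to the ';'-separated buffer used during enumeration:

    static void demo_append_ipv4(void)
    {
            struct sockaddr_in sa;
            char buf[64];
            int offset = 0;

            memset(&sa, 0, sizeof(sa));
            sa.sin_family = AF_INET;
            inet_pton(AF_INET, "10.0.0.5", &sa.sin_addr);

            /* buf becomes "10.0.0.5;" and offset advances to 9 */
            kvp_process_ip_address(&sa, AF_INET, buf, sizeof(buf), &offset);
    }
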
897 | |||
497 | static int | 898 | static int |
498 | kvp_get_ip_address(int family, char *buffer, int length) | 899 | kvp_get_ip_info(int family, char *if_name, int op, |
900 | void *out_buffer, int length) | ||
499 | { | 901 | { |
500 | struct ifaddrs *ifap; | 902 | struct ifaddrs *ifap; |
501 | struct ifaddrs *curp; | 903 | struct ifaddrs *curp; |
502 | int ipv4_len = strlen("255.255.255.255") + 1; | ||
503 | int ipv6_len = strlen("ffff:ffff:ffff:ffff:ffff:ffff:ffff:ffff")+1; | ||
504 | int offset = 0; | 904 | int offset = 0; |
505 | const char *str; | 905 | int sn_offset = 0; |
506 | char tmp[50]; | ||
507 | int error = 0; | 906 | int error = 0; |
508 | 907 | char *buffer; | |
908 | struct hv_kvp_ipaddr_value *ip_buffer; | ||
909 | char cidr_mask[5]; /* /xyz */ | ||
910 | int weight; | ||
911 | int i; | ||
912 | unsigned int *w; | ||
913 | char *sn_str; | ||
914 | struct sockaddr_in6 *addr6; | ||
915 | |||
916 | if (op == KVP_OP_ENUMERATE) { | ||
917 | buffer = out_buffer; | ||
918 | } else { | ||
919 | ip_buffer = out_buffer; | ||
920 | buffer = (char *)ip_buffer->ip_addr; | ||
921 | ip_buffer->addr_family = 0; | ||
922 | } | ||
509 | /* | 923 | /* |
510 | * On entry into this function, the buffer is capable of holding the | 924 | * On entry into this function, the buffer is capable of holding the |
511 | * maximum key value (2048 bytes). | 925 | * maximum key value. |
512 | */ | 926 | */ |
513 | 927 | ||
514 | if (getifaddrs(&ifap)) { | 928 | if (getifaddrs(&ifap)) { |
515 | strcpy(buffer, "getifaddrs failed\n"); | 929 | strcpy(buffer, "getifaddrs failed\n"); |
516 | return 1; | 930 | return HV_E_FAIL; |
517 | } | 931 | } |
518 | 932 | ||
519 | curp = ifap; | 933 | curp = ifap; |
520 | while (curp != NULL) { | 934 | while (curp != NULL) { |
521 | if ((curp->ifa_addr != NULL) && | 935 | if (curp->ifa_addr == NULL) { |
522 | (curp->ifa_addr->sa_family == family)) { | 936 | curp = curp->ifa_next; |
523 | if (family == AF_INET) { | 937 | continue; |
524 | struct sockaddr_in *addr = | 938 | } |
525 | (struct sockaddr_in *) curp->ifa_addr; | ||
526 | |||
527 | str = inet_ntop(family, &addr->sin_addr, | ||
528 | tmp, 50); | ||
529 | if (str == NULL) { | ||
530 | strcpy(buffer, "inet_ntop failed\n"); | ||
531 | error = 1; | ||
532 | goto getaddr_done; | ||
533 | } | ||
534 | if (offset == 0) | ||
535 | strcpy(buffer, tmp); | ||
536 | else | ||
537 | strcat(buffer, tmp); | ||
538 | strcat(buffer, ";"); | ||
539 | 939 | ||
540 | offset += strlen(str) + 1; | 940 | if ((if_name != NULL) && |
541 | if ((length - offset) < (ipv4_len + 1)) | 941 | (strncmp(curp->ifa_name, if_name, strlen(if_name)))) { |
542 | goto getaddr_done; | 942 | /* |
943 | * We want info about a specific interface; | ||
944 | * just continue. | ||
945 | */ | ||
946 | curp = curp->ifa_next; | ||
947 | continue; | ||
948 | } | ||
543 | 949 | ||
544 | } else { | 950 | /* |
951 | * We only support two address families: AF_INET and AF_INET6. | ||
952 | * If a family value of 0 is specified, we collect both | ||
953 | * supported address families; if not we gather info on | ||
954 | * the specified address family. | ||
955 | */ | ||
956 | if ((family != 0) && (curp->ifa_addr->sa_family != family)) { | ||
957 | curp = curp->ifa_next; | ||
958 | continue; | ||
959 | } | ||
960 | if ((curp->ifa_addr->sa_family != AF_INET) && | ||
961 | (curp->ifa_addr->sa_family != AF_INET6)) { | ||
962 | curp = curp->ifa_next; | ||
963 | continue; | ||
964 | } | ||
545 | 965 | ||
966 | if (op == KVP_OP_GET_IP_INFO) { | ||
546 | /* | 967 | /* |
547 | * We only support AF_INET and AF_INET6 | 968 | * Gather info other than the IP address. |
548 | * and the list of addresses is separated by a ";". | 969 | * IP address info will be gathered later. |
549 | */ | 970 | */ |
550 | struct sockaddr_in6 *addr = | 971 | if (curp->ifa_addr->sa_family == AF_INET) { |
551 | (struct sockaddr_in6 *) curp->ifa_addr; | 972 | ip_buffer->addr_family |= ADDR_FAMILY_IPV4; |
552 | 973 | /* | |
553 | str = inet_ntop(family, | 974 | * Get subnet info. |
554 | &addr->sin6_addr.s6_addr, | 975 | */ |
555 | tmp, 50); | 976 | error = kvp_process_ip_address( |
556 | if (str == NULL) { | 977 | curp->ifa_netmask, |
557 | strcpy(buffer, "inet_ntop failed\n"); | 978 | AF_INET, |
558 | error = 1; | 979 | (char *) |
559 | goto getaddr_done; | 980 | ip_buffer->sub_net, |
560 | } | 981 | length, |
561 | if (offset == 0) | 982 | &sn_offset); |
562 | strcpy(buffer, tmp); | 983 | if (error) |
563 | else | 984 | goto gather_ipaddr; |
564 | strcat(buffer, tmp); | 985 | } else { |
565 | strcat(buffer, ";"); | 986 | ip_buffer->addr_family |= ADDR_FAMILY_IPV6; |
566 | offset += strlen(str) + 1; | ||
567 | if ((length - offset) < (ipv6_len + 1)) | ||
568 | goto getaddr_done; | ||
569 | 987 | ||
988 | /* | ||
989 | * Get subnet info in CIDR format. | ||
990 | */ | ||
991 | weight = 0; | ||
992 | sn_str = (char *)ip_buffer->sub_net; | ||
993 | addr6 = (struct sockaddr_in6 *) | ||
994 | curp->ifa_netmask; | ||
995 | w = addr6->sin6_addr.s6_addr32; | ||
996 | |||
997 | for (i = 0; i < 4; i++) | ||
998 | weight += hweight32(&w[i]); | ||
999 | |||
1000 | sprintf(cidr_mask, "/%d", weight); | ||
1001 | if ((length - sn_offset) < | ||
1002 | (strlen(cidr_mask) + 1)) | ||
1003 | goto gather_ipaddr; | ||
1004 | |||
1005 | if (sn_offset == 0) | ||
1006 | strcpy(sn_str, cidr_mask); | ||
1007 | else | ||
1008 | strcat(sn_str, cidr_mask); | ||
1009 | strcat((char *)ip_buffer->sub_net, ";"); | ||
1010 | sn_offset += strlen(sn_str) + 1; | ||
570 | } | 1011 | } |
571 | 1012 | ||
1013 | /* | ||
1014 | * Collect other ip related configuration info. | ||
1015 | */ | ||
1016 | |||
1017 | kvp_get_ipconfig_info(if_name, ip_buffer); | ||
572 | } | 1018 | } |
1019 | |||
1020 | gather_ipaddr: | ||
1021 | error = kvp_process_ip_address(curp->ifa_addr, | ||
1022 | curp->ifa_addr->sa_family, | ||
1023 | buffer, | ||
1024 | length, &offset); | ||
1025 | if (error) | ||
1026 | goto getaddr_done; | ||
1027 | |||
573 | curp = curp->ifa_next; | 1028 | curp = curp->ifa_next; |
574 | } | 1029 | } |
575 | 1030 | ||
@@ -579,6 +1034,315 @@ getaddr_done: | |||
579 | } | 1034 | } |
580 | 1035 | ||
581 | 1036 | ||
1037 | static int expand_ipv6(char *addr, int type) | ||
1038 | { | ||
1039 | int ret; | ||
1040 | struct in6_addr v6_addr; | ||
1041 | |||
1042 | ret = inet_pton(AF_INET6, addr, &v6_addr); | ||
1043 | |||
1044 | if (ret != 1) { | ||
1045 | if (type == NETMASK) | ||
1046 | return 1; | ||
1047 | return 0; | ||
1048 | } | ||
1049 | |||
1050 | sprintf(addr, "%02x%02x:%02x%02x:%02x%02x:%02x%02x:%02x%02x:" | ||
1051 | "%02x%02x:%02x%02x:%02x%02x", | ||
1052 | (int)v6_addr.s6_addr[0], (int)v6_addr.s6_addr[1], | ||
1053 | (int)v6_addr.s6_addr[2], (int)v6_addr.s6_addr[3], | ||
1054 | (int)v6_addr.s6_addr[4], (int)v6_addr.s6_addr[5], | ||
1055 | (int)v6_addr.s6_addr[6], (int)v6_addr.s6_addr[7], | ||
1056 | (int)v6_addr.s6_addr[8], (int)v6_addr.s6_addr[9], | ||
1057 | (int)v6_addr.s6_addr[10], (int)v6_addr.s6_addr[11], | ||
1058 | (int)v6_addr.s6_addr[12], (int)v6_addr.s6_addr[13], | ||
1059 | (int)v6_addr.s6_addr[14], (int)v6_addr.s6_addr[15]); | ||
1060 | |||
1061 | return 1; | ||
1062 | |||
1063 | } | ||
1064 | |||
1065 | static int is_ipv4(char *addr) | ||
1066 | { | ||
1067 | int ret; | ||
1068 | struct in_addr ipv4_addr; | ||
1069 | |||
1070 | ret = inet_pton(AF_INET, addr, &ipv4_addr); | ||
1071 | |||
1072 | if (ret == 1) | ||
1073 | return 1; | ||
1074 | return 0; | ||
1075 | } | ||
1076 | |||
1077 | static int parse_ip_val_buffer(char *in_buf, int *offset, | ||
1078 | char *out_buf, int out_len) | ||
1079 | { | ||
1080 | char *x; | ||
1081 | char *start; | ||
1082 | |||
1083 | /* | ||
1084 | * in_buf has a sequence of characters that are separated by | ||
1085 | * the character ';'. The last sequence does not have the | ||
1086 | * terminating ";" character. | ||
1087 | */ | ||
1088 | start = in_buf + *offset; | ||
1089 | |||
1090 | x = strchr(start, ';'); | ||
1091 | if (x) | ||
1092 | *x = 0; | ||
1093 | else | ||
1094 | x = start + strlen(start); | ||
1095 | |||
1096 | if (strlen(start) != 0) { | ||
1097 | int i = 0; | ||
1098 | /* | ||
1099 | * Get rid of leading spaces. | ||
1100 | */ | ||
1101 | while (start[i] == ' ') | ||
1102 | i++; | ||
1103 | |||
1104 | if ((x - start) <= out_len) { | ||
1105 | strcpy(out_buf, (start + i)); | ||
1106 | *offset += (x - start) + 1; | ||
1107 | return 1; | ||
1108 | } | ||
1109 | } | ||
1110 | return 0; | ||
1111 | } | ||
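
A usage sketch for parse_ip_val_buffer() (hypothetical fragment, not part of the patch). The demo buffer is deliberately larger than the string so that, after the last element is consumed, the offset still points inside the array, much like the large fixed-size buffers the daemon passes in:

    static void demo_parse_ip_list(void)
    {
            char in[64] = "192.168.1.1;192.168.1.2";  /* remainder is zero-filled */
            char addr[INET6_ADDRSTRLEN];
            int offset = 0;

            /* Logs each address on its own line, then stops on the empty tail. */
            while (parse_ip_val_buffer(in, &offset, addr, sizeof(addr)))
                    syslog(LOG_INFO, "parsed address: %s", addr);
    }
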
1112 | |||
1113 | static int kvp_write_file(FILE *f, char *s1, char *s2, char *s3) | ||
1114 | { | ||
1115 | int ret; | ||
1116 | |||
1117 | ret = fprintf(f, "%s%s%s%s\n", s1, s2, "=", s3); | ||
1118 | |||
1119 | if (ret < 0) | ||
1120 | return HV_E_FAIL; | ||
1121 | |||
1122 | return 0; | ||
1123 | } | ||
1124 | |||
1125 | |||
1126 | static int process_ip_string(FILE *f, char *ip_string, int type) | ||
1127 | { | ||
1128 | int error = 0; | ||
1129 | char addr[INET6_ADDRSTRLEN]; | ||
1130 | int i = 0; | ||
1131 | int j = 0; | ||
1132 | char str[256]; | ||
1133 | char sub_str[10]; | ||
1134 | int offset = 0; | ||
1135 | |||
1136 | memset(addr, 0, sizeof(addr)); | ||
1137 | |||
1138 | while (parse_ip_val_buffer(ip_string, &offset, addr, | ||
1139 | (MAX_IP_ADDR_SIZE * 2))) { | ||
1140 | |||
1141 | sub_str[0] = 0; | ||
1142 | if (is_ipv4(addr)) { | ||
1143 | switch (type) { | ||
1144 | case IPADDR: | ||
1145 | snprintf(str, sizeof(str), "%s", "IPADDR"); | ||
1146 | break; | ||
1147 | case NETMASK: | ||
1148 | snprintf(str, sizeof(str), "%s", "NETMASK"); | ||
1149 | break; | ||
1150 | case GATEWAY: | ||
1151 | snprintf(str, sizeof(str), "%s", "GATEWAY"); | ||
1152 | break; | ||
1153 | case DNS: | ||
1154 | snprintf(str, sizeof(str), "%s", "DNS"); | ||
1155 | break; | ||
1156 | } | ||
1157 | if (i != 0) { | ||
1158 | if (type != DNS) { | ||
1159 | snprintf(sub_str, sizeof(sub_str), | ||
1160 | "_%d", i++); | ||
1161 | } else { | ||
1162 | snprintf(sub_str, sizeof(sub_str), | ||
1163 | "%d", ++i); | ||
1164 | } | ||
1165 | } else if (type == DNS) { | ||
1166 | snprintf(sub_str, sizeof(sub_str), "%d", ++i); | ||
1167 | } | ||
1168 | |||
1169 | |||
1170 | } else if (expand_ipv6(addr, type)) { | ||
1171 | switch (type) { | ||
1172 | case IPADDR: | ||
1173 | snprintf(str, sizeof(str), "%s", "IPV6ADDR"); | ||
1174 | break; | ||
1175 | case NETMASK: | ||
1176 | snprintf(str, sizeof(str), "%s", "IPV6NETMASK"); | ||
1177 | break; | ||
1178 | case GATEWAY: | ||
1179 | snprintf(str, sizeof(str), "%s", | ||
1180 | "IPV6_DEFAULTGW"); | ||
1181 | break; | ||
1182 | case DNS: | ||
1183 | snprintf(str, sizeof(str), "%s", "DNS"); | ||
1184 | break; | ||
1185 | } | ||
1186 | if ((j != 0) || (type == DNS)) { | ||
1187 | if (type != DNS) { | ||
1188 | snprintf(sub_str, sizeof(sub_str), | ||
1189 | "_%d", j++); | ||
1190 | } else { | ||
1191 | snprintf(sub_str, sizeof(sub_str), | ||
1192 | "%d", ++i); | ||
1193 | } | ||
1194 | } else if (type == DNS) { | ||
1195 | snprintf(sub_str, sizeof(sub_str), | ||
1196 | "%d", ++i); | ||
1197 | } | ||
1198 | } else { | ||
1199 | return HV_INVALIDARG; | ||
1200 | } | ||
1201 | |||
1202 | error = kvp_write_file(f, str, sub_str, addr); | ||
1203 | if (error) | ||
1204 | return error; | ||
1205 | memset(addr, 0, sizeof(addr)); | ||
1206 | } | ||
1207 | |||
1208 | return 0; | ||
1209 | } | ||
1210 | |||
1211 | static int kvp_set_ip_info(char *if_name, struct hv_kvp_ipaddr_value *new_val) | ||
1212 | { | ||
1213 | int error = 0; | ||
1214 | char if_file[128]; | ||
1215 | FILE *file; | ||
1216 | char cmd[512]; | ||
1217 | char *mac_addr; | ||
1218 | |||
1219 | /* | ||
1220 | * Set the configuration for the specified interface with | ||
1221 | * the information provided. Since there is no standard | ||
1222 | * way to configure an interface, we will have an external | ||
1223 | * script that does the job of configuring the interface and | ||
1224 | * flushing the configuration. | ||
1225 | * | ||
1226 | * The parameters passed to this external script are: | ||
1227 | * 1. A configuration file that has the specified configuration. | ||
1228 | * | ||
1229 | * We will embed the name of the interface in the configuration | ||
1230 | * file: ifcfg-ethx (where ethx is the interface name). | ||
1231 | * | ||
1232 | * The information provided here may be more than what is needed | ||
1233 | * in a given distro to configure the interface, so distros are free to | ||
1234 | * ignore information that may not be relevant. | ||
1235 | * | ||
1236 | * Here is the format of the ip configuration file: | ||
1237 | * | ||
1238 | * HWADDR=macaddr | ||
1239 | * IF_NAME=interface name | ||
1240 | * DHCP=yes (This is optional; if yes, DHCP is configured) | ||
1241 | * | ||
1242 | * IPADDR=ipaddr1 | ||
1243 | * IPADDR_1=ipaddr2 | ||
1244 | * IPADDR_x=ipaddry (where y = x + 1) | ||
1245 | * | ||
1246 | * NETMASK=netmask1 | ||
1247 | * NETMASK_x=netmasky (where y = x + 1) | ||
1248 | * | ||
1249 | * GATEWAY=ipaddr1 | ||
1250 | * GATEWAY_x=ipaddry (where y = x + 1) | ||
1251 | * | ||
1252 | * DNSx=ipaddrx (where first DNS address is tagged as DNS1 etc) | ||
1253 | * | ||
1254 | * IPV6 addresses will be tagged as IPV6ADDR, IPV6 gateway will be | ||
1255 | * tagged as IPV6_DEFAULTGW and IPV6 NETMASK will be tagged as | ||
1256 | * IPV6NETMASK. | ||
1257 | * | ||
1258 | * The host can specify multiple ipv4 and ipv6 addresses to be | ||
1259 | * configured for the interface. Furthermore, the configuration | ||
1260 | * needs to be persistent. A subsequent GET call on the interface | ||
1261 | * is expected to return the configuration that is set via the SET | ||
1262 | * call. | ||
1263 | */ | ||
1264 | |||
1265 | snprintf(if_file, sizeof(if_file), "%s%s%s", KVP_CONFIG_LOC, | ||
1266 | "hyperv/ifcfg-", if_name); | ||
1267 | |||
1268 | file = fopen(if_file, "w"); | ||
1269 | |||
1270 | if (file == NULL) { | ||
1271 | syslog(LOG_ERR, "Failed to open config file"); | ||
1272 | return HV_E_FAIL; | ||
1273 | } | ||
1274 | |||
1275 | /* | ||
1276 | * First write out the MAC address. | ||
1277 | */ | ||
1278 | |||
1279 | mac_addr = kvp_if_name_to_mac(if_name); | ||
1280 | if (mac_addr == NULL) { | ||
1281 | error = HV_E_FAIL; | ||
1282 | goto setval_error; | ||
1283 | } | ||
1284 | |||
1285 | error = kvp_write_file(file, "HWADDR", "", mac_addr); | ||
1286 | if (error) | ||
1287 | goto setval_error; | ||
1288 | |||
1289 | error = kvp_write_file(file, "IF_NAME", "", if_name); | ||
1290 | if (error) | ||
1291 | goto setval_error; | ||
1292 | |||
1293 | if (new_val->dhcp_enabled) { | ||
1294 | error = kvp_write_file(file, "DHCP", "", "yes"); | ||
1295 | if (error) | ||
1296 | goto setval_error; | ||
1297 | |||
1298 | /* | ||
1299 | * We are done. | ||
1300 | */ | ||
1301 | goto setval_done; | ||
1302 | } | ||
1303 | |||
1304 | /* | ||
1305 | * Write the configuration for ipaddress, netmask, gateway and | ||
1306 | * name servers. | ||
1307 | */ | ||
1308 | |||
1309 | error = process_ip_string(file, (char *)new_val->ip_addr, IPADDR); | ||
1310 | if (error) | ||
1311 | goto setval_error; | ||
1312 | |||
1313 | error = process_ip_string(file, (char *)new_val->sub_net, NETMASK); | ||
1314 | if (error) | ||
1315 | goto setval_error; | ||
1316 | |||
1317 | error = process_ip_string(file, (char *)new_val->gate_way, GATEWAY); | ||
1318 | if (error) | ||
1319 | goto setval_error; | ||
1320 | |||
1321 | error = process_ip_string(file, (char *)new_val->dns_addr, DNS); | ||
1322 | if (error) | ||
1323 | goto setval_error; | ||
1324 | |||
1325 | setval_done: | ||
1326 | free(mac_addr); | ||
1327 | fclose(file); | ||
1328 | |||
1329 | /* | ||
1330 | * Now that we have populated the configuration file, | ||
1331 | * invoke the external script to do its magic. | ||
1332 | */ | ||
1333 | |||
1334 | snprintf(cmd, sizeof(cmd), "%s %s", "hv_set_ifconfig", if_file); | ||
1335 | system(cmd); | ||
1336 | return 0; | ||
1337 | |||
1338 | setval_error: | ||
1339 | syslog(LOG_ERR, "Failed to write config file"); | ||
1340 | free(mac_addr); | ||
1341 | fclose(file); | ||
1342 | return error; | ||
1343 | } | ||
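
Putting the pieces together: for a host that pushes a single static IPv4 address, the file this function hands to the hv_set_ifconfig script would look roughly like the following, per the format described in the comment above (every value here is hypothetical):

    HWADDR=00:15:5D:AA:BB:CC
    IF_NAME=eth0
    IPADDR=192.168.1.10
    NETMASK=255.255.255.0
    GATEWAY=192.168.1.1
    DNS1=192.168.1.1
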
1344 | |||
1345 | |||
582 | static int | 1346 | static int |
583 | kvp_get_domain_name(char *buffer, int length) | 1347 | kvp_get_domain_name(char *buffer, int length) |
584 | { | 1348 | { |
@@ -646,6 +1410,10 @@ int main(void) | |||
646 | char *p; | 1410 | char *p; |
647 | char *key_value; | 1411 | char *key_value; |
648 | char *key_name; | 1412 | char *key_name; |
1413 | int op; | ||
1414 | int pool; | ||
1415 | char *if_name; | ||
1416 | struct hv_kvp_ipaddr_value *kvp_ip_val; | ||
649 | 1417 | ||
650 | daemon(1, 0); | 1418 | daemon(1, 0); |
651 | openlog("KVP", 0, LOG_USER); | 1419 | openlog("KVP", 0, LOG_USER); |
@@ -657,13 +1425,13 @@ int main(void) | |||
657 | 1425 | ||
658 | if (kvp_file_init()) { | 1426 | if (kvp_file_init()) { |
659 | syslog(LOG_ERR, "Failed to initialize the pools"); | 1427 | syslog(LOG_ERR, "Failed to initialize the pools"); |
660 | exit(-1); | 1428 | exit(EXIT_FAILURE); |
661 | } | 1429 | } |
662 | 1430 | ||
663 | fd = socket(AF_NETLINK, SOCK_DGRAM, NETLINK_CONNECTOR); | 1431 | fd = socket(AF_NETLINK, SOCK_DGRAM, NETLINK_CONNECTOR); |
664 | if (fd < 0) { | 1432 | if (fd < 0) { |
665 | syslog(LOG_ERR, "netlink socket creation failed; error:%d", fd); | 1433 | syslog(LOG_ERR, "netlink socket creation failed; error:%d", fd); |
666 | exit(-1); | 1434 | exit(EXIT_FAILURE); |
667 | } | 1435 | } |
668 | addr.nl_family = AF_NETLINK; | 1436 | addr.nl_family = AF_NETLINK; |
669 | addr.nl_pad = 0; | 1437 | addr.nl_pad = 0; |
@@ -675,7 +1443,7 @@ int main(void) | |||
675 | if (error < 0) { | 1443 | if (error < 0) { |
676 | syslog(LOG_ERR, "bind failed; error:%d", error); | 1444 | syslog(LOG_ERR, "bind failed; error:%d", error); |
677 | close(fd); | 1445 | close(fd); |
678 | exit(-1); | 1446 | exit(EXIT_FAILURE); |
679 | } | 1447 | } |
680 | sock_opt = addr.nl_groups; | 1448 | sock_opt = addr.nl_groups; |
681 | setsockopt(fd, 270, 1, &sock_opt, sizeof(sock_opt)); | 1449 | setsockopt(fd, 270, 1, &sock_opt, sizeof(sock_opt)); |
@@ -687,7 +1455,7 @@ int main(void) | |||
687 | message->id.val = CN_KVP_VAL; | 1455 | message->id.val = CN_KVP_VAL; |
688 | 1456 | ||
689 | hv_msg = (struct hv_kvp_msg *)message->data; | 1457 | hv_msg = (struct hv_kvp_msg *)message->data; |
690 | hv_msg->kvp_hdr.operation = KVP_OP_REGISTER; | 1458 | hv_msg->kvp_hdr.operation = KVP_OP_REGISTER1; |
691 | message->ack = 0; | 1459 | message->ack = 0; |
692 | message->len = sizeof(struct hv_kvp_msg); | 1460 | message->len = sizeof(struct hv_kvp_msg); |
693 | 1461 | ||
@@ -695,7 +1463,7 @@ int main(void) | |||
695 | if (len < 0) { | 1463 | if (len < 0) { |
696 | syslog(LOG_ERR, "netlink_send failed; error:%d", len); | 1464 | syslog(LOG_ERR, "netlink_send failed; error:%d", len); |
697 | close(fd); | 1465 | close(fd); |
698 | exit(-1); | 1466 | exit(EXIT_FAILURE); |
699 | } | 1467 | } |
700 | 1468 | ||
701 | pfd.fd = fd; | 1469 | pfd.fd = fd; |
@@ -721,12 +1489,21 @@ int main(void) | |||
721 | incoming_cn_msg = (struct cn_msg *)NLMSG_DATA(incoming_msg); | 1489 | incoming_cn_msg = (struct cn_msg *)NLMSG_DATA(incoming_msg); |
722 | hv_msg = (struct hv_kvp_msg *)incoming_cn_msg->data; | 1490 | hv_msg = (struct hv_kvp_msg *)incoming_cn_msg->data; |
723 | 1491 | ||
724 | switch (hv_msg->kvp_hdr.operation) { | 1492 | /* |
725 | case KVP_OP_REGISTER: | 1493 | * We will use the KVP header information to pass back |
1494 | * the error from this daemon. So, first copy the state | ||
1495 | * and set the error code to success. | ||
1496 | */ | ||
1497 | op = hv_msg->kvp_hdr.operation; | ||
1498 | pool = hv_msg->kvp_hdr.pool; | ||
1499 | hv_msg->error = HV_S_OK; | ||
1500 | |||
1501 | if ((in_hand_shake) && (op == KVP_OP_REGISTER1)) { | ||
726 | /* | 1502 | /* |
727 | * Driver is registering with us; stash away the version | 1503 | * Driver is registering with us; stash away the version |
728 | * information. | 1504 | * information. |
729 | */ | 1505 | */ |
1506 | in_hand_shake = 0; | ||
730 | p = (char *)hv_msg->body.kvp_register.version; | 1507 | p = (char *)hv_msg->body.kvp_register.version; |
731 | lic_version = malloc(strlen(p) + 1); | 1508 | lic_version = malloc(strlen(p) + 1); |
732 | if (lic_version) { | 1509 | if (lic_version) { |
@@ -737,44 +1514,82 @@ int main(void) | |||
737 | syslog(LOG_ERR, "malloc failed"); | 1514 | syslog(LOG_ERR, "malloc failed"); |
738 | } | 1515 | } |
739 | continue; | 1516 | continue; |
1517 | } | ||
740 | 1518 | ||
741 | /* | 1519 | switch (op) { |
742 | * The current protocol with the kernel component uses a | 1520 | case KVP_OP_GET_IP_INFO: |
743 | * NULL key name to pass an error condition. | 1521 | kvp_ip_val = &hv_msg->body.kvp_ip_val; |
744 | * For the SET, GET and DELETE operations, | 1522 | if_name = |
745 | * use the existing protocol to pass back error. | 1523 | kvp_mac_to_if_name((char *)kvp_ip_val->adapter_id); |
746 | */ | 1524 | |
1525 | if (if_name == NULL) { | ||
1526 | /* | ||
1527 | * We could not map the mac address to an | ||
1528 | * interface name; return error. | ||
1529 | */ | ||
1530 | hv_msg->error = HV_E_FAIL; | ||
1531 | break; | ||
1532 | } | ||
1533 | error = kvp_get_ip_info( | ||
1534 | 0, if_name, KVP_OP_GET_IP_INFO, | ||
1535 | kvp_ip_val, | ||
1536 | (MAX_IP_ADDR_SIZE * 2)); | ||
1537 | |||
1538 | if (error) | ||
1539 | hv_msg->error = error; | ||
1540 | |||
1541 | free(if_name); | ||
1542 | break; | ||
1543 | |||
1544 | case KVP_OP_SET_IP_INFO: | ||
1545 | kvp_ip_val = &hv_msg->body.kvp_ip_val; | ||
1546 | if_name = kvp_get_if_name( | ||
1547 | (char *)kvp_ip_val->adapter_id); | ||
1548 | if (if_name == NULL) { | ||
1549 | /* | ||
1550 | * We could not map the guid to an | ||
1551 | * interface name; return error. | ||
1552 | */ | ||
1553 | hv_msg->error = HV_GUID_NOTFOUND; | ||
1554 | break; | ||
1555 | } | ||
1556 | error = kvp_set_ip_info(if_name, kvp_ip_val); | ||
1557 | if (error) | ||
1558 | hv_msg->error = error; | ||
1559 | |||
1560 | free(if_name); | ||
1561 | break; | ||
747 | 1562 | ||
748 | case KVP_OP_SET: | 1563 | case KVP_OP_SET: |
749 | if (kvp_key_add_or_modify(hv_msg->kvp_hdr.pool, | 1564 | if (kvp_key_add_or_modify(pool, |
750 | hv_msg->body.kvp_set.data.key, | 1565 | hv_msg->body.kvp_set.data.key, |
751 | hv_msg->body.kvp_set.data.key_size, | 1566 | hv_msg->body.kvp_set.data.key_size, |
752 | hv_msg->body.kvp_set.data.value, | 1567 | hv_msg->body.kvp_set.data.value, |
753 | hv_msg->body.kvp_set.data.value_size)) | 1568 | hv_msg->body.kvp_set.data.value_size)) |
754 | strcpy(hv_msg->body.kvp_set.data.key, ""); | 1569 | hv_msg->error = HV_S_CONT; |
755 | break; | 1570 | break; |
756 | 1571 | ||
757 | case KVP_OP_GET: | 1572 | case KVP_OP_GET: |
758 | if (kvp_get_value(hv_msg->kvp_hdr.pool, | 1573 | if (kvp_get_value(pool, |
759 | hv_msg->body.kvp_set.data.key, | 1574 | hv_msg->body.kvp_set.data.key, |
760 | hv_msg->body.kvp_set.data.key_size, | 1575 | hv_msg->body.kvp_set.data.key_size, |
761 | hv_msg->body.kvp_set.data.value, | 1576 | hv_msg->body.kvp_set.data.value, |
762 | hv_msg->body.kvp_set.data.value_size)) | 1577 | hv_msg->body.kvp_set.data.value_size)) |
763 | strcpy(hv_msg->body.kvp_set.data.key, ""); | 1578 | hv_msg->error = HV_S_CONT; |
764 | break; | 1579 | break; |
765 | 1580 | ||
766 | case KVP_OP_DELETE: | 1581 | case KVP_OP_DELETE: |
767 | if (kvp_key_delete(hv_msg->kvp_hdr.pool, | 1582 | if (kvp_key_delete(pool, |
768 | hv_msg->body.kvp_delete.key, | 1583 | hv_msg->body.kvp_delete.key, |
769 | hv_msg->body.kvp_delete.key_size)) | 1584 | hv_msg->body.kvp_delete.key_size)) |
770 | strcpy(hv_msg->body.kvp_delete.key, ""); | 1585 | hv_msg->error = HV_S_CONT; |
771 | break; | 1586 | break; |
772 | 1587 | ||
773 | default: | 1588 | default: |
774 | break; | 1589 | break; |
775 | } | 1590 | } |
776 | 1591 | ||
777 | if (hv_msg->kvp_hdr.operation != KVP_OP_ENUMERATE) | 1592 | if (op != KVP_OP_ENUMERATE) |
778 | goto kvp_done; | 1593 | goto kvp_done; |
779 | 1594 | ||
780 | /* | 1595 | /* |
@@ -782,13 +1597,14 @@ int main(void) | |||
782 | * both the key and the value; if not read from the | 1597 | * both the key and the value; if not read from the |
783 | * appropriate pool. | 1598 | * appropriate pool. |
784 | */ | 1599 | */ |
785 | if (hv_msg->kvp_hdr.pool != KVP_POOL_AUTO) { | 1600 | if (pool != KVP_POOL_AUTO) { |
786 | kvp_pool_enumerate(hv_msg->kvp_hdr.pool, | 1601 | if (kvp_pool_enumerate(pool, |
787 | hv_msg->body.kvp_enum_data.index, | 1602 | hv_msg->body.kvp_enum_data.index, |
788 | hv_msg->body.kvp_enum_data.data.key, | 1603 | hv_msg->body.kvp_enum_data.data.key, |
789 | HV_KVP_EXCHANGE_MAX_KEY_SIZE, | 1604 | HV_KVP_EXCHANGE_MAX_KEY_SIZE, |
790 | hv_msg->body.kvp_enum_data.data.value, | 1605 | hv_msg->body.kvp_enum_data.data.value, |
791 | HV_KVP_EXCHANGE_MAX_VALUE_SIZE); | 1606 | HV_KVP_EXCHANGE_MAX_VALUE_SIZE)) |
1607 | hv_msg->error = HV_S_CONT; | ||
792 | goto kvp_done; | 1608 | goto kvp_done; |
793 | } | 1609 | } |
794 | 1610 | ||
@@ -807,13 +1623,13 @@ int main(void) | |||
807 | strcpy(key_value, lic_version); | 1623 | strcpy(key_value, lic_version); |
808 | break; | 1624 | break; |
809 | case NetworkAddressIPv4: | 1625 | case NetworkAddressIPv4: |
810 | kvp_get_ip_address(AF_INET, key_value, | 1626 | kvp_get_ip_info(AF_INET, NULL, KVP_OP_ENUMERATE, |
811 | HV_KVP_EXCHANGE_MAX_VALUE_SIZE); | 1627 | key_value, HV_KVP_EXCHANGE_MAX_VALUE_SIZE); |
812 | strcpy(key_name, "NetworkAddressIPv4"); | 1628 | strcpy(key_name, "NetworkAddressIPv4"); |
813 | break; | 1629 | break; |
814 | case NetworkAddressIPv6: | 1630 | case NetworkAddressIPv6: |
815 | kvp_get_ip_address(AF_INET6, key_value, | 1631 | kvp_get_ip_info(AF_INET6, NULL, KVP_OP_ENUMERATE, |
816 | HV_KVP_EXCHANGE_MAX_VALUE_SIZE); | 1632 | key_value, HV_KVP_EXCHANGE_MAX_VALUE_SIZE); |
817 | strcpy(key_name, "NetworkAddressIPv6"); | 1633 | strcpy(key_name, "NetworkAddressIPv6"); |
818 | break; | 1634 | break; |
819 | case OSBuildNumber: | 1635 | case OSBuildNumber: |
@@ -841,11 +1657,7 @@ int main(void) | |||
841 | strcpy(key_name, "ProcessorArchitecture"); | 1657 | strcpy(key_name, "ProcessorArchitecture"); |
842 | break; | 1658 | break; |
843 | default: | 1659 | default: |
844 | strcpy(key_value, "Unknown Key"); | 1660 | hv_msg->error = HV_S_CONT; |
845 | /* | ||
846 | * We use a null key name to terminate enumeration. | ||
847 | */ | ||
848 | strcpy(key_name, ""); | ||
849 | break; | 1661 | break; |
850 | } | 1662 | } |
851 | /* | 1663 | /* |
@@ -863,7 +1675,7 @@ kvp_done: | |||
863 | len = netlink_send(fd, incoming_cn_msg); | 1675 | len = netlink_send(fd, incoming_cn_msg); |
864 | if (len < 0) { | 1676 | if (len < 0) { |
865 | syslog(LOG_ERR, "net_link send failed; error:%d", len); | 1677 | syslog(LOG_ERR, "net_link send failed; error:%d", len); |
866 | exit(-1); | 1678 | exit(EXIT_FAILURE); |
867 | } | 1679 | } |
868 | } | 1680 | } |
869 | 1681 | ||
diff --git a/tools/hv/hv_set_ifconfig.sh b/tools/hv/hv_set_ifconfig.sh
new file mode 100755
index 000000000000..3e9427e08d80
--- /dev/null
+++ b/tools/hv/hv_set_ifconfig.sh
@@ -0,0 +1,68 @@ | |||
1 | #!/bin/bash | ||
2 | |||
3 | # This example script activates an interface based on the specified | ||
4 | # configuration. | ||
5 | # | ||
6 | # In the interest of keeping the KVP daemon code free of distro specific | ||
7 | # information, the kvp daemon code invokes this external script to configure | ||
8 | # the interface. | ||
9 | # | ||
10 | # The only argument to this script is the configuration file that is to | ||
11 | # be used to configure the interface. | ||
12 | # | ||
13 | # Each Distro is expected to implement this script in a distro specific | ||
14 | # fashion. For instance on Distros that ship with Network Manager enabled, | ||
15 | # this script can be based on the Network Manager APIs for configuring the | ||
16 | # interface. | ||
17 | # | ||
18 | # This example script is based on a RHEL environment. | ||
19 | # | ||
20 | # Here is the format of the ip configuration file: | ||
21 | # | ||
22 | # HWADDR=macaddr | ||
23 | # IF_NAME=interface name | ||
24 | # DHCP=yes (This is optional; if yes, DHCP is configured) | ||
25 | # | ||
26 | # IPADDR=ipaddr1 | ||
27 | # IPADDR_1=ipaddr2 | ||
28 | # IPADDR_x=ipaddry (where y = x + 1) | ||
29 | # | ||
30 | # NETMASK=netmask1 | ||
31 | # NETMASK_x=netmasky (where y = x + 1) | ||
32 | # | ||
33 | # GATEWAY=ipaddr1 | ||
34 | # GATEWAY_x=ipaddry (where y = x + 1) | ||
35 | # | ||
36 | # DNSx=ipaddrx (where first DNS address is tagged as DNS1 etc) | ||
37 | # | ||
38 | # IPV6 addresses will be tagged as IPV6ADDR, IPV6 gateway will be | ||
39 | # tagged as IPV6_DEFAULTGW and IPV6 NETMASK will be tagged as | ||
40 | # IPV6NETMASK. | ||
41 | # | ||
42 | # The host can specify multiple ipv4 and ipv6 addresses to be | ||
43 | # configured for the interface. Furthermore, the configuration | ||
44 | # needs to be persistent. A subsequent GET call on the interface | ||
45 | # is expected to return the configuration that is set via the SET | ||
46 | # call. | ||
47 | # | ||
48 | |||
49 | |||
50 | |||
51 | echo "IPV6INIT=yes" >> $1 | ||
52 | echo "NM_CONTROLLED=no" >> $1 | ||
53 | echo "PEERDNS=yes" >> $1 | ||
54 | echo "ONBOOT=yes" >> $1 | ||
55 | |||
56 | dhcp=$(grep "DHCP" $1 2>/dev/null) | ||
57 | if [ "$dhcp" != "" ]; | ||
58 | then | ||
59 | echo "BOOTPROTO=dhcp" >> $1; | ||
60 | fi | ||
61 | |||
62 | cp $1 /etc/sysconfig/network-scripts/ | ||
63 | |||
64 | |||
65 | interface=$(echo $1 | awk -F - '{ print $2 }') | ||
66 | |||
67 | /sbin/ifdown $interface 2>/dev/null | ||
68 | /sbin/ifup $interface 2>/dev/null | ||
diff --git a/tools/lib/traceevent/Makefile b/tools/lib/traceevent/Makefile
index 14131cb0522d..04d959fa0226 100644
--- a/tools/lib/traceevent/Makefile
+++ b/tools/lib/traceevent/Makefile
@@ -129,7 +129,7 @@ CFLAGS ?= -g -Wall | |||
129 | 129 | ||
130 | # Append required CFLAGS | 130 | # Append required CFLAGS |
131 | override CFLAGS += $(CONFIG_FLAGS) $(INCLUDES) $(PLUGIN_DIR_SQ) | 131 | override CFLAGS += $(CONFIG_FLAGS) $(INCLUDES) $(PLUGIN_DIR_SQ) |
132 | override CFLAGS += $(udis86-flags) | 132 | override CFLAGS += $(udis86-flags) -D_GNU_SOURCE |
133 | 133 | ||
134 | ifeq ($(VERBOSE),1) | 134 | ifeq ($(VERBOSE),1) |
135 | Q = | 135 | Q = |
diff --git a/tools/lib/traceevent/event-parse.c b/tools/lib/traceevent/event-parse.c
index 5f34aa371b56..47264b4652b9 100644
--- a/tools/lib/traceevent/event-parse.c
+++ b/tools/lib/traceevent/event-parse.c
@@ -24,13 +24,14 @@ | |||
24 | * Frederic Weisbecker gave his permission to relicense the code to | 24 | * Frederic Weisbecker gave his permission to relicense the code to |
25 | * the Lesser General Public License. | 25 | * the Lesser General Public License. |
26 | */ | 26 | */ |
27 | #define _GNU_SOURCE | ||
28 | #include <stdio.h> | 27 | #include <stdio.h> |
29 | #include <stdlib.h> | 28 | #include <stdlib.h> |
30 | #include <string.h> | 29 | #include <string.h> |
31 | #include <stdarg.h> | 30 | #include <stdarg.h> |
32 | #include <ctype.h> | 31 | #include <ctype.h> |
33 | #include <errno.h> | 32 | #include <errno.h> |
33 | #include <stdint.h> | ||
34 | #include <limits.h> | ||
34 | 35 | ||
35 | #include "event-parse.h" | 36 | #include "event-parse.h" |
36 | #include "event-utils.h" | 37 | #include "event-utils.h" |
@@ -117,14 +118,7 @@ void breakpoint(void) | |||
117 | 118 | ||
118 | struct print_arg *alloc_arg(void) | 119 | struct print_arg *alloc_arg(void) |
119 | { | 120 | { |
120 | struct print_arg *arg; | 121 | return calloc(1, sizeof(struct print_arg)); |
121 | |||
122 | arg = malloc_or_die(sizeof(*arg)); | ||
123 | if (!arg) | ||
124 | return NULL; | ||
125 | memset(arg, 0, sizeof(*arg)); | ||
126 | |||
127 | return arg; | ||
128 | } | 122 | } |
129 | 123 | ||
130 | struct cmdline { | 124 | struct cmdline { |
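
With malloc_or_die() gone, alloc_arg() reports failure by returning NULL, so callers are expected to check the result and propagate the error instead of exiting. A hypothetical caller-side pattern (not taken from this patch):

    struct print_arg *arg;

    arg = alloc_arg();
    if (!arg) {
            /* Out of memory: fail the current parse instead of dying. */
            return -1;
    }
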
@@ -158,7 +152,9 @@ static int cmdline_init(struct pevent *pevent) | |||
158 | struct cmdline *cmdlines; | 152 | struct cmdline *cmdlines; |
159 | int i; | 153 | int i; |
160 | 154 | ||
161 | cmdlines = malloc_or_die(sizeof(*cmdlines) * pevent->cmdline_count); | 155 | cmdlines = malloc(sizeof(*cmdlines) * pevent->cmdline_count); |
156 | if (!cmdlines) | ||
157 | return -1; | ||
162 | 158 | ||
163 | i = 0; | 159 | i = 0; |
164 | while (cmdlist) { | 160 | while (cmdlist) { |
@@ -186,8 +182,8 @@ static char *find_cmdline(struct pevent *pevent, int pid) | |||
186 | if (!pid) | 182 | if (!pid) |
187 | return "<idle>"; | 183 | return "<idle>"; |
188 | 184 | ||
189 | if (!pevent->cmdlines) | 185 | if (!pevent->cmdlines && cmdline_init(pevent)) |
190 | cmdline_init(pevent); | 186 | return "<not enough memory for cmdlines!>"; |
191 | 187 | ||
192 | key.pid = pid; | 188 | key.pid = pid; |
193 | 189 | ||
@@ -215,8 +211,8 @@ int pevent_pid_is_registered(struct pevent *pevent, int pid) | |||
215 | if (!pid) | 211 | if (!pid) |
216 | return 1; | 212 | return 1; |
217 | 213 | ||
218 | if (!pevent->cmdlines) | 214 | if (!pevent->cmdlines && cmdline_init(pevent)) |
219 | cmdline_init(pevent); | 215 | return 0; |
220 | 216 | ||
221 | key.pid = pid; | 217 | key.pid = pid; |
222 | 218 | ||
@@ -258,10 +254,14 @@ static int add_new_comm(struct pevent *pevent, const char *comm, int pid) | |||
258 | return -1; | 254 | return -1; |
259 | } | 255 | } |
260 | 256 | ||
261 | cmdlines[pevent->cmdline_count].pid = pid; | ||
262 | cmdlines[pevent->cmdline_count].comm = strdup(comm); | 257 | cmdlines[pevent->cmdline_count].comm = strdup(comm); |
263 | if (!cmdlines[pevent->cmdline_count].comm) | 258 | if (!cmdlines[pevent->cmdline_count].comm) { |
264 | die("malloc comm"); | 259 | free(cmdlines); |
260 | errno = ENOMEM; | ||
261 | return -1; | ||
262 | } | ||
263 | |||
264 | cmdlines[pevent->cmdline_count].pid = pid; | ||
265 | 265 | ||
266 | if (cmdlines[pevent->cmdline_count].comm) | 266 | if (cmdlines[pevent->cmdline_count].comm) |
267 | pevent->cmdline_count++; | 267 | pevent->cmdline_count++; |
@@ -288,10 +288,15 @@ int pevent_register_comm(struct pevent *pevent, const char *comm, int pid) | |||
288 | if (pevent->cmdlines) | 288 | if (pevent->cmdlines) |
289 | return add_new_comm(pevent, comm, pid); | 289 | return add_new_comm(pevent, comm, pid); |
290 | 290 | ||
291 | item = malloc_or_die(sizeof(*item)); | 291 | item = malloc(sizeof(*item)); |
292 | if (!item) | ||
293 | return -1; | ||
294 | |||
292 | item->comm = strdup(comm); | 295 | item->comm = strdup(comm); |
293 | if (!item->comm) | 296 | if (!item->comm) { |
294 | die("malloc comm"); | 297 | free(item); |
298 | return -1; | ||
299 | } | ||
295 | item->pid = pid; | 300 | item->pid = pid; |
296 | item->next = pevent->cmdlist; | 301 | item->next = pevent->cmdlist; |
297 | 302 | ||
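A minimal caller-side sketch (not from the patch) of the convention these hunks move to: allocation failures in pevent_register_comm() now come back as -1 with errno set instead of terminating the process through die(). The record_comm() helper below is a hypothetical example; only pevent_register_comm() and its signature are taken from the hunk above.

    /* Hypothetical caller of the function changed above. */
    #include <errno.h>
    #include <stdio.h>
    #include <string.h>

    #include "event-parse.h"

    static void record_comm(struct pevent *pevent, const char *comm, int pid)
    {
            /* Registration failure is reported but is no longer fatal. */
            if (pevent_register_comm(pevent, comm, pid) < 0)
                    fprintf(stderr, "cannot register comm '%s' for pid %d: %s\n",
                            comm, pid, strerror(errno));
    }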
@@ -355,7 +360,10 @@ static int func_map_init(struct pevent *pevent) | |||
355 | struct func_map *func_map; | 360 | struct func_map *func_map; |
356 | int i; | 361 | int i; |
357 | 362 | ||
358 | func_map = malloc_or_die(sizeof(*func_map) * (pevent->func_count + 1)); | 363 | func_map = malloc(sizeof(*func_map) * (pevent->func_count + 1)); |
364 | if (!func_map) | ||
365 | return -1; | ||
366 | |||
359 | funclist = pevent->funclist; | 367 | funclist = pevent->funclist; |
360 | 368 | ||
361 | i = 0; | 369 | i = 0; |
@@ -455,25 +463,36 @@ pevent_find_function_address(struct pevent *pevent, unsigned long long addr) | |||
455 | int pevent_register_function(struct pevent *pevent, char *func, | 463 | int pevent_register_function(struct pevent *pevent, char *func, |
456 | unsigned long long addr, char *mod) | 464 | unsigned long long addr, char *mod) |
457 | { | 465 | { |
458 | struct func_list *item; | 466 | struct func_list *item = malloc(sizeof(*item)); |
459 | 467 | ||
460 | item = malloc_or_die(sizeof(*item)); | 468 | if (!item) |
469 | return -1; | ||
461 | 470 | ||
462 | item->next = pevent->funclist; | 471 | item->next = pevent->funclist; |
463 | item->func = strdup(func); | 472 | item->func = strdup(func); |
464 | if (mod) | 473 | if (!item->func) |
474 | goto out_free; | ||
475 | |||
476 | if (mod) { | ||
465 | item->mod = strdup(mod); | 477 | item->mod = strdup(mod); |
466 | else | 478 | if (!item->mod) |
479 | goto out_free_func; | ||
480 | } else | ||
467 | item->mod = NULL; | 481 | item->mod = NULL; |
468 | item->addr = addr; | 482 | item->addr = addr; |
469 | 483 | ||
470 | if (!item->func || (mod && !item->mod)) | ||
471 | die("malloc func"); | ||
472 | |||
473 | pevent->funclist = item; | 484 | pevent->funclist = item; |
474 | pevent->func_count++; | 485 | pevent->func_count++; |
475 | 486 | ||
476 | return 0; | 487 | return 0; |
488 | |||
489 | out_free_func: | ||
490 | free(item->func); | ||
491 | item->func = NULL; | ||
492 | out_free: | ||
493 | free(item); | ||
494 | errno = ENOMEM; | ||
495 | return -1; | ||
477 | } | 496 | } |
478 | 497 | ||
479 | /** | 498 | /** |
@@ -524,14 +543,16 @@ static int printk_cmp(const void *a, const void *b) | |||
524 | return 0; | 543 | return 0; |
525 | } | 544 | } |
526 | 545 | ||
527 | static void printk_map_init(struct pevent *pevent) | 546 | static int printk_map_init(struct pevent *pevent) |
528 | { | 547 | { |
529 | struct printk_list *printklist; | 548 | struct printk_list *printklist; |
530 | struct printk_list *item; | 549 | struct printk_list *item; |
531 | struct printk_map *printk_map; | 550 | struct printk_map *printk_map; |
532 | int i; | 551 | int i; |
533 | 552 | ||
534 | printk_map = malloc_or_die(sizeof(*printk_map) * (pevent->printk_count + 1)); | 553 | printk_map = malloc(sizeof(*printk_map) * (pevent->printk_count + 1)); |
554 | if (!printk_map) | ||
555 | return -1; | ||
535 | 556 | ||
536 | printklist = pevent->printklist; | 557 | printklist = pevent->printklist; |
537 | 558 | ||
@@ -549,6 +570,8 @@ static void printk_map_init(struct pevent *pevent) | |||
549 | 570 | ||
550 | pevent->printk_map = printk_map; | 571 | pevent->printk_map = printk_map; |
551 | pevent->printklist = NULL; | 572 | pevent->printklist = NULL; |
573 | |||
574 | return 0; | ||
552 | } | 575 | } |
553 | 576 | ||
554 | static struct printk_map * | 577 | static struct printk_map * |
@@ -557,8 +580,8 @@ find_printk(struct pevent *pevent, unsigned long long addr) | |||
557 | struct printk_map *printk; | 580 | struct printk_map *printk; |
558 | struct printk_map key; | 581 | struct printk_map key; |
559 | 582 | ||
560 | if (!pevent->printk_map) | 583 | if (!pevent->printk_map && printk_map_init(pevent)) |
561 | printk_map_init(pevent); | 584 | return NULL; |
562 | 585 | ||
563 | key.addr = addr; | 586 | key.addr = addr; |
564 | 587 | ||
@@ -580,21 +603,27 @@ find_printk(struct pevent *pevent, unsigned long long addr) | |||
580 | int pevent_register_print_string(struct pevent *pevent, char *fmt, | 603 | int pevent_register_print_string(struct pevent *pevent, char *fmt, |
581 | unsigned long long addr) | 604 | unsigned long long addr) |
582 | { | 605 | { |
583 | struct printk_list *item; | 606 | struct printk_list *item = malloc(sizeof(*item)); |
584 | 607 | ||
585 | item = malloc_or_die(sizeof(*item)); | 608 | if (!item) |
609 | return -1; | ||
586 | 610 | ||
587 | item->next = pevent->printklist; | 611 | item->next = pevent->printklist; |
588 | item->printk = strdup(fmt); | ||
589 | item->addr = addr; | 612 | item->addr = addr; |
590 | 613 | ||
614 | item->printk = strdup(fmt); | ||
591 | if (!item->printk) | 615 | if (!item->printk) |
592 | die("malloc fmt"); | 616 | goto out_free; |
593 | 617 | ||
594 | pevent->printklist = item; | 618 | pevent->printklist = item; |
595 | pevent->printk_count++; | 619 | pevent->printk_count++; |
596 | 620 | ||
597 | return 0; | 621 | return 0; |
622 | |||
623 | out_free: | ||
624 | free(item); | ||
625 | errno = ENOMEM; | ||
626 | return -1; | ||
598 | } | 627 | } |
599 | 628 | ||
600 | /** | 629 | /** |
@@ -619,24 +648,18 @@ void pevent_print_printk(struct pevent *pevent) | |||
619 | 648 | ||
620 | static struct event_format *alloc_event(void) | 649 | static struct event_format *alloc_event(void) |
621 | { | 650 | { |
622 | struct event_format *event; | 651 | return calloc(1, sizeof(struct event_format)); |
623 | |||
624 | event = malloc(sizeof(*event)); | ||
625 | if (!event) | ||
626 | return NULL; | ||
627 | memset(event, 0, sizeof(*event)); | ||
628 | |||
629 | return event; | ||
630 | } | 652 | } |
631 | 653 | ||
632 | static void add_event(struct pevent *pevent, struct event_format *event) | 654 | static int add_event(struct pevent *pevent, struct event_format *event) |
633 | { | 655 | { |
634 | int i; | 656 | int i; |
657 | struct event_format **events = realloc(pevent->events, sizeof(event) * | ||
658 | (pevent->nr_events + 1)); | ||
659 | if (!events) | ||
660 | return -1; | ||
635 | 661 | ||
636 | pevent->events = realloc(pevent->events, sizeof(event) * | 662 | pevent->events = events; |
637 | (pevent->nr_events + 1)); | ||
638 | if (!pevent->events) | ||
639 | die("Can not allocate events"); | ||
640 | 663 | ||
641 | for (i = 0; i < pevent->nr_events; i++) { | 664 | for (i = 0; i < pevent->nr_events; i++) { |
642 | if (pevent->events[i]->id > event->id) | 665 | if (pevent->events[i]->id > event->id) |
@@ -651,6 +674,8 @@ static void add_event(struct pevent *pevent, struct event_format *event) | |||
651 | pevent->nr_events++; | 674 | pevent->nr_events++; |
652 | 675 | ||
653 | event->pevent = pevent; | 676 | event->pevent = pevent; |
677 | |||
678 | return 0; | ||
654 | } | 679 | } |
655 | 680 | ||
656 | static int event_item_type(enum event_type type) | 681 | static int event_item_type(enum event_type type) |
@@ -827,9 +852,9 @@ static enum event_type __read_token(char **tok) | |||
827 | switch (type) { | 852 | switch (type) { |
828 | case EVENT_NEWLINE: | 853 | case EVENT_NEWLINE: |
829 | case EVENT_DELIM: | 854 | case EVENT_DELIM: |
830 | *tok = malloc_or_die(2); | 855 | if (asprintf(tok, "%c", ch) < 0) |
831 | (*tok)[0] = ch; | 856 | return EVENT_ERROR; |
832 | (*tok)[1] = 0; | 857 | |
833 | return type; | 858 | return type; |
834 | 859 | ||
835 | case EVENT_OP: | 860 | case EVENT_OP: |
@@ -1240,8 +1265,10 @@ static int event_read_fields(struct event_format *event, struct format_field **f | |||
1240 | 1265 | ||
1241 | last_token = token; | 1266 | last_token = token; |
1242 | 1267 | ||
1243 | field = malloc_or_die(sizeof(*field)); | 1268 | field = calloc(1, sizeof(*field)); |
1244 | memset(field, 0, sizeof(*field)); | 1269 | if (!field) |
1270 | goto fail; | ||
1271 | |||
1245 | field->event = event; | 1272 | field->event = event; |
1246 | 1273 | ||
1247 | /* read the rest of the type */ | 1274 | /* read the rest of the type */ |
@@ -1282,7 +1309,7 @@ static int event_read_fields(struct event_format *event, struct format_field **f | |||
1282 | } | 1309 | } |
1283 | 1310 | ||
1284 | if (!field->type) { | 1311 | if (!field->type) { |
1285 | die("no type found"); | 1312 | do_warning("%s: no type found", __func__); |
1286 | goto fail; | 1313 | goto fail; |
1287 | } | 1314 | } |
1288 | field->name = last_token; | 1315 | field->name = last_token; |
@@ -1329,7 +1356,7 @@ static int event_read_fields(struct event_format *event, struct format_field **f | |||
1329 | free_token(token); | 1356 | free_token(token); |
1330 | type = read_token(&token); | 1357 | type = read_token(&token); |
1331 | if (type == EVENT_NONE) { | 1358 | if (type == EVENT_NONE) { |
1332 | die("failed to find token"); | 1359 | do_warning("failed to find token"); |
1333 | goto fail; | 1360 | goto fail; |
1334 | } | 1361 | } |
1335 | } | 1362 | } |
@@ -1538,6 +1565,14 @@ process_cond(struct event_format *event, struct print_arg *top, char **tok) | |||
1538 | left = alloc_arg(); | 1565 | left = alloc_arg(); |
1539 | right = alloc_arg(); | 1566 | right = alloc_arg(); |
1540 | 1567 | ||
1568 | if (!arg || !left || !right) { | ||
1569 | do_warning("%s: not enough memory!", __func__); | ||
1570 | /* arg will be freed at out_free */ | ||
1571 | free_arg(left); | ||
1572 | free_arg(right); | ||
1573 | goto out_free; | ||
1574 | } | ||
1575 | |||
1541 | arg->type = PRINT_OP; | 1576 | arg->type = PRINT_OP; |
1542 | arg->op.left = left; | 1577 | arg->op.left = left; |
1543 | arg->op.right = right; | 1578 | arg->op.right = right; |
@@ -1580,6 +1615,12 @@ process_array(struct event_format *event, struct print_arg *top, char **tok) | |||
1580 | char *token = NULL; | 1615 | char *token = NULL; |
1581 | 1616 | ||
1582 | arg = alloc_arg(); | 1617 | arg = alloc_arg(); |
1618 | if (!arg) { | ||
1619 | do_warning("%s: not enough memory!", __func__); | ||
1620 | /* '*tok' is set to top->op.op. No need to free. */ | ||
1621 | *tok = NULL; | ||
1622 | return EVENT_ERROR; | ||
1623 | } | ||
1583 | 1624 | ||
1584 | *tok = NULL; | 1625 | *tok = NULL; |
1585 | type = process_arg(event, arg, &token); | 1626 | type = process_arg(event, arg, &token); |
@@ -1595,8 +1636,7 @@ process_array(struct event_format *event, struct print_arg *top, char **tok) | |||
1595 | return type; | 1636 | return type; |
1596 | 1637 | ||
1597 | out_free: | 1638 | out_free: |
1598 | free_token(*tok); | 1639 | free_token(token); |
1599 | *tok = NULL; | ||
1600 | free_arg(arg); | 1640 | free_arg(arg); |
1601 | return EVENT_ERROR; | 1641 | return EVENT_ERROR; |
1602 | } | 1642 | } |
@@ -1682,7 +1722,7 @@ process_op(struct event_format *event, struct print_arg *arg, char **tok) | |||
1682 | if (arg->type == PRINT_OP && !arg->op.left) { | 1722 | if (arg->type == PRINT_OP && !arg->op.left) { |
1683 | /* handle single op */ | 1723 | /* handle single op */ |
1684 | if (token[1]) { | 1724 | if (token[1]) { |
1685 | die("bad op token %s", token); | 1725 | do_warning("bad op token %s", token); |
1686 | goto out_free; | 1726 | goto out_free; |
1687 | } | 1727 | } |
1688 | switch (token[0]) { | 1728 | switch (token[0]) { |
@@ -1699,10 +1739,16 @@ process_op(struct event_format *event, struct print_arg *arg, char **tok) | |||
1699 | 1739 | ||
1700 | /* make an empty left */ | 1740 | /* make an empty left */ |
1701 | left = alloc_arg(); | 1741 | left = alloc_arg(); |
1742 | if (!left) | ||
1743 | goto out_warn_free; | ||
1744 | |||
1702 | left->type = PRINT_NULL; | 1745 | left->type = PRINT_NULL; |
1703 | arg->op.left = left; | 1746 | arg->op.left = left; |
1704 | 1747 | ||
1705 | right = alloc_arg(); | 1748 | right = alloc_arg(); |
1749 | if (!right) | ||
1750 | goto out_warn_free; | ||
1751 | |||
1706 | arg->op.right = right; | 1752 | arg->op.right = right; |
1707 | 1753 | ||
1708 | /* do not free the token, it belongs to an op */ | 1754 | /* do not free the token, it belongs to an op */ |
@@ -1712,6 +1758,9 @@ process_op(struct event_format *event, struct print_arg *arg, char **tok) | |||
1712 | } else if (strcmp(token, "?") == 0) { | 1758 | } else if (strcmp(token, "?") == 0) { |
1713 | 1759 | ||
1714 | left = alloc_arg(); | 1760 | left = alloc_arg(); |
1761 | if (!left) | ||
1762 | goto out_warn_free; | ||
1763 | |||
1715 | /* copy the top arg to the left */ | 1764 | /* copy the top arg to the left */ |
1716 | *left = *arg; | 1765 | *left = *arg; |
1717 | 1766 | ||
@@ -1720,6 +1769,7 @@ process_op(struct event_format *event, struct print_arg *arg, char **tok) | |||
1720 | arg->op.left = left; | 1769 | arg->op.left = left; |
1721 | arg->op.prio = 0; | 1770 | arg->op.prio = 0; |
1722 | 1771 | ||
1772 | /* it will set arg->op.right */ | ||
1723 | type = process_cond(event, arg, tok); | 1773 | type = process_cond(event, arg, tok); |
1724 | 1774 | ||
1725 | } else if (strcmp(token, ">>") == 0 || | 1775 | } else if (strcmp(token, ">>") == 0 || |
@@ -1739,6 +1789,8 @@ process_op(struct event_format *event, struct print_arg *arg, char **tok) | |||
1739 | strcmp(token, "!=") == 0) { | 1789 | strcmp(token, "!=") == 0) { |
1740 | 1790 | ||
1741 | left = alloc_arg(); | 1791 | left = alloc_arg(); |
1792 | if (!left) | ||
1793 | goto out_warn_free; | ||
1742 | 1794 | ||
1743 | /* copy the top arg to the left */ | 1795 | /* copy the top arg to the left */ |
1744 | *left = *arg; | 1796 | *left = *arg; |
@@ -1746,6 +1798,7 @@ process_op(struct event_format *event, struct print_arg *arg, char **tok) | |||
1746 | arg->type = PRINT_OP; | 1798 | arg->type = PRINT_OP; |
1747 | arg->op.op = token; | 1799 | arg->op.op = token; |
1748 | arg->op.left = left; | 1800 | arg->op.left = left; |
1801 | arg->op.right = NULL; | ||
1749 | 1802 | ||
1750 | if (set_op_prio(arg) == -1) { | 1803 | if (set_op_prio(arg) == -1) { |
1751 | event->flags |= EVENT_FL_FAILED; | 1804 | event->flags |= EVENT_FL_FAILED; |
@@ -1762,12 +1815,14 @@ process_op(struct event_format *event, struct print_arg *arg, char **tok) | |||
1762 | type == EVENT_DELIM && (strcmp(token, ")") == 0)) { | 1815 | type == EVENT_DELIM && (strcmp(token, ")") == 0)) { |
1763 | char *new_atom; | 1816 | char *new_atom; |
1764 | 1817 | ||
1765 | if (left->type != PRINT_ATOM) | 1818 | if (left->type != PRINT_ATOM) { |
1766 | die("bad pointer type"); | 1819 | do_warning("bad pointer type"); |
1820 | goto out_free; | ||
1821 | } | ||
1767 | new_atom = realloc(left->atom.atom, | 1822 | new_atom = realloc(left->atom.atom, |
1768 | strlen(left->atom.atom) + 3); | 1823 | strlen(left->atom.atom) + 3); |
1769 | if (!new_atom) | 1824 | if (!new_atom) |
1770 | goto out_free; | 1825 | goto out_warn_free; |
1771 | 1826 | ||
1772 | left->atom.atom = new_atom; | 1827 | left->atom.atom = new_atom; |
1773 | strcat(left->atom.atom, " *"); | 1828 | strcat(left->atom.atom, " *"); |
@@ -1779,12 +1834,18 @@ process_op(struct event_format *event, struct print_arg *arg, char **tok) | |||
1779 | } | 1834 | } |
1780 | 1835 | ||
1781 | right = alloc_arg(); | 1836 | right = alloc_arg(); |
1837 | if (!right) | ||
1838 | goto out_warn_free; | ||
1839 | |||
1782 | type = process_arg_token(event, right, tok, type); | 1840 | type = process_arg_token(event, right, tok, type); |
1783 | arg->op.right = right; | 1841 | arg->op.right = right; |
1784 | 1842 | ||
1785 | } else if (strcmp(token, "[") == 0) { | 1843 | } else if (strcmp(token, "[") == 0) { |
1786 | 1844 | ||
1787 | left = alloc_arg(); | 1845 | left = alloc_arg(); |
1846 | if (!left) | ||
1847 | goto out_warn_free; | ||
1848 | |||
1788 | *left = *arg; | 1849 | *left = *arg; |
1789 | 1850 | ||
1790 | arg->type = PRINT_OP; | 1851 | arg->type = PRINT_OP; |
@@ -1793,6 +1854,7 @@ process_op(struct event_format *event, struct print_arg *arg, char **tok) | |||
1793 | 1854 | ||
1794 | arg->op.prio = 0; | 1855 | arg->op.prio = 0; |
1795 | 1856 | ||
1857 | /* it will set arg->op.right */ | ||
1796 | type = process_array(event, arg, tok); | 1858 | type = process_array(event, arg, tok); |
1797 | 1859 | ||
1798 | } else { | 1860 | } else { |
@@ -1816,14 +1878,16 @@ process_op(struct event_format *event, struct print_arg *arg, char **tok) | |||
1816 | 1878 | ||
1817 | return type; | 1879 | return type; |
1818 | 1880 | ||
1819 | out_free: | 1881 | out_warn_free: |
1882 | do_warning("%s: not enough memory!", __func__); | ||
1883 | out_free: | ||
1820 | free_token(token); | 1884 | free_token(token); |
1821 | *tok = NULL; | 1885 | *tok = NULL; |
1822 | return EVENT_ERROR; | 1886 | return EVENT_ERROR; |
1823 | } | 1887 | } |
1824 | 1888 | ||
1825 | static enum event_type | 1889 | static enum event_type |
1826 | process_entry(struct event_format *event __unused, struct print_arg *arg, | 1890 | process_entry(struct event_format *event __maybe_unused, struct print_arg *arg, |
1827 | char **tok) | 1891 | char **tok) |
1828 | { | 1892 | { |
1829 | enum event_type type; | 1893 | enum event_type type; |
@@ -1880,7 +1944,11 @@ eval_type_str(unsigned long long val, const char *type, int pointer) | |||
1880 | return val; | 1944 | return val; |
1881 | } | 1945 | } |
1882 | 1946 | ||
1883 | ref = malloc_or_die(len); | 1947 | ref = malloc(len); |
1948 | if (!ref) { | ||
1949 | do_warning("%s: not enough memory!", __func__); | ||
1950 | return val; | ||
1951 | } | ||
1884 | memcpy(ref, type, len); | 1952 | memcpy(ref, type, len); |
1885 | 1953 | ||
1886 | /* chop off the " *" */ | 1954 | /* chop off the " *" */ |
@@ -1957,8 +2025,10 @@ eval_type_str(unsigned long long val, const char *type, int pointer) | |||
1957 | static unsigned long long | 2025 | static unsigned long long |
1958 | eval_type(unsigned long long val, struct print_arg *arg, int pointer) | 2026 | eval_type(unsigned long long val, struct print_arg *arg, int pointer) |
1959 | { | 2027 | { |
1960 | if (arg->type != PRINT_TYPE) | 2028 | if (arg->type != PRINT_TYPE) { |
1961 | die("expected type argument"); | 2029 | do_warning("expected type argument"); |
2030 | return 0; | ||
2031 | } | ||
1962 | 2032 | ||
1963 | return eval_type_str(val, arg->typecast.type, pointer); | 2033 | return eval_type_str(val, arg->typecast.type, pointer); |
1964 | } | 2034 | } |
@@ -2143,7 +2213,7 @@ static char *arg_eval (struct print_arg *arg) | |||
2143 | case PRINT_STRING: | 2213 | case PRINT_STRING: |
2144 | case PRINT_BSTRING: | 2214 | case PRINT_BSTRING: |
2145 | default: | 2215 | default: |
2146 | die("invalid eval type %d", arg->type); | 2216 | do_warning("invalid eval type %d", arg->type); |
2147 | break; | 2217 | break; |
2148 | } | 2218 | } |
2149 | 2219 | ||
@@ -2166,6 +2236,8 @@ process_fields(struct event_format *event, struct print_flag_sym **list, char ** | |||
2166 | break; | 2236 | break; |
2167 | 2237 | ||
2168 | arg = alloc_arg(); | 2238 | arg = alloc_arg(); |
2239 | if (!arg) | ||
2240 | goto out_free; | ||
2169 | 2241 | ||
2170 | free_token(token); | 2242 | free_token(token); |
2171 | type = process_arg(event, arg, &token); | 2243 | type = process_arg(event, arg, &token); |
@@ -2179,30 +2251,33 @@ process_fields(struct event_format *event, struct print_flag_sym **list, char ** | |||
2179 | if (test_type_token(type, token, EVENT_DELIM, ",")) | 2251 | if (test_type_token(type, token, EVENT_DELIM, ",")) |
2180 | goto out_free; | 2252 | goto out_free; |
2181 | 2253 | ||
2182 | field = malloc_or_die(sizeof(*field)); | 2254 | field = calloc(1, sizeof(*field)); |
2183 | memset(field, 0, sizeof(*field)); | 2255 | if (!field) |
2256 | goto out_free; | ||
2184 | 2257 | ||
2185 | value = arg_eval(arg); | 2258 | value = arg_eval(arg); |
2186 | if (value == NULL) | 2259 | if (value == NULL) |
2187 | goto out_free; | 2260 | goto out_free_field; |
2188 | field->value = strdup(value); | 2261 | field->value = strdup(value); |
2189 | if (field->value == NULL) | 2262 | if (field->value == NULL) |
2190 | goto out_free; | 2263 | goto out_free_field; |
2191 | 2264 | ||
2192 | free_arg(arg); | 2265 | free_arg(arg); |
2193 | arg = alloc_arg(); | 2266 | arg = alloc_arg(); |
2267 | if (!arg) | ||
2268 | goto out_free; | ||
2194 | 2269 | ||
2195 | free_token(token); | 2270 | free_token(token); |
2196 | type = process_arg(event, arg, &token); | 2271 | type = process_arg(event, arg, &token); |
2197 | if (test_type_token(type, token, EVENT_OP, "}")) | 2272 | if (test_type_token(type, token, EVENT_OP, "}")) |
2198 | goto out_free; | 2273 | goto out_free_field; |
2199 | 2274 | ||
2200 | value = arg_eval(arg); | 2275 | value = arg_eval(arg); |
2201 | if (value == NULL) | 2276 | if (value == NULL) |
2202 | goto out_free; | 2277 | goto out_free_field; |
2203 | field->str = strdup(value); | 2278 | field->str = strdup(value); |
2204 | if (field->str == NULL) | 2279 | if (field->str == NULL) |
2205 | goto out_free; | 2280 | goto out_free_field; |
2206 | free_arg(arg); | 2281 | free_arg(arg); |
2207 | arg = NULL; | 2282 | arg = NULL; |
2208 | 2283 | ||
@@ -2216,6 +2291,8 @@ process_fields(struct event_format *event, struct print_flag_sym **list, char ** | |||
2216 | *tok = token; | 2291 | *tok = token; |
2217 | return type; | 2292 | return type; |
2218 | 2293 | ||
2294 | out_free_field: | ||
2295 | free_flag_sym(field); | ||
2219 | out_free: | 2296 | out_free: |
2220 | free_arg(arg); | 2297 | free_arg(arg); |
2221 | free_token(token); | 2298 | free_token(token); |
@@ -2235,6 +2312,10 @@ process_flags(struct event_format *event, struct print_arg *arg, char **tok) | |||
2235 | arg->type = PRINT_FLAGS; | 2312 | arg->type = PRINT_FLAGS; |
2236 | 2313 | ||
2237 | field = alloc_arg(); | 2314 | field = alloc_arg(); |
2315 | if (!field) { | ||
2316 | do_warning("%s: not enough memory!", __func__); | ||
2317 | goto out_free; | ||
2318 | } | ||
2238 | 2319 | ||
2239 | type = process_arg(event, field, &token); | 2320 | type = process_arg(event, field, &token); |
2240 | 2321 | ||
@@ -2243,7 +2324,7 @@ process_flags(struct event_format *event, struct print_arg *arg, char **tok) | |||
2243 | type = process_op(event, field, &token); | 2324 | type = process_op(event, field, &token); |
2244 | 2325 | ||
2245 | if (test_type_token(type, token, EVENT_DELIM, ",")) | 2326 | if (test_type_token(type, token, EVENT_DELIM, ",")) |
2246 | goto out_free; | 2327 | goto out_free_field; |
2247 | free_token(token); | 2328 | free_token(token); |
2248 | 2329 | ||
2249 | arg->flags.field = field; | 2330 | arg->flags.field = field; |
@@ -2265,7 +2346,9 @@ process_flags(struct event_format *event, struct print_arg *arg, char **tok) | |||
2265 | type = read_token_item(tok); | 2346 | type = read_token_item(tok); |
2266 | return type; | 2347 | return type; |
2267 | 2348 | ||
2268 | out_free: | 2349 | out_free_field: |
2350 | free_arg(field); | ||
2351 | out_free: | ||
2269 | free_token(token); | 2352 | free_token(token); |
2270 | *tok = NULL; | 2353 | *tok = NULL; |
2271 | return EVENT_ERROR; | 2354 | return EVENT_ERROR; |
@@ -2282,10 +2365,14 @@ process_symbols(struct event_format *event, struct print_arg *arg, char **tok) | |||
2282 | arg->type = PRINT_SYMBOL; | 2365 | arg->type = PRINT_SYMBOL; |
2283 | 2366 | ||
2284 | field = alloc_arg(); | 2367 | field = alloc_arg(); |
2368 | if (!field) { | ||
2369 | do_warning("%s: not enough memory!", __func__); | ||
2370 | goto out_free; | ||
2371 | } | ||
2285 | 2372 | ||
2286 | type = process_arg(event, field, &token); | 2373 | type = process_arg(event, field, &token); |
2287 | if (test_type_token(type, token, EVENT_DELIM, ",")) | 2374 | if (test_type_token(type, token, EVENT_DELIM, ",")) |
2288 | goto out_free; | 2375 | goto out_free_field; |
2289 | 2376 | ||
2290 | arg->symbol.field = field; | 2377 | arg->symbol.field = field; |
2291 | 2378 | ||
@@ -2297,7 +2384,9 @@ process_symbols(struct event_format *event, struct print_arg *arg, char **tok) | |||
2297 | type = read_token_item(tok); | 2384 | type = read_token_item(tok); |
2298 | return type; | 2385 | return type; |
2299 | 2386 | ||
2300 | out_free: | 2387 | out_free_field: |
2388 | free_arg(field); | ||
2389 | out_free: | ||
2301 | free_token(token); | 2390 | free_token(token); |
2302 | *tok = NULL; | 2391 | *tok = NULL; |
2303 | return EVENT_ERROR; | 2392 | return EVENT_ERROR; |
@@ -2314,6 +2403,11 @@ process_hex(struct event_format *event, struct print_arg *arg, char **tok) | |||
2314 | arg->type = PRINT_HEX; | 2403 | arg->type = PRINT_HEX; |
2315 | 2404 | ||
2316 | field = alloc_arg(); | 2405 | field = alloc_arg(); |
2406 | if (!field) { | ||
2407 | do_warning("%s: not enough memory!", __func__); | ||
2408 | goto out_free; | ||
2409 | } | ||
2410 | |||
2317 | type = process_arg(event, field, &token); | 2411 | type = process_arg(event, field, &token); |
2318 | 2412 | ||
2319 | if (test_type_token(type, token, EVENT_DELIM, ",")) | 2413 | if (test_type_token(type, token, EVENT_DELIM, ",")) |
@@ -2324,6 +2418,12 @@ process_hex(struct event_format *event, struct print_arg *arg, char **tok) | |||
2324 | free_token(token); | 2418 | free_token(token); |
2325 | 2419 | ||
2326 | field = alloc_arg(); | 2420 | field = alloc_arg(); |
2421 | if (!field) { | ||
2422 | do_warning("%s: not enough memory!", __func__); | ||
2423 | *tok = NULL; | ||
2424 | return EVENT_ERROR; | ||
2425 | } | ||
2426 | |||
2327 | type = process_arg(event, field, &token); | 2427 | type = process_arg(event, field, &token); |
2328 | 2428 | ||
2329 | if (test_type_token(type, token, EVENT_DELIM, ")")) | 2429 | if (test_type_token(type, token, EVENT_DELIM, ")")) |
@@ -2381,6 +2481,12 @@ process_dynamic_array(struct event_format *event, struct print_arg *arg, char ** | |||
2381 | 2481 | ||
2382 | free_token(token); | 2482 | free_token(token); |
2383 | arg = alloc_arg(); | 2483 | arg = alloc_arg(); |
2484 | if (!arg) { | ||
2485 | do_warning("%s: not enough memory!", __func__); | ||
2486 | *tok = NULL; | ||
2487 | return EVENT_ERROR; | ||
2488 | } | ||
2489 | |||
2384 | type = process_arg(event, arg, &token); | 2490 | type = process_arg(event, arg, &token); |
2385 | if (type == EVENT_ERROR) | 2491 | if (type == EVENT_ERROR) |
2386 | goto out_free_arg; | 2492 | goto out_free_arg; |
@@ -2434,10 +2540,16 @@ process_paren(struct event_format *event, struct print_arg *arg, char **tok) | |||
2434 | /* make this a typecast and continue */ | 2540 | /* make this a typecast and continue */ |
2435 | 2541 | ||
2436 | /* previous must be an atom */ | 2542 | /* previous must be an atom */ |
2437 | if (arg->type != PRINT_ATOM) | 2543 | if (arg->type != PRINT_ATOM) { |
2438 | die("previous needed to be PRINT_ATOM"); | 2544 | do_warning("previous needed to be PRINT_ATOM"); |
2545 | goto out_free; | ||
2546 | } | ||
2439 | 2547 | ||
2440 | item_arg = alloc_arg(); | 2548 | item_arg = alloc_arg(); |
2549 | if (!item_arg) { | ||
2550 | do_warning("%s: not enough memory!", __func__); | ||
2551 | goto out_free; | ||
2552 | } | ||
2441 | 2553 | ||
2442 | arg->type = PRINT_TYPE; | 2554 | arg->type = PRINT_TYPE; |
2443 | arg->typecast.type = arg->atom.atom; | 2555 | arg->typecast.type = arg->atom.atom; |
@@ -2457,7 +2569,8 @@ process_paren(struct event_format *event, struct print_arg *arg, char **tok) | |||
2457 | 2569 | ||
2458 | 2570 | ||
2459 | static enum event_type | 2571 | static enum event_type |
2460 | process_str(struct event_format *event __unused, struct print_arg *arg, char **tok) | 2572 | process_str(struct event_format *event __maybe_unused, struct print_arg *arg, |
2573 | char **tok) | ||
2461 | { | 2574 | { |
2462 | enum event_type type; | 2575 | enum event_type type; |
2463 | char *token; | 2576 | char *token; |
@@ -2532,6 +2645,11 @@ process_func_handler(struct event_format *event, struct pevent_function_handler | |||
2532 | next_arg = &(arg->func.args); | 2645 | next_arg = &(arg->func.args); |
2533 | for (i = 0; i < func->nr_args; i++) { | 2646 | for (i = 0; i < func->nr_args; i++) { |
2534 | farg = alloc_arg(); | 2647 | farg = alloc_arg(); |
2648 | if (!farg) { | ||
2649 | do_warning("%s: not enough memory!", __func__); | ||
2650 | return EVENT_ERROR; | ||
2651 | } | ||
2652 | |||
2535 | type = process_arg(event, farg, &token); | 2653 | type = process_arg(event, farg, &token); |
2536 | if (i < (func->nr_args - 1)) | 2654 | if (i < (func->nr_args - 1)) |
2537 | test = ","; | 2655 | test = ","; |
@@ -2676,7 +2794,8 @@ process_arg_token(struct event_format *event, struct print_arg *arg, | |||
2676 | 2794 | ||
2677 | case EVENT_ERROR ... EVENT_NEWLINE: | 2795 | case EVENT_ERROR ... EVENT_NEWLINE: |
2678 | default: | 2796 | default: |
2679 | die("unexpected type %d", type); | 2797 | do_warning("unexpected type %d", type); |
2798 | return EVENT_ERROR; | ||
2680 | } | 2799 | } |
2681 | *tok = token; | 2800 | *tok = token; |
2682 | 2801 | ||
@@ -2697,6 +2816,10 @@ static int event_read_print_args(struct event_format *event, struct print_arg ** | |||
2697 | } | 2816 | } |
2698 | 2817 | ||
2699 | arg = alloc_arg(); | 2818 | arg = alloc_arg(); |
2819 | if (!arg) { | ||
2820 | do_warning("%s: not enough memory!", __func__); | ||
2821 | return -1; | ||
2822 | } | ||
2700 | 2823 | ||
2701 | type = process_arg(event, arg, &token); | 2824 | type = process_arg(event, arg, &token); |
2702 | 2825 | ||
@@ -2768,10 +2891,8 @@ static int event_read_print(struct event_format *event) | |||
2768 | if (type == EVENT_DQUOTE) { | 2891 | if (type == EVENT_DQUOTE) { |
2769 | char *cat; | 2892 | char *cat; |
2770 | 2893 | ||
2771 | cat = malloc_or_die(strlen(event->print_fmt.format) + | 2894 | if (asprintf(&cat, "%s%s", event->print_fmt.format, token) < 0) |
2772 | strlen(token) + 1); | 2895 | goto fail; |
2773 | strcpy(cat, event->print_fmt.format); | ||
2774 | strcat(cat, token); | ||
2775 | free_token(token); | 2896 | free_token(token); |
2776 | free_token(event->print_fmt.format); | 2897 | free_token(event->print_fmt.format); |
2777 | event->print_fmt.format = NULL; | 2898 | event->print_fmt.format = NULL; |
@@ -2925,8 +3046,10 @@ static int get_common_info(struct pevent *pevent, | |||
2925 | * All events should have the same common elements. | 3046 | * All events should have the same common elements. |
2926 | * Pick any event to find where the type is; | 3047 | * Pick any event to find where the type is; |
2927 | */ | 3048 | */ |
2928 | if (!pevent->events) | 3049 | if (!pevent->events) { |
2929 | die("no event_list!"); | 3050 | do_warning("no event_list!"); |
3051 | return -1; | ||
3052 | } | ||
2930 | 3053 | ||
2931 | event = pevent->events[0]; | 3054 | event = pevent->events[0]; |
2932 | field = pevent_find_common_field(event, type); | 3055 | field = pevent_find_common_field(event, type); |
@@ -3084,7 +3207,8 @@ eval_num_arg(void *data, int size, struct event_format *event, struct print_arg | |||
3084 | if (!arg->field.field) { | 3207 | if (!arg->field.field) { |
3085 | arg->field.field = pevent_find_any_field(event, arg->field.name); | 3208 | arg->field.field = pevent_find_any_field(event, arg->field.name); |
3086 | if (!arg->field.field) | 3209 | if (!arg->field.field) |
3087 | die("field %s not found", arg->field.name); | 3210 | goto out_warning_field; |
3211 | |||
3088 | } | 3212 | } |
3089 | /* must be a number */ | 3213 | /* must be a number */ |
3090 | val = pevent_read_number(pevent, data + arg->field.field->offset, | 3214 | val = pevent_read_number(pevent, data + arg->field.field->offset, |
@@ -3145,8 +3269,10 @@ eval_num_arg(void *data, int size, struct event_format *event, struct print_arg | |||
3145 | if (!larg->field.field) { | 3269 | if (!larg->field.field) { |
3146 | larg->field.field = | 3270 | larg->field.field = |
3147 | pevent_find_any_field(event, larg->field.name); | 3271 | pevent_find_any_field(event, larg->field.name); |
3148 | if (!larg->field.field) | 3272 | if (!larg->field.field) { |
3149 | die("field %s not found", larg->field.name); | 3273 | arg = larg; |
3274 | goto out_warning_field; | ||
3275 | } | ||
3150 | } | 3276 | } |
3151 | field_size = larg->field.field->elementsize; | 3277 | field_size = larg->field.field->elementsize; |
3152 | offset = larg->field.field->offset + | 3278 | offset = larg->field.field->offset + |
@@ -3182,7 +3308,7 @@ eval_num_arg(void *data, int size, struct event_format *event, struct print_arg | |||
3182 | val = left != right; | 3308 | val = left != right; |
3183 | break; | 3309 | break; |
3184 | default: | 3310 | default: |
3185 | die("unknown op '%s'", arg->op.op); | 3311 | goto out_warning_op; |
3186 | } | 3312 | } |
3187 | break; | 3313 | break; |
3188 | case '~': | 3314 | case '~': |
@@ -3212,7 +3338,7 @@ eval_num_arg(void *data, int size, struct event_format *event, struct print_arg | |||
3212 | val = left <= right; | 3338 | val = left <= right; |
3213 | break; | 3339 | break; |
3214 | default: | 3340 | default: |
3215 | die("unknown op '%s'", arg->op.op); | 3341 | goto out_warning_op; |
3216 | } | 3342 | } |
3217 | break; | 3343 | break; |
3218 | case '>': | 3344 | case '>': |
@@ -3227,12 +3353,13 @@ eval_num_arg(void *data, int size, struct event_format *event, struct print_arg | |||
3227 | val = left >= right; | 3353 | val = left >= right; |
3228 | break; | 3354 | break; |
3229 | default: | 3355 | default: |
3230 | die("unknown op '%s'", arg->op.op); | 3356 | goto out_warning_op; |
3231 | } | 3357 | } |
3232 | break; | 3358 | break; |
3233 | case '=': | 3359 | case '=': |
3234 | if (arg->op.op[1] != '=') | 3360 | if (arg->op.op[1] != '=') |
3235 | die("unknown op '%s'", arg->op.op); | 3361 | goto out_warning_op; |
3362 | |||
3236 | val = left == right; | 3363 | val = left == right; |
3237 | break; | 3364 | break; |
3238 | case '-': | 3365 | case '-': |
@@ -3248,13 +3375,21 @@ eval_num_arg(void *data, int size, struct event_format *event, struct print_arg | |||
3248 | val = left * right; | 3375 | val = left * right; |
3249 | break; | 3376 | break; |
3250 | default: | 3377 | default: |
3251 | die("unknown op '%s'", arg->op.op); | 3378 | goto out_warning_op; |
3252 | } | 3379 | } |
3253 | break; | 3380 | break; |
3254 | default: /* not sure what to do there */ | 3381 | default: /* not sure what to do there */ |
3255 | return 0; | 3382 | return 0; |
3256 | } | 3383 | } |
3257 | return val; | 3384 | return val; |
3385 | |||
3386 | out_warning_op: | ||
3387 | do_warning("%s: unknown op '%s'", __func__, arg->op.op); | ||
3388 | return 0; | ||
3389 | |||
3390 | out_warning_field: | ||
3391 | do_warning("%s: field %s not found", __func__, arg->field.name); | ||
3392 | return 0; | ||
3258 | } | 3393 | } |
3259 | 3394 | ||
3260 | struct flag { | 3395 | struct flag { |
@@ -3331,8 +3466,10 @@ static void print_str_arg(struct trace_seq *s, void *data, int size, | |||
3331 | field = arg->field.field; | 3466 | field = arg->field.field; |
3332 | if (!field) { | 3467 | if (!field) { |
3333 | field = pevent_find_any_field(event, arg->field.name); | 3468 | field = pevent_find_any_field(event, arg->field.name); |
3334 | if (!field) | 3469 | if (!field) { |
3335 | die("field %s not found", arg->field.name); | 3470 | str = arg->field.name; |
3471 | goto out_warning_field; | ||
3472 | } | ||
3336 | arg->field.field = field; | 3473 | arg->field.field = field; |
3337 | } | 3474 | } |
3338 | /* Zero sized fields, mean the rest of the data */ | 3475 | /* Zero sized fields, mean the rest of the data */ |
@@ -3349,7 +3486,11 @@ static void print_str_arg(struct trace_seq *s, void *data, int size, | |||
3349 | trace_seq_printf(s, "%lx", addr); | 3486 | trace_seq_printf(s, "%lx", addr); |
3350 | break; | 3487 | break; |
3351 | } | 3488 | } |
3352 | str = malloc_or_die(len + 1); | 3489 | str = malloc(len + 1); |
3490 | if (!str) { | ||
3491 | do_warning("%s: not enough memory!", __func__); | ||
3492 | return; | ||
3493 | } | ||
3353 | memcpy(str, data + field->offset, len); | 3494 | memcpy(str, data + field->offset, len); |
3354 | str[len] = 0; | 3495 | str[len] = 0; |
3355 | print_str_to_seq(s, format, len_arg, str); | 3496 | print_str_to_seq(s, format, len_arg, str); |
@@ -3389,7 +3530,7 @@ static void print_str_arg(struct trace_seq *s, void *data, int size, | |||
3389 | str = arg->hex.field->field.name; | 3530 | str = arg->hex.field->field.name; |
3390 | field = pevent_find_any_field(event, str); | 3531 | field = pevent_find_any_field(event, str); |
3391 | if (!field) | 3532 | if (!field) |
3392 | die("field %s not found", str); | 3533 | goto out_warning_field; |
3393 | arg->hex.field->field.field = field; | 3534 | arg->hex.field->field.field = field; |
3394 | } | 3535 | } |
3395 | hex = data + field->offset; | 3536 | hex = data + field->offset; |
@@ -3441,6 +3582,11 @@ static void print_str_arg(struct trace_seq *s, void *data, int size, | |||
3441 | /* well... */ | 3582 | /* well... */ |
3442 | break; | 3583 | break; |
3443 | } | 3584 | } |
3585 | |||
3586 | return; | ||
3587 | |||
3588 | out_warning_field: | ||
3589 | do_warning("%s: field %s not found", __func__, arg->field.name); | ||
3444 | } | 3590 | } |
3445 | 3591 | ||
3446 | static unsigned long long | 3592 | static unsigned long long |
@@ -3467,7 +3613,11 @@ process_defined_func(struct trace_seq *s, void *data, int size, | |||
3467 | farg = arg->func.args; | 3613 | farg = arg->func.args; |
3468 | param = func_handle->params; | 3614 | param = func_handle->params; |
3469 | 3615 | ||
3470 | args = malloc_or_die(sizeof(*args) * func_handle->nr_args); | 3616 | ret = ULLONG_MAX; |
3617 | args = malloc(sizeof(*args) * func_handle->nr_args); | ||
3618 | if (!args) | ||
3619 | goto out; | ||
3620 | |||
3471 | for (i = 0; i < func_handle->nr_args; i++) { | 3621 | for (i = 0; i < func_handle->nr_args; i++) { |
3472 | switch (param->type) { | 3622 | switch (param->type) { |
3473 | case PEVENT_FUNC_ARG_INT: | 3623 | case PEVENT_FUNC_ARG_INT: |
@@ -3479,13 +3629,19 @@ process_defined_func(struct trace_seq *s, void *data, int size, | |||
3479 | trace_seq_init(&str); | 3629 | trace_seq_init(&str); |
3480 | print_str_arg(&str, data, size, event, "%s", -1, farg); | 3630 | print_str_arg(&str, data, size, event, "%s", -1, farg); |
3481 | trace_seq_terminate(&str); | 3631 | trace_seq_terminate(&str); |
3482 | string = malloc_or_die(sizeof(*string)); | 3632 | string = malloc(sizeof(*string)); |
3633 | if (!string) { | ||
3634 | do_warning("%s(%d): malloc str", __func__, __LINE__); | ||
3635 | goto out_free; | ||
3636 | } | ||
3483 | string->next = strings; | 3637 | string->next = strings; |
3484 | string->str = strdup(str.buffer); | 3638 | string->str = strdup(str.buffer); |
3485 | if (!string->str) | 3639 | if (!string->str) { |
3486 | die("malloc str"); | 3640 | free(string); |
3487 | 3641 | do_warning("%s(%d): malloc str", __func__, __LINE__); | |
3488 | args[i] = (unsigned long long)string->str; | 3642 | goto out_free; |
3643 | } | ||
3644 | args[i] = (uintptr_t)string->str; | ||
3489 | strings = string; | 3645 | strings = string; |
3490 | trace_seq_destroy(&str); | 3646 | trace_seq_destroy(&str); |
3491 | break; | 3647 | break; |
@@ -3494,14 +3650,15 @@ process_defined_func(struct trace_seq *s, void *data, int size, | |||
3494 | * Something went totally wrong, this is not | 3650 | * Something went totally wrong, this is not |
3495 | * an input error, something in this code broke. | 3651 | * an input error, something in this code broke. |
3496 | */ | 3652 | */ |
3497 | die("Unexpected end of arguments\n"); | 3653 | do_warning("Unexpected end of arguments\n"); |
3498 | break; | 3654 | goto out_free; |
3499 | } | 3655 | } |
3500 | farg = farg->next; | 3656 | farg = farg->next; |
3501 | param = param->next; | 3657 | param = param->next; |
3502 | } | 3658 | } |
3503 | 3659 | ||
3504 | ret = (*func_handle->func)(s, args); | 3660 | ret = (*func_handle->func)(s, args); |
3661 | out_free: | ||
3505 | free(args); | 3662 | free(args); |
3506 | while (strings) { | 3663 | while (strings) { |
3507 | string = strings; | 3664 | string = strings; |
@@ -3515,6 +3672,18 @@ process_defined_func(struct trace_seq *s, void *data, int size, | |||
3515 | return ret; | 3672 | return ret; |
3516 | } | 3673 | } |
3517 | 3674 | ||
3675 | static void free_args(struct print_arg *args) | ||
3676 | { | ||
3677 | struct print_arg *next; | ||
3678 | |||
3679 | while (args) { | ||
3680 | next = args->next; | ||
3681 | |||
3682 | free_arg(args); | ||
3683 | args = next; | ||
3684 | } | ||
3685 | } | ||
3686 | |||
3518 | static struct print_arg *make_bprint_args(char *fmt, void *data, int size, struct event_format *event) | 3687 | static struct print_arg *make_bprint_args(char *fmt, void *data, int size, struct event_format *event) |
3519 | { | 3688 | { |
3520 | struct pevent *pevent = event->pevent; | 3689 | struct pevent *pevent = event->pevent; |
@@ -3530,11 +3699,15 @@ static struct print_arg *make_bprint_args(char *fmt, void *data, int size, struc | |||
3530 | 3699 | ||
3531 | if (!field) { | 3700 | if (!field) { |
3532 | field = pevent_find_field(event, "buf"); | 3701 | field = pevent_find_field(event, "buf"); |
3533 | if (!field) | 3702 | if (!field) { |
3534 | die("can't find buffer field for binary printk"); | 3703 | do_warning("can't find buffer field for binary printk"); |
3704 | return NULL; | ||
3705 | } | ||
3535 | ip_field = pevent_find_field(event, "ip"); | 3706 | ip_field = pevent_find_field(event, "ip"); |
3536 | if (!ip_field) | 3707 | if (!ip_field) { |
3537 | die("can't find ip field for binary printk"); | 3708 | do_warning("can't find ip field for binary printk"); |
3709 | return NULL; | ||
3710 | } | ||
3538 | pevent->bprint_buf_field = field; | 3711 | pevent->bprint_buf_field = field; |
3539 | pevent->bprint_ip_field = ip_field; | 3712 | pevent->bprint_ip_field = ip_field; |
3540 | } | 3713 | } |
@@ -3545,13 +3718,18 @@ static struct print_arg *make_bprint_args(char *fmt, void *data, int size, struc | |||
3545 | * The first arg is the IP pointer. | 3718 | * The first arg is the IP pointer. |
3546 | */ | 3719 | */ |
3547 | args = alloc_arg(); | 3720 | args = alloc_arg(); |
3721 | if (!args) { | ||
3722 | do_warning("%s(%d): not enough memory!", __func__, __LINE__); | ||
3723 | return NULL; | ||
3724 | } | ||
3548 | arg = args; | 3725 | arg = args; |
3549 | arg->next = NULL; | 3726 | arg->next = NULL; |
3550 | next = &arg->next; | 3727 | next = &arg->next; |
3551 | 3728 | ||
3552 | arg->type = PRINT_ATOM; | 3729 | arg->type = PRINT_ATOM; |
3553 | arg->atom.atom = malloc_or_die(32); | 3730 | |
3554 | sprintf(arg->atom.atom, "%lld", ip); | 3731 | if (asprintf(&arg->atom.atom, "%lld", ip) < 0) |
3732 | goto out_free; | ||
3555 | 3733 | ||
3556 | /* skip the first "%pf : " */ | 3734 | /* skip the first "%pf : " */ |
3557 | for (ptr = fmt + 6, bptr = data + field->offset; | 3735 | for (ptr = fmt + 6, bptr = data + field->offset; |
@@ -3606,10 +3784,17 @@ static struct print_arg *make_bprint_args(char *fmt, void *data, int size, struc | |||
3606 | val = pevent_read_number(pevent, bptr, vsize); | 3784 | val = pevent_read_number(pevent, bptr, vsize); |
3607 | bptr += vsize; | 3785 | bptr += vsize; |
3608 | arg = alloc_arg(); | 3786 | arg = alloc_arg(); |
3787 | if (!arg) { | ||
3788 | do_warning("%s(%d): not enough memory!", | ||
3789 | __func__, __LINE__); | ||
3790 | goto out_free; | ||
3791 | } | ||
3609 | arg->next = NULL; | 3792 | arg->next = NULL; |
3610 | arg->type = PRINT_ATOM; | 3793 | arg->type = PRINT_ATOM; |
3611 | arg->atom.atom = malloc_or_die(32); | 3794 | if (asprintf(&arg->atom.atom, "%lld", val) < 0) { |
3612 | sprintf(arg->atom.atom, "%lld", val); | 3795 | free(arg); |
3796 | goto out_free; | ||
3797 | } | ||
3613 | *next = arg; | 3798 | *next = arg; |
3614 | next = &arg->next; | 3799 | next = &arg->next; |
3615 | /* | 3800 | /* |
@@ -3622,11 +3807,16 @@ static struct print_arg *make_bprint_args(char *fmt, void *data, int size, struc | |||
3622 | break; | 3807 | break; |
3623 | case 's': | 3808 | case 's': |
3624 | arg = alloc_arg(); | 3809 | arg = alloc_arg(); |
3810 | if (!arg) { | ||
3811 | do_warning("%s(%d): not enough memory!", | ||
3812 | __func__, __LINE__); | ||
3813 | goto out_free; | ||
3814 | } | ||
3625 | arg->next = NULL; | 3815 | arg->next = NULL; |
3626 | arg->type = PRINT_BSTRING; | 3816 | arg->type = PRINT_BSTRING; |
3627 | arg->string.string = strdup(bptr); | 3817 | arg->string.string = strdup(bptr); |
3628 | if (!arg->string.string) | 3818 | if (!arg->string.string) |
3629 | break; | 3819 | goto out_free; |
3630 | bptr += strlen(bptr) + 1; | 3820 | bptr += strlen(bptr) + 1; |
3631 | *next = arg; | 3821 | *next = arg; |
3632 | next = &arg->next; | 3822 | next = &arg->next; |
@@ -3637,22 +3827,15 @@ static struct print_arg *make_bprint_args(char *fmt, void *data, int size, struc | |||
3637 | } | 3827 | } |
3638 | 3828 | ||
3639 | return args; | 3829 | return args; |
3640 | } | ||
3641 | 3830 | ||
3642 | static void free_args(struct print_arg *args) | 3831 | out_free: |
3643 | { | 3832 | free_args(args); |
3644 | struct print_arg *next; | 3833 | return NULL; |
3645 | |||
3646 | while (args) { | ||
3647 | next = args->next; | ||
3648 | |||
3649 | free_arg(args); | ||
3650 | args = next; | ||
3651 | } | ||
3652 | } | 3834 | } |
3653 | 3835 | ||
3654 | static char * | 3836 | static char * |
3655 | get_bprint_format(void *data, int size __unused, struct event_format *event) | 3837 | get_bprint_format(void *data, int size __maybe_unused, |
3838 | struct event_format *event) | ||
3656 | { | 3839 | { |
3657 | struct pevent *pevent = event->pevent; | 3840 | struct pevent *pevent = event->pevent; |
3658 | unsigned long long addr; | 3841 | unsigned long long addr; |
@@ -3665,8 +3848,10 @@ get_bprint_format(void *data, int size __unused, struct event_format *event) | |||
3665 | 3848 | ||
3666 | if (!field) { | 3849 | if (!field) { |
3667 | field = pevent_find_field(event, "fmt"); | 3850 | field = pevent_find_field(event, "fmt"); |
3668 | if (!field) | 3851 | if (!field) { |
3669 | die("can't find format field for binary printk"); | 3852 | do_warning("can't find format field for binary printk"); |
3853 | return NULL; | ||
3854 | } | ||
3670 | pevent->bprint_fmt_field = field; | 3855 | pevent->bprint_fmt_field = field; |
3671 | } | 3856 | } |
3672 | 3857 | ||
@@ -3674,9 +3859,8 @@ get_bprint_format(void *data, int size __unused, struct event_format *event) | |||
3674 | 3859 | ||
3675 | printk = find_printk(pevent, addr); | 3860 | printk = find_printk(pevent, addr); |
3676 | if (!printk) { | 3861 | if (!printk) { |
3677 | format = malloc_or_die(45); | 3862 | if (asprintf(&format, "%%pf : (NO FORMAT FOUND at %llx)\n", addr) < 0) |
3678 | sprintf(format, "%%pf : (NO FORMAT FOUND at %llx)\n", | 3863 | return NULL; |
3679 | addr); | ||
3680 | return format; | 3864 | return format; |
3681 | } | 3865 | } |
3682 | 3866 | ||
@@ -3684,8 +3868,8 @@ get_bprint_format(void *data, int size __unused, struct event_format *event) | |||
3684 | /* Remove any quotes. */ | 3868 | /* Remove any quotes. */ |
3685 | if (*p == '"') | 3869 | if (*p == '"') |
3686 | p++; | 3870 | p++; |
3687 | format = malloc_or_die(strlen(p) + 10); | 3871 | if (asprintf(&format, "%s : %s", "%pf", p) < 0) |
3688 | sprintf(format, "%s : %s", "%pf", p); | 3872 | return NULL; |
3689 | /* remove ending quotes and new line since we will add one too */ | 3873 | /* remove ending quotes and new line since we will add one too */ |
3690 | p = format + strlen(format) - 1; | 3874 | p = format + strlen(format) - 1; |
3691 | if (*p == '"') | 3875 | if (*p == '"') |
@@ -3720,8 +3904,11 @@ static void print_mac_arg(struct trace_seq *s, int mac, void *data, int size, | |||
3720 | if (!arg->field.field) { | 3904 | if (!arg->field.field) { |
3721 | arg->field.field = | 3905 | arg->field.field = |
3722 | pevent_find_any_field(event, arg->field.name); | 3906 | pevent_find_any_field(event, arg->field.name); |
3723 | if (!arg->field.field) | 3907 | if (!arg->field.field) { |
3724 | die("field %s not found", arg->field.name); | 3908 | do_warning("%s: field %s not found", |
3909 | __func__, arg->field.name); | ||
3910 | return; | ||
3911 | } | ||
3725 | } | 3912 | } |
3726 | if (arg->field.field->size != 6) { | 3913 | if (arg->field.field->size != 6) { |
3727 | trace_seq_printf(s, "INVALIDMAC"); | 3914 | trace_seq_printf(s, "INVALIDMAC"); |
@@ -3888,8 +4075,11 @@ static void pretty_print(struct trace_seq *s, void *data, int size, struct event | |||
3888 | goto cont_process; | 4075 | goto cont_process; |
3889 | case '*': | 4076 | case '*': |
3890 | /* The argument is the length. */ | 4077 | /* The argument is the length. */ |
3891 | if (!arg) | 4078 | if (!arg) { |
3892 | die("no argument match"); | 4079 | do_warning("no argument match"); |
4080 | event->flags |= EVENT_FL_FAILED; | ||
4081 | goto out_failed; | ||
4082 | } | ||
3893 | len_arg = eval_num_arg(data, size, event, arg); | 4083 | len_arg = eval_num_arg(data, size, event, arg); |
3894 | len_as_arg = 1; | 4084 | len_as_arg = 1; |
3895 | arg = arg->next; | 4085 | arg = arg->next; |
@@ -3922,15 +4112,21 @@ static void pretty_print(struct trace_seq *s, void *data, int size, struct event | |||
3922 | case 'x': | 4112 | case 'x': |
3923 | case 'X': | 4113 | case 'X': |
3924 | case 'u': | 4114 | case 'u': |
3925 | if (!arg) | 4115 | if (!arg) { |
3926 | die("no argument match"); | 4116 | do_warning("no argument match"); |
4117 | event->flags |= EVENT_FL_FAILED; | ||
4118 | goto out_failed; | ||
4119 | } | ||
3927 | 4120 | ||
3928 | len = ((unsigned long)ptr + 1) - | 4121 | len = ((unsigned long)ptr + 1) - |
3929 | (unsigned long)saveptr; | 4122 | (unsigned long)saveptr; |
3930 | 4123 | ||
3931 | /* should never happen */ | 4124 | /* should never happen */ |
3932 | if (len > 31) | 4125 | if (len > 31) { |
3933 | die("bad format!"); | 4126 | do_warning("bad format!"); |
4127 | event->flags |= EVENT_FL_FAILED; | ||
4128 | len = 31; | ||
4129 | } | ||
3934 | 4130 | ||
3935 | memcpy(format, saveptr, len); | 4131 | memcpy(format, saveptr, len); |
3936 | format[len] = 0; | 4132 | format[len] = 0; |
@@ -3994,19 +4190,26 @@ static void pretty_print(struct trace_seq *s, void *data, int size, struct event | |||
3994 | trace_seq_printf(s, format, (long long)val); | 4190 | trace_seq_printf(s, format, (long long)val); |
3995 | break; | 4191 | break; |
3996 | default: | 4192 | default: |
3997 | die("bad count (%d)", ls); | 4193 | do_warning("bad count (%d)", ls); |
4194 | event->flags |= EVENT_FL_FAILED; | ||
3998 | } | 4195 | } |
3999 | break; | 4196 | break; |
4000 | case 's': | 4197 | case 's': |
4001 | if (!arg) | 4198 | if (!arg) { |
4002 | die("no matching argument"); | 4199 | do_warning("no matching argument"); |
4200 | event->flags |= EVENT_FL_FAILED; | ||
4201 | goto out_failed; | ||
4202 | } | ||
4003 | 4203 | ||
4004 | len = ((unsigned long)ptr + 1) - | 4204 | len = ((unsigned long)ptr + 1) - |
4005 | (unsigned long)saveptr; | 4205 | (unsigned long)saveptr; |
4006 | 4206 | ||
4007 | /* should never happen */ | 4207 | /* should never happen */ |
4008 | if (len > 31) | 4208 | if (len > 31) { |
4009 | die("bad format!"); | 4209 | do_warning("bad format!"); |
4210 | event->flags |= EVENT_FL_FAILED; | ||
4211 | len = 31; | ||
4212 | } | ||
4010 | 4213 | ||
4011 | memcpy(format, saveptr, len); | 4214 | memcpy(format, saveptr, len); |
4012 | format[len] = 0; | 4215 | format[len] = 0; |
@@ -4024,6 +4227,11 @@ static void pretty_print(struct trace_seq *s, void *data, int size, struct event | |||
4024 | trace_seq_putc(s, *ptr); | 4227 | trace_seq_putc(s, *ptr); |
4025 | } | 4228 | } |
4026 | 4229 | ||
4230 | if (event->flags & EVENT_FL_FAILED) { | ||
4231 | out_failed: | ||
4232 | trace_seq_printf(s, "[FAILED TO PARSE]"); | ||
4233 | } | ||
4234 | |||
4027 | if (args) { | 4235 | if (args) { |
4028 | free_args(args); | 4236 | free_args(args); |
4029 | free(bprint_fmt); | 4237 | free(bprint_fmt); |
@@ -4356,7 +4564,10 @@ get_event_fields(const char *type, const char *name, | |||
4356 | struct format_field *field; | 4564 | struct format_field *field; |
4357 | int i = 0; | 4565 | int i = 0; |
4358 | 4566 | ||
4359 | fields = malloc_or_die(sizeof(*fields) * (count + 1)); | 4567 | fields = malloc(sizeof(*fields) * (count + 1)); |
4568 | if (!fields) | ||
4569 | return NULL; | ||
4570 | |||
4360 | for (field = list; field; field = field->next) { | 4571 | for (field = list; field; field = field->next) { |
4361 | fields[i++] = field; | 4572 | fields[i++] = field; |
4362 | if (i == count + 1) { | 4573 | if (i == count + 1) { |
@@ -4672,8 +4883,7 @@ static int find_event_handle(struct pevent *pevent, struct event_format *event) | |||
4672 | } | 4883 | } |
4673 | 4884 | ||
4674 | /** | 4885 | /** |
4675 | * pevent_parse_event - parse the event format | 4886 | * __pevent_parse_format - parse the event format |
4676 | * @pevent: the handle to the pevent | ||
4677 | * @buf: the buffer storing the event format string | 4887 | * @buf: the buffer storing the event format string |
4678 | * @size: the size of @buf | 4888 | * @size: the size of @buf |
4679 | * @sys: the system the event belongs to | 4889 | * @sys: the system the event belongs to |
@@ -4685,28 +4895,27 @@ static int find_event_handle(struct pevent *pevent, struct event_format *event) | |||
4685 | * | 4895 | * |
4686 | * /sys/kernel/debug/tracing/events/.../.../format | 4896 | * /sys/kernel/debug/tracing/events/.../.../format |
4687 | */ | 4897 | */ |
4688 | int pevent_parse_event(struct pevent *pevent, | 4898 | enum pevent_errno __pevent_parse_format(struct event_format **eventp, |
4689 | const char *buf, unsigned long size, | 4899 | struct pevent *pevent, const char *buf, |
4690 | const char *sys) | 4900 | unsigned long size, const char *sys) |
4691 | { | 4901 | { |
4692 | struct event_format *event; | 4902 | struct event_format *event; |
4693 | int ret; | 4903 | int ret; |
4694 | 4904 | ||
4695 | init_input_buf(buf, size); | 4905 | init_input_buf(buf, size); |
4696 | 4906 | ||
4697 | event = alloc_event(); | 4907 | *eventp = event = alloc_event(); |
4698 | if (!event) | 4908 | if (!event) |
4699 | return -ENOMEM; | 4909 | return PEVENT_ERRNO__MEM_ALLOC_FAILED; |
4700 | 4910 | ||
4701 | event->name = event_read_name(); | 4911 | event->name = event_read_name(); |
4702 | if (!event->name) { | 4912 | if (!event->name) { |
4703 | /* Bad event? */ | 4913 | /* Bad event? */ |
4704 | free(event); | 4914 | ret = PEVENT_ERRNO__MEM_ALLOC_FAILED; |
4705 | return -1; | 4915 | goto event_alloc_failed; |
4706 | } | 4916 | } |
4707 | 4917 | ||
4708 | if (strcmp(sys, "ftrace") == 0) { | 4918 | if (strcmp(sys, "ftrace") == 0) { |
4709 | |||
4710 | event->flags |= EVENT_FL_ISFTRACE; | 4919 | event->flags |= EVENT_FL_ISFTRACE; |
4711 | 4920 | ||
4712 | if (strcmp(event->name, "bprint") == 0) | 4921 | if (strcmp(event->name, "bprint") == 0) |
@@ -4714,74 +4923,189 @@ int pevent_parse_event(struct pevent *pevent, | |||
4714 | } | 4923 | } |
4715 | 4924 | ||
4716 | event->id = event_read_id(); | 4925 | event->id = event_read_id(); |
4717 | if (event->id < 0) | 4926 | if (event->id < 0) { |
4718 | die("failed to read event id"); | 4927 | ret = PEVENT_ERRNO__READ_ID_FAILED; |
4928 | /* | ||
4929 | * This isn't an allocation error actually. | ||
4930 | * But as the ID is critical, just bail out. | ||
4931 | */ | ||
4932 | goto event_alloc_failed; | ||
4933 | } | ||
4719 | 4934 | ||
4720 | event->system = strdup(sys); | 4935 | event->system = strdup(sys); |
4721 | if (!event->system) | 4936 | if (!event->system) { |
4722 | die("failed to allocate system"); | 4937 | ret = PEVENT_ERRNO__MEM_ALLOC_FAILED; |
4723 | 4938 | goto event_alloc_failed; | |
4724 | /* Add pevent to event so that it can be referenced */ | 4939 | } |
4725 | event->pevent = pevent; | ||
4726 | 4940 | ||
4727 | ret = event_read_format(event); | 4941 | ret = event_read_format(event); |
4728 | if (ret < 0) { | 4942 | if (ret < 0) { |
4729 | do_warning("failed to read event format for %s", event->name); | 4943 | ret = PEVENT_ERRNO__READ_FORMAT_FAILED; |
4730 | goto event_failed; | 4944 | goto event_parse_failed; |
4731 | } | 4945 | } |
4732 | 4946 | ||
4733 | /* | 4947 | /* |
4734 | * If the event has an override, don't print warnings if the event | 4948 | * If the event has an override, don't print warnings if the event |
4735 | * print format fails to parse. | 4949 | * print format fails to parse. |
4736 | */ | 4950 | */ |
4737 | if (find_event_handle(pevent, event)) | 4951 | if (pevent && find_event_handle(pevent, event)) |
4738 | show_warning = 0; | 4952 | show_warning = 0; |
4739 | 4953 | ||
4740 | ret = event_read_print(event); | 4954 | ret = event_read_print(event); |
4741 | if (ret < 0) { | ||
4742 | do_warning("failed to read event print fmt for %s", | ||
4743 | event->name); | ||
4744 | show_warning = 1; | ||
4745 | goto event_failed; | ||
4746 | } | ||
4747 | show_warning = 1; | 4955 | show_warning = 1; |
4748 | 4956 | ||
4749 | add_event(pevent, event); | 4957 | if (ret < 0) { |
4958 | ret = PEVENT_ERRNO__READ_PRINT_FAILED; | ||
4959 | goto event_parse_failed; | ||
4960 | } | ||
4750 | 4961 | ||
4751 | if (!ret && (event->flags & EVENT_FL_ISFTRACE)) { | 4962 | if (!ret && (event->flags & EVENT_FL_ISFTRACE)) { |
4752 | struct format_field *field; | 4963 | struct format_field *field; |
4753 | struct print_arg *arg, **list; | 4964 | struct print_arg *arg, **list; |
4754 | 4965 | ||
4755 | /* old ftrace had no args */ | 4966 | /* old ftrace had no args */ |
4756 | |||
4757 | list = &event->print_fmt.args; | 4967 | list = &event->print_fmt.args; |
4758 | for (field = event->format.fields; field; field = field->next) { | 4968 | for (field = event->format.fields; field; field = field->next) { |
4759 | arg = alloc_arg(); | 4969 | arg = alloc_arg(); |
4760 | *list = arg; | 4970 | if (!arg) { |
4761 | list = &arg->next; | 4971 | event->flags |= EVENT_FL_FAILED; |
4972 | return PEVENT_ERRNO__OLD_FTRACE_ARG_FAILED; | ||
4973 | } | ||
4762 | arg->type = PRINT_FIELD; | 4974 | arg->type = PRINT_FIELD; |
4763 | arg->field.name = strdup(field->name); | 4975 | arg->field.name = strdup(field->name); |
4764 | if (!arg->field.name) { | 4976 | if (!arg->field.name) { |
4765 | do_warning("failed to allocate field name"); | ||
4766 | event->flags |= EVENT_FL_FAILED; | 4977 | event->flags |= EVENT_FL_FAILED; |
4767 | return -1; | 4978 | free_arg(arg); |
4979 | return PEVENT_ERRNO__OLD_FTRACE_ARG_FAILED; | ||
4768 | } | 4980 | } |
4769 | arg->field.field = field; | 4981 | arg->field.field = field; |
4982 | *list = arg; | ||
4983 | list = &arg->next; | ||
4770 | } | 4984 | } |
4771 | return 0; | 4985 | return 0; |
4772 | } | 4986 | } |
4773 | 4987 | ||
4988 | return 0; | ||
4989 | |||
4990 | event_parse_failed: | ||
4991 | event->flags |= EVENT_FL_FAILED; | ||
4992 | return ret; | ||
4993 | |||
4994 | event_alloc_failed: | ||
4995 | free(event->system); | ||
4996 | free(event->name); | ||
4997 | free(event); | ||
4998 | *eventp = NULL; | ||
4999 | return ret; | ||
5000 | } | ||
5001 | |||
5002 | /** | ||
5003 | * pevent_parse_format - parse the event format | ||
5004 | * @buf: the buffer storing the event format string | ||
5005 | * @size: the size of @buf | ||
5006 | * @sys: the system the event belongs to | ||
5007 | * | ||
5008 | * This parses the event format and creates an event structure | ||
5009 | * to quickly parse raw data for a given event. | ||
5010 | * | ||
5011 | * These files currently come from: | ||
5012 | * | ||
5013 | * /sys/kernel/debug/tracing/events/.../.../format | ||
5014 | */ | ||
5015 | enum pevent_errno pevent_parse_format(struct event_format **eventp, const char *buf, | ||
5016 | unsigned long size, const char *sys) | ||
5017 | { | ||
5018 | return __pevent_parse_format(eventp, NULL, buf, size, sys); | ||
5019 | } | ||
5020 | |||
5021 | /** | ||
5022 | * pevent_parse_event - parse the event format | ||
5023 | * @pevent: the handle to the pevent | ||
5024 | * @buf: the buffer storing the event format string | ||
5025 | * @size: the size of @buf | ||
5026 | * @sys: the system the event belongs to | ||
5027 | * | ||
5028 | * This parses the event format and creates an event structure | ||
5029 | * to quickly parse raw data for a given event. | ||
5030 | * | ||
5031 | * These files currently come from: | ||
5032 | * | ||
5033 | * /sys/kernel/debug/tracing/events/.../.../format | ||
5034 | */ | ||
5035 | enum pevent_errno pevent_parse_event(struct pevent *pevent, const char *buf, | ||
5036 | unsigned long size, const char *sys) | ||
5037 | { | ||
5038 | struct event_format *event = NULL; | ||
5039 | int ret = __pevent_parse_format(&event, pevent, buf, size, sys); | ||
5040 | |||
5041 | if (event == NULL) | ||
5042 | return ret; | ||
5043 | |||
5044 | /* Add pevent to event so that it can be referenced */ | ||
5045 | event->pevent = pevent; | ||
5046 | |||
5047 | if (add_event(pevent, event)) { | ||
5048 | ret = PEVENT_ERRNO__MEM_ALLOC_FAILED; | ||
5049 | goto event_add_failed; | ||
5050 | } | ||
5051 | |||
4774 | #define PRINT_ARGS 0 | 5052 | #define PRINT_ARGS 0 |
4775 | if (PRINT_ARGS && event->print_fmt.args) | 5053 | if (PRINT_ARGS && event->print_fmt.args) |
4776 | print_args(event->print_fmt.args); | 5054 | print_args(event->print_fmt.args); |
4777 | 5055 | ||
4778 | return 0; | 5056 | return 0; |
4779 | 5057 | ||
4780 | event_failed: | 5058 | event_add_failed: |
4781 | event->flags |= EVENT_FL_FAILED; | 5059 | pevent_free_format(event); |
4782 | /* still add it even if it failed */ | 5060 | return ret; |
4783 | add_event(pevent, event); | 5061 | } |
4784 | return -1; | 5062 | |
5063 | #undef _PE | ||
5064 | #define _PE(code, str) str | ||
5065 | static const char * const pevent_error_str[] = { | ||
5066 | PEVENT_ERRORS | ||
5067 | }; | ||
5068 | #undef _PE | ||
5069 | |||
5070 | int pevent_strerror(struct pevent *pevent, enum pevent_errno errnum, | ||
5071 | char *buf, size_t buflen) | ||
5072 | { | ||
5073 | int idx; | ||
5074 | const char *msg; | ||
5075 | |||
5076 | if (errnum >= 0) { | ||
5077 | msg = strerror_r(errnum, buf, buflen); | ||
5078 | if (msg != buf) { | ||
5079 | size_t len = strlen(msg); | ||
5080 | memcpy(buf, msg, min(buflen - 1, len)); | ||
5081 | *(buf + min(buflen - 1, len)) = '\0'; | ||
5082 | } | ||
5083 | return 0; | ||
5084 | } | ||
5085 | |||
5086 | if (errnum <= __PEVENT_ERRNO__START || | ||
5087 | errnum >= __PEVENT_ERRNO__END) | ||
5088 | return -1; | ||
5089 | |||
5090 | idx = errnum - __PEVENT_ERRNO__START - 1; | ||
5091 | msg = pevent_error_str[idx]; | ||
5092 | |||
5093 | switch (errnum) { | ||
5094 | case PEVENT_ERRNO__MEM_ALLOC_FAILED: | ||
5095 | case PEVENT_ERRNO__PARSE_EVENT_FAILED: | ||
5096 | case PEVENT_ERRNO__READ_ID_FAILED: | ||
5097 | case PEVENT_ERRNO__READ_FORMAT_FAILED: | ||
5098 | case PEVENT_ERRNO__READ_PRINT_FAILED: | ||
5099 | case PEVENT_ERRNO__OLD_FTRACE_ARG_FAILED: | ||
5100 | snprintf(buf, buflen, "%s", msg); | ||
5101 | break; | ||
5102 | |||
5103 | default: | ||
5104 | /* cannot reach here */ | ||
5105 | break; | ||
5106 | } | ||
5107 | |||
5108 | return 0; | ||
4785 | } | 5109 | } |
4786 | 5110 | ||
4787 | int get_field_val(struct trace_seq *s, struct format_field *field, | 5111 | int get_field_val(struct trace_seq *s, struct format_field *field, |
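The hunk above replaces the old int/-1 convention of pevent_parse_event() with enum pevent_errno return codes, and pevent_strerror() translates those codes (or positive errno values) back into text. A minimal caller-side sketch of that flow; the helper name report_parse_error is only illustrative and not part of the patch:

#include <stdio.h>
#include "event-parse.h"

/* Illustrative helper (not part of this patch): parse one event format
 * buffer and report a failure through the new pevent_strerror() path. */
static int report_parse_error(struct pevent *pevent, const char *buf,
			      unsigned long size, const char *sys)
{
	char msg[128];
	enum pevent_errno err = pevent_parse_event(pevent, buf, size, sys);

	if (err == PEVENT_ERRNO__SUCCESS)
		return 0;

	/* handles both standard errno values and PEVENT_ERRNO__* codes */
	pevent_strerror(pevent, err, msg, sizeof(msg));
	fprintf(stderr, "%s: %s\n", sys, msg);
	return -1;
}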
@@ -5000,6 +5324,7 @@ int pevent_register_print_function(struct pevent *pevent, | |||
5000 | struct pevent_func_params *param; | 5324 | struct pevent_func_params *param; |
5001 | enum pevent_func_arg_type type; | 5325 | enum pevent_func_arg_type type; |
5002 | va_list ap; | 5326 | va_list ap; |
5327 | int ret; | ||
5003 | 5328 | ||
5004 | func_handle = find_func_handler(pevent, name); | 5329 | func_handle = find_func_handler(pevent, name); |
5005 | if (func_handle) { | 5330 | if (func_handle) { |
@@ -5012,14 +5337,20 @@ int pevent_register_print_function(struct pevent *pevent, | |||
5012 | remove_func_handler(pevent, name); | 5337 | remove_func_handler(pevent, name); |
5013 | } | 5338 | } |
5014 | 5339 | ||
5015 | func_handle = malloc_or_die(sizeof(*func_handle)); | 5340 | func_handle = calloc(1, sizeof(*func_handle)); |
5016 | memset(func_handle, 0, sizeof(*func_handle)); | 5341 | if (!func_handle) { |
5342 | do_warning("Failed to allocate function handler"); | ||
5343 | return PEVENT_ERRNO__MEM_ALLOC_FAILED; | ||
5344 | } | ||
5017 | 5345 | ||
5018 | func_handle->ret_type = ret_type; | 5346 | func_handle->ret_type = ret_type; |
5019 | func_handle->name = strdup(name); | 5347 | func_handle->name = strdup(name); |
5020 | func_handle->func = func; | 5348 | func_handle->func = func; |
5021 | if (!func_handle->name) | 5349 | if (!func_handle->name) { |
5022 | die("Failed to allocate function name"); | 5350 | do_warning("Failed to allocate function name"); |
5351 | free(func_handle); | ||
5352 | return PEVENT_ERRNO__MEM_ALLOC_FAILED; | ||
5353 | } | ||
5023 | 5354 | ||
5024 | next_param = &(func_handle->params); | 5355 | next_param = &(func_handle->params); |
5025 | va_start(ap, name); | 5356 | va_start(ap, name); |
@@ -5029,11 +5360,17 @@ int pevent_register_print_function(struct pevent *pevent, | |||
5029 | break; | 5360 | break; |
5030 | 5361 | ||
5031 | if (type < 0 || type >= PEVENT_FUNC_ARG_MAX_TYPES) { | 5362 | if (type < 0 || type >= PEVENT_FUNC_ARG_MAX_TYPES) { |
5032 | warning("Invalid argument type %d", type); | 5363 | do_warning("Invalid argument type %d", type); |
5364 | ret = PEVENT_ERRNO__INVALID_ARG_TYPE; | ||
5033 | goto out_free; | 5365 | goto out_free; |
5034 | } | 5366 | } |
5035 | 5367 | ||
5036 | param = malloc_or_die(sizeof(*param)); | 5368 | param = malloc(sizeof(*param)); |
5369 | if (!param) { | ||
5370 | do_warning("Failed to allocate function param"); | ||
5371 | ret = PEVENT_ERRNO__MEM_ALLOC_FAILED; | ||
5372 | goto out_free; | ||
5373 | } | ||
5037 | param->type = type; | 5374 | param->type = type; |
5038 | param->next = NULL; | 5375 | param->next = NULL; |
5039 | 5376 | ||
@@ -5051,7 +5388,7 @@ int pevent_register_print_function(struct pevent *pevent, | |||
5051 | out_free: | 5388 | out_free: |
5052 | va_end(ap); | 5389 | va_end(ap); |
5053 | free_func_handle(func_handle); | 5390 | free_func_handle(func_handle); |
5054 | return -1; | 5391 | return ret; |
5055 | } | 5392 | } |
5056 | 5393 | ||
5057 | /** | 5394 | /** |
@@ -5103,8 +5440,12 @@ int pevent_register_event_handler(struct pevent *pevent, | |||
5103 | 5440 | ||
5104 | not_found: | 5441 | not_found: |
5105 | /* Save for later use. */ | 5442 | /* Save for later use. */ |
5106 | handle = malloc_or_die(sizeof(*handle)); | 5443 | handle = calloc(1, sizeof(*handle)); |
5107 | memset(handle, 0, sizeof(*handle)); | 5444 | if (!handle) { |
5445 | do_warning("Failed to allocate event handler"); | ||
5446 | return PEVENT_ERRNO__MEM_ALLOC_FAILED; | ||
5447 | } | ||
5448 | |||
5108 | handle->id = id; | 5449 | handle->id = id; |
5109 | if (event_name) | 5450 | if (event_name) |
5110 | handle->event_name = strdup(event_name); | 5451 | handle->event_name = strdup(event_name); |
@@ -5113,7 +5454,11 @@ int pevent_register_event_handler(struct pevent *pevent, | |||
5113 | 5454 | ||
5114 | if ((event_name && !handle->event_name) || | 5455 | if ((event_name && !handle->event_name) || |
5115 | (sys_name && !handle->sys_name)) { | 5456 | (sys_name && !handle->sys_name)) { |
5116 | die("Failed to allocate event/sys name"); | 5457 | do_warning("Failed to allocate event/sys name"); |
5458 | free((void *)handle->event_name); | ||
5459 | free((void *)handle->sys_name); | ||
5460 | free(handle); | ||
5461 | return PEVENT_ERRNO__MEM_ALLOC_FAILED; | ||
5117 | } | 5462 | } |
5118 | 5463 | ||
5119 | handle->func = func; | 5464 | handle->func = func; |
@@ -5129,13 +5474,10 @@ int pevent_register_event_handler(struct pevent *pevent, | |||
5129 | */ | 5474 | */ |
5130 | struct pevent *pevent_alloc(void) | 5475 | struct pevent *pevent_alloc(void) |
5131 | { | 5476 | { |
5132 | struct pevent *pevent; | 5477 | struct pevent *pevent = calloc(1, sizeof(*pevent)); |
5133 | 5478 | ||
5134 | pevent = malloc(sizeof(*pevent)); | 5479 | if (pevent) |
5135 | if (!pevent) | 5480 | pevent->ref_count = 1; |
5136 | return NULL; | ||
5137 | memset(pevent, 0, sizeof(*pevent)); | ||
5138 | pevent->ref_count = 1; | ||
5139 | 5481 | ||
5140 | return pevent; | 5482 | return pevent; |
5141 | } | 5483 | } |
@@ -5164,7 +5506,7 @@ static void free_formats(struct format *format) | |||
5164 | free_format_fields(format->fields); | 5506 | free_format_fields(format->fields); |
5165 | } | 5507 | } |
5166 | 5508 | ||
5167 | static void free_event(struct event_format *event) | 5509 | void pevent_free_format(struct event_format *event) |
5168 | { | 5510 | { |
5169 | free(event->name); | 5511 | free(event->name); |
5170 | free(event->system); | 5512 | free(event->system); |
@@ -5250,7 +5592,7 @@ void pevent_free(struct pevent *pevent) | |||
5250 | } | 5592 | } |
5251 | 5593 | ||
5252 | for (i = 0; i < pevent->nr_events; i++) | 5594 | for (i = 0; i < pevent->nr_events; i++) |
5253 | free_event(pevent->events[i]); | 5595 | pevent_free_format(pevent->events[i]); |
5254 | 5596 | ||
5255 | while (pevent->handlers) { | 5597 | while (pevent->handlers) { |
5256 | handle = pevent->handlers; | 5598 | handle = pevent->handlers; |
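With the split into __pevent_parse_format(), pevent_parse_format() can also be used without a struct pevent handle, in which case the caller owns the resulting event and releases it with the now-exported pevent_free_format(). A small sketch under that assumption (dump_one_format is a hypothetical name, not part of the patch):

#include <stdio.h>
#include "event-parse.h"

/* Hypothetical example: parse a single format buffer stand-alone and
 * release it with pevent_free_format(), which this patch exports. */
static void dump_one_format(const char *buf, unsigned long size,
			    const char *sys)
{
	struct event_format *event = NULL;

	if (pevent_parse_format(&event, buf, size, sys) != PEVENT_ERRNO__SUCCESS)
		return;

	printf("%s:%s id=%d\n", event->system, event->name, event->id);
	pevent_free_format(event);	/* caller owns the event here */
}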
diff --git a/tools/lib/traceevent/event-parse.h b/tools/lib/traceevent/event-parse.h index 5772ad8cb386..24a4bbabc5d5 100644 --- a/tools/lib/traceevent/event-parse.h +++ b/tools/lib/traceevent/event-parse.h | |||
@@ -24,8 +24,8 @@ | |||
24 | #include <stdarg.h> | 24 | #include <stdarg.h> |
25 | #include <regex.h> | 25 | #include <regex.h> |
26 | 26 | ||
27 | #ifndef __unused | 27 | #ifndef __maybe_unused |
28 | #define __unused __attribute__ ((unused)) | 28 | #define __maybe_unused __attribute__((unused)) |
29 | #endif | 29 | #endif |
30 | 30 | ||
31 | /* ----------------------- trace_seq ----------------------- */ | 31 | /* ----------------------- trace_seq ----------------------- */ |
@@ -49,7 +49,7 @@ struct pevent_record { | |||
49 | int cpu; | 49 | int cpu; |
50 | int ref_count; | 50 | int ref_count; |
51 | int locked; /* Do not free, even if ref_count is zero */ | 51 | int locked; /* Do not free, even if ref_count is zero */ |
52 | void *private; | 52 | void *priv; |
53 | #if DEBUG_RECORD | 53 | #if DEBUG_RECORD |
54 | struct pevent_record *prev; | 54 | struct pevent_record *prev; |
55 | struct pevent_record *next; | 55 | struct pevent_record *next; |
@@ -106,7 +106,7 @@ struct plugin_option { | |||
106 | char *plugin_alias; | 106 | char *plugin_alias; |
107 | char *description; | 107 | char *description; |
108 | char *value; | 108 | char *value; |
109 | void *private; | 109 | void *priv; |
110 | int set; | 110 | int set; |
111 | }; | 111 | }; |
112 | 112 | ||
@@ -345,6 +345,35 @@ enum pevent_flag { | |||
345 | PEVENT_NSEC_OUTPUT = 1, /* output in NSECS */ | 345 | PEVENT_NSEC_OUTPUT = 1, /* output in NSECS */ |
346 | }; | 346 | }; |
347 | 347 | ||
348 | #define PEVENT_ERRORS \ | ||
349 | _PE(MEM_ALLOC_FAILED, "failed to allocate memory"), \ | ||
350 | _PE(PARSE_EVENT_FAILED, "failed to parse event"), \ | ||
351 | _PE(READ_ID_FAILED, "failed to read event id"), \ | ||
352 | _PE(READ_FORMAT_FAILED, "failed to read event format"), \ | ||
353 | _PE(READ_PRINT_FAILED, "failed to read event print fmt"), \ | ||
354 | _PE(OLD_FTRACE_ARG_FAILED,"failed to allocate field name for ftrace"),\ | ||
355 | _PE(INVALID_ARG_TYPE, "invalid argument type") | ||
356 | |||
357 | #undef _PE | ||
358 | #define _PE(__code, __str) PEVENT_ERRNO__ ## __code | ||
359 | enum pevent_errno { | ||
360 | PEVENT_ERRNO__SUCCESS = 0, | ||
361 | |||
362 | /* | ||
363 | * Choose an arbitrarily large negative number so as not to clash with | ||
364 | * standard errno values, since SUS requires errno to have distinct | ||
365 | * positive values. See 'Issue 6' in the link below. | ||
366 | * | ||
367 | * http://pubs.opengroup.org/onlinepubs/9699919799/basedefs/errno.h.html | ||
368 | */ | ||
369 | __PEVENT_ERRNO__START = -100000, | ||
370 | |||
371 | PEVENT_ERRORS, | ||
372 | |||
373 | __PEVENT_ERRNO__END, | ||
374 | }; | ||
375 | #undef _PE | ||
376 | |||
348 | struct cmdline; | 377 | struct cmdline; |
349 | struct cmdline_list; | 378 | struct cmdline_list; |
350 | struct func_map; | 379 | struct func_map; |
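The PEVENT_ERRORS/_PE() pair above is an "X macro": the same list is expanded once into the enum constants here and once into the string table that pevent_strerror() indexes in event-parse.c. A stripped-down sketch of the pattern with made-up names (MY_ERRORS, _XE, my_errno):

/* Same list, expanded twice with different definitions of _XE(). */
#define MY_ERRORS						\
	_XE(MEM_ALLOC_FAILED, "failed to allocate memory"),	\
	_XE(PARSE_EVENT_FAILED, "failed to parse event")

#define _XE(code, str) MY_ERRNO__ ## code
enum my_errno {
	__MY_ERRNO__START = -100000,
	MY_ERRORS,			/* becomes the enum constants */
	__MY_ERRNO__END,
};
#undef _XE

#define _XE(code, str) str
static const char * const my_error_str[] = {
	MY_ERRORS,			/* becomes the error strings */
};
#undef _XE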
@@ -509,8 +538,11 @@ void pevent_print_event(struct pevent *pevent, struct trace_seq *s, | |||
509 | int pevent_parse_header_page(struct pevent *pevent, char *buf, unsigned long size, | 538 | int pevent_parse_header_page(struct pevent *pevent, char *buf, unsigned long size, |
510 | int long_size); | 539 | int long_size); |
511 | 540 | ||
512 | int pevent_parse_event(struct pevent *pevent, const char *buf, | 541 | enum pevent_errno pevent_parse_event(struct pevent *pevent, const char *buf, |
513 | unsigned long size, const char *sys); | 542 | unsigned long size, const char *sys); |
543 | enum pevent_errno pevent_parse_format(struct event_format **eventp, const char *buf, | ||
544 | unsigned long size, const char *sys); | ||
545 | void pevent_free_format(struct event_format *event); | ||
514 | 546 | ||
515 | void *pevent_get_field_raw(struct trace_seq *s, struct event_format *event, | 547 | void *pevent_get_field_raw(struct trace_seq *s, struct event_format *event, |
516 | const char *name, struct pevent_record *record, | 548 | const char *name, struct pevent_record *record, |
@@ -561,6 +593,8 @@ int pevent_data_pid(struct pevent *pevent, struct pevent_record *rec); | |||
561 | const char *pevent_data_comm_from_pid(struct pevent *pevent, int pid); | 593 | const char *pevent_data_comm_from_pid(struct pevent *pevent, int pid); |
562 | void pevent_event_info(struct trace_seq *s, struct event_format *event, | 594 | void pevent_event_info(struct trace_seq *s, struct event_format *event, |
563 | struct pevent_record *record); | 595 | struct pevent_record *record); |
596 | int pevent_strerror(struct pevent *pevent, enum pevent_errno errnum, | ||
597 | char *buf, size_t buflen); | ||
564 | 598 | ||
565 | struct event_format **pevent_list_events(struct pevent *pevent, enum event_sort_type); | 599 | struct event_format **pevent_list_events(struct pevent *pevent, enum event_sort_type); |
566 | struct format_field **pevent_event_common_fields(struct event_format *event); | 600 | struct format_field **pevent_event_common_fields(struct event_format *event); |
diff --git a/tools/lib/traceevent/event-utils.h b/tools/lib/traceevent/event-utils.h index 08296383d1e6..bc075006966e 100644 --- a/tools/lib/traceevent/event-utils.h +++ b/tools/lib/traceevent/event-utils.h | |||
@@ -39,6 +39,12 @@ void __vdie(const char *fmt, ...); | |||
39 | void __vwarning(const char *fmt, ...); | 39 | void __vwarning(const char *fmt, ...); |
40 | void __vpr_stat(const char *fmt, ...); | 40 | void __vpr_stat(const char *fmt, ...); |
41 | 41 | ||
42 | #define min(x, y) ({ \ | ||
43 | typeof(x) _min1 = (x); \ | ||
44 | typeof(y) _min2 = (y); \ | ||
45 | (void) (&_min1 == &_min2); \ | ||
46 | _min1 < _min2 ? _min1 : _min2; }) | ||
47 | |||
42 | static inline char *strim(char *string) | 48 | static inline char *strim(char *string) |
43 | { | 49 | { |
44 | char *ret; | 50 | char *ret; |
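The min() macro added here mirrors the kernel version (the `(void) (&_min1 == &_min2)` line exists only to provoke a compiler warning when the two operand types differ) and is what pevent_strerror() uses to truncate the strerror_r() result safely. A tiny sketch of that truncation pattern; the helper name is hypothetical:

#include <string.h>
#include "event-utils.h"

/* Hypothetical helper: copy src into a bounded buffer, truncating with
 * the min() macro defined above and always NUL-terminating. */
static void copy_truncated(char *dst, size_t dstlen, const char *src)
{
	size_t len = min(dstlen - 1, strlen(src));

	memcpy(dst, src, len);
	dst[len] = '\0';
}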
diff --git a/tools/perf/.gitignore b/tools/perf/.gitignore index 26b823b61aa1..8f8fbc227a46 100644 --- a/tools/perf/.gitignore +++ b/tools/perf/.gitignore | |||
@@ -21,3 +21,5 @@ config.mak | |||
21 | config.mak.autogen | 21 | config.mak.autogen |
22 | *-bison.* | 22 | *-bison.* |
23 | *-flex.* | 23 | *-flex.* |
24 | *.pyc | ||
25 | *.pyo | ||
diff --git a/tools/perf/Documentation/Makefile b/tools/perf/Documentation/Makefile index ca600e09c8d4..9f2e44f2b17a 100644 --- a/tools/perf/Documentation/Makefile +++ b/tools/perf/Documentation/Makefile | |||
@@ -195,10 +195,10 @@ install-pdf: pdf | |||
195 | #install-html: html | 195 | #install-html: html |
196 | # '$(SHELL_PATH_SQ)' ./install-webdoc.sh $(DESTDIR)$(htmldir) | 196 | # '$(SHELL_PATH_SQ)' ./install-webdoc.sh $(DESTDIR)$(htmldir) |
197 | 197 | ||
198 | ../PERF-VERSION-FILE: .FORCE-PERF-VERSION-FILE | 198 | $(OUTPUT)PERF-VERSION-FILE: .FORCE-PERF-VERSION-FILE |
199 | $(QUIET_SUBDIR0)../ $(QUIET_SUBDIR1) PERF-VERSION-FILE | 199 | $(QUIET_SUBDIR0)../ $(QUIET_SUBDIR1) $(OUTPUT)PERF-VERSION-FILE |
200 | 200 | ||
201 | -include ../PERF-VERSION-FILE | 201 | -include $(OUTPUT)PERF-VERSION-FILE |
202 | 202 | ||
203 | # | 203 | # |
204 | # Determine "include::" file references in asciidoc files. | 204 | # Determine "include::" file references in asciidoc files. |
diff --git a/tools/perf/Documentation/jit-interface.txt b/tools/perf/Documentation/jit-interface.txt new file mode 100644 index 000000000000..a8656f564915 --- /dev/null +++ b/tools/perf/Documentation/jit-interface.txt | |||
@@ -0,0 +1,15 @@ | |||
1 | perf supports a simple JIT interface to resolve symbols for dynamic code generated | ||
2 | by a JIT. | ||
3 | |||
4 | The JIT has to write a /tmp/perf-%d.map (%d = pid of process) file | ||
5 | |||
6 | This is a text file. | ||
7 | |||
8 | Each line has the following format, fields separated with spaces: | ||
9 | |||
10 | START SIZE symbolname | ||
11 | |||
12 | START and SIZE are hex numbers without 0x. | ||
13 | symbolname is the rest of the line, so it could contain special characters. | ||
14 | |||
15 | The ownership of the file has to match the process. | ||
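A minimal sketch of how a JIT might append entries in this format. The helper name and the choice to append one line per compiled symbol are assumptions; the document only prescribes the "START SIZE symbolname" layout with hex values and no 0x prefix:

#include <stdio.h>
#include <unistd.h>

/* Hypothetical helper: record one JIT-ed symbol in /tmp/perf-<pid>.map
 * using the "START SIZE symbolname" format described above. */
static void perf_map_add(const void *code, unsigned long size, const char *name)
{
	char path[64];
	FILE *f;

	snprintf(path, sizeof(path), "/tmp/perf-%d.map", (int) getpid());
	f = fopen(path, "a");
	if (!f)
		return;
	fprintf(f, "%lx %lx %s\n", (unsigned long) code, size, name);
	fclose(f);
}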
diff --git a/tools/perf/Documentation/perf-annotate.txt b/tools/perf/Documentation/perf-annotate.txt index c89f9e1453f7..c8ffd9fd5c6a 100644 --- a/tools/perf/Documentation/perf-annotate.txt +++ b/tools/perf/Documentation/perf-annotate.txt | |||
@@ -85,6 +85,9 @@ OPTIONS | |||
85 | -M:: | 85 | -M:: |
86 | --disassembler-style=:: Set disassembler style for objdump. | 86 | --disassembler-style=:: Set disassembler style for objdump. |
87 | 87 | ||
88 | --objdump=<path>:: | ||
89 | Path to objdump binary. | ||
90 | |||
88 | SEE ALSO | 91 | SEE ALSO |
89 | -------- | 92 | -------- |
90 | linkperf:perf-record[1], linkperf:perf-report[1] | 93 | linkperf:perf-record[1], linkperf:perf-report[1] |
diff --git a/tools/perf/Documentation/perf-diff.txt b/tools/perf/Documentation/perf-diff.txt index 74d7481ed7a6..ab7f667de1b1 100644 --- a/tools/perf/Documentation/perf-diff.txt +++ b/tools/perf/Documentation/perf-diff.txt | |||
@@ -17,6 +17,9 @@ captured via perf record. | |||
17 | 17 | ||
18 | If no parameters are passed it will assume perf.data.old and perf.data. | 18 | If no parameters are passed it will assume perf.data.old and perf.data. |
19 | 19 | ||
20 | The differential profile is displayed only for events matching both | ||
21 | specified perf.data files. | ||
22 | |||
20 | OPTIONS | 23 | OPTIONS |
21 | ------- | 24 | ------- |
22 | -M:: | 25 | -M:: |
diff --git a/tools/perf/Documentation/perf-kvm.txt b/tools/perf/Documentation/perf-kvm.txt index dd84cb2f0a88..326f2cb333cb 100644 --- a/tools/perf/Documentation/perf-kvm.txt +++ b/tools/perf/Documentation/perf-kvm.txt | |||
@@ -12,7 +12,7 @@ SYNOPSIS | |||
12 | [--guestkallsyms=<path> --guestmodules=<path> | --guestvmlinux=<path>]] | 12 | [--guestkallsyms=<path> --guestmodules=<path> | --guestvmlinux=<path>]] |
13 | {top|record|report|diff|buildid-list} | 13 | {top|record|report|diff|buildid-list} |
14 | 'perf kvm' [--host] [--guest] [--guestkallsyms=<path> --guestmodules=<path> | 14 | 'perf kvm' [--host] [--guest] [--guestkallsyms=<path> --guestmodules=<path> |
15 | | --guestvmlinux=<path>] {top|record|report|diff|buildid-list} | 15 | | --guestvmlinux=<path>] {top|record|report|diff|buildid-list|stat} |
16 | 16 | ||
17 | DESCRIPTION | 17 | DESCRIPTION |
18 | ----------- | 18 | ----------- |
@@ -38,6 +38,18 @@ There are a couple of variants of perf kvm: | |||
38 | so that other tools can be used to fetch packages with matching symbol tables | 38 | so that other tools can be used to fetch packages with matching symbol tables |
39 | for use by perf report. | 39 | for use by perf report. |
40 | 40 | ||
41 | 'perf kvm stat <command>' to run a command and gather performance counter | ||
42 | statistics. | ||
43 | In particular, 'perf kvm stat record/report' generates a statistical analysis | ||
44 | of KVM events. Currently, vmexit, mmio and ioport events are supported. | ||
45 | 'perf kvm stat record <command>' records the kvm events that occur between | ||
46 | the start and the end of <command>, | ||
47 | and produces a file which contains the tracing results of those kvm | ||
48 | events. | ||
49 | |||
50 | 'perf kvm stat report' reports statistical data which includes events | ||
51 | handled time, samples, and so on. | ||
52 | |||
41 | OPTIONS | 53 | OPTIONS |
42 | ------- | 54 | ------- |
43 | -i:: | 55 | -i:: |
@@ -68,7 +80,21 @@ OPTIONS | |||
68 | --guestvmlinux=<path>:: | 80 | --guestvmlinux=<path>:: |
69 | Guest os kernel vmlinux. | 81 | Guest os kernel vmlinux. |
70 | 82 | ||
83 | STAT REPORT OPTIONS | ||
84 | ------------------- | ||
85 | --vcpu=<value>:: | ||
86 | analyze events which occur on this vcpu. (default: all vcpus) | ||
87 | |||
88 | --events=<value>:: | ||
89 | events to be analyzed. Possible values: vmexit, mmio, ioport. | ||
90 | (default: vmexit) | ||
91 | -k:: | ||
92 | --key=<value>:: | ||
93 | Sorting key. Possible values: sample (default, sort by samples | ||
94 | number), time (sort by average time). | ||
95 | |||
71 | SEE ALSO | 96 | SEE ALSO |
72 | -------- | 97 | -------- |
73 | linkperf:perf-top[1], linkperf:perf-record[1], linkperf:perf-report[1], | 98 | linkperf:perf-top[1], linkperf:perf-record[1], linkperf:perf-report[1], |
74 | linkperf:perf-diff[1], linkperf:perf-buildid-list[1] | 99 | linkperf:perf-diff[1], linkperf:perf-buildid-list[1], |
100 | linkperf:perf-stat[1] | ||
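Taken together, the documented flow is a record step followed by a report step. An illustrative session, with arguments chosen only as an example, would be 'perf kvm stat record sleep 10' to gather the kvm events for ten seconds, followed by 'perf kvm stat report --events=vmexit --key=time' to sort the vmexit statistics by average handling time using the options listed above.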
diff --git a/tools/perf/Documentation/perf-list.txt b/tools/perf/Documentation/perf-list.txt index ddc22525228d..d1e39dc8c810 100644 --- a/tools/perf/Documentation/perf-list.txt +++ b/tools/perf/Documentation/perf-list.txt | |||
@@ -15,24 +15,43 @@ DESCRIPTION | |||
15 | This command displays the symbolic event types which can be selected in the | 15 | This command displays the symbolic event types which can be selected in the |
16 | various perf commands with the -e option. | 16 | various perf commands with the -e option. |
17 | 17 | ||
18 | [[EVENT_MODIFIERS]] | ||
18 | EVENT MODIFIERS | 19 | EVENT MODIFIERS |
19 | --------------- | 20 | --------------- |
20 | 21 | ||
21 | Events can optionally have a modifier by appending a colon and one or | 22 | Events can optionally have a modifier by appending a colon and one or
22 | more modifiers. Modifiers allow the user to restrict when events are | 23 | more modifiers. Modifiers allow the user to restrict the events to be |
23 | counted with 'u' for user-space, 'k' for kernel, 'h' for hypervisor. | 24 | counted. The following modifiers exist: |
24 | Additional modifiers are 'G' for guest counting (in KVM guests) and 'H' | 25 | |
25 | for host counting (not in KVM guests). | 26 | u - user-space counting |
27 | k - kernel counting | ||
28 | h - hypervisor counting | ||
29 | G - guest counting (in KVM guests) | ||
30 | H - host counting (not in KVM guests) | ||
31 | p - precise level | ||
26 | 32 | ||
27 | The 'p' modifier can be used for specifying how precise the instruction | 33 | The 'p' modifier can be used for specifying how precise the instruction |
28 | address should be. The 'p' modifier is currently only implemented for | 34 | address should be. The 'p' modifier can be specified multiple times: |
29 | Intel PEBS and can be specified multiple times: | 35 | |
30 | 0 - SAMPLE_IP can have arbitrary skid | 36 | 0 - SAMPLE_IP can have arbitrary skid |
31 | 1 - SAMPLE_IP must have constant skid | 37 | 1 - SAMPLE_IP must have constant skid |
32 | 2 - SAMPLE_IP requested to have 0 skid | 38 | 2 - SAMPLE_IP requested to have 0 skid |
33 | 3 - SAMPLE_IP must have 0 skid | 39 | 3 - SAMPLE_IP must have 0 skid |
40 | |||
41 | For Intel systems precise event sampling is implemented with PEBS | ||
42 | which supports up to precise-level 2. | ||
34 | 43 | ||
35 | The PEBS implementation now supports up to 2. | 44 | On AMD systems it is implemented using IBS (up to precise-level 2). |
45 | The precise modifier works with event types 0x76 (cpu-cycles, CPU | ||
46 | clocks not halted) and 0xC1 (micro-ops retired). Both events map to | ||
47 | IBS execution sampling (IBS op) with the IBS Op Counter Control bit | ||
48 | (IbsOpCntCtl) set respectively (see AMD64 Architecture Programmer’s | ||
49 | Manual Volume 2: System Programming, 13.3 Instruction-Based | ||
50 | Sampling). Examples of using IBS: | ||
51 | |||
52 | perf record -a -e cpu-cycles:p ... # use ibs op counting cycles | ||
53 | perf record -a -e r076:p ... # same as -e cpu-cycles:p | ||
54 | perf record -a -e r0C1:p ... # use ibs op counting micro-ops | ||
36 | 55 | ||
37 | RAW HARDWARE EVENT DESCRIPTOR | 56 | RAW HARDWARE EVENT DESCRIPTOR |
38 | ----------------------------- | 57 | ----------------------------- |
@@ -44,6 +63,11 @@ layout of IA32_PERFEVTSELx MSRs (see [Intel® 64 and IA-32 Architectures Softwar | |||
44 | of IA32_PERFEVTSELx MSRs) or AMD's PerfEvtSeln (see [AMD64 Architecture Programmer’s Manual Volume 2: System Programming], Page 344, | 63 | of IA32_PERFEVTSELx MSRs) or AMD's PerfEvtSeln (see [AMD64 Architecture Programmer’s Manual Volume 2: System Programming], Page 344, |
45 | Figure 13-7 Performance Event-Select Register (PerfEvtSeln)). | 64 | Figure 13-7 Performance Event-Select Register (PerfEvtSeln)). |
46 | 65 | ||
66 | Note: Only the following bit fields can be set in x86 counter | ||
67 | registers: event, umask, edge, inv, cmask. Esp. guest/host only and | ||
68 | OS/user mode flags must be setup using <<EVENT_MODIFIERS, EVENT | ||
69 | MODIFIERS>>. | ||
70 | |||
47 | Example: | 71 | Example: |
48 | 72 | ||
49 | If the Intel docs for a QM720 Core i7 describe an event as: | 73 | If the Intel docs for a QM720 Core i7 describe an event as: |
@@ -91,4 +115,4 @@ SEE ALSO | |||
91 | linkperf:perf-stat[1], linkperf:perf-top[1], | 115 | linkperf:perf-stat[1], linkperf:perf-top[1], |
92 | linkperf:perf-record[1], | 116 | linkperf:perf-record[1], |
93 | http://www.intel.com/Assets/PDF/manual/253669.pdf[Intel® 64 and IA-32 Architectures Software Developer's Manual Volume 3B: System Programming Guide], | 117 | http://www.intel.com/Assets/PDF/manual/253669.pdf[Intel® 64 and IA-32 Architectures Software Developer's Manual Volume 3B: System Programming Guide], |
94 | http://support.amd.com/us/Processor_TechDocs/24593.pdf[AMD64 Architecture Programmer’s Manual Volume 2: System Programming] | 118 | http://support.amd.com/us/Processor_TechDocs/24593_APM_v2.pdf[AMD64 Architecture Programmer’s Manual Volume 2: System Programming] |
diff --git a/tools/perf/Documentation/perf-report.txt b/tools/perf/Documentation/perf-report.txt index 495210a612c4..f4d91bebd59d 100644 --- a/tools/perf/Documentation/perf-report.txt +++ b/tools/perf/Documentation/perf-report.txt | |||
@@ -168,6 +168,9 @@ OPTIONS | |||
168 | branch stacks and it will automatically switch to the branch view mode, | 168 | branch stacks and it will automatically switch to the branch view mode, |
169 | unless --no-branch-stack is used. | 169 | unless --no-branch-stack is used. |
170 | 170 | ||
171 | --objdump=<path>:: | ||
172 | Path to objdump binary. | ||
173 | |||
171 | SEE ALSO | 174 | SEE ALSO |
172 | -------- | 175 | -------- |
173 | linkperf:perf-stat[1], linkperf:perf-annotate[1] | 176 | linkperf:perf-stat[1], linkperf:perf-annotate[1] |
diff --git a/tools/perf/Documentation/perf-script-perl.txt b/tools/perf/Documentation/perf-script-perl.txt index 3152cca15501..d00bef231340 100644 --- a/tools/perf/Documentation/perf-script-perl.txt +++ b/tools/perf/Documentation/perf-script-perl.txt | |||
@@ -116,8 +116,8 @@ search path and 'use'ing a few support modules (see module | |||
116 | descriptions below): | 116 | descriptions below): |
117 | 117 | ||
118 | ---- | 118 | ---- |
119 | use lib "$ENV{'PERF_EXEC_PATH'}/scripts/perl/perf-script-Util/lib"; | 119 | use lib "$ENV{'PERF_EXEC_PATH'}/scripts/perl/Perf-Trace-Util/lib"; |
120 | use lib "./perf-script-Util/lib"; | 120 | use lib "./Perf-Trace-Util/lib"; |
121 | use Perf::Trace::Core; | 121 | use Perf::Trace::Core; |
122 | use Perf::Trace::Context; | 122 | use Perf::Trace::Context; |
123 | use Perf::Trace::Util; | 123 | use Perf::Trace::Util; |
diff --git a/tools/perf/Documentation/perf-script-python.txt b/tools/perf/Documentation/perf-script-python.txt index 471022069119..a4027f221a53 100644 --- a/tools/perf/Documentation/perf-script-python.txt +++ b/tools/perf/Documentation/perf-script-python.txt | |||
@@ -129,7 +129,7 @@ import os | |||
129 | import sys | 129 | import sys |
130 | 130 | ||
131 | sys.path.append(os.environ['PERF_EXEC_PATH'] + \ | 131 | sys.path.append(os.environ['PERF_EXEC_PATH'] + \ |
132 | '/scripts/python/perf-script-Util/lib/Perf/Trace') | 132 | '/scripts/python/Perf-Trace-Util/lib/Perf/Trace') |
133 | 133 | ||
134 | from perf_trace_context import * | 134 | from perf_trace_context import * |
135 | from Core import * | 135 | from Core import * |
@@ -216,7 +216,7 @@ import os | |||
216 | import sys | 216 | import sys |
217 | 217 | ||
218 | sys.path.append(os.environ['PERF_EXEC_PATH'] + \ | 218 | sys.path.append(os.environ['PERF_EXEC_PATH'] + \ |
219 | '/scripts/python/perf-script-Util/lib/Perf/Trace') | 219 | '/scripts/python/Perf-Trace-Util/lib/Perf/Trace') |
220 | 220 | ||
221 | from perf_trace_context import * | 221 | from perf_trace_context import * |
222 | from Core import * | 222 | from Core import * |
@@ -279,7 +279,7 @@ import os | |||
279 | import sys | 279 | import sys |
280 | 280 | ||
281 | sys.path.append(os.environ['PERF_EXEC_PATH'] + \ | 281 | sys.path.append(os.environ['PERF_EXEC_PATH'] + \ |
282 | '/scripts/python/perf-script-Util/lib/Perf/Trace') | 282 | '/scripts/python/Perf-Trace-Util/lib/Perf/Trace') |
283 | 283 | ||
284 | from perf_trace_context import * | 284 | from perf_trace_context import * |
285 | from Core import * | 285 | from Core import * |
@@ -391,7 +391,7 @@ drwxr-xr-x 4 trz trz 4096 2010-01-26 22:30 . | |||
391 | drwxr-xr-x 4 trz trz 4096 2010-01-26 22:29 .. | 391 | drwxr-xr-x 4 trz trz 4096 2010-01-26 22:29 .. |
392 | drwxr-xr-x 2 trz trz 4096 2010-01-26 22:29 bin | 392 | drwxr-xr-x 2 trz trz 4096 2010-01-26 22:29 bin |
393 | -rw-r--r-- 1 trz trz 2548 2010-01-26 22:29 check-perf-script.py | 393 | -rw-r--r-- 1 trz trz 2548 2010-01-26 22:29 check-perf-script.py |
394 | drwxr-xr-x 3 trz trz 4096 2010-01-26 22:49 perf-script-Util | 394 | drwxr-xr-x 3 trz trz 4096 2010-01-26 22:49 Perf-Trace-Util |
395 | -rw-r--r-- 1 trz trz 1462 2010-01-26 22:30 syscall-counts.py | 395 | -rw-r--r-- 1 trz trz 1462 2010-01-26 22:30 syscall-counts.py |
396 | ---- | 396 | ---- |
397 | 397 | ||
@@ -518,7 +518,7 @@ descriptions below): | |||
518 | import sys | 518 | import sys |
519 | 519 | ||
520 | sys.path.append(os.environ['PERF_EXEC_PATH'] + \ | 520 | sys.path.append(os.environ['PERF_EXEC_PATH'] + \ |
521 | '/scripts/python/perf-script-Util/lib/Perf/Trace') | 521 | '/scripts/python/Perf-Trace-Util/lib/Perf/Trace') |
522 | 522 | ||
523 | from perf_trace_context import * | 523 | from perf_trace_context import * |
524 | from Core import * | 524 | from Core import * |
diff --git a/tools/perf/Documentation/perf-trace.txt b/tools/perf/Documentation/perf-trace.txt new file mode 100644 index 000000000000..3a2ae37310a9 --- /dev/null +++ b/tools/perf/Documentation/perf-trace.txt | |||
@@ -0,0 +1,53 @@ | |||
1 | perf-trace(1) | ||
2 | ============= | ||
3 | |||
4 | NAME | ||
5 | ---- | ||
6 | perf-trace - strace inspired tool | ||
7 | |||
8 | SYNOPSIS | ||
9 | -------- | ||
10 | [verse] | ||
11 | 'perf trace' | ||
12 | |||
13 | DESCRIPTION | ||
14 | ----------- | ||
15 | This command will show the events associated with the target, initially | ||
16 | syscalls, but eventually also other system events like pagefaults, task | ||
17 | lifetime events, scheduling events, etc. | ||
18 | |||
19 | Initially this is a live-mode-only tool, but eventually it will work with | ||
20 | perf.data files like the other tools, allowing the 'record' phase to be | ||
21 | detached from the analysis phases. | ||
22 | |||
23 | OPTIONS | ||
24 | ------- | ||
25 | |||
26 | --all-cpus:: | ||
27 | System-wide collection from all CPUs. | ||
28 | |||
29 | -p:: | ||
30 | --pid=:: | ||
31 | Record events on existing process ID (comma separated list). | ||
32 | |||
33 | --tid=:: | ||
34 | Record events on existing thread ID (comma separated list). | ||
35 | |||
36 | --uid=:: | ||
37 | Record events in threads owned by uid. Name or number. | ||
38 | |||
39 | --no-inherit:: | ||
40 | Child tasks do not inherit counters. | ||
41 | |||
42 | --mmap-pages=:: | ||
43 | Number of mmap data pages. Must be a power of two. | ||
44 | |||
45 | --cpu:: | ||
46 | Collect samples only on the list of CPUs provided. Multiple CPUs can be provided as a | ||
47 | comma-separated list with no space: 0,1. Ranges of CPUs are specified with -: 0-2. | ||
48 | In per-thread mode with inheritance mode on (default), events are captured only when | ||
49 | the thread executes on the designated CPUs. Default is to monitor all CPUs. | ||
50 | |||
51 | SEE ALSO | ||
52 | -------- | ||
53 | linkperf:perf-record[1], linkperf:perf-script[1] | ||
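Given the options above, an illustrative invocation (arguments chosen only as an example) would be 'perf trace --pid=1234' to follow an existing process, or 'perf trace --all-cpus' for system-wide collection.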
diff --git a/tools/perf/MANIFEST b/tools/perf/MANIFEST index b4b572e8c100..80db3f4bcf7a 100644 --- a/tools/perf/MANIFEST +++ b/tools/perf/MANIFEST | |||
@@ -10,8 +10,12 @@ include/linux/stringify.h | |||
10 | lib/rbtree.c | 10 | lib/rbtree.c |
11 | include/linux/swab.h | 11 | include/linux/swab.h |
12 | arch/*/include/asm/unistd*.h | 12 | arch/*/include/asm/unistd*.h |
13 | arch/*/include/asm/perf_regs.h | ||
13 | arch/*/lib/memcpy*.S | 14 | arch/*/lib/memcpy*.S |
14 | arch/*/lib/memset*.S | 15 | arch/*/lib/memset*.S |
15 | include/linux/poison.h | 16 | include/linux/poison.h |
16 | include/linux/magic.h | 17 | include/linux/magic.h |
17 | include/linux/hw_breakpoint.h | 18 | include/linux/hw_breakpoint.h |
19 | arch/x86/include/asm/svm.h | ||
20 | arch/x86/include/asm/vmx.h | ||
21 | arch/x86/include/asm/kvm_host.h | ||
diff --git a/tools/perf/Makefile b/tools/perf/Makefile index 35655c3a7b7a..e5e71e7d95a0 100644 --- a/tools/perf/Makefile +++ b/tools/perf/Makefile | |||
@@ -37,7 +37,14 @@ include config/utilities.mak | |||
37 | # | 37 | # |
38 | # Define NO_NEWT if you do not want TUI support. | 38 | # Define NO_NEWT if you do not want TUI support. |
39 | # | 39 | # |
40 | # Define NO_GTK2 if you do not want GTK+ GUI support. | ||
41 | # | ||
40 | # Define NO_DEMANGLE if you do not want C++ symbol demangling. | 42 | # Define NO_DEMANGLE if you do not want C++ symbol demangling. |
43 | # | ||
44 | # Define NO_LIBELF if you do not want libelf dependency (e.g. cross-builds) | ||
45 | # | ||
46 | # Define NO_LIBUNWIND if you do not want libunwind dependency for dwarf | ||
47 | # backtrace post unwind. | ||
41 | 48 | ||
42 | $(OUTPUT)PERF-VERSION-FILE: .FORCE-PERF-VERSION-FILE | 49 | $(OUTPUT)PERF-VERSION-FILE: .FORCE-PERF-VERSION-FILE |
43 | @$(SHELL_PATH) util/PERF-VERSION-GEN $(OUTPUT) | 50 | @$(SHELL_PATH) util/PERF-VERSION-GEN $(OUTPUT) |
@@ -50,16 +57,19 @@ ARCH ?= $(shell echo $(uname_M) | sed -e s/i.86/i386/ -e s/sun4u/sparc64/ \ | |||
50 | -e s/s390x/s390/ -e s/parisc64/parisc/ \ | 57 | -e s/s390x/s390/ -e s/parisc64/parisc/ \ |
51 | -e s/ppc.*/powerpc/ -e s/mips.*/mips/ \ | 58 | -e s/ppc.*/powerpc/ -e s/mips.*/mips/ \ |
52 | -e s/sh[234].*/sh/ ) | 59 | -e s/sh[234].*/sh/ ) |
60 | NO_PERF_REGS := 1 | ||
53 | 61 | ||
54 | CC = $(CROSS_COMPILE)gcc | 62 | CC = $(CROSS_COMPILE)gcc |
55 | AR = $(CROSS_COMPILE)ar | 63 | AR = $(CROSS_COMPILE)ar |
56 | 64 | ||
57 | # Additional ARCH settings for x86 | 65 | # Additional ARCH settings for x86 |
58 | ifeq ($(ARCH),i386) | 66 | ifeq ($(ARCH),i386) |
59 | ARCH := x86 | 67 | override ARCH := x86 |
68 | NO_PERF_REGS := 0 | ||
69 | LIBUNWIND_LIBS = -lunwind -lunwind-x86 | ||
60 | endif | 70 | endif |
61 | ifeq ($(ARCH),x86_64) | 71 | ifeq ($(ARCH),x86_64) |
62 | ARCH := x86 | 72 | override ARCH := x86 |
63 | IS_X86_64 := 0 | 73 | IS_X86_64 := 0 |
64 | ifeq (, $(findstring m32,$(EXTRA_CFLAGS))) | 74 | ifeq (, $(findstring m32,$(EXTRA_CFLAGS))) |
65 | IS_X86_64 := $(shell echo __x86_64__ | ${CC} -E -xc - | tail -n 1) | 75 | IS_X86_64 := $(shell echo __x86_64__ | ${CC} -E -xc - | tail -n 1) |
@@ -69,6 +79,8 @@ ifeq ($(ARCH),x86_64) | |||
69 | ARCH_CFLAGS := -DARCH_X86_64 | 79 | ARCH_CFLAGS := -DARCH_X86_64 |
70 | ARCH_INCLUDE = ../../arch/x86/lib/memcpy_64.S ../../arch/x86/lib/memset_64.S | 80 | ARCH_INCLUDE = ../../arch/x86/lib/memcpy_64.S ../../arch/x86/lib/memset_64.S |
71 | endif | 81 | endif |
82 | NO_PERF_REGS := 0 | ||
83 | LIBUNWIND_LIBS = -lunwind -lunwind-x86_64 | ||
72 | endif | 84 | endif |
73 | 85 | ||
74 | # Treat warnings as errors unless directed not to | 86 | # Treat warnings as errors unless directed not to |
@@ -89,7 +101,7 @@ ifdef PARSER_DEBUG | |||
89 | PARSER_DEBUG_CFLAGS := -DPARSER_DEBUG | 101 | PARSER_DEBUG_CFLAGS := -DPARSER_DEBUG |
90 | endif | 102 | endif |
91 | 103 | ||
92 | CFLAGS = -fno-omit-frame-pointer -ggdb3 -Wall -Wextra -std=gnu99 $(CFLAGS_WERROR) $(CFLAGS_OPTIMIZE) $(EXTRA_WARNINGS) $(EXTRA_CFLAGS) $(PARSER_DEBUG_CFLAGS) | 104 | CFLAGS = -fno-omit-frame-pointer -ggdb3 -funwind-tables -Wall -Wextra -std=gnu99 $(CFLAGS_WERROR) $(CFLAGS_OPTIMIZE) $(EXTRA_WARNINGS) $(EXTRA_CFLAGS) $(PARSER_DEBUG_CFLAGS) |
93 | EXTLIBS = -lpthread -lrt -lelf -lm | 105 | EXTLIBS = -lpthread -lrt -lelf -lm |
94 | ALL_CFLAGS = $(CFLAGS) -D_LARGEFILE64_SOURCE -D_FILE_OFFSET_BITS=64 -D_GNU_SOURCE | 106 | ALL_CFLAGS = $(CFLAGS) -D_LARGEFILE64_SOURCE -D_FILE_OFFSET_BITS=64 -D_GNU_SOURCE |
95 | ALL_LDFLAGS = $(LDFLAGS) | 107 | ALL_LDFLAGS = $(LDFLAGS) |
@@ -186,10 +198,10 @@ SCRIPTS = $(patsubst %.sh,%,$(SCRIPT_SH)) | |||
186 | 198 | ||
187 | TRACE_EVENT_DIR = ../lib/traceevent/ | 199 | TRACE_EVENT_DIR = ../lib/traceevent/ |
188 | 200 | ||
189 | ifeq ("$(origin O)", "command line") | 201 | ifneq ($(OUTPUT),) |
190 | TE_PATH=$(OUTPUT)/ | 202 | TE_PATH=$(OUTPUT) |
191 | else | 203 | else |
192 | TE_PATH=$(TRACE_EVENT_DIR)/ | 204 | TE_PATH=$(TRACE_EVENT_DIR) |
193 | endif | 205 | endif |
194 | 206 | ||
195 | LIBTRACEEVENT = $(TE_PATH)libtraceevent.a | 207 | LIBTRACEEVENT = $(TE_PATH)libtraceevent.a |
@@ -221,13 +233,13 @@ export PERL_PATH | |||
221 | FLEX = flex | 233 | FLEX = flex |
222 | BISON= bison | 234 | BISON= bison |
223 | 235 | ||
224 | $(OUTPUT)util/parse-events-flex.c: util/parse-events.l | 236 | $(OUTPUT)util/parse-events-flex.c: util/parse-events.l $(OUTPUT)util/parse-events-bison.c |
225 | $(QUIET_FLEX)$(FLEX) --header-file=$(OUTPUT)util/parse-events-flex.h $(PARSER_DEBUG_FLEX) -t util/parse-events.l > $(OUTPUT)util/parse-events-flex.c | 237 | $(QUIET_FLEX)$(FLEX) --header-file=$(OUTPUT)util/parse-events-flex.h $(PARSER_DEBUG_FLEX) -t util/parse-events.l > $(OUTPUT)util/parse-events-flex.c |
226 | 238 | ||
227 | $(OUTPUT)util/parse-events-bison.c: util/parse-events.y | 239 | $(OUTPUT)util/parse-events-bison.c: util/parse-events.y |
228 | $(QUIET_BISON)$(BISON) -v util/parse-events.y -d $(PARSER_DEBUG_BISON) -o $(OUTPUT)util/parse-events-bison.c | 240 | $(QUIET_BISON)$(BISON) -v util/parse-events.y -d $(PARSER_DEBUG_BISON) -o $(OUTPUT)util/parse-events-bison.c |
229 | 241 | ||
230 | $(OUTPUT)util/pmu-flex.c: util/pmu.l | 242 | $(OUTPUT)util/pmu-flex.c: util/pmu.l $(OUTPUT)util/pmu-bison.c |
231 | $(QUIET_FLEX)$(FLEX) --header-file=$(OUTPUT)util/pmu-flex.h -t util/pmu.l > $(OUTPUT)util/pmu-flex.c | 243 | $(QUIET_FLEX)$(FLEX) --header-file=$(OUTPUT)util/pmu-flex.h -t util/pmu.l > $(OUTPUT)util/pmu-flex.c |
232 | 244 | ||
233 | $(OUTPUT)util/pmu-bison.c: util/pmu.y | 245 | $(OUTPUT)util/pmu-bison.c: util/pmu.y |
@@ -252,6 +264,7 @@ LIB_H += util/include/linux/ctype.h | |||
252 | LIB_H += util/include/linux/kernel.h | 264 | LIB_H += util/include/linux/kernel.h |
253 | LIB_H += util/include/linux/list.h | 265 | LIB_H += util/include/linux/list.h |
254 | LIB_H += util/include/linux/export.h | 266 | LIB_H += util/include/linux/export.h |
267 | LIB_H += util/include/linux/magic.h | ||
255 | LIB_H += util/include/linux/poison.h | 268 | LIB_H += util/include/linux/poison.h |
256 | LIB_H += util/include/linux/prefetch.h | 269 | LIB_H += util/include/linux/prefetch.h |
257 | LIB_H += util/include/linux/rbtree.h | 270 | LIB_H += util/include/linux/rbtree.h |
@@ -321,6 +334,10 @@ LIB_H += $(TRACE_EVENT_DIR)event-parse.h | |||
321 | LIB_H += util/target.h | 334 | LIB_H += util/target.h |
322 | LIB_H += util/rblist.h | 335 | LIB_H += util/rblist.h |
323 | LIB_H += util/intlist.h | 336 | LIB_H += util/intlist.h |
337 | LIB_H += util/perf_regs.h | ||
338 | LIB_H += util/unwind.h | ||
339 | LIB_H += ui/helpline.h | ||
340 | LIB_H += util/vdso.h | ||
324 | 341 | ||
325 | LIB_OBJS += $(OUTPUT)util/abspath.o | 342 | LIB_OBJS += $(OUTPUT)util/abspath.o |
326 | LIB_OBJS += $(OUTPUT)util/alias.o | 343 | LIB_OBJS += $(OUTPUT)util/alias.o |
@@ -356,6 +373,7 @@ LIB_OBJS += $(OUTPUT)util/usage.o | |||
356 | LIB_OBJS += $(OUTPUT)util/wrapper.o | 373 | LIB_OBJS += $(OUTPUT)util/wrapper.o |
357 | LIB_OBJS += $(OUTPUT)util/sigchain.o | 374 | LIB_OBJS += $(OUTPUT)util/sigchain.o |
358 | LIB_OBJS += $(OUTPUT)util/symbol.o | 375 | LIB_OBJS += $(OUTPUT)util/symbol.o |
376 | LIB_OBJS += $(OUTPUT)util/symbol-elf.o | ||
359 | LIB_OBJS += $(OUTPUT)util/dso-test-data.o | 377 | LIB_OBJS += $(OUTPUT)util/dso-test-data.o |
360 | LIB_OBJS += $(OUTPUT)util/color.o | 378 | LIB_OBJS += $(OUTPUT)util/color.o |
361 | LIB_OBJS += $(OUTPUT)util/pager.o | 379 | LIB_OBJS += $(OUTPUT)util/pager.o |
@@ -387,11 +405,15 @@ LIB_OBJS += $(OUTPUT)util/cgroup.o | |||
387 | LIB_OBJS += $(OUTPUT)util/target.o | 405 | LIB_OBJS += $(OUTPUT)util/target.o |
388 | LIB_OBJS += $(OUTPUT)util/rblist.o | 406 | LIB_OBJS += $(OUTPUT)util/rblist.o |
389 | LIB_OBJS += $(OUTPUT)util/intlist.o | 407 | LIB_OBJS += $(OUTPUT)util/intlist.o |
408 | LIB_OBJS += $(OUTPUT)util/vdso.o | ||
409 | LIB_OBJS += $(OUTPUT)util/stat.o | ||
390 | 410 | ||
391 | BUILTIN_OBJS += $(OUTPUT)builtin-annotate.o | 411 | LIB_OBJS += $(OUTPUT)ui/helpline.o |
412 | LIB_OBJS += $(OUTPUT)ui/hist.o | ||
413 | LIB_OBJS += $(OUTPUT)ui/stdio/hist.o | ||
392 | 414 | ||
415 | BUILTIN_OBJS += $(OUTPUT)builtin-annotate.o | ||
393 | BUILTIN_OBJS += $(OUTPUT)builtin-bench.o | 416 | BUILTIN_OBJS += $(OUTPUT)builtin-bench.o |
394 | |||
395 | # Benchmark modules | 417 | # Benchmark modules |
396 | BUILTIN_OBJS += $(OUTPUT)bench/sched-messaging.o | 418 | BUILTIN_OBJS += $(OUTPUT)bench/sched-messaging.o |
397 | BUILTIN_OBJS += $(OUTPUT)bench/sched-pipe.o | 419 | BUILTIN_OBJS += $(OUTPUT)bench/sched-pipe.o |
@@ -449,34 +471,73 @@ PYRF_OBJS += $(OUTPUT)util/xyarray.o | |||
449 | -include config.mak.autogen | 471 | -include config.mak.autogen |
450 | -include config.mak | 472 | -include config.mak |
451 | 473 | ||
452 | ifndef NO_DWARF | 474 | ifdef NO_LIBELF |
453 | FLAGS_DWARF=$(ALL_CFLAGS) -ldw -lelf $(ALL_LDFLAGS) $(EXTLIBS) | ||
454 | ifneq ($(call try-cc,$(SOURCE_DWARF),$(FLAGS_DWARF)),y) | ||
455 | msg := $(warning No libdw.h found or old libdw.h found or elfutils is older than 0.138, disables dwarf support. Please install new elfutils-devel/libdw-dev); | ||
456 | NO_DWARF := 1 | 475 | NO_DWARF := 1 |
457 | endif # Dwarf support | 476 | NO_DEMANGLE := 1 |
458 | endif # NO_DWARF | 477 | NO_LIBUNWIND := 1 |
459 | 478 | else | |
460 | -include arch/$(ARCH)/Makefile | ||
461 | |||
462 | ifneq ($(OUTPUT),) | ||
463 | BASIC_CFLAGS += -I$(OUTPUT) | ||
464 | endif | ||
465 | |||
466 | FLAGS_LIBELF=$(ALL_CFLAGS) $(ALL_LDFLAGS) $(EXTLIBS) | 479 | FLAGS_LIBELF=$(ALL_CFLAGS) $(ALL_LDFLAGS) $(EXTLIBS) |
467 | ifneq ($(call try-cc,$(SOURCE_LIBELF),$(FLAGS_LIBELF)),y) | 480 | ifneq ($(call try-cc,$(SOURCE_LIBELF),$(FLAGS_LIBELF)),y) |
468 | FLAGS_GLIBC=$(ALL_CFLAGS) $(ALL_LDFLAGS) | 481 | FLAGS_GLIBC=$(ALL_CFLAGS) $(ALL_LDFLAGS) |
469 | ifneq ($(call try-cc,$(SOURCE_GLIBC),$(FLAGS_GLIBC)),y) | 482 | ifneq ($(call try-cc,$(SOURCE_GLIBC),$(FLAGS_GLIBC)),y) |
470 | msg := $(error No gnu/libc-version.h found, please install glibc-dev[el]/glibc-static); | 483 | msg := $(error No gnu/libc-version.h found, please install glibc-dev[el]/glibc-static); |
471 | else | 484 | else |
472 | msg := $(error No libelf.h/libelf found, please install libelf-dev/elfutils-libelf-devel); | 485 | NO_LIBELF := 1 |
486 | NO_DWARF := 1 | ||
487 | NO_DEMANGLE := 1 | ||
473 | endif | 488 | endif |
474 | endif | 489 | endif |
490 | endif # NO_LIBELF | ||
491 | |||
492 | ifndef NO_LIBUNWIND | ||
493 | # for linking with debug library, run like: | ||
494 | # make DEBUG=1 LIBUNWIND_DIR=/opt/libunwind/ | ||
495 | ifdef LIBUNWIND_DIR | ||
496 | LIBUNWIND_CFLAGS := -I$(LIBUNWIND_DIR)/include | ||
497 | LIBUNWIND_LDFLAGS := -L$(LIBUNWIND_DIR)/lib | ||
498 | endif | ||
499 | |||
500 | FLAGS_UNWIND=$(LIBUNWIND_CFLAGS) $(ALL_CFLAGS) $(LIBUNWIND_LDFLAGS) $(ALL_LDFLAGS) $(EXTLIBS) $(LIBUNWIND_LIBS) | ||
501 | ifneq ($(call try-cc,$(SOURCE_LIBUNWIND),$(FLAGS_UNWIND)),y) | ||
502 | msg := $(warning No libunwind found, disabling post unwind support. Please install libunwind-dev[el] >= 0.99); | ||
503 | NO_LIBUNWIND := 1 | ||
504 | endif # Libunwind support | ||
505 | endif # NO_LIBUNWIND | ||
506 | |||
507 | -include arch/$(ARCH)/Makefile | ||
508 | |||
509 | ifneq ($(OUTPUT),) | ||
510 | BASIC_CFLAGS += -I$(OUTPUT) | ||
511 | endif | ||
512 | |||
513 | ifdef NO_LIBELF | ||
514 | BASIC_CFLAGS += -DNO_LIBELF_SUPPORT | ||
515 | |||
516 | EXTLIBS := $(filter-out -lelf,$(EXTLIBS)) | ||
517 | |||
518 | # Remove ELF/DWARF dependent codes | ||
519 | LIB_OBJS := $(filter-out $(OUTPUT)util/symbol-elf.o,$(LIB_OBJS)) | ||
520 | LIB_OBJS := $(filter-out $(OUTPUT)util/dwarf-aux.o,$(LIB_OBJS)) | ||
521 | LIB_OBJS := $(filter-out $(OUTPUT)util/probe-event.o,$(LIB_OBJS)) | ||
522 | LIB_OBJS := $(filter-out $(OUTPUT)util/probe-finder.o,$(LIB_OBJS)) | ||
523 | |||
524 | BUILTIN_OBJS := $(filter-out $(OUTPUT)builtin-probe.o,$(BUILTIN_OBJS)) | ||
525 | |||
526 | # Use minimal symbol handling | ||
527 | LIB_OBJS += $(OUTPUT)util/symbol-minimal.o | ||
528 | |||
529 | else # NO_LIBELF | ||
475 | 530 | ||
476 | ifneq ($(call try-cc,$(SOURCE_ELF_MMAP),$(FLAGS_COMMON)),y) | 531 | ifneq ($(call try-cc,$(SOURCE_ELF_MMAP),$(FLAGS_COMMON)),y) |
477 | BASIC_CFLAGS += -DLIBELF_NO_MMAP | 532 | BASIC_CFLAGS += -DLIBELF_NO_MMAP |
478 | endif | 533 | endif |
479 | 534 | ||
535 | FLAGS_DWARF=$(ALL_CFLAGS) -ldw -lelf $(ALL_LDFLAGS) $(EXTLIBS) | ||
536 | ifneq ($(call try-cc,$(SOURCE_DWARF),$(FLAGS_DWARF)),y) | ||
537 | msg := $(warning No libdw.h found or old libdw.h found or elfutils is older than 0.138, disables dwarf support. Please install new elfutils-devel/libdw-dev); | ||
538 | NO_DWARF := 1 | ||
539 | endif # Dwarf support | ||
540 | |||
480 | ifndef NO_DWARF | 541 | ifndef NO_DWARF |
481 | ifeq ($(origin PERF_HAVE_DWARF_REGS), undefined) | 542 | ifeq ($(origin PERF_HAVE_DWARF_REGS), undefined) |
482 | msg := $(warning DWARF register mappings have not been defined for architecture $(ARCH), DWARF support disabled); | 543 | msg := $(warning DWARF register mappings have not been defined for architecture $(ARCH), DWARF support disabled); |
@@ -487,6 +548,29 @@ else | |||
487 | LIB_OBJS += $(OUTPUT)util/dwarf-aux.o | 548 | LIB_OBJS += $(OUTPUT)util/dwarf-aux.o |
488 | endif # PERF_HAVE_DWARF_REGS | 549 | endif # PERF_HAVE_DWARF_REGS |
489 | endif # NO_DWARF | 550 | endif # NO_DWARF |
551 | endif # NO_LIBELF | ||
552 | |||
553 | ifdef NO_LIBUNWIND | ||
554 | BASIC_CFLAGS += -DNO_LIBUNWIND_SUPPORT | ||
555 | else | ||
556 | EXTLIBS += $(LIBUNWIND_LIBS) | ||
557 | BASIC_CFLAGS := $(LIBUNWIND_CFLAGS) $(BASIC_CFLAGS) | ||
558 | BASIC_LDFLAGS := $(LIBUNWIND_LDFLAGS) $(BASIC_LDFLAGS) | ||
559 | LIB_OBJS += $(OUTPUT)util/unwind.o | ||
560 | endif | ||
561 | |||
562 | ifdef NO_LIBAUDIT | ||
563 | BASIC_CFLAGS += -DNO_LIBAUDIT_SUPPORT | ||
564 | else | ||
565 | FLAGS_LIBAUDIT = $(ALL_CFLAGS) $(ALL_LDFLAGS) -laudit | ||
566 | ifneq ($(call try-cc,$(SOURCE_LIBAUDIT),$(FLAGS_LIBAUDIT)),y) | ||
567 | msg := $(warning No libaudit.h found, disables 'trace' tool, please install audit-libs-devel or libaudit-dev); | ||
568 | BASIC_CFLAGS += -DNO_LIBAUDIT_SUPPORT | ||
569 | else | ||
570 | BUILTIN_OBJS += $(OUTPUT)builtin-trace.o | ||
571 | EXTLIBS += -laudit | ||
572 | endif | ||
573 | endif | ||
490 | 574 | ||
491 | ifdef NO_NEWT | 575 | ifdef NO_NEWT |
492 | BASIC_CFLAGS += -DNO_NEWT_SUPPORT | 576 | BASIC_CFLAGS += -DNO_NEWT_SUPPORT |
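For reference, the knobs introduced in this section are passed on the make command line. Illustrative invocations (paths are examples only) would be 'make NO_LIBELF=1' for a cross-build without the libelf/dwarf dependencies, 'make NO_LIBUNWIND=1' to skip dwarf post-unwind support, and 'make DEBUG=1 LIBUNWIND_DIR=/opt/libunwind/' to link against a locally installed libunwind, as the comment in the hunk above notes.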
@@ -504,14 +588,13 @@ else | |||
504 | LIB_OBJS += $(OUTPUT)ui/browsers/annotate.o | 588 | LIB_OBJS += $(OUTPUT)ui/browsers/annotate.o |
505 | LIB_OBJS += $(OUTPUT)ui/browsers/hists.o | 589 | LIB_OBJS += $(OUTPUT)ui/browsers/hists.o |
506 | LIB_OBJS += $(OUTPUT)ui/browsers/map.o | 590 | LIB_OBJS += $(OUTPUT)ui/browsers/map.o |
507 | LIB_OBJS += $(OUTPUT)ui/helpline.o | ||
508 | LIB_OBJS += $(OUTPUT)ui/progress.o | 591 | LIB_OBJS += $(OUTPUT)ui/progress.o |
509 | LIB_OBJS += $(OUTPUT)ui/util.o | 592 | LIB_OBJS += $(OUTPUT)ui/util.o |
510 | LIB_OBJS += $(OUTPUT)ui/tui/setup.o | 593 | LIB_OBJS += $(OUTPUT)ui/tui/setup.o |
511 | LIB_OBJS += $(OUTPUT)ui/tui/util.o | 594 | LIB_OBJS += $(OUTPUT)ui/tui/util.o |
595 | LIB_OBJS += $(OUTPUT)ui/tui/helpline.o | ||
512 | LIB_H += ui/browser.h | 596 | LIB_H += ui/browser.h |
513 | LIB_H += ui/browsers/map.h | 597 | LIB_H += ui/browsers/map.h |
514 | LIB_H += ui/helpline.h | ||
515 | LIB_H += ui/keysyms.h | 598 | LIB_H += ui/keysyms.h |
516 | LIB_H += ui/libslang.h | 599 | LIB_H += ui/libslang.h |
517 | LIB_H += ui/progress.h | 600 | LIB_H += ui/progress.h |
@@ -523,7 +606,7 @@ endif | |||
523 | ifdef NO_GTK2 | 606 | ifdef NO_GTK2 |
524 | BASIC_CFLAGS += -DNO_GTK2_SUPPORT | 607 | BASIC_CFLAGS += -DNO_GTK2_SUPPORT |
525 | else | 608 | else |
526 | FLAGS_GTK2=$(ALL_CFLAGS) $(ALL_LDFLAGS) $(EXTLIBS) $(shell pkg-config --libs --cflags gtk+-2.0) | 609 | FLAGS_GTK2=$(ALL_CFLAGS) $(ALL_LDFLAGS) $(EXTLIBS) $(shell pkg-config --libs --cflags gtk+-2.0 2>/dev/null) |
527 | ifneq ($(call try-cc,$(SOURCE_GTK2),$(FLAGS_GTK2)),y) | 610 | ifneq ($(call try-cc,$(SOURCE_GTK2),$(FLAGS_GTK2)),y) |
528 | msg := $(warning GTK2 not found, disables GTK2 support. Please install gtk2-devel or libgtk2.0-dev); | 611 | msg := $(warning GTK2 not found, disables GTK2 support. Please install gtk2-devel or libgtk2.0-dev); |
529 | BASIC_CFLAGS += -DNO_GTK2_SUPPORT | 612 | BASIC_CFLAGS += -DNO_GTK2_SUPPORT |
@@ -531,11 +614,12 @@ else | |||
531 | ifeq ($(call try-cc,$(SOURCE_GTK2_INFOBAR),$(FLAGS_GTK2)),y) | 614 | ifeq ($(call try-cc,$(SOURCE_GTK2_INFOBAR),$(FLAGS_GTK2)),y) |
532 | BASIC_CFLAGS += -DHAVE_GTK_INFO_BAR | 615 | BASIC_CFLAGS += -DHAVE_GTK_INFO_BAR |
533 | endif | 616 | endif |
534 | BASIC_CFLAGS += $(shell pkg-config --cflags gtk+-2.0) | 617 | BASIC_CFLAGS += $(shell pkg-config --cflags gtk+-2.0 2>/dev/null) |
535 | EXTLIBS += $(shell pkg-config --libs gtk+-2.0) | 618 | EXTLIBS += $(shell pkg-config --libs gtk+-2.0 2>/dev/null) |
536 | LIB_OBJS += $(OUTPUT)ui/gtk/browser.o | 619 | LIB_OBJS += $(OUTPUT)ui/gtk/browser.o |
537 | LIB_OBJS += $(OUTPUT)ui/gtk/setup.o | 620 | LIB_OBJS += $(OUTPUT)ui/gtk/setup.o |
538 | LIB_OBJS += $(OUTPUT)ui/gtk/util.o | 621 | LIB_OBJS += $(OUTPUT)ui/gtk/util.o |
622 | LIB_OBJS += $(OUTPUT)ui/gtk/helpline.o | ||
539 | # Make sure that it'd be included only once. | 623 | # Make sure that it'd be included only once. |
540 | ifneq ($(findstring -DNO_NEWT_SUPPORT,$(BASIC_CFLAGS)),) | 624 | ifneq ($(findstring -DNO_NEWT_SUPPORT,$(BASIC_CFLAGS)),) |
541 | LIB_OBJS += $(OUTPUT)ui/setup.o | 625 | LIB_OBJS += $(OUTPUT)ui/setup.o |
@@ -644,7 +728,7 @@ else | |||
644 | EXTLIBS += -liberty | 728 | EXTLIBS += -liberty |
645 | BASIC_CFLAGS += -DHAVE_CPLUS_DEMANGLE | 729 | BASIC_CFLAGS += -DHAVE_CPLUS_DEMANGLE |
646 | else | 730 | else |
647 | FLAGS_BFD=$(ALL_CFLAGS) $(ALL_LDFLAGS) $(EXTLIBS) -lbfd | 731 | FLAGS_BFD=$(ALL_CFLAGS) $(ALL_LDFLAGS) $(EXTLIBS) -DPACKAGE='perf' -lbfd |
648 | has_bfd := $(call try-cc,$(SOURCE_BFD),$(FLAGS_BFD)) | 732 | has_bfd := $(call try-cc,$(SOURCE_BFD),$(FLAGS_BFD)) |
649 | ifeq ($(has_bfd),y) | 733 | ifeq ($(has_bfd),y) |
650 | EXTLIBS += -lbfd | 734 | EXTLIBS += -lbfd |
@@ -674,6 +758,13 @@ else | |||
674 | endif | 758 | endif |
675 | endif | 759 | endif |
676 | 760 | ||
761 | ifeq ($(NO_PERF_REGS),0) | ||
762 | ifeq ($(ARCH),x86) | ||
763 | LIB_H += arch/x86/include/perf_regs.h | ||
764 | endif | ||
765 | else | ||
766 | BASIC_CFLAGS += -DNO_PERF_REGS | ||
767 | endif | ||
677 | 768 | ||
678 | ifdef NO_STRLCPY | 769 | ifdef NO_STRLCPY |
679 | BASIC_CFLAGS += -DNO_STRLCPY | 770 | BASIC_CFLAGS += -DNO_STRLCPY |
@@ -683,6 +774,14 @@ else | |||
683 | endif | 774 | endif |
684 | endif | 775 | endif |
685 | 776 | ||
777 | ifdef NO_BACKTRACE | ||
778 | BASIC_CFLAGS += -DNO_BACKTRACE | ||
779 | else | ||
780 | ifneq ($(call try-cc,$(SOURCE_BACKTRACE),),y) | ||
781 | BASIC_CFLAGS += -DNO_BACKTRACE | ||
782 | endif | ||
783 | endif | ||
784 | |||
686 | ifdef ASCIIDOC8 | 785 | ifdef ASCIIDOC8 |
687 | export ASCIIDOC8 | 786 | export ASCIIDOC8 |
688 | endif | 787 | endif |
@@ -700,6 +799,7 @@ perfexecdir_SQ = $(subst ','\'',$(perfexecdir)) | |||
700 | template_dir_SQ = $(subst ','\'',$(template_dir)) | 799 | template_dir_SQ = $(subst ','\'',$(template_dir)) |
701 | htmldir_SQ = $(subst ','\'',$(htmldir)) | 800 | htmldir_SQ = $(subst ','\'',$(htmldir)) |
702 | prefix_SQ = $(subst ','\'',$(prefix)) | 801 | prefix_SQ = $(subst ','\'',$(prefix)) |
802 | sysconfdir_SQ = $(subst ','\'',$(sysconfdir)) | ||
703 | 803 | ||
704 | SHELL_PATH_SQ = $(subst ','\'',$(SHELL_PATH)) | 804 | SHELL_PATH_SQ = $(subst ','\'',$(SHELL_PATH)) |
705 | 805 | ||
@@ -767,10 +867,10 @@ $(OUTPUT)perf.o perf.spec \ | |||
767 | # over the general rule for .o | 867 | # over the general rule for .o |
768 | 868 | ||
769 | $(OUTPUT)util/%-flex.o: $(OUTPUT)util/%-flex.c $(OUTPUT)PERF-CFLAGS | 869 | $(OUTPUT)util/%-flex.o: $(OUTPUT)util/%-flex.c $(OUTPUT)PERF-CFLAGS |
770 | $(QUIET_CC)$(CC) -o $@ -c $(ALL_CFLAGS) -Iutil/ -w $< | 870 | $(QUIET_CC)$(CC) -o $@ -c -Iutil/ $(ALL_CFLAGS) -w $< |
771 | 871 | ||
772 | $(OUTPUT)util/%-bison.o: $(OUTPUT)util/%-bison.c $(OUTPUT)PERF-CFLAGS | 872 | $(OUTPUT)util/%-bison.o: $(OUTPUT)util/%-bison.c $(OUTPUT)PERF-CFLAGS |
773 | $(QUIET_CC)$(CC) -o $@ -c $(ALL_CFLAGS) -DYYENABLE_NLS=0 -DYYLTYPE_IS_TRIVIAL=0 -Iutil/ -w $< | 873 | $(QUIET_CC)$(CC) -o $@ -c -Iutil/ $(ALL_CFLAGS) -DYYENABLE_NLS=0 -DYYLTYPE_IS_TRIVIAL=0 -w $< |
774 | 874 | ||
775 | $(OUTPUT)%.o: %.c $(OUTPUT)PERF-CFLAGS | 875 | $(OUTPUT)%.o: %.c $(OUTPUT)PERF-CFLAGS |
776 | $(QUIET_CC)$(CC) -o $@ -c $(ALL_CFLAGS) $< | 876 | $(QUIET_CC)$(CC) -o $@ -c $(ALL_CFLAGS) $< |
@@ -842,7 +942,10 @@ $(LIB_FILE): $(LIB_OBJS) | |||
842 | 942 | ||
843 | # libtraceevent.a | 943 | # libtraceevent.a |
844 | $(LIBTRACEEVENT): | 944 | $(LIBTRACEEVENT): |
845 | $(QUIET_SUBDIR0)$(TRACE_EVENT_DIR) $(QUIET_SUBDIR1) $(COMMAND_O) libtraceevent.a | 945 | $(QUIET_SUBDIR0)$(TRACE_EVENT_DIR) $(QUIET_SUBDIR1) O=$(OUTPUT) libtraceevent.a |
946 | |||
947 | $(LIBTRACEEVENT)-clean: | ||
948 | $(QUIET_SUBDIR0)$(TRACE_EVENT_DIR) $(QUIET_SUBDIR1) O=$(OUTPUT) clean | ||
846 | 949 | ||
847 | help: | 950 | help: |
848 | @echo 'Perf make targets:' | 951 | @echo 'Perf make targets:' |
@@ -951,6 +1054,8 @@ install: all | |||
951 | $(INSTALL) scripts/python/Perf-Trace-Util/lib/Perf/Trace/* -t '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/scripts/python/Perf-Trace-Util/lib/Perf/Trace' | 1054 | $(INSTALL) scripts/python/Perf-Trace-Util/lib/Perf/Trace/* -t '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/scripts/python/Perf-Trace-Util/lib/Perf/Trace' |
952 | $(INSTALL) scripts/python/*.py -t '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/scripts/python' | 1055 | $(INSTALL) scripts/python/*.py -t '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/scripts/python' |
953 | $(INSTALL) scripts/python/bin/* -t '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/scripts/python/bin' | 1056 | $(INSTALL) scripts/python/bin/* -t '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/scripts/python/bin' |
1057 | $(INSTALL) -d -m 755 '$(DESTDIR_SQ)$(sysconfdir_SQ)/bash_completion.d' | ||
1058 | $(INSTALL) bash_completion '$(DESTDIR_SQ)$(sysconfdir_SQ)/bash_completion.d/perf' | ||
954 | 1059 | ||
955 | install-python_ext: | 1060 | install-python_ext: |
956 | $(PYTHON_WORD) util/setup.py --quiet install --root='/$(DESTDIR_SQ)' | 1061 | $(PYTHON_WORD) util/setup.py --quiet install --root='/$(DESTDIR_SQ)' |
@@ -981,7 +1086,7 @@ quick-install-html: | |||
981 | 1086 | ||
982 | ### Cleaning rules | 1087 | ### Cleaning rules |
983 | 1088 | ||
984 | clean: | 1089 | clean: $(LIBTRACEEVENT)-clean |
985 | $(RM) $(LIB_OBJS) $(BUILTIN_OBJS) $(LIB_FILE) $(OUTPUT)perf-archive $(OUTPUT)perf.o $(LANG_BINDINGS) | 1090 | $(RM) $(LIB_OBJS) $(BUILTIN_OBJS) $(LIB_FILE) $(OUTPUT)perf-archive $(OUTPUT)perf.o $(LANG_BINDINGS) |
986 | $(RM) $(ALL_PROGRAMS) perf | 1091 | $(RM) $(ALL_PROGRAMS) perf |
987 | $(RM) *.spec *.pyc *.pyo */*.pyc */*.pyo $(OUTPUT)common-cmds.h TAGS tags cscope* | 1092 | $(RM) *.spec *.pyc *.pyo */*.pyc */*.pyo $(OUTPUT)common-cmds.h TAGS tags cscope* |
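
The new NO_BACKTRACE branch above gates self-debugging support on a try-cc feature probe (SOURCE_BACKTRACE, defined elsewhere in the perf build machinery). A minimal sketch of the kind of program such a probe might compile, assuming glibc's <execinfo.h> interface, could look like this:

    /* Hypothetical feature probe: does the toolchain provide backtrace()? */
    #include <execinfo.h>
    #include <stdio.h>

    int main(void)
    {
            void *stack[16];
            int n = backtrace(stack, 16);

            backtrace_symbols_fd(stack, n, 1);  /* dump symbolized frames to stdout */
            printf("%d frames\n", n);
            return 0;
    }

If a probe like this compiles and links, backtrace support stays enabled; otherwise -DNO_BACKTRACE is added, in the same spirit as the libaudit and GTK2 fallbacks above.
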
diff --git a/tools/perf/arch/x86/Makefile b/tools/perf/arch/x86/Makefile index 744e629797be..815841c04eb2 100644 --- a/tools/perf/arch/x86/Makefile +++ b/tools/perf/arch/x86/Makefile | |||
@@ -2,4 +2,7 @@ ifndef NO_DWARF | |||
2 | PERF_HAVE_DWARF_REGS := 1 | 2 | PERF_HAVE_DWARF_REGS := 1 |
3 | LIB_OBJS += $(OUTPUT)arch/$(ARCH)/util/dwarf-regs.o | 3 | LIB_OBJS += $(OUTPUT)arch/$(ARCH)/util/dwarf-regs.o |
4 | endif | 4 | endif |
5 | ifndef NO_LIBUNWIND | ||
6 | LIB_OBJS += $(OUTPUT)arch/$(ARCH)/util/unwind.o | ||
7 | endif | ||
5 | LIB_OBJS += $(OUTPUT)arch/$(ARCH)/util/header.o | 8 | LIB_OBJS += $(OUTPUT)arch/$(ARCH)/util/header.o |
diff --git a/tools/perf/arch/x86/include/perf_regs.h b/tools/perf/arch/x86/include/perf_regs.h new file mode 100644 index 000000000000..46fc9f15c6b3 --- /dev/null +++ b/tools/perf/arch/x86/include/perf_regs.h | |||
@@ -0,0 +1,80 @@ | |||
1 | #ifndef ARCH_PERF_REGS_H | ||
2 | #define ARCH_PERF_REGS_H | ||
3 | |||
4 | #include <stdlib.h> | ||
5 | #include "../../util/types.h" | ||
6 | #include "../../../../../arch/x86/include/asm/perf_regs.h" | ||
7 | |||
8 | #ifndef ARCH_X86_64 | ||
9 | #define PERF_REGS_MASK ((1ULL << PERF_REG_X86_32_MAX) - 1) | ||
10 | #else | ||
11 | #define REG_NOSUPPORT ((1ULL << PERF_REG_X86_DS) | \ | ||
12 | (1ULL << PERF_REG_X86_ES) | \ | ||
13 | (1ULL << PERF_REG_X86_FS) | \ | ||
14 | (1ULL << PERF_REG_X86_GS)) | ||
15 | #define PERF_REGS_MASK (((1ULL << PERF_REG_X86_64_MAX) - 1) & ~REG_NOSUPPORT) | ||
16 | #endif | ||
17 | #define PERF_REG_IP PERF_REG_X86_IP | ||
18 | #define PERF_REG_SP PERF_REG_X86_SP | ||
19 | |||
20 | static inline const char *perf_reg_name(int id) | ||
21 | { | ||
22 | switch (id) { | ||
23 | case PERF_REG_X86_AX: | ||
24 | return "AX"; | ||
25 | case PERF_REG_X86_BX: | ||
26 | return "BX"; | ||
27 | case PERF_REG_X86_CX: | ||
28 | return "CX"; | ||
29 | case PERF_REG_X86_DX: | ||
30 | return "DX"; | ||
31 | case PERF_REG_X86_SI: | ||
32 | return "SI"; | ||
33 | case PERF_REG_X86_DI: | ||
34 | return "DI"; | ||
35 | case PERF_REG_X86_BP: | ||
36 | return "BP"; | ||
37 | case PERF_REG_X86_SP: | ||
38 | return "SP"; | ||
39 | case PERF_REG_X86_IP: | ||
40 | return "IP"; | ||
41 | case PERF_REG_X86_FLAGS: | ||
42 | return "FLAGS"; | ||
43 | case PERF_REG_X86_CS: | ||
44 | return "CS"; | ||
45 | case PERF_REG_X86_SS: | ||
46 | return "SS"; | ||
47 | case PERF_REG_X86_DS: | ||
48 | return "DS"; | ||
49 | case PERF_REG_X86_ES: | ||
50 | return "ES"; | ||
51 | case PERF_REG_X86_FS: | ||
52 | return "FS"; | ||
53 | case PERF_REG_X86_GS: | ||
54 | return "GS"; | ||
55 | #ifdef ARCH_X86_64 | ||
56 | case PERF_REG_X86_R8: | ||
57 | return "R8"; | ||
58 | case PERF_REG_X86_R9: | ||
59 | return "R9"; | ||
60 | case PERF_REG_X86_R10: | ||
61 | return "R10"; | ||
62 | case PERF_REG_X86_R11: | ||
63 | return "R11"; | ||
64 | case PERF_REG_X86_R12: | ||
65 | return "R12"; | ||
66 | case PERF_REG_X86_R13: | ||
67 | return "R13"; | ||
68 | case PERF_REG_X86_R14: | ||
69 | return "R14"; | ||
70 | case PERF_REG_X86_R15: | ||
71 | return "R15"; | ||
72 | #endif /* ARCH_X86_64 */ | ||
73 | default: | ||
74 | return NULL; | ||
75 | } | ||
76 | |||
77 | return NULL; | ||
78 | } | ||
79 | |||
80 | #endif /* ARCH_PERF_REGS_H */ | ||
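
The header above exports two things a consumer needs: PERF_REGS_MASK, the bitmap of registers perf can sample on this architecture, and perf_reg_name() for turning a register index back into a printable name. A small illustrative sketch of how they combine, assuming it is built inside the perf tree so the header's own includes resolve:

    #include <stdio.h>
    #include "perf_regs.h"

    int main(void)
    {
            int id;

            for (id = 0; id < 64; id++) {
                    const char *name;

                    if (!(PERF_REGS_MASK & (1ULL << id)))
                            continue;               /* not sampled on this arch */

                    name = perf_reg_name(id);
                    printf("reg %2d -> %s\n", id, name ? name : "?");
            }
            return 0;
    }

On x86_64 the loop skips DS/ES/FS/GS because REG_NOSUPPORT masks them out; on 32-bit the mask simply stops at PERF_REG_X86_32_MAX.
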
diff --git a/tools/perf/arch/x86/util/unwind.c b/tools/perf/arch/x86/util/unwind.c new file mode 100644 index 000000000000..78d956eff96f --- /dev/null +++ b/tools/perf/arch/x86/util/unwind.c | |||
@@ -0,0 +1,111 @@ | |||
1 | |||
2 | #include <errno.h> | ||
3 | #include <libunwind.h> | ||
4 | #include "perf_regs.h" | ||
5 | #include "../../util/unwind.h" | ||
6 | |||
7 | #ifdef ARCH_X86_64 | ||
8 | int unwind__arch_reg_id(int regnum) | ||
9 | { | ||
10 | int id; | ||
11 | |||
12 | switch (regnum) { | ||
13 | case UNW_X86_64_RAX: | ||
14 | id = PERF_REG_X86_AX; | ||
15 | break; | ||
16 | case UNW_X86_64_RDX: | ||
17 | id = PERF_REG_X86_DX; | ||
18 | break; | ||
19 | case UNW_X86_64_RCX: | ||
20 | id = PERF_REG_X86_CX; | ||
21 | break; | ||
22 | case UNW_X86_64_RBX: | ||
23 | id = PERF_REG_X86_BX; | ||
24 | break; | ||
25 | case UNW_X86_64_RSI: | ||
26 | id = PERF_REG_X86_SI; | ||
27 | break; | ||
28 | case UNW_X86_64_RDI: | ||
29 | id = PERF_REG_X86_DI; | ||
30 | break; | ||
31 | case UNW_X86_64_RBP: | ||
32 | id = PERF_REG_X86_BP; | ||
33 | break; | ||
34 | case UNW_X86_64_RSP: | ||
35 | id = PERF_REG_X86_SP; | ||
36 | break; | ||
37 | case UNW_X86_64_R8: | ||
38 | id = PERF_REG_X86_R8; | ||
39 | break; | ||
40 | case UNW_X86_64_R9: | ||
41 | id = PERF_REG_X86_R9; | ||
42 | break; | ||
43 | case UNW_X86_64_R10: | ||
44 | id = PERF_REG_X86_R10; | ||
45 | break; | ||
46 | case UNW_X86_64_R11: | ||
47 | id = PERF_REG_X86_R11; | ||
48 | break; | ||
49 | case UNW_X86_64_R12: | ||
50 | id = PERF_REG_X86_R12; | ||
51 | break; | ||
52 | case UNW_X86_64_R13: | ||
53 | id = PERF_REG_X86_R13; | ||
54 | break; | ||
55 | case UNW_X86_64_R14: | ||
56 | id = PERF_REG_X86_R14; | ||
57 | break; | ||
58 | case UNW_X86_64_R15: | ||
59 | id = PERF_REG_X86_R15; | ||
60 | break; | ||
61 | case UNW_X86_64_RIP: | ||
62 | id = PERF_REG_X86_IP; | ||
63 | break; | ||
64 | default: | ||
65 | pr_err("unwind: invalid reg id %d\n", regnum); | ||
66 | return -EINVAL; | ||
67 | } | ||
68 | |||
69 | return id; | ||
70 | } | ||
71 | #else | ||
72 | int unwind__arch_reg_id(int regnum) | ||
73 | { | ||
74 | int id; | ||
75 | |||
76 | switch (regnum) { | ||
77 | case UNW_X86_EAX: | ||
78 | id = PERF_REG_X86_AX; | ||
79 | break; | ||
80 | case UNW_X86_EDX: | ||
81 | id = PERF_REG_X86_DX; | ||
82 | break; | ||
83 | case UNW_X86_ECX: | ||
84 | id = PERF_REG_X86_CX; | ||
85 | break; | ||
86 | case UNW_X86_EBX: | ||
87 | id = PERF_REG_X86_BX; | ||
88 | break; | ||
89 | case UNW_X86_ESI: | ||
90 | id = PERF_REG_X86_SI; | ||
91 | break; | ||
92 | case UNW_X86_EDI: | ||
93 | id = PERF_REG_X86_DI; | ||
94 | break; | ||
95 | case UNW_X86_EBP: | ||
96 | id = PERF_REG_X86_BP; | ||
97 | break; | ||
98 | case UNW_X86_ESP: | ||
99 | id = PERF_REG_X86_SP; | ||
100 | break; | ||
101 | case UNW_X86_EIP: | ||
102 | id = PERF_REG_X86_IP; | ||
103 | break; | ||
104 | default: | ||
105 | pr_err("unwind: invalid reg id %d\n", regnum); | ||
106 | return -EINVAL; | ||
107 | } | ||
108 | |||
109 | return id; | ||
110 | } | ||
111 | #endif /* ARCH_X86_64 */ | ||
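
unwind__arch_reg_id() is the glue between libunwind's register numbering and the PERF_REG_X86_* indices used when the kernel dumps user registers into a sample. A hedged sketch of how an unwinder callback might use it; this is simplified, and it assumes a regs[] array already expanded so it can be indexed directly by perf register id (the real sample layout is packed by mask order and handled in util/unwind.c):

    #include <libunwind.h>
    #include "perf_regs.h"

    int unwind__arch_reg_id(int regnum);    /* from arch/x86/util/unwind.c */

    /* Fetch one register value for libunwind. */
    static int fetch_reg(const u64 *regs, int unwind_regnum, u64 *valp)
    {
            int id = unwind__arch_reg_id(unwind_regnum);

            if (id < 0)
                    return id;      /* e.g. -EINVAL for unsupported registers */

            *valp = regs[id];
            return 0;
    }
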
diff --git a/tools/perf/bash_completion b/tools/perf/bash_completion new file mode 100644 index 000000000000..1958fa539d0f --- /dev/null +++ b/tools/perf/bash_completion | |||
@@ -0,0 +1,26 @@ | |||
1 | # perf completion | ||
2 | |||
3 | have perf && | ||
4 | _perf() | ||
5 | { | ||
6 | local cur prev cmd | ||
7 | |||
8 | COMPREPLY=() | ||
9 | _get_comp_words_by_ref cur prev | ||
10 | |||
11 | cmd=${COMP_WORDS[0]} | ||
12 | |||
13 | # List perf subcommands | ||
14 | if [ $COMP_CWORD -eq 1 ]; then | ||
15 | cmds=$($cmd --list-cmds) | ||
16 | COMPREPLY=( $( compgen -W '$cmds' -- "$cur" ) ) | ||
17 | # List possible events for -e option | ||
18 | elif [[ $prev == "-e" && "${COMP_WORDS[1]}" == @(record|stat|top) ]]; then | ||
19 | cmds=$($cmd list --raw-dump) | ||
20 | COMPREPLY=( $( compgen -W '$cmds' -- "$cur" ) ) | ||
21 | # Fall back to listing regular files | ||
22 | else | ||
23 | _filedir | ||
24 | fi | ||
25 | } && | ||
26 | complete -F _perf perf | ||
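
Together with the Makefile hunk above that installs this file as $(sysconfdir)/bash_completion.d/perf, a freshly started shell completes perf in two stages: at the first argument it offers the output of 'perf --list-cmds', and after '-e' for record, stat or top it offers 'perf list --raw-dump'; anything else falls back to ordinary filename completion via _filedir.
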
diff --git a/tools/perf/bench/bench.h b/tools/perf/bench/bench.h index a09bece6dad2..8f89998eeaf4 100644 --- a/tools/perf/bench/bench.h +++ b/tools/perf/bench/bench.h | |||
@@ -3,7 +3,8 @@ | |||
3 | 3 | ||
4 | extern int bench_sched_messaging(int argc, const char **argv, const char *prefix); | 4 | extern int bench_sched_messaging(int argc, const char **argv, const char *prefix); |
5 | extern int bench_sched_pipe(int argc, const char **argv, const char *prefix); | 5 | extern int bench_sched_pipe(int argc, const char **argv, const char *prefix); |
6 | extern int bench_mem_memcpy(int argc, const char **argv, const char *prefix __used); | 6 | extern int bench_mem_memcpy(int argc, const char **argv, |
7 | const char *prefix __maybe_unused); | ||
7 | extern int bench_mem_memset(int argc, const char **argv, const char *prefix); | 8 | extern int bench_mem_memset(int argc, const char **argv, const char *prefix); |
8 | 9 | ||
9 | #define BENCH_FORMAT_DEFAULT_STR "default" | 10 | #define BENCH_FORMAT_DEFAULT_STR "default" |
diff --git a/tools/perf/bench/mem-memcpy.c b/tools/perf/bench/mem-memcpy.c index 02dad5d3359b..93c83e3cb4a7 100644 --- a/tools/perf/bench/mem-memcpy.c +++ b/tools/perf/bench/mem-memcpy.c | |||
@@ -177,7 +177,7 @@ static double do_memcpy_gettimeofday(memcpy_t fn, size_t len, bool prefault) | |||
177 | } while (0) | 177 | } while (0) |
178 | 178 | ||
179 | int bench_mem_memcpy(int argc, const char **argv, | 179 | int bench_mem_memcpy(int argc, const char **argv, |
180 | const char *prefix __used) | 180 | const char *prefix __maybe_unused) |
181 | { | 181 | { |
182 | int i; | 182 | int i; |
183 | size_t len; | 183 | size_t len; |
diff --git a/tools/perf/bench/mem-memset.c b/tools/perf/bench/mem-memset.c index 350cc9557265..c6e4bc523492 100644 --- a/tools/perf/bench/mem-memset.c +++ b/tools/perf/bench/mem-memset.c | |||
@@ -171,7 +171,7 @@ static double do_memset_gettimeofday(memset_t fn, size_t len, bool prefault) | |||
171 | } while (0) | 171 | } while (0) |
172 | 172 | ||
173 | int bench_mem_memset(int argc, const char **argv, | 173 | int bench_mem_memset(int argc, const char **argv, |
174 | const char *prefix __used) | 174 | const char *prefix __maybe_unused) |
175 | { | 175 | { |
176 | int i; | 176 | int i; |
177 | size_t len; | 177 | size_t len; |
diff --git a/tools/perf/bench/sched-messaging.c b/tools/perf/bench/sched-messaging.c index d1d1b30f99c1..cc1190a0849b 100644 --- a/tools/perf/bench/sched-messaging.c +++ b/tools/perf/bench/sched-messaging.c | |||
@@ -267,7 +267,7 @@ static const char * const bench_sched_message_usage[] = { | |||
267 | }; | 267 | }; |
268 | 268 | ||
269 | int bench_sched_messaging(int argc, const char **argv, | 269 | int bench_sched_messaging(int argc, const char **argv, |
270 | const char *prefix __used) | 270 | const char *prefix __maybe_unused) |
271 | { | 271 | { |
272 | unsigned int i, total_children; | 272 | unsigned int i, total_children; |
273 | struct timeval start, stop, diff; | 273 | struct timeval start, stop, diff; |
diff --git a/tools/perf/bench/sched-pipe.c b/tools/perf/bench/sched-pipe.c index 0c7454f8b8a9..69cfba8d4c6c 100644 --- a/tools/perf/bench/sched-pipe.c +++ b/tools/perf/bench/sched-pipe.c | |||
@@ -43,7 +43,7 @@ static const char * const bench_sched_pipe_usage[] = { | |||
43 | }; | 43 | }; |
44 | 44 | ||
45 | int bench_sched_pipe(int argc, const char **argv, | 45 | int bench_sched_pipe(int argc, const char **argv, |
46 | const char *prefix __used) | 46 | const char *prefix __maybe_unused) |
47 | { | 47 | { |
48 | int pipe_1[2], pipe_2[2]; | 48 | int pipe_1[2], pipe_2[2]; |
49 | int m = 0, i; | 49 | int m = 0, i; |
@@ -55,14 +55,14 @@ int bench_sched_pipe(int argc, const char **argv, | |||
55 | * discarding returned value of read(), write() | 55 | * discarding returned value of read(), write() |
56 | * causes error in building environment for perf | 56 | * causes error in building environment for perf |
57 | */ | 57 | */ |
58 | int __used ret, wait_stat; | 58 | int __maybe_unused ret, wait_stat; |
59 | pid_t pid, retpid; | 59 | pid_t pid, retpid __maybe_unused; |
60 | 60 | ||
61 | argc = parse_options(argc, argv, options, | 61 | argc = parse_options(argc, argv, options, |
62 | bench_sched_pipe_usage, 0); | 62 | bench_sched_pipe_usage, 0); |
63 | 63 | ||
64 | assert(!pipe(pipe_1)); | 64 | BUG_ON(pipe(pipe_1)); |
65 | assert(!pipe(pipe_2)); | 65 | BUG_ON(pipe(pipe_2)); |
66 | 66 | ||
67 | pid = fork(); | 67 | pid = fork(); |
68 | assert(pid >= 0); | 68 | assert(pid >= 0); |
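
The switch from assert(!pipe(...)) to BUG_ON(pipe(...)) above avoids relying on assert() for a call with side effects: assert() expands to nothing under -DNDEBUG, so the pipe() call inside it would vanish from such builds. A minimal standalone illustration of the safer shape (not perf code, just the pattern):

    #include <stdio.h>
    #include <stdlib.h>
    #include <unistd.h>

    int main(void)
    {
            int fd[2];

            if (pipe(fd) < 0) {             /* executed regardless of NDEBUG */
                    perror("pipe");
                    exit(EXIT_FAILURE);
            }

            printf("pipe fds: %d %d\n", fd[0], fd[1]);
            return 0;
    }
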
diff --git a/tools/perf/builtin-annotate.c b/tools/perf/builtin-annotate.c index 67522cf87405..9ea38540b873 100644 --- a/tools/perf/builtin-annotate.c +++ b/tools/perf/builtin-annotate.c | |||
@@ -239,7 +239,7 @@ static const char * const annotate_usage[] = { | |||
239 | NULL | 239 | NULL |
240 | }; | 240 | }; |
241 | 241 | ||
242 | int cmd_annotate(int argc, const char **argv, const char *prefix __used) | 242 | int cmd_annotate(int argc, const char **argv, const char *prefix __maybe_unused) |
243 | { | 243 | { |
244 | struct perf_annotate annotate = { | 244 | struct perf_annotate annotate = { |
245 | .tool = { | 245 | .tool = { |
@@ -282,6 +282,8 @@ int cmd_annotate(int argc, const char **argv, const char *prefix __used) | |||
282 | "Display raw encoding of assembly instructions (default)"), | 282 | "Display raw encoding of assembly instructions (default)"), |
283 | OPT_STRING('M', "disassembler-style", &disassembler_style, "disassembler style", | 283 | OPT_STRING('M', "disassembler-style", &disassembler_style, "disassembler style", |
284 | "Specify disassembler style (e.g. -M intel for intel syntax)"), | 284 | "Specify disassembler style (e.g. -M intel for intel syntax)"), |
285 | OPT_STRING(0, "objdump", &objdump_path, "path", | ||
286 | "objdump binary to use for disassembly and annotations"), | ||
285 | OPT_END() | 287 | OPT_END() |
286 | }; | 288 | }; |
287 | 289 | ||
diff --git a/tools/perf/builtin-bench.c b/tools/perf/builtin-bench.c index 1f3100216448..cae9a5fd2ecf 100644 --- a/tools/perf/builtin-bench.c +++ b/tools/perf/builtin-bench.c | |||
@@ -173,7 +173,7 @@ static void all_subsystem(void) | |||
173 | all_suite(&subsystems[i]); | 173 | all_suite(&subsystems[i]); |
174 | } | 174 | } |
175 | 175 | ||
176 | int cmd_bench(int argc, const char **argv, const char *prefix __used) | 176 | int cmd_bench(int argc, const char **argv, const char *prefix __maybe_unused) |
177 | { | 177 | { |
178 | int i, j, status = 0; | 178 | int i, j, status = 0; |
179 | 179 | ||
diff --git a/tools/perf/builtin-buildid-cache.c b/tools/perf/builtin-buildid-cache.c index 29ad20e67919..83654557e108 100644 --- a/tools/perf/builtin-buildid-cache.c +++ b/tools/perf/builtin-buildid-cache.c | |||
@@ -43,15 +43,16 @@ static int build_id_cache__add_file(const char *filename, const char *debugdir) | |||
43 | } | 43 | } |
44 | 44 | ||
45 | build_id__sprintf(build_id, sizeof(build_id), sbuild_id); | 45 | build_id__sprintf(build_id, sizeof(build_id), sbuild_id); |
46 | err = build_id_cache__add_s(sbuild_id, debugdir, filename, false); | 46 | err = build_id_cache__add_s(sbuild_id, debugdir, filename, |
47 | false, false); | ||
47 | if (verbose) | 48 | if (verbose) |
48 | pr_info("Adding %s %s: %s\n", sbuild_id, filename, | 49 | pr_info("Adding %s %s: %s\n", sbuild_id, filename, |
49 | err ? "FAIL" : "Ok"); | 50 | err ? "FAIL" : "Ok"); |
50 | return err; | 51 | return err; |
51 | } | 52 | } |
52 | 53 | ||
53 | static int build_id_cache__remove_file(const char *filename __used, | 54 | static int build_id_cache__remove_file(const char *filename __maybe_unused, |
54 | const char *debugdir __used) | 55 | const char *debugdir __maybe_unused) |
55 | { | 56 | { |
56 | u8 build_id[BUILD_ID_SIZE]; | 57 | u8 build_id[BUILD_ID_SIZE]; |
57 | char sbuild_id[BUILD_ID_SIZE * 2 + 1]; | 58 | char sbuild_id[BUILD_ID_SIZE * 2 + 1]; |
@@ -119,7 +120,8 @@ static int __cmd_buildid_cache(void) | |||
119 | return 0; | 120 | return 0; |
120 | } | 121 | } |
121 | 122 | ||
122 | int cmd_buildid_cache(int argc, const char **argv, const char *prefix __used) | 123 | int cmd_buildid_cache(int argc, const char **argv, |
124 | const char *prefix __maybe_unused) | ||
123 | { | 125 | { |
124 | argc = parse_options(argc, argv, buildid_cache_options, | 126 | argc = parse_options(argc, argv, buildid_cache_options, |
125 | buildid_cache_usage, 0); | 127 | buildid_cache_usage, 0); |
diff --git a/tools/perf/builtin-buildid-list.c b/tools/perf/builtin-buildid-list.c index 6b2bcfbde150..1159feeebb19 100644 --- a/tools/perf/builtin-buildid-list.c +++ b/tools/perf/builtin-buildid-list.c | |||
@@ -16,8 +16,6 @@ | |||
16 | #include "util/session.h" | 16 | #include "util/session.h" |
17 | #include "util/symbol.h" | 17 | #include "util/symbol.h" |
18 | 18 | ||
19 | #include <libelf.h> | ||
20 | |||
21 | static const char *input_name; | 19 | static const char *input_name; |
22 | static bool force; | 20 | static bool force; |
23 | static bool show_kernel; | 21 | static bool show_kernel; |
@@ -71,7 +69,7 @@ static int perf_session__list_build_ids(void) | |||
71 | { | 69 | { |
72 | struct perf_session *session; | 70 | struct perf_session *session; |
73 | 71 | ||
74 | elf_version(EV_CURRENT); | 72 | symbol__elf_init(); |
75 | 73 | ||
76 | session = perf_session__new(input_name, O_RDONLY, force, false, | 74 | session = perf_session__new(input_name, O_RDONLY, force, false, |
77 | &build_id__mark_dso_hit_ops); | 75 | &build_id__mark_dso_hit_ops); |
@@ -105,7 +103,8 @@ static int __cmd_buildid_list(void) | |||
105 | return perf_session__list_build_ids(); | 103 | return perf_session__list_build_ids(); |
106 | } | 104 | } |
107 | 105 | ||
108 | int cmd_buildid_list(int argc, const char **argv, const char *prefix __used) | 106 | int cmd_buildid_list(int argc, const char **argv, |
107 | const char *prefix __maybe_unused) | ||
109 | { | 108 | { |
110 | argc = parse_options(argc, argv, options, buildid_list_usage, 0); | 109 | argc = parse_options(argc, argv, options, buildid_list_usage, 0); |
111 | setup_pager(); | 110 | setup_pager(); |
diff --git a/tools/perf/builtin-diff.c b/tools/perf/builtin-diff.c index d29d350fb2b7..761f4197a9e2 100644 --- a/tools/perf/builtin-diff.c +++ b/tools/perf/builtin-diff.c | |||
@@ -10,6 +10,7 @@ | |||
10 | #include "util/event.h" | 10 | #include "util/event.h" |
11 | #include "util/hist.h" | 11 | #include "util/hist.h" |
12 | #include "util/evsel.h" | 12 | #include "util/evsel.h" |
13 | #include "util/evlist.h" | ||
13 | #include "util/session.h" | 14 | #include "util/session.h" |
14 | #include "util/tool.h" | 15 | #include "util/tool.h" |
15 | #include "util/sort.h" | 16 | #include "util/sort.h" |
@@ -24,11 +25,6 @@ static char diff__default_sort_order[] = "dso,symbol"; | |||
24 | static bool force; | 25 | static bool force; |
25 | static bool show_displacement; | 26 | static bool show_displacement; |
26 | 27 | ||
27 | struct perf_diff { | ||
28 | struct perf_tool tool; | ||
29 | struct perf_session *session; | ||
30 | }; | ||
31 | |||
32 | static int hists__add_entry(struct hists *self, | 28 | static int hists__add_entry(struct hists *self, |
33 | struct addr_location *al, u64 period) | 29 | struct addr_location *al, u64 period) |
34 | { | 30 | { |
@@ -37,14 +33,12 @@ static int hists__add_entry(struct hists *self, | |||
37 | return -ENOMEM; | 33 | return -ENOMEM; |
38 | } | 34 | } |
39 | 35 | ||
40 | static int diff__process_sample_event(struct perf_tool *tool, | 36 | static int diff__process_sample_event(struct perf_tool *tool __maybe_unused, |
41 | union perf_event *event, | 37 | union perf_event *event, |
42 | struct perf_sample *sample, | 38 | struct perf_sample *sample, |
43 | struct perf_evsel *evsel __used, | 39 | struct perf_evsel *evsel, |
44 | struct machine *machine) | 40 | struct machine *machine) |
45 | { | 41 | { |
46 | struct perf_diff *_diff = container_of(tool, struct perf_diff, tool); | ||
47 | struct perf_session *session = _diff->session; | ||
48 | struct addr_location al; | 42 | struct addr_location al; |
49 | 43 | ||
50 | if (perf_event__preprocess_sample(event, machine, &al, sample, NULL) < 0) { | 44 | if (perf_event__preprocess_sample(event, machine, &al, sample, NULL) < 0) { |
@@ -56,26 +50,24 @@ static int diff__process_sample_event(struct perf_tool *tool, | |||
56 | if (al.filtered || al.sym == NULL) | 50 | if (al.filtered || al.sym == NULL) |
57 | return 0; | 51 | return 0; |
58 | 52 | ||
59 | if (hists__add_entry(&session->hists, &al, sample->period)) { | 53 | if (hists__add_entry(&evsel->hists, &al, sample->period)) { |
60 | pr_warning("problem incrementing symbol period, skipping event\n"); | 54 | pr_warning("problem incrementing symbol period, skipping event\n"); |
61 | return -1; | 55 | return -1; |
62 | } | 56 | } |
63 | 57 | ||
64 | session->hists.stats.total_period += sample->period; | 58 | evsel->hists.stats.total_period += sample->period; |
65 | return 0; | 59 | return 0; |
66 | } | 60 | } |
67 | 61 | ||
68 | static struct perf_diff diff = { | 62 | static struct perf_tool tool = { |
69 | .tool = { | 63 | .sample = diff__process_sample_event, |
70 | .sample = diff__process_sample_event, | 64 | .mmap = perf_event__process_mmap, |
71 | .mmap = perf_event__process_mmap, | 65 | .comm = perf_event__process_comm, |
72 | .comm = perf_event__process_comm, | 66 | .exit = perf_event__process_task, |
73 | .exit = perf_event__process_task, | 67 | .fork = perf_event__process_task, |
74 | .fork = perf_event__process_task, | 68 | .lost = perf_event__process_lost, |
75 | .lost = perf_event__process_lost, | 69 | .ordered_samples = true, |
76 | .ordered_samples = true, | 70 | .ordering_requires_timestamps = true, |
77 | .ordering_requires_timestamps = true, | ||
78 | }, | ||
79 | }; | 71 | }; |
80 | 72 | ||
81 | static void perf_session__insert_hist_entry_by_name(struct rb_root *root, | 73 | static void perf_session__insert_hist_entry_by_name(struct rb_root *root, |
@@ -146,34 +138,71 @@ static void hists__match(struct hists *older, struct hists *newer) | |||
146 | } | 138 | } |
147 | } | 139 | } |
148 | 140 | ||
141 | static struct perf_evsel *evsel_match(struct perf_evsel *evsel, | ||
142 | struct perf_evlist *evlist) | ||
143 | { | ||
144 | struct perf_evsel *e; | ||
145 | |||
146 | list_for_each_entry(e, &evlist->entries, node) | ||
147 | if (perf_evsel__match2(evsel, e)) | ||
148 | return e; | ||
149 | |||
150 | return NULL; | ||
151 | } | ||
152 | |||
149 | static int __cmd_diff(void) | 153 | static int __cmd_diff(void) |
150 | { | 154 | { |
151 | int ret, i; | 155 | int ret, i; |
152 | #define older (session[0]) | 156 | #define older (session[0]) |
153 | #define newer (session[1]) | 157 | #define newer (session[1]) |
154 | struct perf_session *session[2]; | 158 | struct perf_session *session[2]; |
159 | struct perf_evlist *evlist_new, *evlist_old; | ||
160 | struct perf_evsel *evsel; | ||
161 | bool first = true; | ||
155 | 162 | ||
156 | older = perf_session__new(input_old, O_RDONLY, force, false, | 163 | older = perf_session__new(input_old, O_RDONLY, force, false, |
157 | &diff.tool); | 164 | &tool); |
158 | newer = perf_session__new(input_new, O_RDONLY, force, false, | 165 | newer = perf_session__new(input_new, O_RDONLY, force, false, |
159 | &diff.tool); | 166 | &tool); |
160 | if (session[0] == NULL || session[1] == NULL) | 167 | if (session[0] == NULL || session[1] == NULL) |
161 | return -ENOMEM; | 168 | return -ENOMEM; |
162 | 169 | ||
163 | for (i = 0; i < 2; ++i) { | 170 | for (i = 0; i < 2; ++i) { |
164 | diff.session = session[i]; | 171 | ret = perf_session__process_events(session[i], &tool); |
165 | ret = perf_session__process_events(session[i], &diff.tool); | ||
166 | if (ret) | 172 | if (ret) |
167 | goto out_delete; | 173 | goto out_delete; |
168 | hists__output_resort(&session[i]->hists); | ||
169 | } | 174 | } |
170 | 175 | ||
171 | if (show_displacement) | 176 | evlist_old = older->evlist; |
172 | hists__resort_entries(&older->hists); | 177 | evlist_new = newer->evlist; |
178 | |||
179 | list_for_each_entry(evsel, &evlist_new->entries, node) | ||
180 | hists__output_resort(&evsel->hists); | ||
181 | |||
182 | list_for_each_entry(evsel, &evlist_old->entries, node) { | ||
183 | hists__output_resort(&evsel->hists); | ||
184 | |||
185 | if (show_displacement) | ||
186 | hists__resort_entries(&evsel->hists); | ||
187 | } | ||
188 | |||
189 | list_for_each_entry(evsel, &evlist_new->entries, node) { | ||
190 | struct perf_evsel *evsel_old; | ||
191 | |||
192 | evsel_old = evsel_match(evsel, evlist_old); | ||
193 | if (!evsel_old) | ||
194 | continue; | ||
195 | |||
196 | fprintf(stdout, "%s# Event '%s'\n#\n", first ? "" : "\n", | ||
197 | perf_evsel__name(evsel)); | ||
198 | |||
199 | first = false; | ||
200 | |||
201 | hists__match(&evsel_old->hists, &evsel->hists); | ||
202 | hists__fprintf(&evsel->hists, &evsel_old->hists, | ||
203 | show_displacement, true, 0, 0, stdout); | ||
204 | } | ||
173 | 205 | ||
174 | hists__match(&older->hists, &newer->hists); | ||
175 | hists__fprintf(&newer->hists, &older->hists, | ||
176 | show_displacement, true, 0, 0, stdout); | ||
177 | out_delete: | 206 | out_delete: |
178 | for (i = 0; i < 2; ++i) | 207 | for (i = 0; i < 2; ++i) |
179 | perf_session__delete(session[i]); | 208 | perf_session__delete(session[i]); |
@@ -213,7 +242,7 @@ static const struct option options[] = { | |||
213 | OPT_END() | 242 | OPT_END() |
214 | }; | 243 | }; |
215 | 244 | ||
216 | int cmd_diff(int argc, const char **argv, const char *prefix __used) | 245 | int cmd_diff(int argc, const char **argv, const char *prefix __maybe_unused) |
217 | { | 246 | { |
218 | sort_order = diff__default_sort_order; | 247 | sort_order = diff__default_sort_order; |
219 | argc = parse_options(argc, argv, options, diff_usage, 0); | 248 | argc = parse_options(argc, argv, options, diff_usage, 0); |
@@ -235,6 +264,7 @@ int cmd_diff(int argc, const char **argv, const char *prefix __used) | |||
235 | if (symbol__init() < 0) | 264 | if (symbol__init() < 0) |
236 | return -1; | 265 | return -1; |
237 | 266 | ||
267 | perf_hpp__init(true, show_displacement); | ||
238 | setup_sorting(diff_usage, options); | 268 | setup_sorting(diff_usage, options); |
239 | setup_pager(); | 269 | setup_pager(); |
240 | 270 | ||
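
The rework above moves perf diff from one hists tree per session to one per event, and only diffs events that have a counterpart in the other data file, paired via evsel_match()/perf_evsel__match2(). The pairing idea in isolation, as a plain-C sketch with a toy struct in place of perf_evsel and matching by name only for brevity (the real perf_evsel__match2() compares event attributes):

    #include <stdio.h>
    #include <string.h>

    struct ev { const char *name; };

    static const struct ev *match_by_name(const struct ev *e,
                                          const struct ev *old, int nr_old)
    {
            int i;

            for (i = 0; i < nr_old; i++)
                    if (!strcmp(e->name, old[i].name))
                            return &old[i];
            return NULL;            /* no counterpart: skip this event */
    }

    int main(void)
    {
            struct ev old[]   = { { "cycles" }, { "instructions" } };
            struct ev newer[] = { { "instructions" }, { "cache-misses" } };
            unsigned int i;

            for (i = 0; i < sizeof(newer) / sizeof(newer[0]); i++) {
                    const struct ev *o = match_by_name(&newer[i], old, 2);

                    printf("# Event '%s': %s\n", newer[i].name,
                           o ? "matched, will be diffed" : "no match, skipped");
            }
            return 0;
    }
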
diff --git a/tools/perf/builtin-evlist.c b/tools/perf/builtin-evlist.c index 0dd5a058f766..1fb164164fd0 100644 --- a/tools/perf/builtin-evlist.c +++ b/tools/perf/builtin-evlist.c | |||
@@ -113,7 +113,7 @@ static const char * const evlist_usage[] = { | |||
113 | NULL | 113 | NULL |
114 | }; | 114 | }; |
115 | 115 | ||
116 | int cmd_evlist(int argc, const char **argv, const char *prefix __used) | 116 | int cmd_evlist(int argc, const char **argv, const char *prefix __maybe_unused) |
117 | { | 117 | { |
118 | struct perf_attr_details details = { .verbose = false, }; | 118 | struct perf_attr_details details = { .verbose = false, }; |
119 | const char *input_name = NULL; | 119 | const char *input_name = NULL; |
diff --git a/tools/perf/builtin-help.c b/tools/perf/builtin-help.c index 6d5a8a7faf48..25c8b942ff85 100644 --- a/tools/perf/builtin-help.c +++ b/tools/perf/builtin-help.c | |||
@@ -24,13 +24,14 @@ static struct man_viewer_info_list { | |||
24 | } *man_viewer_info_list; | 24 | } *man_viewer_info_list; |
25 | 25 | ||
26 | enum help_format { | 26 | enum help_format { |
27 | HELP_FORMAT_NONE, | ||
27 | HELP_FORMAT_MAN, | 28 | HELP_FORMAT_MAN, |
28 | HELP_FORMAT_INFO, | 29 | HELP_FORMAT_INFO, |
29 | HELP_FORMAT_WEB, | 30 | HELP_FORMAT_WEB, |
30 | }; | 31 | }; |
31 | 32 | ||
32 | static bool show_all = false; | 33 | static bool show_all = false; |
33 | static enum help_format help_format = HELP_FORMAT_MAN; | 34 | static enum help_format help_format = HELP_FORMAT_NONE; |
34 | static struct option builtin_help_options[] = { | 35 | static struct option builtin_help_options[] = { |
35 | OPT_BOOLEAN('a', "all", &show_all, "print all available commands"), | 36 | OPT_BOOLEAN('a', "all", &show_all, "print all available commands"), |
36 | OPT_SET_UINT('m', "man", &help_format, "show man page", HELP_FORMAT_MAN), | 37 | OPT_SET_UINT('m', "man", &help_format, "show man page", HELP_FORMAT_MAN), |
@@ -54,7 +55,9 @@ static enum help_format parse_help_format(const char *format) | |||
54 | return HELP_FORMAT_INFO; | 55 | return HELP_FORMAT_INFO; |
55 | if (!strcmp(format, "web") || !strcmp(format, "html")) | 56 | if (!strcmp(format, "web") || !strcmp(format, "html")) |
56 | return HELP_FORMAT_WEB; | 57 | return HELP_FORMAT_WEB; |
57 | die("unrecognized help format '%s'", format); | 58 | |
59 | pr_err("unrecognized help format '%s'", format); | ||
60 | return HELP_FORMAT_NONE; | ||
58 | } | 61 | } |
59 | 62 | ||
60 | static const char *get_man_viewer_info(const char *name) | 63 | static const char *get_man_viewer_info(const char *name) |
@@ -259,6 +262,8 @@ static int perf_help_config(const char *var, const char *value, void *cb) | |||
259 | if (!value) | 262 | if (!value) |
260 | return config_error_nonbool(var); | 263 | return config_error_nonbool(var); |
261 | help_format = parse_help_format(value); | 264 | help_format = parse_help_format(value); |
265 | if (help_format == HELP_FORMAT_NONE) | ||
266 | return -1; | ||
262 | return 0; | 267 | return 0; |
263 | } | 268 | } |
264 | if (!strcmp(var, "man.viewer")) { | 269 | if (!strcmp(var, "man.viewer")) { |
@@ -352,7 +357,7 @@ static void exec_viewer(const char *name, const char *page) | |||
352 | warning("'%s': unknown man viewer.", name); | 357 | warning("'%s': unknown man viewer.", name); |
353 | } | 358 | } |
354 | 359 | ||
355 | static void show_man_page(const char *perf_cmd) | 360 | static int show_man_page(const char *perf_cmd) |
356 | { | 361 | { |
357 | struct man_viewer_list *viewer; | 362 | struct man_viewer_list *viewer; |
358 | const char *page = cmd_to_page(perf_cmd); | 363 | const char *page = cmd_to_page(perf_cmd); |
@@ -365,28 +370,35 @@ static void show_man_page(const char *perf_cmd) | |||
365 | if (fallback) | 370 | if (fallback) |
366 | exec_viewer(fallback, page); | 371 | exec_viewer(fallback, page); |
367 | exec_viewer("man", page); | 372 | exec_viewer("man", page); |
368 | die("no man viewer handled the request"); | 373 | |
374 | pr_err("no man viewer handled the request"); | ||
375 | return -1; | ||
369 | } | 376 | } |
370 | 377 | ||
371 | static void show_info_page(const char *perf_cmd) | 378 | static int show_info_page(const char *perf_cmd) |
372 | { | 379 | { |
373 | const char *page = cmd_to_page(perf_cmd); | 380 | const char *page = cmd_to_page(perf_cmd); |
374 | setenv("INFOPATH", system_path(PERF_INFO_PATH), 1); | 381 | setenv("INFOPATH", system_path(PERF_INFO_PATH), 1); |
375 | execlp("info", "info", "perfman", page, NULL); | 382 | execlp("info", "info", "perfman", page, NULL); |
383 | return -1; | ||
376 | } | 384 | } |
377 | 385 | ||
378 | static void get_html_page_path(struct strbuf *page_path, const char *page) | 386 | static int get_html_page_path(struct strbuf *page_path, const char *page) |
379 | { | 387 | { |
380 | struct stat st; | 388 | struct stat st; |
381 | const char *html_path = system_path(PERF_HTML_PATH); | 389 | const char *html_path = system_path(PERF_HTML_PATH); |
382 | 390 | ||
383 | /* Check that we have a perf documentation directory. */ | 391 | /* Check that we have a perf documentation directory. */ |
384 | if (stat(mkpath("%s/perf.html", html_path), &st) | 392 | if (stat(mkpath("%s/perf.html", html_path), &st) |
385 | || !S_ISREG(st.st_mode)) | 393 | || !S_ISREG(st.st_mode)) { |
386 | die("'%s': not a documentation directory.", html_path); | 394 | pr_err("'%s': not a documentation directory.", html_path); |
395 | return -1; | ||
396 | } | ||
387 | 397 | ||
388 | strbuf_init(page_path, 0); | 398 | strbuf_init(page_path, 0); |
389 | strbuf_addf(page_path, "%s/%s.html", html_path, page); | 399 | strbuf_addf(page_path, "%s/%s.html", html_path, page); |
400 | |||
401 | return 0; | ||
390 | } | 402 | } |
391 | 403 | ||
392 | /* | 404 | /* |
@@ -401,19 +413,23 @@ static void open_html(const char *path) | |||
401 | } | 413 | } |
402 | #endif | 414 | #endif |
403 | 415 | ||
404 | static void show_html_page(const char *perf_cmd) | 416 | static int show_html_page(const char *perf_cmd) |
405 | { | 417 | { |
406 | const char *page = cmd_to_page(perf_cmd); | 418 | const char *page = cmd_to_page(perf_cmd); |
407 | struct strbuf page_path; /* it leaks but we exec below */ | 419 | struct strbuf page_path; /* it leaks but we exec below */ |
408 | 420 | ||
409 | get_html_page_path(&page_path, page); | 421 | if (get_html_page_path(&page_path, page) != 0) |
422 | return -1; | ||
410 | 423 | ||
411 | open_html(page_path.buf); | 424 | open_html(page_path.buf); |
425 | |||
426 | return 0; | ||
412 | } | 427 | } |
413 | 428 | ||
414 | int cmd_help(int argc, const char **argv, const char *prefix __used) | 429 | int cmd_help(int argc, const char **argv, const char *prefix __maybe_unused) |
415 | { | 430 | { |
416 | const char *alias; | 431 | const char *alias; |
432 | int rc = 0; | ||
417 | 433 | ||
418 | load_command_list("perf-", &main_cmds, &other_cmds); | 434 | load_command_list("perf-", &main_cmds, &other_cmds); |
419 | 435 | ||
@@ -444,16 +460,20 @@ int cmd_help(int argc, const char **argv, const char *prefix __used) | |||
444 | 460 | ||
445 | switch (help_format) { | 461 | switch (help_format) { |
446 | case HELP_FORMAT_MAN: | 462 | case HELP_FORMAT_MAN: |
447 | show_man_page(argv[0]); | 463 | rc = show_man_page(argv[0]); |
448 | break; | 464 | break; |
449 | case HELP_FORMAT_INFO: | 465 | case HELP_FORMAT_INFO: |
450 | show_info_page(argv[0]); | 466 | rc = show_info_page(argv[0]); |
451 | break; | 467 | break; |
452 | case HELP_FORMAT_WEB: | 468 | case HELP_FORMAT_WEB: |
453 | show_html_page(argv[0]); | 469 | rc = show_html_page(argv[0]); |
470 | break; | ||
471 | case HELP_FORMAT_NONE: | ||
472 | /* fall-through */ | ||
454 | default: | 473 | default: |
474 | rc = -1; | ||
455 | break; | 475 | break; |
456 | } | 476 | } |
457 | 477 | ||
458 | return 0; | 478 | return rc; |
459 | } | 479 | } |
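
Most of the builtin-help.c hunks follow one refactoring pattern: helpers that used to die() now report failure through their return value, and cmd_help() propagates rc instead of always returning 0. The shape of that conversion, reduced to a self-contained sketch:

    #include <stdio.h>

    static int show_page(const char *name)          /* was: void + die() */
    {
            if (name == NULL || name[0] == '\0') {
                    fprintf(stderr, "no page name given\n");
                    return -1;                      /* report, don't exit */
            }
            printf("showing page '%s'\n", name);
            return 0;
    }

    int main(int argc, char **argv)
    {
            int rc = show_page(argc > 1 ? argv[1] : "");

            return rc ? 1 : 0;                      /* the caller decides how to fail */
    }
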
diff --git a/tools/perf/builtin-inject.c b/tools/perf/builtin-inject.c index 3beab489afc5..1eaa6617c814 100644 --- a/tools/perf/builtin-inject.c +++ b/tools/perf/builtin-inject.c | |||
@@ -17,9 +17,9 @@ | |||
17 | static char const *input_name = "-"; | 17 | static char const *input_name = "-"; |
18 | static bool inject_build_ids; | 18 | static bool inject_build_ids; |
19 | 19 | ||
20 | static int perf_event__repipe_synth(struct perf_tool *tool __used, | 20 | static int perf_event__repipe_synth(struct perf_tool *tool __maybe_unused, |
21 | union perf_event *event, | 21 | union perf_event *event, |
22 | struct machine *machine __used) | 22 | struct machine *machine __maybe_unused) |
23 | { | 23 | { |
24 | uint32_t size; | 24 | uint32_t size; |
25 | void *buf = event; | 25 | void *buf = event; |
@@ -40,7 +40,8 @@ static int perf_event__repipe_synth(struct perf_tool *tool __used, | |||
40 | 40 | ||
41 | static int perf_event__repipe_op2_synth(struct perf_tool *tool, | 41 | static int perf_event__repipe_op2_synth(struct perf_tool *tool, |
42 | union perf_event *event, | 42 | union perf_event *event, |
43 | struct perf_session *session __used) | 43 | struct perf_session *session |
44 | __maybe_unused) | ||
44 | { | 45 | { |
45 | return perf_event__repipe_synth(tool, event, NULL); | 46 | return perf_event__repipe_synth(tool, event, NULL); |
46 | } | 47 | } |
@@ -52,13 +53,14 @@ static int perf_event__repipe_event_type_synth(struct perf_tool *tool, | |||
52 | } | 53 | } |
53 | 54 | ||
54 | static int perf_event__repipe_tracing_data_synth(union perf_event *event, | 55 | static int perf_event__repipe_tracing_data_synth(union perf_event *event, |
55 | struct perf_session *session __used) | 56 | struct perf_session *session |
57 | __maybe_unused) | ||
56 | { | 58 | { |
57 | return perf_event__repipe_synth(NULL, event, NULL); | 59 | return perf_event__repipe_synth(NULL, event, NULL); |
58 | } | 60 | } |
59 | 61 | ||
60 | static int perf_event__repipe_attr(union perf_event *event, | 62 | static int perf_event__repipe_attr(union perf_event *event, |
61 | struct perf_evlist **pevlist __used) | 63 | struct perf_evlist **pevlist __maybe_unused) |
62 | { | 64 | { |
63 | int ret; | 65 | int ret; |
64 | ret = perf_event__process_attr(event, pevlist); | 66 | ret = perf_event__process_attr(event, pevlist); |
@@ -70,7 +72,7 @@ static int perf_event__repipe_attr(union perf_event *event, | |||
70 | 72 | ||
71 | static int perf_event__repipe(struct perf_tool *tool, | 73 | static int perf_event__repipe(struct perf_tool *tool, |
72 | union perf_event *event, | 74 | union perf_event *event, |
73 | struct perf_sample *sample __used, | 75 | struct perf_sample *sample __maybe_unused, |
74 | struct machine *machine) | 76 | struct machine *machine) |
75 | { | 77 | { |
76 | return perf_event__repipe_synth(tool, event, machine); | 78 | return perf_event__repipe_synth(tool, event, machine); |
@@ -78,8 +80,8 @@ static int perf_event__repipe(struct perf_tool *tool, | |||
78 | 80 | ||
79 | static int perf_event__repipe_sample(struct perf_tool *tool, | 81 | static int perf_event__repipe_sample(struct perf_tool *tool, |
80 | union perf_event *event, | 82 | union perf_event *event, |
81 | struct perf_sample *sample __used, | 83 | struct perf_sample *sample __maybe_unused, |
82 | struct perf_evsel *evsel __used, | 84 | struct perf_evsel *evsel __maybe_unused, |
83 | struct machine *machine) | 85 | struct machine *machine) |
84 | { | 86 | { |
85 | return perf_event__repipe_synth(tool, event, machine); | 87 | return perf_event__repipe_synth(tool, event, machine); |
@@ -163,7 +165,7 @@ static int dso__inject_build_id(struct dso *self, struct perf_tool *tool, | |||
163 | static int perf_event__inject_buildid(struct perf_tool *tool, | 165 | static int perf_event__inject_buildid(struct perf_tool *tool, |
164 | union perf_event *event, | 166 | union perf_event *event, |
165 | struct perf_sample *sample, | 167 | struct perf_sample *sample, |
166 | struct perf_evsel *evsel __used, | 168 | struct perf_evsel *evsel __maybe_unused, |
167 | struct machine *machine) | 169 | struct machine *machine) |
168 | { | 170 | { |
169 | struct addr_location al; | 171 | struct addr_location al; |
@@ -191,10 +193,13 @@ static int perf_event__inject_buildid(struct perf_tool *tool, | |||
191 | * If this fails, too bad, let the other side | 193 | * If this fails, too bad, let the other side |
192 | * account this as unresolved. | 194 | * account this as unresolved. |
193 | */ | 195 | */ |
194 | } else | 196 | } else { |
197 | #ifndef NO_LIBELF_SUPPORT | ||
195 | pr_warning("no symbols found in %s, maybe " | 198 | pr_warning("no symbols found in %s, maybe " |
196 | "install a debug package?\n", | 199 | "install a debug package?\n", |
197 | al.map->dso->long_name); | 200 | al.map->dso->long_name); |
201 | #endif | ||
202 | } | ||
198 | } | 203 | } |
199 | } | 204 | } |
200 | 205 | ||
@@ -221,7 +226,7 @@ struct perf_tool perf_inject = { | |||
221 | 226 | ||
222 | extern volatile int session_done; | 227 | extern volatile int session_done; |
223 | 228 | ||
224 | static void sig_handler(int sig __attribute__((__unused__))) | 229 | static void sig_handler(int sig __maybe_unused) |
225 | { | 230 | { |
226 | session_done = 1; | 231 | session_done = 1; |
227 | } | 232 | } |
@@ -264,7 +269,7 @@ static const struct option options[] = { | |||
264 | OPT_END() | 269 | OPT_END() |
265 | }; | 270 | }; |
266 | 271 | ||
267 | int cmd_inject(int argc, const char **argv, const char *prefix __used) | 272 | int cmd_inject(int argc, const char **argv, const char *prefix __maybe_unused) |
268 | { | 273 | { |
269 | argc = parse_options(argc, argv, options, report_usage, 0); | 274 | argc = parse_options(argc, argv, options, report_usage, 0); |
270 | 275 | ||
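
The rename of __used to __maybe_unused that runs through the bench and builtin sources above is mechanical: both mark parameters that are intentionally ignored. As a rough illustration of what such an annotation typically expands to in GCC-style code (perf's real definition lives in its own compiler headers and may differ in detail):

    #include <stdio.h>

    #ifndef __maybe_unused
    #define __maybe_unused __attribute__((unused))
    #endif

    static int handler(int value, void *context __maybe_unused)
    {
            /* context is deliberately ignored; the attribute keeps
             * -Wunused-parameter quiet without a cast to void. */
            return value * 2;
    }

    int main(void)
    {
            printf("%d\n", handler(21, NULL));
            return 0;
    }
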
diff --git a/tools/perf/builtin-kmem.c b/tools/perf/builtin-kmem.c index ce35015f2dc6..bc912c68f49a 100644 --- a/tools/perf/builtin-kmem.c +++ b/tools/perf/builtin-kmem.c | |||
@@ -1,6 +1,8 @@ | |||
1 | #include "builtin.h" | 1 | #include "builtin.h" |
2 | #include "perf.h" | 2 | #include "perf.h" |
3 | 3 | ||
4 | #include "util/evlist.h" | ||
5 | #include "util/evsel.h" | ||
4 | #include "util/util.h" | 6 | #include "util/util.h" |
5 | #include "util/cache.h" | 7 | #include "util/cache.h" |
6 | #include "util/symbol.h" | 8 | #include "util/symbol.h" |
@@ -57,46 +59,52 @@ static unsigned long nr_allocs, nr_cross_allocs; | |||
57 | 59 | ||
58 | #define PATH_SYS_NODE "/sys/devices/system/node" | 60 | #define PATH_SYS_NODE "/sys/devices/system/node" |
59 | 61 | ||
60 | struct perf_kmem { | 62 | static int init_cpunode_map(void) |
61 | struct perf_tool tool; | ||
62 | struct perf_session *session; | ||
63 | }; | ||
64 | |||
65 | static void init_cpunode_map(void) | ||
66 | { | 63 | { |
67 | FILE *fp; | 64 | FILE *fp; |
68 | int i; | 65 | int i, err = -1; |
69 | 66 | ||
70 | fp = fopen("/sys/devices/system/cpu/kernel_max", "r"); | 67 | fp = fopen("/sys/devices/system/cpu/kernel_max", "r"); |
71 | if (!fp) { | 68 | if (!fp) { |
72 | max_cpu_num = 4096; | 69 | max_cpu_num = 4096; |
73 | return; | 70 | return 0; |
71 | } | ||
72 | |||
73 | if (fscanf(fp, "%d", &max_cpu_num) < 1) { | ||
74 | pr_err("Failed to read 'kernel_max' from sysfs"); | ||
75 | goto out_close; | ||
74 | } | 76 | } |
75 | 77 | ||
76 | if (fscanf(fp, "%d", &max_cpu_num) < 1) | ||
77 | die("Failed to read 'kernel_max' from sysfs"); | ||
78 | max_cpu_num++; | 78 | max_cpu_num++; |
79 | 79 | ||
80 | cpunode_map = calloc(max_cpu_num, sizeof(int)); | 80 | cpunode_map = calloc(max_cpu_num, sizeof(int)); |
81 | if (!cpunode_map) | 81 | if (!cpunode_map) { |
82 | die("calloc"); | 82 | pr_err("%s: calloc failed\n", __func__); |
83 | goto out_close; | ||
84 | } | ||
85 | |||
83 | for (i = 0; i < max_cpu_num; i++) | 86 | for (i = 0; i < max_cpu_num; i++) |
84 | cpunode_map[i] = -1; | 87 | cpunode_map[i] = -1; |
88 | |||
89 | err = 0; | ||
90 | out_close: | ||
85 | fclose(fp); | 91 | fclose(fp); |
92 | return err; | ||
86 | } | 93 | } |
87 | 94 | ||
88 | static void setup_cpunode_map(void) | 95 | static int setup_cpunode_map(void) |
89 | { | 96 | { |
90 | struct dirent *dent1, *dent2; | 97 | struct dirent *dent1, *dent2; |
91 | DIR *dir1, *dir2; | 98 | DIR *dir1, *dir2; |
92 | unsigned int cpu, mem; | 99 | unsigned int cpu, mem; |
93 | char buf[PATH_MAX]; | 100 | char buf[PATH_MAX]; |
94 | 101 | ||
95 | init_cpunode_map(); | 102 | if (init_cpunode_map()) |
103 | return -1; | ||
96 | 104 | ||
97 | dir1 = opendir(PATH_SYS_NODE); | 105 | dir1 = opendir(PATH_SYS_NODE); |
98 | if (!dir1) | 106 | if (!dir1) |
99 | return; | 107 | return -1; |
100 | 108 | ||
101 | while ((dent1 = readdir(dir1)) != NULL) { | 109 | while ((dent1 = readdir(dir1)) != NULL) { |
102 | if (dent1->d_type != DT_DIR || | 110 | if (dent1->d_type != DT_DIR || |
@@ -116,10 +124,11 @@ static void setup_cpunode_map(void) | |||
116 | closedir(dir2); | 124 | closedir(dir2); |
117 | } | 125 | } |
118 | closedir(dir1); | 126 | closedir(dir1); |
127 | return 0; | ||
119 | } | 128 | } |
120 | 129 | ||
121 | static void insert_alloc_stat(unsigned long call_site, unsigned long ptr, | 130 | static int insert_alloc_stat(unsigned long call_site, unsigned long ptr, |
122 | int bytes_req, int bytes_alloc, int cpu) | 131 | int bytes_req, int bytes_alloc, int cpu) |
123 | { | 132 | { |
124 | struct rb_node **node = &root_alloc_stat.rb_node; | 133 | struct rb_node **node = &root_alloc_stat.rb_node; |
125 | struct rb_node *parent = NULL; | 134 | struct rb_node *parent = NULL; |
@@ -143,8 +152,10 @@ static void insert_alloc_stat(unsigned long call_site, unsigned long ptr, | |||
143 | data->bytes_alloc += bytes_alloc; | 152 | data->bytes_alloc += bytes_alloc; |
144 | } else { | 153 | } else { |
145 | data = malloc(sizeof(*data)); | 154 | data = malloc(sizeof(*data)); |
146 | if (!data) | 155 | if (!data) { |
147 | die("malloc"); | 156 | pr_err("%s: malloc failed\n", __func__); |
157 | return -1; | ||
158 | } | ||
148 | data->ptr = ptr; | 159 | data->ptr = ptr; |
149 | data->pingpong = 0; | 160 | data->pingpong = 0; |
150 | data->hit = 1; | 161 | data->hit = 1; |
@@ -156,9 +167,10 @@ static void insert_alloc_stat(unsigned long call_site, unsigned long ptr, | |||
156 | } | 167 | } |
157 | data->call_site = call_site; | 168 | data->call_site = call_site; |
158 | data->alloc_cpu = cpu; | 169 | data->alloc_cpu = cpu; |
170 | return 0; | ||
159 | } | 171 | } |
160 | 172 | ||
161 | static void insert_caller_stat(unsigned long call_site, | 173 | static int insert_caller_stat(unsigned long call_site, |
162 | int bytes_req, int bytes_alloc) | 174 | int bytes_req, int bytes_alloc) |
163 | { | 175 | { |
164 | struct rb_node **node = &root_caller_stat.rb_node; | 176 | struct rb_node **node = &root_caller_stat.rb_node; |
@@ -183,8 +195,10 @@ static void insert_caller_stat(unsigned long call_site, | |||
183 | data->bytes_alloc += bytes_alloc; | 195 | data->bytes_alloc += bytes_alloc; |
184 | } else { | 196 | } else { |
185 | data = malloc(sizeof(*data)); | 197 | data = malloc(sizeof(*data)); |
186 | if (!data) | 198 | if (!data) { |
187 | die("malloc"); | 199 | pr_err("%s: malloc failed\n", __func__); |
200 | return -1; | ||
201 | } | ||
188 | data->call_site = call_site; | 202 | data->call_site = call_site; |
189 | data->pingpong = 0; | 203 | data->pingpong = 0; |
190 | data->hit = 1; | 204 | data->hit = 1; |
@@ -194,39 +208,43 @@ static void insert_caller_stat(unsigned long call_site, | |||
194 | rb_link_node(&data->node, parent, node); | 208 | rb_link_node(&data->node, parent, node); |
195 | rb_insert_color(&data->node, &root_caller_stat); | 209 | rb_insert_color(&data->node, &root_caller_stat); |
196 | } | 210 | } |
211 | |||
212 | return 0; | ||
197 | } | 213 | } |
198 | 214 | ||
199 | static void process_alloc_event(void *data, | 215 | static int perf_evsel__process_alloc_event(struct perf_evsel *evsel, |
200 | struct event_format *event, | 216 | struct perf_sample *sample) |
201 | int cpu, | ||
202 | u64 timestamp __used, | ||
203 | struct thread *thread __used, | ||
204 | int node) | ||
205 | { | 217 | { |
206 | unsigned long call_site; | 218 | unsigned long ptr = perf_evsel__intval(evsel, sample, "ptr"), |
207 | unsigned long ptr; | 219 | call_site = perf_evsel__intval(evsel, sample, "call_site"); |
208 | int bytes_req; | 220 | int bytes_req = perf_evsel__intval(evsel, sample, "bytes_req"), |
209 | int bytes_alloc; | 221 | bytes_alloc = perf_evsel__intval(evsel, sample, "bytes_alloc"); |
210 | int node1, node2; | ||
211 | |||
212 | ptr = raw_field_value(event, "ptr", data); | ||
213 | call_site = raw_field_value(event, "call_site", data); | ||
214 | bytes_req = raw_field_value(event, "bytes_req", data); | ||
215 | bytes_alloc = raw_field_value(event, "bytes_alloc", data); | ||
216 | 222 | ||
217 | insert_alloc_stat(call_site, ptr, bytes_req, bytes_alloc, cpu); | 223 | if (insert_alloc_stat(call_site, ptr, bytes_req, bytes_alloc, sample->cpu) || |
218 | insert_caller_stat(call_site, bytes_req, bytes_alloc); | 224 | insert_caller_stat(call_site, bytes_req, bytes_alloc)) |
225 | return -1; | ||
219 | 226 | ||
220 | total_requested += bytes_req; | 227 | total_requested += bytes_req; |
221 | total_allocated += bytes_alloc; | 228 | total_allocated += bytes_alloc; |
222 | 229 | ||
223 | if (node) { | 230 | nr_allocs++; |
224 | node1 = cpunode_map[cpu]; | 231 | return 0; |
225 | node2 = raw_field_value(event, "node", data); | 232 | } |
233 | |||
234 | static int perf_evsel__process_alloc_node_event(struct perf_evsel *evsel, | ||
235 | struct perf_sample *sample) | ||
236 | { | ||
237 | int ret = perf_evsel__process_alloc_event(evsel, sample); | ||
238 | |||
239 | if (!ret) { | ||
240 | int node1 = cpunode_map[sample->cpu], | ||
241 | node2 = perf_evsel__intval(evsel, sample, "node"); | ||
242 | |||
226 | if (node1 != node2) | 243 | if (node1 != node2) |
227 | nr_cross_allocs++; | 244 | nr_cross_allocs++; |
228 | } | 245 | } |
229 | nr_allocs++; | 246 | |
247 | return ret; | ||
230 | } | 248 | } |
231 | 249 | ||
232 | static int ptr_cmp(struct alloc_stat *, struct alloc_stat *); | 250 | static int ptr_cmp(struct alloc_stat *, struct alloc_stat *); |
@@ -257,66 +275,37 @@ static struct alloc_stat *search_alloc_stat(unsigned long ptr, | |||
257 | return NULL; | 275 | return NULL; |
258 | } | 276 | } |
259 | 277 | ||
260 | static void process_free_event(void *data, | 278 | static int perf_evsel__process_free_event(struct perf_evsel *evsel, |
261 | struct event_format *event, | 279 | struct perf_sample *sample) |
262 | int cpu, | ||
263 | u64 timestamp __used, | ||
264 | struct thread *thread __used) | ||
265 | { | 280 | { |
266 | unsigned long ptr; | 281 | unsigned long ptr = perf_evsel__intval(evsel, sample, "ptr"); |
267 | struct alloc_stat *s_alloc, *s_caller; | 282 | struct alloc_stat *s_alloc, *s_caller; |
268 | 283 | ||
269 | ptr = raw_field_value(event, "ptr", data); | ||
270 | |||
271 | s_alloc = search_alloc_stat(ptr, 0, &root_alloc_stat, ptr_cmp); | 284 | s_alloc = search_alloc_stat(ptr, 0, &root_alloc_stat, ptr_cmp); |
272 | if (!s_alloc) | 285 | if (!s_alloc) |
273 | return; | 286 | return 0; |
274 | 287 | ||
275 | if (cpu != s_alloc->alloc_cpu) { | 288 | if ((short)sample->cpu != s_alloc->alloc_cpu) { |
276 | s_alloc->pingpong++; | 289 | s_alloc->pingpong++; |
277 | 290 | ||
278 | s_caller = search_alloc_stat(0, s_alloc->call_site, | 291 | s_caller = search_alloc_stat(0, s_alloc->call_site, |
279 | &root_caller_stat, callsite_cmp); | 292 | &root_caller_stat, callsite_cmp); |
280 | assert(s_caller); | 293 | if (!s_caller) |
294 | return -1; | ||
281 | s_caller->pingpong++; | 295 | s_caller->pingpong++; |
282 | } | 296 | } |
283 | s_alloc->alloc_cpu = -1; | 297 | s_alloc->alloc_cpu = -1; |
284 | } | ||
285 | 298 | ||
286 | static void process_raw_event(struct perf_tool *tool, | 299 | return 0; |
287 | union perf_event *raw_event __used, void *data, | ||
288 | int cpu, u64 timestamp, struct thread *thread) | ||
289 | { | ||
290 | struct perf_kmem *kmem = container_of(tool, struct perf_kmem, tool); | ||
291 | struct event_format *event; | ||
292 | int type; | ||
293 | |||
294 | type = trace_parse_common_type(kmem->session->pevent, data); | ||
295 | event = pevent_find_event(kmem->session->pevent, type); | ||
296 | |||
297 | if (!strcmp(event->name, "kmalloc") || | ||
298 | !strcmp(event->name, "kmem_cache_alloc")) { | ||
299 | process_alloc_event(data, event, cpu, timestamp, thread, 0); | ||
300 | return; | ||
301 | } | ||
302 | |||
303 | if (!strcmp(event->name, "kmalloc_node") || | ||
304 | !strcmp(event->name, "kmem_cache_alloc_node")) { | ||
305 | process_alloc_event(data, event, cpu, timestamp, thread, 1); | ||
306 | return; | ||
307 | } | ||
308 | |||
309 | if (!strcmp(event->name, "kfree") || | ||
310 | !strcmp(event->name, "kmem_cache_free")) { | ||
311 | process_free_event(data, event, cpu, timestamp, thread); | ||
312 | return; | ||
313 | } | ||
314 | } | 300 | } |
315 | 301 | ||
316 | static int process_sample_event(struct perf_tool *tool, | 302 | typedef int (*tracepoint_handler)(struct perf_evsel *evsel, |
303 | struct perf_sample *sample); | ||
304 | |||
305 | static int process_sample_event(struct perf_tool *tool __maybe_unused, | ||
317 | union perf_event *event, | 306 | union perf_event *event, |
318 | struct perf_sample *sample, | 307 | struct perf_sample *sample, |
319 | struct perf_evsel *evsel __used, | 308 | struct perf_evsel *evsel, |
320 | struct machine *machine) | 309 | struct machine *machine) |
321 | { | 310 | { |
322 | struct thread *thread = machine__findnew_thread(machine, event->ip.pid); | 311 | struct thread *thread = machine__findnew_thread(machine, event->ip.pid); |
@@ -329,18 +318,18 @@ static int process_sample_event(struct perf_tool *tool, | |||
329 | 318 | ||
330 | dump_printf(" ... thread: %s:%d\n", thread->comm, thread->pid); | 319 | dump_printf(" ... thread: %s:%d\n", thread->comm, thread->pid); |
331 | 320 | ||
332 | process_raw_event(tool, event, sample->raw_data, sample->cpu, | 321 | if (evsel->handler.func != NULL) { |
333 | sample->time, thread); | 322 | tracepoint_handler f = evsel->handler.func; |
323 | return f(evsel, sample); | ||
324 | } | ||
334 | 325 | ||
335 | return 0; | 326 | return 0; |
336 | } | 327 | } |
337 | 328 | ||
338 | static struct perf_kmem perf_kmem = { | 329 | static struct perf_tool perf_kmem = { |
339 | .tool = { | 330 | .sample = process_sample_event, |
340 | .sample = process_sample_event, | 331 | .comm = perf_event__process_comm, |
341 | .comm = perf_event__process_comm, | 332 | .ordered_samples = true, |
342 | .ordered_samples = true, | ||
343 | }, | ||
344 | }; | 333 | }; |
345 | 334 | ||
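[editor's note] The new tracepoint_handler typedef plus evsel->handler.func turn the sample callback into a thin dispatcher: no more strcmp() chain over event names. A minimal, self-contained illustration of that shape (types and names are invented for the sketch, not perf internals):

	#include <stdio.h>

	struct sample { int cpu; };
	struct evsel;

	typedef int (*tracepoint_handler)(struct evsel *evsel, struct sample *sample);

	struct evsel { const char *name; tracepoint_handler handler; };

	static int handle_alloc(struct evsel *evsel, struct sample *sample)
	{
		printf("%s on cpu %d\n", evsel->name, sample->cpu);
		return 0;
	}

	/* the generic sample path just calls whatever was attached to the event */
	static int deliver_sample(struct evsel *evsel, struct sample *sample)
	{
		if (evsel->handler != NULL)
			return evsel->handler(evsel, sample);
		return 0;
	}

	int main(void)
	{
		struct evsel ev = { .name = "kmem:kmalloc", .handler = handle_alloc };
		struct sample s = { .cpu = 3 };

		return deliver_sample(&ev, &s);
	}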
346 | static double fragmentation(unsigned long n_req, unsigned long n_alloc) | 335 | static double fragmentation(unsigned long n_req, unsigned long n_alloc) |
@@ -496,22 +485,32 @@ static int __cmd_kmem(void) | |||
496 | { | 485 | { |
497 | int err = -EINVAL; | 486 | int err = -EINVAL; |
498 | struct perf_session *session; | 487 | struct perf_session *session; |
499 | 488 | const struct perf_evsel_str_handler kmem_tracepoints[] = { | |
500 | session = perf_session__new(input_name, O_RDONLY, 0, false, | 489 | { "kmem:kmalloc", perf_evsel__process_alloc_event, }, |
501 | &perf_kmem.tool); | 490 | { "kmem:kmem_cache_alloc", perf_evsel__process_alloc_event, }, |
491 | { "kmem:kmalloc_node", perf_evsel__process_alloc_node_event, }, | ||
492 | { "kmem:kmem_cache_alloc_node", perf_evsel__process_alloc_node_event, }, | ||
493 | { "kmem:kfree", perf_evsel__process_free_event, }, | ||
494 | { "kmem:kmem_cache_free", perf_evsel__process_free_event, }, | ||
495 | }; | ||
496 | |||
497 | session = perf_session__new(input_name, O_RDONLY, 0, false, &perf_kmem); | ||
502 | if (session == NULL) | 498 | if (session == NULL) |
503 | return -ENOMEM; | 499 | return -ENOMEM; |
504 | 500 | ||
505 | perf_kmem.session = session; | ||
506 | |||
507 | if (perf_session__create_kernel_maps(session) < 0) | 501 | if (perf_session__create_kernel_maps(session) < 0) |
508 | goto out_delete; | 502 | goto out_delete; |
509 | 503 | ||
510 | if (!perf_session__has_traces(session, "kmem record")) | 504 | if (!perf_session__has_traces(session, "kmem record")) |
511 | goto out_delete; | 505 | goto out_delete; |
512 | 506 | ||
507 | if (perf_session__set_tracepoints_handlers(session, kmem_tracepoints)) { | ||
508 | pr_err("Initializing perf session tracepoint handlers failed\n"); | ||
509 | return -1; | ||
510 | } | ||
511 | |||
513 | setup_pager(); | 512 | setup_pager(); |
514 | err = perf_session__process_events(session, &perf_kmem.tool); | 513 | err = perf_session__process_events(session, &perf_kmem); |
515 | if (err != 0) | 514 | if (err != 0) |
516 | goto out_delete; | 515 | goto out_delete; |
517 | sort_result(); | 516 | sort_result(); |
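[editor's note] Registering the handlers up front replaces the name comparisons that used to live in process_raw_event(): kmem_tracepoints[] maps tracepoint names to functions and perf_session__set_tracepoints_handlers() attaches them to the matching evsels before processing starts. A toy version of that setup step, runnable on its own:

	#include <stdio.h>
	#include <string.h>

	typedef int (*handler_t)(void);

	struct evsel { const char *name; handler_t handler; };
	struct str_handler { const char *name; handler_t fn; };

	static int on_kmalloc(void) { puts("kmalloc sample"); return 0; }
	static int on_kfree(void)   { puts("kfree sample");   return 0; }

	/* analogous to kmem_tracepoints[]: "subsystem:event" -> handler */
	static const struct str_handler table[] = {
		{ "kmem:kmalloc", on_kmalloc },
		{ "kmem:kfree",   on_kfree   },
	};

	static void set_handlers(struct evsel *evlist, int nr)
	{
		for (int i = 0; i < nr; i++)
			for (unsigned int j = 0; j < sizeof(table) / sizeof(table[0]); j++)
				if (!strcmp(evlist[i].name, table[j].name))
					evlist[i].handler = table[j].fn;
	}

	int main(void)
	{
		struct evsel evlist[] = { { "kmem:kfree", NULL }, { "kmem:kmalloc", NULL } };

		set_handlers(evlist, 2);
		return evlist[0].handler();	/* runs on_kfree */
	}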
@@ -635,8 +634,10 @@ static int sort_dimension__add(const char *tok, struct list_head *list) | |||
635 | for (i = 0; i < NUM_AVAIL_SORTS; i++) { | 634 | for (i = 0; i < NUM_AVAIL_SORTS; i++) { |
636 | if (!strcmp(avail_sorts[i]->name, tok)) { | 635 | if (!strcmp(avail_sorts[i]->name, tok)) { |
637 | sort = malloc(sizeof(*sort)); | 636 | sort = malloc(sizeof(*sort)); |
638 | if (!sort) | 637 | if (!sort) { |
639 | die("malloc"); | 638 | pr_err("%s: malloc failed\n", __func__); |
639 | return -1; | ||
640 | } | ||
640 | memcpy(sort, avail_sorts[i], sizeof(*sort)); | 641 | memcpy(sort, avail_sorts[i], sizeof(*sort)); |
641 | list_add_tail(&sort->list, list); | 642 | list_add_tail(&sort->list, list); |
642 | return 0; | 643 | return 0; |
@@ -651,8 +652,10 @@ static int setup_sorting(struct list_head *sort_list, const char *arg) | |||
651 | char *tok; | 652 | char *tok; |
652 | char *str = strdup(arg); | 653 | char *str = strdup(arg); |
653 | 654 | ||
654 | if (!str) | 655 | if (!str) { |
655 | die("strdup"); | 656 | pr_err("%s: strdup failed\n", __func__); |
657 | return -1; | ||
658 | } | ||
656 | 659 | ||
657 | while (true) { | 660 | while (true) { |
658 | tok = strsep(&str, ","); | 661 | tok = strsep(&str, ","); |
@@ -669,8 +672,8 @@ static int setup_sorting(struct list_head *sort_list, const char *arg) | |||
669 | return 0; | 672 | return 0; |
670 | } | 673 | } |
671 | 674 | ||
672 | static int parse_sort_opt(const struct option *opt __used, | 675 | static int parse_sort_opt(const struct option *opt __maybe_unused, |
673 | const char *arg, int unset __used) | 676 | const char *arg, int unset __maybe_unused) |
674 | { | 677 | { |
675 | if (!arg) | 678 | if (!arg) |
676 | return -1; | 679 | return -1; |
@@ -683,22 +686,24 @@ static int parse_sort_opt(const struct option *opt __used, | |||
683 | return 0; | 686 | return 0; |
684 | } | 687 | } |
685 | 688 | ||
686 | static int parse_caller_opt(const struct option *opt __used, | 689 | static int parse_caller_opt(const struct option *opt __maybe_unused, |
687 | const char *arg __used, int unset __used) | 690 | const char *arg __maybe_unused, |
691 | int unset __maybe_unused) | ||
688 | { | 692 | { |
689 | caller_flag = (alloc_flag + 1); | 693 | caller_flag = (alloc_flag + 1); |
690 | return 0; | 694 | return 0; |
691 | } | 695 | } |
692 | 696 | ||
693 | static int parse_alloc_opt(const struct option *opt __used, | 697 | static int parse_alloc_opt(const struct option *opt __maybe_unused, |
694 | const char *arg __used, int unset __used) | 698 | const char *arg __maybe_unused, |
699 | int unset __maybe_unused) | ||
695 | { | 700 | { |
696 | alloc_flag = (caller_flag + 1); | 701 | alloc_flag = (caller_flag + 1); |
697 | return 0; | 702 | return 0; |
698 | } | 703 | } |
699 | 704 | ||
700 | static int parse_line_opt(const struct option *opt __used, | 705 | static int parse_line_opt(const struct option *opt __maybe_unused, |
701 | const char *arg, int unset __used) | 706 | const char *arg, int unset __maybe_unused) |
702 | { | 707 | { |
703 | int lines; | 708 | int lines; |
704 | 709 | ||
@@ -768,7 +773,7 @@ static int __cmd_record(int argc, const char **argv) | |||
768 | return cmd_record(i, rec_argv, NULL); | 773 | return cmd_record(i, rec_argv, NULL); |
769 | } | 774 | } |
770 | 775 | ||
771 | int cmd_kmem(int argc, const char **argv, const char *prefix __used) | 776 | int cmd_kmem(int argc, const char **argv, const char *prefix __maybe_unused) |
772 | { | 777 | { |
773 | argc = parse_options(argc, argv, kmem_options, kmem_usage, 0); | 778 | argc = parse_options(argc, argv, kmem_options, kmem_usage, 0); |
774 | 779 | ||
@@ -780,7 +785,8 @@ int cmd_kmem(int argc, const char **argv, const char *prefix __used) | |||
780 | if (!strncmp(argv[0], "rec", 3)) { | 785 | if (!strncmp(argv[0], "rec", 3)) { |
781 | return __cmd_record(argc, argv); | 786 | return __cmd_record(argc, argv); |
782 | } else if (!strcmp(argv[0], "stat")) { | 787 | } else if (!strcmp(argv[0], "stat")) { |
783 | setup_cpunode_map(); | 788 | if (setup_cpunode_map()) |
789 | return -1; | ||
784 | 790 | ||
785 | if (list_empty(&caller_sort)) | 791 | if (list_empty(&caller_sort)) |
786 | setup_sorting(&caller_sort, default_sort_order); | 792 | setup_sorting(&caller_sort, default_sort_order); |
diff --git a/tools/perf/builtin-kvm.c b/tools/perf/builtin-kvm.c index 9fc6e0fa3dce..a28c9cad9048 100644 --- a/tools/perf/builtin-kvm.c +++ b/tools/perf/builtin-kvm.c | |||
@@ -1,6 +1,7 @@ | |||
1 | #include "builtin.h" | 1 | #include "builtin.h" |
2 | #include "perf.h" | 2 | #include "perf.h" |
3 | 3 | ||
4 | #include "util/evsel.h" | ||
4 | #include "util/util.h" | 5 | #include "util/util.h" |
5 | #include "util/cache.h" | 6 | #include "util/cache.h" |
6 | #include "util/symbol.h" | 7 | #include "util/symbol.h" |
@@ -10,8 +11,10 @@ | |||
10 | 11 | ||
11 | #include "util/parse-options.h" | 12 | #include "util/parse-options.h" |
12 | #include "util/trace-event.h" | 13 | #include "util/trace-event.h" |
13 | |||
14 | #include "util/debug.h" | 14 | #include "util/debug.h" |
15 | #include "util/debugfs.h" | ||
16 | #include "util/tool.h" | ||
17 | #include "util/stat.h" | ||
15 | 18 | ||
16 | #include <sys/prctl.h> | 19 | #include <sys/prctl.h> |
17 | 20 | ||
@@ -19,11 +22,836 @@ | |||
19 | #include <pthread.h> | 22 | #include <pthread.h> |
20 | #include <math.h> | 23 | #include <math.h> |
21 | 24 | ||
22 | static const char *file_name; | 25 | #include "../../arch/x86/include/asm/svm.h" |
26 | #include "../../arch/x86/include/asm/vmx.h" | ||
27 | #include "../../arch/x86/include/asm/kvm.h" | ||
28 | |||
29 | struct event_key { | ||
30 | #define INVALID_KEY (~0ULL) | ||
31 | u64 key; | ||
32 | int info; | ||
33 | }; | ||
34 | |||
35 | struct kvm_events_ops { | ||
36 | bool (*is_begin_event)(struct perf_evsel *evsel, | ||
37 | struct perf_sample *sample, | ||
38 | struct event_key *key); | ||
39 | bool (*is_end_event)(struct perf_evsel *evsel, | ||
40 | struct perf_sample *sample, struct event_key *key); | ||
41 | void (*decode_key)(struct event_key *key, char decode[20]); | ||
42 | const char *name; | ||
43 | }; | ||
44 | |||
45 | static void exit_event_get_key(struct perf_evsel *evsel, | ||
46 | struct perf_sample *sample, | ||
47 | struct event_key *key) | ||
48 | { | ||
49 | key->info = 0; | ||
50 | key->key = perf_evsel__intval(evsel, sample, "exit_reason"); | ||
51 | } | ||
52 | |||
53 | static bool kvm_exit_event(struct perf_evsel *evsel) | ||
54 | { | ||
55 | return !strcmp(evsel->name, "kvm:kvm_exit"); | ||
56 | } | ||
57 | |||
58 | static bool exit_event_begin(struct perf_evsel *evsel, | ||
59 | struct perf_sample *sample, struct event_key *key) | ||
60 | { | ||
61 | if (kvm_exit_event(evsel)) { | ||
62 | exit_event_get_key(evsel, sample, key); | ||
63 | return true; | ||
64 | } | ||
65 | |||
66 | return false; | ||
67 | } | ||
68 | |||
69 | static bool kvm_entry_event(struct perf_evsel *evsel) | ||
70 | { | ||
71 | return !strcmp(evsel->name, "kvm:kvm_entry"); | ||
72 | } | ||
73 | |||
74 | static bool exit_event_end(struct perf_evsel *evsel, | ||
75 | struct perf_sample *sample __maybe_unused, | ||
76 | struct event_key *key __maybe_unused) | ||
77 | { | ||
78 | return kvm_entry_event(evsel); | ||
79 | } | ||
80 | |||
81 | struct exit_reasons_table { | ||
82 | unsigned long exit_code; | ||
83 | const char *reason; | ||
84 | }; | ||
85 | |||
86 | struct exit_reasons_table vmx_exit_reasons[] = { | ||
87 | VMX_EXIT_REASONS | ||
88 | }; | ||
89 | |||
90 | struct exit_reasons_table svm_exit_reasons[] = { | ||
91 | SVM_EXIT_REASONS | ||
92 | }; | ||
93 | |||
94 | static int cpu_isa; | ||
95 | |||
96 | static const char *get_exit_reason(u64 exit_code) | ||
97 | { | ||
98 | int table_size = ARRAY_SIZE(svm_exit_reasons); | ||
99 | struct exit_reasons_table *table = svm_exit_reasons; | ||
100 | |||
101 | if (cpu_isa == 1) { | ||
102 | table = vmx_exit_reasons; | ||
103 | table_size = ARRAY_SIZE(vmx_exit_reasons); | ||
104 | } | ||
105 | |||
106 | while (table_size--) { | ||
107 | if (table->exit_code == exit_code) | ||
108 | return table->reason; | ||
109 | table++; | ||
110 | } | ||
111 | |||
112 | pr_err("unknown kvm exit code:%lld on %s\n", | ||
113 | (unsigned long long)exit_code, cpu_isa ? "VMX" : "SVM"); | ||
114 | return "UNKNOWN"; | ||
115 | } | ||
116 | |||
117 | static void exit_event_decode_key(struct event_key *key, char decode[20]) | ||
118 | { | ||
119 | const char *exit_reason = get_exit_reason(key->key); | ||
120 | |||
121 | scnprintf(decode, 20, "%s", exit_reason); | ||
122 | } | ||
123 | |||
124 | static struct kvm_events_ops exit_events = { | ||
125 | .is_begin_event = exit_event_begin, | ||
126 | .is_end_event = exit_event_end, | ||
127 | .decode_key = exit_event_decode_key, | ||
128 | .name = "VM-EXIT" | ||
129 | }; | ||
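[editor's note] Each analysis mode in 'perf kvm stat' is a kvm_events_ops bundle: is_begin_event()/is_end_event() bracket the measured interval and decode_key() turns the numeric key back into text. For VM exits the decode step is a flat code-to-name lookup over whatever VMX_EXIT_REASONS/SVM_EXIT_REASONS expand to. A self-contained toy of that lookup (the three entries are illustrative, not taken from the headers):

	#include <stdio.h>

	/* code -> name table searched linearly, same shape as get_exit_reason() */
	struct exit_reason { unsigned long code; const char *name; };

	static const struct exit_reason reasons[] = {
		{ 0,  "EXCEPTION_NMI" },
		{ 1,  "EXTERNAL_INTERRUPT" },
		{ 12, "HLT" },
	};

	static const char *decode(unsigned long code)
	{
		for (unsigned int i = 0; i < sizeof(reasons) / sizeof(reasons[0]); i++)
			if (reasons[i].code == code)
				return reasons[i].name;
		return "UNKNOWN";
	}

	int main(void)
	{
		printf("%s\n", decode(12));	/* prints HLT */
		return 0;
	}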
130 | |||
131 | /* | ||
132 | * For the mmio events, we treat: | ||
133 | * the time of MMIO write: kvm_mmio(KVM_TRACE_MMIO_WRITE...) -> kvm_entry | ||
134 | * the time of MMIO read: kvm_exit -> kvm_mmio(KVM_TRACE_MMIO_READ...). | ||
135 | */ | ||
136 | static void mmio_event_get_key(struct perf_evsel *evsel, struct perf_sample *sample, | ||
137 | struct event_key *key) | ||
138 | { | ||
139 | key->key = perf_evsel__intval(evsel, sample, "gpa"); | ||
140 | key->info = perf_evsel__intval(evsel, sample, "type"); | ||
141 | } | ||
142 | |||
143 | #define KVM_TRACE_MMIO_READ_UNSATISFIED 0 | ||
144 | #define KVM_TRACE_MMIO_READ 1 | ||
145 | #define KVM_TRACE_MMIO_WRITE 2 | ||
146 | |||
147 | static bool mmio_event_begin(struct perf_evsel *evsel, | ||
148 | struct perf_sample *sample, struct event_key *key) | ||
149 | { | ||
150 | /* MMIO read begin event in kernel. */ | ||
151 | if (kvm_exit_event(evsel)) | ||
152 | return true; | ||
153 | |||
154 | /* MMIO write begin event in kernel. */ | ||
155 | if (!strcmp(evsel->name, "kvm:kvm_mmio") && | ||
156 | perf_evsel__intval(evsel, sample, "type") == KVM_TRACE_MMIO_WRITE) { | ||
157 | mmio_event_get_key(evsel, sample, key); | ||
158 | return true; | ||
159 | } | ||
160 | |||
161 | return false; | ||
162 | } | ||
163 | |||
164 | static bool mmio_event_end(struct perf_evsel *evsel, struct perf_sample *sample, | ||
165 | struct event_key *key) | ||
166 | { | ||
167 | /* MMIO write end event in kernel. */ | ||
168 | if (kvm_entry_event(evsel)) | ||
169 | return true; | ||
170 | |||
171 | /* MMIO read end event in kernel.*/ | ||
172 | if (!strcmp(evsel->name, "kvm:kvm_mmio") && | ||
173 | perf_evsel__intval(evsel, sample, "type") == KVM_TRACE_MMIO_READ) { | ||
174 | mmio_event_get_key(evsel, sample, key); | ||
175 | return true; | ||
176 | } | ||
177 | |||
178 | return false; | ||
179 | } | ||
180 | |||
181 | static void mmio_event_decode_key(struct event_key *key, char decode[20]) | ||
182 | { | ||
183 | scnprintf(decode, 20, "%#lx:%s", (unsigned long)key->key, | ||
184 | key->info == KVM_TRACE_MMIO_WRITE ? "W" : "R"); | ||
185 | } | ||
186 | |||
187 | static struct kvm_events_ops mmio_events = { | ||
188 | .is_begin_event = mmio_event_begin, | ||
189 | .is_end_event = mmio_event_end, | ||
190 | .decode_key = mmio_event_decode_key, | ||
191 | .name = "MMIO Access" | ||
192 | }; | ||
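[editor's note] The MMIO comment above is easier to see on a timeline: the write cost is charged from the kvm_mmio(WRITE) record until the guest re-enters, while the read cost is charged from the exit until the kvm_mmio(READ) record, which is exactly what mmio_event_begin()/mmio_event_end() above select. Roughly:

	MMIO write:  kvm_exit   kvm_mmio(KVM_TRACE_MMIO_WRITE) -----------> kvm_entry
	                        |<----------- measured interval ----------->|

	MMIO read:   kvm_exit -----------> kvm_mmio(KVM_TRACE_MMIO_READ)    kvm_entry
	             |<------ measured interval ------->|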
193 | |||
194 | /* The time of emulation pio access is from kvm_pio to kvm_entry. */ | ||
195 | static void ioport_event_get_key(struct perf_evsel *evsel, | ||
196 | struct perf_sample *sample, | ||
197 | struct event_key *key) | ||
198 | { | ||
199 | key->key = perf_evsel__intval(evsel, sample, "port"); | ||
200 | key->info = perf_evsel__intval(evsel, sample, "rw"); | ||
201 | } | ||
202 | |||
203 | static bool ioport_event_begin(struct perf_evsel *evsel, | ||
204 | struct perf_sample *sample, | ||
205 | struct event_key *key) | ||
206 | { | ||
207 | if (!strcmp(evsel->name, "kvm:kvm_pio")) { | ||
208 | ioport_event_get_key(evsel, sample, key); | ||
209 | return true; | ||
210 | } | ||
211 | |||
212 | return false; | ||
213 | } | ||
214 | |||
215 | static bool ioport_event_end(struct perf_evsel *evsel, | ||
216 | struct perf_sample *sample __maybe_unused, | ||
217 | struct event_key *key __maybe_unused) | ||
218 | { | ||
219 | return kvm_entry_event(evsel); | ||
220 | } | ||
221 | |||
222 | static void ioport_event_decode_key(struct event_key *key, char decode[20]) | ||
223 | { | ||
224 | scnprintf(decode, 20, "%#llx:%s", (unsigned long long)key->key, | ||
225 | key->info ? "POUT" : "PIN"); | ||
226 | } | ||
227 | |||
228 | static struct kvm_events_ops ioport_events = { | ||
229 | .is_begin_event = ioport_event_begin, | ||
230 | .is_end_event = ioport_event_end, | ||
231 | .decode_key = ioport_event_decode_key, | ||
232 | .name = "IO Port Access" | ||
233 | }; | ||
234 | |||
235 | static const char *report_event = "vmexit"; | ||
236 | struct kvm_events_ops *events_ops; | ||
237 | |||
238 | static bool register_kvm_events_ops(void) | ||
239 | { | ||
240 | bool ret = true; | ||
241 | |||
242 | if (!strcmp(report_event, "vmexit")) | ||
243 | events_ops = &exit_events; | ||
244 | else if (!strcmp(report_event, "mmio")) | ||
245 | events_ops = &mmio_events; | ||
246 | else if (!strcmp(report_event, "ioport")) | ||
247 | events_ops = &ioport_events; | ||
248 | else { | ||
249 | pr_err("Unknown report event:%s\n", report_event); | ||
250 | ret = false; | ||
251 | } | ||
252 | |||
253 | return ret; | ||
254 | } | ||
255 | |||
256 | struct kvm_event_stats { | ||
257 | u64 time; | ||
258 | struct stats stats; | ||
259 | }; | ||
260 | |||
261 | struct kvm_event { | ||
262 | struct list_head hash_entry; | ||
263 | struct rb_node rb; | ||
264 | |||
265 | struct event_key key; | ||
266 | |||
267 | struct kvm_event_stats total; | ||
268 | |||
269 | #define DEFAULT_VCPU_NUM 8 | ||
270 | int max_vcpu; | ||
271 | struct kvm_event_stats *vcpu; | ||
272 | }; | ||
273 | |||
274 | struct vcpu_event_record { | ||
275 | int vcpu_id; | ||
276 | u64 start_time; | ||
277 | struct kvm_event *last_event; | ||
278 | }; | ||
279 | |||
280 | #define EVENTS_BITS 12 | ||
281 | #define EVENTS_CACHE_SIZE (1UL << EVENTS_BITS) | ||
282 | |||
283 | static u64 total_time; | ||
284 | static u64 total_count; | ||
285 | static struct list_head kvm_events_cache[EVENTS_CACHE_SIZE]; | ||
286 | |||
287 | static void init_kvm_event_record(void) | ||
288 | { | ||
289 | int i; | ||
290 | |||
291 | for (i = 0; i < (int)EVENTS_CACHE_SIZE; i++) | ||
292 | INIT_LIST_HEAD(&kvm_events_cache[i]); | ||
293 | } | ||
294 | |||
295 | static int kvm_events_hash_fn(u64 key) | ||
296 | { | ||
297 | return key & (EVENTS_CACHE_SIZE - 1); | ||
298 | } | ||
299 | |||
300 | static bool kvm_event_expand(struct kvm_event *event, int vcpu_id) | ||
301 | { | ||
302 | int old_max_vcpu = event->max_vcpu; | ||
303 | |||
304 | if (vcpu_id < event->max_vcpu) | ||
305 | return true; | ||
306 | |||
307 | while (event->max_vcpu <= vcpu_id) | ||
308 | event->max_vcpu += DEFAULT_VCPU_NUM; | ||
309 | |||
310 | event->vcpu = realloc(event->vcpu, | ||
311 | event->max_vcpu * sizeof(*event->vcpu)); | ||
312 | if (!event->vcpu) { | ||
313 | pr_err("Not enough memory\n"); | ||
314 | return false; | ||
315 | } | ||
316 | |||
317 | memset(event->vcpu + old_max_vcpu, 0, | ||
318 | (event->max_vcpu - old_max_vcpu) * sizeof(*event->vcpu)); | ||
319 | return true; | ||
320 | } | ||
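[editor's note] kvm_event_expand() grows the per-vcpu stats array in DEFAULT_VCPU_NUM steps and zeroes only the new tail, so earlier vcpus keep their counters. A self-contained sketch of the same growth pattern; unlike the snippet above it reallocs into a temporary first, so the old array is not lost if realloc() fails:

	#include <stdio.h>
	#include <stdlib.h>
	#include <string.h>

	#define STEP 8

	struct vcpu_stats { unsigned long n; };

	static int expand(struct vcpu_stats **vcpu, int *max_vcpu, int vcpu_id)
	{
		int old_max = *max_vcpu;
		struct vcpu_stats *tmp;

		if (vcpu_id < old_max)
			return 0;			/* already large enough */

		while (*max_vcpu <= vcpu_id)
			*max_vcpu += STEP;

		tmp = realloc(*vcpu, *max_vcpu * sizeof(**vcpu));
		if (!tmp)
			return -1;

		/* zero only the newly added entries, keep the old counters */
		memset(tmp + old_max, 0, (*max_vcpu - old_max) * sizeof(*tmp));
		*vcpu = tmp;
		return 0;
	}

	int main(void)
	{
		struct vcpu_stats *vcpu = NULL;
		int max_vcpu = 0;

		if (expand(&vcpu, &max_vcpu, 11))
			return 1;
		printf("max_vcpu=%d n[11]=%lu\n", max_vcpu, vcpu[11].n);	/* 16, 0 */
		free(vcpu);
		return 0;
	}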
321 | |||
322 | static struct kvm_event *kvm_alloc_init_event(struct event_key *key) | ||
323 | { | ||
324 | struct kvm_event *event; | ||
325 | |||
326 | event = zalloc(sizeof(*event)); | ||
327 | if (!event) { | ||
328 | pr_err("Not enough memory\n"); | ||
329 | return NULL; | ||
330 | } | ||
331 | |||
332 | event->key = *key; | ||
333 | return event; | ||
334 | } | ||
335 | |||
336 | static struct kvm_event *find_create_kvm_event(struct event_key *key) | ||
337 | { | ||
338 | struct kvm_event *event; | ||
339 | struct list_head *head; | ||
340 | |||
341 | BUG_ON(key->key == INVALID_KEY); | ||
342 | |||
343 | head = &kvm_events_cache[kvm_events_hash_fn(key->key)]; | ||
344 | list_for_each_entry(event, head, hash_entry) | ||
345 | if (event->key.key == key->key && event->key.info == key->info) | ||
346 | return event; | ||
347 | |||
348 | event = kvm_alloc_init_event(key); | ||
349 | if (!event) | ||
350 | return NULL; | ||
351 | |||
352 | list_add(&event->hash_entry, head); | ||
353 | return event; | ||
354 | } | ||
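[editor's note] find_create_kvm_event() is a classic hash-with-chaining cache: the key is masked down to one of EVENTS_CACHE_SIZE buckets and that bucket's list is scanned for an exact (key, info) match before allocating a new entry. The same structure as a runnable toy:

	#include <stdio.h>

	#define BITS		4
	#define CACHE_SIZE	(1u << BITS)

	struct node { unsigned long long key; struct node *next; };

	static struct node *cache[CACHE_SIZE];

	static unsigned int hash_fn(unsigned long long key)
	{
		return key & (CACHE_SIZE - 1);
	}

	static struct node *find(unsigned long long key)
	{
		for (struct node *n = cache[hash_fn(key)]; n; n = n->next)
			if (n->key == key)
				return n;
		return NULL;
	}

	static void insert(struct node *n)
	{
		unsigned int b = hash_fn(n->key);

		n->next = cache[b];
		cache[b] = n;
	}

	int main(void)
	{
		struct node a = { .key = 0x30 };	/* hashes to bucket 0 */

		insert(&a);
		printf("%s\n", find(0x30) ? "hit" : "miss");	/* hit */
		return 0;
	}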
355 | |||
356 | static bool handle_begin_event(struct vcpu_event_record *vcpu_record, | ||
357 | struct event_key *key, u64 timestamp) | ||
358 | { | ||
359 | struct kvm_event *event = NULL; | ||
360 | |||
361 | if (key->key != INVALID_KEY) | ||
362 | event = find_create_kvm_event(key); | ||
363 | |||
364 | vcpu_record->last_event = event; | ||
365 | vcpu_record->start_time = timestamp; | ||
366 | return true; | ||
367 | } | ||
368 | |||
369 | static void | ||
370 | kvm_update_event_stats(struct kvm_event_stats *kvm_stats, u64 time_diff) | ||
371 | { | ||
372 | kvm_stats->time += time_diff; | ||
373 | update_stats(&kvm_stats->stats, time_diff); | ||
374 | } | ||
375 | |||
376 | static double kvm_event_rel_stddev(int vcpu_id, struct kvm_event *event) | ||
377 | { | ||
378 | struct kvm_event_stats *kvm_stats = &event->total; | ||
379 | |||
380 | if (vcpu_id != -1) | ||
381 | kvm_stats = &event->vcpu[vcpu_id]; | ||
382 | |||
383 | return rel_stddev_stats(stddev_stats(&kvm_stats->stats), | ||
384 | avg_stats(&kvm_stats->stats)); | ||
385 | } | ||
386 | |||
387 | static bool update_kvm_event(struct kvm_event *event, int vcpu_id, | ||
388 | u64 time_diff) | ||
389 | { | ||
390 | kvm_update_event_stats(&event->total, time_diff); | ||
391 | |||
392 | if (!kvm_event_expand(event, vcpu_id)) | ||
393 | return false; | ||
394 | |||
395 | kvm_update_event_stats(&event->vcpu[vcpu_id], time_diff); | ||
396 | return true; | ||
397 | } | ||
398 | |||
399 | static bool handle_end_event(struct vcpu_event_record *vcpu_record, | ||
400 | struct event_key *key, u64 timestamp) | ||
401 | { | ||
402 | struct kvm_event *event; | ||
403 | u64 time_begin, time_diff; | ||
404 | |||
405 | event = vcpu_record->last_event; | ||
406 | time_begin = vcpu_record->start_time; | ||
407 | |||
408 | /* The begin event is not caught. */ | ||
409 | if (!time_begin) | ||
410 | return true; | ||
411 | |||
412 | /* | ||
413 | * In some case, the 'begin event' only records the start timestamp, | ||
414 | * the actual event is recognized in the 'end event' (e.g. mmio-event). | ||
415 | */ | ||
416 | |||
417 | /* Both begin and end events did not get the key. */ | ||
418 | if (!event && key->key == INVALID_KEY) | ||
419 | return true; | ||
420 | |||
421 | if (!event) | ||
422 | event = find_create_kvm_event(key); | ||
423 | |||
424 | if (!event) | ||
425 | return false; | ||
426 | |||
427 | vcpu_record->last_event = NULL; | ||
428 | vcpu_record->start_time = 0; | ||
429 | |||
430 | BUG_ON(timestamp < time_begin); | ||
431 | |||
432 | time_diff = timestamp - time_begin; | ||
433 | return update_kvm_event(event, vcpu_record->vcpu_id, time_diff); | ||
434 | } | ||
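[editor's note] handle_begin_event()/handle_end_event() carry the core measurement: the begin event only stamps start_time, and the matching end event turns the difference into one sample for update_stats(). The real code also copes with keys that only become known at the end event (the MMIO-read case); this bare-bones, runnable version skips that:

	#include <stdio.h>

	struct duration_stats { unsigned long n; double sum; };

	static unsigned long long start_time;	/* 0 means "no begin seen" */
	static struct duration_stats st;

	static void on_begin(unsigned long long ts)
	{
		start_time = ts;
	}

	static void on_end(unsigned long long ts)
	{
		if (!start_time)
			return;			/* begin not captured, drop the sample */
		st.n++;
		st.sum += (double)(ts - start_time);
		start_time = 0;
	}

	int main(void)
	{
		on_begin(1000);
		on_end(1750);
		printf("samples=%lu avg=%.1f\n", st.n, st.sum / st.n);	/* 1, 750.0 */
		return 0;
	}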
435 | |||
436 | static | ||
437 | struct vcpu_event_record *per_vcpu_record(struct thread *thread, | ||
438 | struct perf_evsel *evsel, | ||
439 | struct perf_sample *sample) | ||
440 | { | ||
441 | /* Only kvm_entry records vcpu id. */ | ||
442 | if (!thread->priv && kvm_entry_event(evsel)) { | ||
443 | struct vcpu_event_record *vcpu_record; | ||
444 | |||
445 | vcpu_record = zalloc(sizeof(*vcpu_record)); | ||
446 | if (!vcpu_record) { | ||
447 | pr_err("%s: Not enough memory\n", __func__); | ||
448 | return NULL; | ||
449 | } | ||
450 | |||
451 | vcpu_record->vcpu_id = perf_evsel__intval(evsel, sample, "vcpu_id"); | ||
452 | thread->priv = vcpu_record; | ||
453 | } | ||
454 | |||
455 | return thread->priv; | ||
456 | } | ||
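[editor's note] per_vcpu_record() stashes per-thread state in thread->priv: the record is allocated lazily on the first kvm_entry (the only event that carries vcpu_id) and then reused for every later sample from that thread. A small stand-alone sketch of that lazy-init pattern:

	#include <stdio.h>
	#include <stdlib.h>

	struct vcpu_rec { int vcpu_id; };
	struct thread   { void *priv; };

	static struct vcpu_rec *per_thread_rec(struct thread *t, int has_vcpu_id,
					       int vcpu_id)
	{
		if (!t->priv && has_vcpu_id) {
			struct vcpu_rec *r = calloc(1, sizeof(*r));

			if (!r)
				return NULL;
			r->vcpu_id = vcpu_id;
			t->priv = r;		/* cache it on the thread */
		}
		return t->priv;
	}

	int main(void)
	{
		struct thread t = { .priv = NULL };

		per_thread_rec(&t, 1, 2);	/* first "kvm_entry": allocate */
		printf("vcpu=%d\n", per_thread_rec(&t, 0, -1)->vcpu_id);	/* reuse: 2 */
		free(t.priv);
		return 0;
	}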
457 | |||
458 | static bool handle_kvm_event(struct thread *thread, struct perf_evsel *evsel, | ||
459 | struct perf_sample *sample) | ||
460 | { | ||
461 | struct vcpu_event_record *vcpu_record; | ||
462 | struct event_key key = {.key = INVALID_KEY}; | ||
463 | |||
464 | vcpu_record = per_vcpu_record(thread, evsel, sample); | ||
465 | if (!vcpu_record) | ||
466 | return true; | ||
467 | |||
468 | if (events_ops->is_begin_event(evsel, sample, &key)) | ||
469 | return handle_begin_event(vcpu_record, &key, sample->time); | ||
470 | |||
471 | if (events_ops->is_end_event(evsel, sample, &key)) | ||
472 | return handle_end_event(vcpu_record, &key, sample->time); | ||
473 | |||
474 | return true; | ||
475 | } | ||
476 | |||
477 | typedef int (*key_cmp_fun)(struct kvm_event*, struct kvm_event*, int); | ||
478 | struct kvm_event_key { | ||
479 | const char *name; | ||
480 | key_cmp_fun key; | ||
481 | }; | ||
482 | |||
483 | static int trace_vcpu = -1; | ||
484 | #define GET_EVENT_KEY(func, field) \ | ||
485 | static u64 get_event_ ##func(struct kvm_event *event, int vcpu) \ | ||
486 | { \ | ||
487 | if (vcpu == -1) \ | ||
488 | return event->total.field; \ | ||
489 | \ | ||
490 | if (vcpu >= event->max_vcpu) \ | ||
491 | return 0; \ | ||
492 | \ | ||
493 | return event->vcpu[vcpu].field; \ | ||
494 | } | ||
495 | |||
496 | #define COMPARE_EVENT_KEY(func, field) \ | ||
497 | GET_EVENT_KEY(func, field) \ | ||
498 | static int compare_kvm_event_ ## func(struct kvm_event *one, \ | ||
499 | struct kvm_event *two, int vcpu)\ | ||
500 | { \ | ||
501 | return get_event_ ##func(one, vcpu) > \ | ||
502 | get_event_ ##func(two, vcpu); \ | ||
503 | } | ||
504 | |||
505 | GET_EVENT_KEY(time, time); | ||
506 | COMPARE_EVENT_KEY(count, stats.n); | ||
507 | COMPARE_EVENT_KEY(mean, stats.mean); | ||
508 | |||
509 | #define DEF_SORT_NAME_KEY(name, compare_key) \ | ||
510 | { #name, compare_kvm_event_ ## compare_key } | ||
511 | |||
512 | static struct kvm_event_key keys[] = { | ||
513 | DEF_SORT_NAME_KEY(sample, count), | ||
514 | DEF_SORT_NAME_KEY(time, mean), | ||
515 | { NULL, NULL } | ||
516 | }; | ||
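[editor's note] The GET_EVENT_KEY/COMPARE_EVENT_KEY macros generate one getter and one '>' comparator per sort key, switching between the aggregate stats and the per-vcpu bucket. Written out by hand, COMPARE_EVENT_KEY(count, stats.n) expands to roughly (whitespace adjusted):

	static u64 get_event_count(struct kvm_event *event, int vcpu)
	{
		if (vcpu == -1)
			return event->total.stats.n;

		if (vcpu >= event->max_vcpu)
			return 0;

		return event->vcpu[vcpu].stats.n;
	}

	static int compare_kvm_event_count(struct kvm_event *one,
					   struct kvm_event *two, int vcpu)
	{
		return get_event_count(one, vcpu) > get_event_count(two, vcpu);
	}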
517 | |||
518 | static const char *sort_key = "sample"; | ||
519 | static key_cmp_fun compare; | ||
520 | |||
521 | static bool select_key(void) | ||
522 | { | ||
523 | int i; | ||
524 | |||
525 | for (i = 0; keys[i].name; i++) { | ||
526 | if (!strcmp(keys[i].name, sort_key)) { | ||
527 | compare = keys[i].key; | ||
528 | return true; | ||
529 | } | ||
530 | } | ||
531 | |||
532 | pr_err("Unknown compare key:%s\n", sort_key); | ||
533 | return false; | ||
534 | } | ||
535 | |||
536 | static struct rb_root result; | ||
537 | static void insert_to_result(struct kvm_event *event, key_cmp_fun bigger, | ||
538 | int vcpu) | ||
539 | { | ||
540 | struct rb_node **rb = &result.rb_node; | ||
541 | struct rb_node *parent = NULL; | ||
542 | struct kvm_event *p; | ||
543 | |||
544 | while (*rb) { | ||
545 | p = container_of(*rb, struct kvm_event, rb); | ||
546 | parent = *rb; | ||
547 | |||
548 | if (bigger(event, p, vcpu)) | ||
549 | rb = &(*rb)->rb_left; | ||
550 | else | ||
551 | rb = &(*rb)->rb_right; | ||
552 | } | ||
553 | |||
554 | rb_link_node(&event->rb, parent, rb); | ||
555 | rb_insert_color(&event->rb, &result); | ||
556 | } | ||
557 | |||
558 | static void update_total_count(struct kvm_event *event, int vcpu) | ||
559 | { | ||
560 | total_count += get_event_count(event, vcpu); | ||
561 | total_time += get_event_time(event, vcpu); | ||
562 | } | ||
563 | |||
564 | static bool event_is_valid(struct kvm_event *event, int vcpu) | ||
565 | { | ||
566 | return !!get_event_count(event, vcpu); | ||
567 | } | ||
568 | |||
569 | static void sort_result(int vcpu) | ||
570 | { | ||
571 | unsigned int i; | ||
572 | struct kvm_event *event; | ||
573 | |||
574 | for (i = 0; i < EVENTS_CACHE_SIZE; i++) | ||
575 | list_for_each_entry(event, &kvm_events_cache[i], hash_entry) | ||
576 | if (event_is_valid(event, vcpu)) { | ||
577 | update_total_count(event, vcpu); | ||
578 | insert_to_result(event, compare, vcpu); | ||
579 | } | ||
580 | } | ||
581 | |||
582 | /* returns left most element of result, and erase it */ | ||
583 | static struct kvm_event *pop_from_result(void) | ||
584 | { | ||
585 | struct rb_node *node = rb_first(&result); | ||
586 | |||
587 | if (!node) | ||
588 | return NULL; | ||
589 | |||
590 | rb_erase(node, &result); | ||
591 | return container_of(node, struct kvm_event, rb); | ||
592 | } | ||
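[editor's note] insert_to_result() sends "bigger" entries to the left, so rb_first() hands back the entry with the largest key and the pop loop in print_result() drains the tree in descending order. The same sort-once-then-drain idea with a plain sorted list, runnable on its own:

	#include <stdio.h>

	struct ev { unsigned long count; struct ev *next; };

	static struct ev *result;

	static int bigger(struct ev *a, struct ev *b)
	{
		return a->count > b->count;
	}

	/* keep the list ordered at insert time, biggest first */
	static void insert_to_result(struct ev *e)
	{
		struct ev **p = &result;

		while (*p && !bigger(e, *p))
			p = &(*p)->next;
		e->next = *p;
		*p = e;
	}

	static struct ev *pop_from_result(void)
	{
		struct ev *e = result;

		if (e)
			result = e->next;
		return e;
	}

	int main(void)
	{
		struct ev a = { .count = 3 }, b = { .count = 7 }, c = { .count = 5 };
		struct ev *e;

		insert_to_result(&a);
		insert_to_result(&b);
		insert_to_result(&c);
		while ((e = pop_from_result()))
			printf("%lu\n", e->count);	/* 7, 5, 3 */
		return 0;
	}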
593 | |||
594 | static void print_vcpu_info(int vcpu) | ||
595 | { | ||
596 | pr_info("Analyze events for "); | ||
597 | |||
598 | if (vcpu == -1) | ||
599 | pr_info("all VCPUs:\n\n"); | ||
600 | else | ||
601 | pr_info("VCPU %d:\n\n", vcpu); | ||
602 | } | ||
603 | |||
604 | static void print_result(int vcpu) | ||
605 | { | ||
606 | char decode[20]; | ||
607 | struct kvm_event *event; | ||
608 | |||
609 | pr_info("\n\n"); | ||
610 | print_vcpu_info(vcpu); | ||
611 | pr_info("%20s ", events_ops->name); | ||
612 | pr_info("%10s ", "Samples"); | ||
613 | pr_info("%9s ", "Samples%"); | ||
614 | |||
615 | pr_info("%9s ", "Time%"); | ||
616 | pr_info("%16s ", "Avg time"); | ||
617 | pr_info("\n\n"); | ||
618 | |||
619 | while ((event = pop_from_result())) { | ||
620 | u64 ecount, etime; | ||
621 | |||
622 | ecount = get_event_count(event, vcpu); | ||
623 | etime = get_event_time(event, vcpu); | ||
624 | |||
625 | events_ops->decode_key(&event->key, decode); | ||
626 | pr_info("%20s ", decode); | ||
627 | pr_info("%10llu ", (unsigned long long)ecount); | ||
628 | pr_info("%8.2f%% ", (double)ecount / total_count * 100); | ||
629 | pr_info("%8.2f%% ", (double)etime / total_time * 100); | ||
630 | pr_info("%9.2fus ( +-%7.2f%% )", (double)etime / ecount/1e3, | ||
631 | kvm_event_rel_stddev(vcpu, event)); | ||
632 | pr_info("\n"); | ||
633 | } | ||
634 | |||
635 | pr_info("\nTotal Samples:%lld, Total events handled time:%.2fus.\n\n", | ||
636 | (unsigned long long)total_count, total_time / 1e3); | ||
637 | } | ||
638 | |||
639 | static int process_sample_event(struct perf_tool *tool __maybe_unused, | ||
640 | union perf_event *event, | ||
641 | struct perf_sample *sample, | ||
642 | struct perf_evsel *evsel, | ||
643 | struct machine *machine) | ||
644 | { | ||
645 | struct thread *thread = machine__findnew_thread(machine, sample->tid); | ||
646 | |||
647 | if (thread == NULL) { | ||
648 | pr_debug("problem processing %d event, skipping it.\n", | ||
649 | event->header.type); | ||
650 | return -1; | ||
651 | } | ||
652 | |||
653 | if (!handle_kvm_event(thread, evsel, sample)) | ||
654 | return -1; | ||
655 | |||
656 | return 0; | ||
657 | } | ||
658 | |||
659 | static struct perf_tool eops = { | ||
660 | .sample = process_sample_event, | ||
661 | .comm = perf_event__process_comm, | ||
662 | .ordered_samples = true, | ||
663 | }; | ||
664 | |||
665 | static int get_cpu_isa(struct perf_session *session) | ||
666 | { | ||
667 | char *cpuid = session->header.env.cpuid; | ||
668 | int isa; | ||
669 | |||
670 | if (strstr(cpuid, "Intel")) | ||
671 | isa = 1; | ||
672 | else if (strstr(cpuid, "AMD")) | ||
673 | isa = 0; | ||
674 | else { | ||
675 | pr_err("CPU %s is not supported.\n", cpuid); | ||
676 | isa = -ENOTSUP; | ||
677 | } | ||
678 | |||
679 | return isa; | ||
680 | } | ||
681 | |||
682 | static const char *file_name; | ||
683 | |||
684 | static int read_events(void) | ||
685 | { | ||
686 | struct perf_session *kvm_session; | ||
687 | int ret; | ||
688 | |||
689 | kvm_session = perf_session__new(file_name, O_RDONLY, 0, false, &eops); | ||
690 | if (!kvm_session) { | ||
691 | pr_err("Initializing perf session failed\n"); | ||
692 | return -EINVAL; | ||
693 | } | ||
694 | |||
695 | if (!perf_session__has_traces(kvm_session, "kvm record")) | ||
696 | return -EINVAL; | ||
697 | |||
698 | /* | ||
699 | * Do not use 'isa' recorded in kvm_exit tracepoint since it is not | ||
700 | * traced in the old kernel. | ||
701 | */ | ||
702 | ret = get_cpu_isa(kvm_session); | ||
703 | |||
704 | if (ret < 0) | ||
705 | return ret; | ||
706 | |||
707 | cpu_isa = ret; | ||
708 | |||
709 | return perf_session__process_events(kvm_session, &eops); | ||
710 | } | ||
711 | |||
712 | static bool verify_vcpu(int vcpu) | ||
713 | { | ||
714 | if (vcpu != -1 && vcpu < 0) { | ||
715 | pr_err("Invalid vcpu:%d.\n", vcpu); | ||
716 | return false; | ||
717 | } | ||
718 | |||
719 | return true; | ||
720 | } | ||
721 | |||
722 | static int kvm_events_report_vcpu(int vcpu) | ||
723 | { | ||
724 | int ret = -EINVAL; | ||
725 | |||
726 | if (!verify_vcpu(vcpu)) | ||
727 | goto exit; | ||
728 | |||
729 | if (!select_key()) | ||
730 | goto exit; | ||
731 | |||
732 | if (!register_kvm_events_ops()) | ||
733 | goto exit; | ||
734 | |||
735 | init_kvm_event_record(); | ||
736 | setup_pager(); | ||
737 | |||
738 | ret = read_events(); | ||
739 | if (ret) | ||
740 | goto exit; | ||
741 | |||
742 | sort_result(vcpu); | ||
743 | print_result(vcpu); | ||
744 | exit: | ||
745 | return ret; | ||
746 | } | ||
747 | |||
748 | static const char * const record_args[] = { | ||
749 | "record", | ||
750 | "-R", | ||
751 | "-f", | ||
752 | "-m", "1024", | ||
753 | "-c", "1", | ||
754 | "-e", "kvm:kvm_entry", | ||
755 | "-e", "kvm:kvm_exit", | ||
756 | "-e", "kvm:kvm_mmio", | ||
757 | "-e", "kvm:kvm_pio", | ||
758 | }; | ||
759 | |||
760 | #define STRDUP_FAIL_EXIT(s) \ | ||
761 | ({ char *_p; \ | ||
762 | _p = strdup(s); \ | ||
763 | if (!_p) \ | ||
764 | return -ENOMEM; \ | ||
765 | _p; \ | ||
766 | }) | ||
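[editor's note] STRDUP_FAIL_EXIT() is a GNU C statement expression, so it can sit on the right-hand side of an assignment while the 'return -ENOMEM' inside it returns from the enclosing function (kvm_events_record()), not from the macro. Each use behaves roughly like:

	rec_argv[i] = ({
		char *_p = strdup(record_args[i]);	/* or "-o" / file_name */

		if (!_p)
			return -ENOMEM;		/* leaves kvm_events_record() early */
		_p;				/* value of the whole expression */
	});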
767 | |||
768 | static int kvm_events_record(int argc, const char **argv) | ||
769 | { | ||
770 | unsigned int rec_argc, i, j; | ||
771 | const char **rec_argv; | ||
772 | |||
773 | rec_argc = ARRAY_SIZE(record_args) + argc + 2; | ||
774 | rec_argv = calloc(rec_argc + 1, sizeof(char *)); | ||
775 | |||
776 | if (rec_argv == NULL) | ||
777 | return -ENOMEM; | ||
778 | |||
779 | for (i = 0; i < ARRAY_SIZE(record_args); i++) | ||
780 | rec_argv[i] = STRDUP_FAIL_EXIT(record_args[i]); | ||
781 | |||
782 | rec_argv[i++] = STRDUP_FAIL_EXIT("-o"); | ||
783 | rec_argv[i++] = STRDUP_FAIL_EXIT(file_name); | ||
784 | |||
785 | for (j = 1; j < (unsigned int)argc; j++, i++) | ||
786 | rec_argv[i] = argv[j]; | ||
787 | |||
788 | return cmd_record(i, rec_argv, NULL); | ||
789 | } | ||
790 | |||
791 | static const char * const kvm_events_report_usage[] = { | ||
792 | "perf kvm stat report [<options>]", | ||
793 | NULL | ||
794 | }; | ||
795 | |||
796 | static const struct option kvm_events_report_options[] = { | ||
797 | OPT_STRING(0, "event", &report_event, "report event", | ||
798 | "event for reporting: vmexit, mmio, ioport"), | ||
799 | OPT_INTEGER(0, "vcpu", &trace_vcpu, | ||
800 | "vcpu id to report"), | ||
801 | OPT_STRING('k', "key", &sort_key, "sort-key", | ||
802 | "key for sorting: sample(sort by samples number)" | ||
803 | " time (sort by avg time)"), | ||
804 | OPT_END() | ||
805 | }; | ||
806 | |||
807 | static int kvm_events_report(int argc, const char **argv) | ||
808 | { | ||
809 | symbol__init(); | ||
810 | |||
811 | if (argc) { | ||
812 | argc = parse_options(argc, argv, | ||
813 | kvm_events_report_options, | ||
814 | kvm_events_report_usage, 0); | ||
815 | if (argc) | ||
816 | usage_with_options(kvm_events_report_usage, | ||
817 | kvm_events_report_options); | ||
818 | } | ||
819 | |||
820 | return kvm_events_report_vcpu(trace_vcpu); | ||
821 | } | ||
822 | |||
823 | static void print_kvm_stat_usage(void) | ||
824 | { | ||
825 | printf("Usage: perf kvm stat <command>\n\n"); | ||
826 | |||
827 | printf("# Available commands:\n"); | ||
828 | printf("\trecord: record kvm events\n"); | ||
829 | printf("\treport: report statistical data of kvm events\n"); | ||
830 | |||
831 | printf("\nOtherwise, it is the alias of 'perf stat':\n"); | ||
832 | } | ||
833 | |||
834 | static int kvm_cmd_stat(int argc, const char **argv) | ||
835 | { | ||
836 | if (argc == 1) { | ||
837 | print_kvm_stat_usage(); | ||
838 | goto perf_stat; | ||
839 | } | ||
840 | |||
841 | if (!strncmp(argv[1], "rec", 3)) | ||
842 | return kvm_events_record(argc - 1, argv + 1); | ||
843 | |||
844 | if (!strncmp(argv[1], "rep", 3)) | ||
845 | return kvm_events_report(argc - 1 , argv + 1); | ||
846 | |||
847 | perf_stat: | ||
848 | return cmd_stat(argc, argv, NULL); | ||
849 | } | ||
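[editor's note] Putting the new subcommand together, an illustrative session (the workload is an example; anything after 'record' is forwarded to perf record, and the report options are the ones defined above):

	perf kvm stat record sleep 10
	perf kvm stat report --event=vmexit --vcpu=0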
850 | |||
23 | static char name_buffer[256]; | 851 | static char name_buffer[256]; |
24 | 852 | ||
25 | static const char * const kvm_usage[] = { | 853 | static const char * const kvm_usage[] = { |
26 | "perf kvm [<options>] {top|record|report|diff|buildid-list}", | 854 | "perf kvm [<options>] {top|record|report|diff|buildid-list|stat}", |
27 | NULL | 855 | NULL |
28 | }; | 856 | }; |
29 | 857 | ||
@@ -102,7 +930,7 @@ static int __cmd_buildid_list(int argc, const char **argv) | |||
102 | return cmd_buildid_list(i, rec_argv, NULL); | 930 | return cmd_buildid_list(i, rec_argv, NULL); |
103 | } | 931 | } |
104 | 932 | ||
105 | int cmd_kvm(int argc, const char **argv, const char *prefix __used) | 933 | int cmd_kvm(int argc, const char **argv, const char *prefix __maybe_unused) |
106 | { | 934 | { |
107 | perf_host = 0; | 935 | perf_host = 0; |
108 | perf_guest = 1; | 936 | perf_guest = 1; |
@@ -135,6 +963,8 @@ int cmd_kvm(int argc, const char **argv, const char *prefix __used) | |||
135 | return cmd_top(argc, argv, NULL); | 963 | return cmd_top(argc, argv, NULL); |
136 | else if (!strncmp(argv[0], "buildid-list", 12)) | 964 | else if (!strncmp(argv[0], "buildid-list", 12)) |
137 | return __cmd_buildid_list(argc, argv); | 965 | return __cmd_buildid_list(argc, argv); |
966 | else if (!strncmp(argv[0], "stat", 4)) | ||
967 | return kvm_cmd_stat(argc, argv); | ||
138 | else | 968 | else |
139 | usage_with_options(kvm_usage, kvm_options); | 969 | usage_with_options(kvm_usage, kvm_options); |
140 | 970 | ||
diff --git a/tools/perf/builtin-list.c b/tools/perf/builtin-list.c index 6313b6eb3ebb..1948eceb517a 100644 --- a/tools/perf/builtin-list.c +++ b/tools/perf/builtin-list.c | |||
@@ -14,20 +14,20 @@ | |||
14 | #include "util/parse-events.h" | 14 | #include "util/parse-events.h" |
15 | #include "util/cache.h" | 15 | #include "util/cache.h" |
16 | 16 | ||
17 | int cmd_list(int argc, const char **argv, const char *prefix __used) | 17 | int cmd_list(int argc, const char **argv, const char *prefix __maybe_unused) |
18 | { | 18 | { |
19 | setup_pager(); | 19 | setup_pager(); |
20 | 20 | ||
21 | if (argc == 1) | 21 | if (argc == 1) |
22 | print_events(NULL); | 22 | print_events(NULL, false); |
23 | else { | 23 | else { |
24 | int i; | 24 | int i; |
25 | 25 | ||
26 | for (i = 1; i < argc; ++i) { | 26 | for (i = 1; i < argc; ++i) { |
27 | if (i > 1) | 27 | if (i > 2) |
28 | putchar('\n'); | 28 | putchar('\n'); |
29 | if (strncmp(argv[i], "tracepoint", 10) == 0) | 29 | if (strncmp(argv[i], "tracepoint", 10) == 0) |
30 | print_tracepoint_events(NULL, NULL); | 30 | print_tracepoint_events(NULL, NULL, false); |
31 | else if (strcmp(argv[i], "hw") == 0 || | 31 | else if (strcmp(argv[i], "hw") == 0 || |
32 | strcmp(argv[i], "hardware") == 0) | 32 | strcmp(argv[i], "hardware") == 0) |
33 | print_events_type(PERF_TYPE_HARDWARE); | 33 | print_events_type(PERF_TYPE_HARDWARE); |
@@ -36,13 +36,15 @@ int cmd_list(int argc, const char **argv, const char *prefix __used) | |||
36 | print_events_type(PERF_TYPE_SOFTWARE); | 36 | print_events_type(PERF_TYPE_SOFTWARE); |
37 | else if (strcmp(argv[i], "cache") == 0 || | 37 | else if (strcmp(argv[i], "cache") == 0 || |
38 | strcmp(argv[i], "hwcache") == 0) | 38 | strcmp(argv[i], "hwcache") == 0) |
39 | print_hwcache_events(NULL); | 39 | print_hwcache_events(NULL, false); |
40 | else if (strcmp(argv[i], "--raw-dump") == 0) | ||
41 | print_events(NULL, true); | ||
40 | else { | 42 | else { |
41 | char *sep = strchr(argv[i], ':'), *s; | 43 | char *sep = strchr(argv[i], ':'), *s; |
42 | int sep_idx; | 44 | int sep_idx; |
43 | 45 | ||
44 | if (sep == NULL) { | 46 | if (sep == NULL) { |
45 | print_events(argv[i]); | 47 | print_events(argv[i], false); |
46 | continue; | 48 | continue; |
47 | } | 49 | } |
48 | sep_idx = sep - argv[i]; | 50 | sep_idx = sep - argv[i]; |
@@ -51,7 +53,7 @@ int cmd_list(int argc, const char **argv, const char *prefix __used) | |||
51 | return -1; | 53 | return -1; |
52 | 54 | ||
53 | s[sep_idx] = '\0'; | 55 | s[sep_idx] = '\0'; |
54 | print_tracepoint_events(s, s + sep_idx + 1); | 56 | print_tracepoint_events(s, s + sep_idx + 1, false); |
55 | free(s); | 57 | free(s); |
56 | } | 58 | } |
57 | } | 59 | } |
diff --git a/tools/perf/builtin-lock.c b/tools/perf/builtin-lock.c index b3c428548868..7d6e09949880 100644 --- a/tools/perf/builtin-lock.c +++ b/tools/perf/builtin-lock.c | |||
@@ -1,6 +1,8 @@ | |||
1 | #include "builtin.h" | 1 | #include "builtin.h" |
2 | #include "perf.h" | 2 | #include "perf.h" |
3 | 3 | ||
4 | #include "util/evlist.h" | ||
5 | #include "util/evsel.h" | ||
4 | #include "util/util.h" | 6 | #include "util/util.h" |
5 | #include "util/cache.h" | 7 | #include "util/cache.h" |
6 | #include "util/symbol.h" | 8 | #include "util/symbol.h" |
@@ -40,7 +42,7 @@ struct lock_stat { | |||
40 | struct rb_node rb; /* used for sorting */ | 42 | struct rb_node rb; /* used for sorting */ |
41 | 43 | ||
42 | /* | 44 | /* |
43 | * FIXME: raw_field_value() returns unsigned long long, | 45 | * FIXME: perf_evsel__intval() returns u64, |
44 | * so address of lockdep_map should be dealed as 64bit. | 46 | * so address of lockdep_map should be dealed as 64bit. |
45 | * Is there more better solution? | 47 | * Is there more better solution? |
46 | */ | 48 | */ |
@@ -160,8 +162,10 @@ static struct thread_stat *thread_stat_findnew_after_first(u32 tid) | |||
160 | return st; | 162 | return st; |
161 | 163 | ||
162 | st = zalloc(sizeof(struct thread_stat)); | 164 | st = zalloc(sizeof(struct thread_stat)); |
163 | if (!st) | 165 | if (!st) { |
164 | die("memory allocation failed\n"); | 166 | pr_err("memory allocation failed\n"); |
167 | return NULL; | ||
168 | } | ||
165 | 169 | ||
166 | st->tid = tid; | 170 | st->tid = tid; |
167 | INIT_LIST_HEAD(&st->seq_list); | 171 | INIT_LIST_HEAD(&st->seq_list); |
@@ -180,8 +184,10 @@ static struct thread_stat *thread_stat_findnew_first(u32 tid) | |||
180 | struct thread_stat *st; | 184 | struct thread_stat *st; |
181 | 185 | ||
182 | st = zalloc(sizeof(struct thread_stat)); | 186 | st = zalloc(sizeof(struct thread_stat)); |
183 | if (!st) | 187 | if (!st) { |
184 | die("memory allocation failed\n"); | 188 | pr_err("memory allocation failed\n"); |
189 | return NULL; | ||
190 | } | ||
185 | st->tid = tid; | 191 | st->tid = tid; |
186 | INIT_LIST_HEAD(&st->seq_list); | 192 | INIT_LIST_HEAD(&st->seq_list); |
187 | 193 | ||
@@ -247,18 +253,20 @@ struct lock_key keys[] = { | |||
247 | { NULL, NULL } | 253 | { NULL, NULL } |
248 | }; | 254 | }; |
249 | 255 | ||
250 | static void select_key(void) | 256 | static int select_key(void) |
251 | { | 257 | { |
252 | int i; | 258 | int i; |
253 | 259 | ||
254 | for (i = 0; keys[i].name; i++) { | 260 | for (i = 0; keys[i].name; i++) { |
255 | if (!strcmp(keys[i].name, sort_key)) { | 261 | if (!strcmp(keys[i].name, sort_key)) { |
256 | compare = keys[i].key; | 262 | compare = keys[i].key; |
257 | return; | 263 | return 0; |
258 | } | 264 | } |
259 | } | 265 | } |
260 | 266 | ||
261 | die("Unknown compare key:%s\n", sort_key); | 267 | pr_err("Unknown compare key: %s\n", sort_key); |
268 | |||
269 | return -1; | ||
262 | } | 270 | } |
263 | 271 | ||
264 | static void insert_to_result(struct lock_stat *st, | 272 | static void insert_to_result(struct lock_stat *st, |
@@ -323,61 +331,24 @@ static struct lock_stat *lock_stat_findnew(void *addr, const char *name) | |||
323 | return new; | 331 | return new; |
324 | 332 | ||
325 | alloc_failed: | 333 | alloc_failed: |
326 | die("memory allocation failed\n"); | 334 | pr_err("memory allocation failed\n"); |
335 | return NULL; | ||
327 | } | 336 | } |
328 | 337 | ||
329 | static const char *input_name; | 338 | static const char *input_name; |
330 | 339 | ||
331 | struct raw_event_sample { | 340 | struct trace_lock_handler { |
332 | u32 size; | 341 | int (*acquire_event)(struct perf_evsel *evsel, |
333 | char data[0]; | 342 | struct perf_sample *sample); |
334 | }; | ||
335 | |||
336 | struct trace_acquire_event { | ||
337 | void *addr; | ||
338 | const char *name; | ||
339 | int flag; | ||
340 | }; | ||
341 | |||
342 | struct trace_acquired_event { | ||
343 | void *addr; | ||
344 | const char *name; | ||
345 | }; | ||
346 | 343 | ||
347 | struct trace_contended_event { | 344 | int (*acquired_event)(struct perf_evsel *evsel, |
348 | void *addr; | 345 | struct perf_sample *sample); |
349 | const char *name; | ||
350 | }; | ||
351 | 346 | ||
352 | struct trace_release_event { | 347 | int (*contended_event)(struct perf_evsel *evsel, |
353 | void *addr; | 348 | struct perf_sample *sample); |
354 | const char *name; | ||
355 | }; | ||
356 | 349 | ||
357 | struct trace_lock_handler { | 350 | int (*release_event)(struct perf_evsel *evsel, |
358 | void (*acquire_event)(struct trace_acquire_event *, | 351 | struct perf_sample *sample); |
359 | struct event_format *, | ||
360 | int cpu, | ||
361 | u64 timestamp, | ||
362 | struct thread *thread); | ||
363 | |||
364 | void (*acquired_event)(struct trace_acquired_event *, | ||
365 | struct event_format *, | ||
366 | int cpu, | ||
367 | u64 timestamp, | ||
368 | struct thread *thread); | ||
369 | |||
370 | void (*contended_event)(struct trace_contended_event *, | ||
371 | struct event_format *, | ||
372 | int cpu, | ||
373 | u64 timestamp, | ||
374 | struct thread *thread); | ||
375 | |||
376 | void (*release_event)(struct trace_release_event *, | ||
377 | struct event_format *, | ||
378 | int cpu, | ||
379 | u64 timestamp, | ||
380 | struct thread *thread); | ||
381 | }; | 352 | }; |
382 | 353 | ||
383 | static struct lock_seq_stat *get_seq(struct thread_stat *ts, void *addr) | 354 | static struct lock_seq_stat *get_seq(struct thread_stat *ts, void *addr) |
@@ -390,8 +361,10 @@ static struct lock_seq_stat *get_seq(struct thread_stat *ts, void *addr) | |||
390 | } | 361 | } |
391 | 362 | ||
392 | seq = zalloc(sizeof(struct lock_seq_stat)); | 363 | seq = zalloc(sizeof(struct lock_seq_stat)); |
393 | if (!seq) | 364 | if (!seq) { |
394 | die("Not enough memory\n"); | 365 | pr_err("memory allocation failed\n"); |
366 | return NULL; | ||
367 | } | ||
395 | seq->state = SEQ_STATE_UNINITIALIZED; | 368 | seq->state = SEQ_STATE_UNINITIALIZED; |
396 | seq->addr = addr; | 369 | seq->addr = addr; |
397 | 370 | ||
@@ -414,33 +387,42 @@ enum acquire_flags { | |||
414 | READ_LOCK = 2, | 387 | READ_LOCK = 2, |
415 | }; | 388 | }; |
416 | 389 | ||
417 | static void | 390 | static int report_lock_acquire_event(struct perf_evsel *evsel, |
418 | report_lock_acquire_event(struct trace_acquire_event *acquire_event, | 391 | struct perf_sample *sample) |
419 | struct event_format *__event __used, | ||
420 | int cpu __used, | ||
421 | u64 timestamp __used, | ||
422 | struct thread *thread __used) | ||
423 | { | 392 | { |
393 | void *addr; | ||
424 | struct lock_stat *ls; | 394 | struct lock_stat *ls; |
425 | struct thread_stat *ts; | 395 | struct thread_stat *ts; |
426 | struct lock_seq_stat *seq; | 396 | struct lock_seq_stat *seq; |
397 | const char *name = perf_evsel__strval(evsel, sample, "name"); | ||
398 | u64 tmp = perf_evsel__intval(evsel, sample, "lockdep_addr"); | ||
399 | int flag = perf_evsel__intval(evsel, sample, "flag"); | ||
400 | |||
401 | memcpy(&addr, &tmp, sizeof(void *)); | ||
427 | 402 | ||
428 | ls = lock_stat_findnew(acquire_event->addr, acquire_event->name); | 403 | ls = lock_stat_findnew(addr, name); |
404 | if (!ls) | ||
405 | return -1; | ||
429 | if (ls->discard) | 406 | if (ls->discard) |
430 | return; | 407 | return 0; |
431 | 408 | ||
432 | ts = thread_stat_findnew(thread->pid); | 409 | ts = thread_stat_findnew(sample->tid); |
433 | seq = get_seq(ts, acquire_event->addr); | 410 | if (!ts) |
411 | return -1; | ||
412 | |||
413 | seq = get_seq(ts, addr); | ||
414 | if (!seq) | ||
415 | return -1; | ||
434 | 416 | ||
435 | switch (seq->state) { | 417 | switch (seq->state) { |
436 | case SEQ_STATE_UNINITIALIZED: | 418 | case SEQ_STATE_UNINITIALIZED: |
437 | case SEQ_STATE_RELEASED: | 419 | case SEQ_STATE_RELEASED: |
438 | if (!acquire_event->flag) { | 420 | if (!flag) { |
439 | seq->state = SEQ_STATE_ACQUIRING; | 421 | seq->state = SEQ_STATE_ACQUIRING; |
440 | } else { | 422 | } else { |
441 | if (acquire_event->flag & TRY_LOCK) | 423 | if (flag & TRY_LOCK) |
442 | ls->nr_trylock++; | 424 | ls->nr_trylock++; |
443 | if (acquire_event->flag & READ_LOCK) | 425 | if (flag & READ_LOCK) |
444 | ls->nr_readlock++; | 426 | ls->nr_readlock++; |
445 | seq->state = SEQ_STATE_READ_ACQUIRED; | 427 | seq->state = SEQ_STATE_READ_ACQUIRED; |
446 | seq->read_count = 1; | 428 | seq->read_count = 1; |
@@ -448,7 +430,7 @@ report_lock_acquire_event(struct trace_acquire_event *acquire_event, | |||
448 | } | 430 | } |
449 | break; | 431 | break; |
450 | case SEQ_STATE_READ_ACQUIRED: | 432 | case SEQ_STATE_READ_ACQUIRED: |
451 | if (acquire_event->flag & READ_LOCK) { | 433 | if (flag & READ_LOCK) { |
452 | seq->read_count++; | 434 | seq->read_count++; |
453 | ls->nr_acquired++; | 435 | ls->nr_acquired++; |
454 | goto end; | 436 | goto end; |
@@ -473,38 +455,46 @@ broken: | |||
473 | } | 455 | } |
474 | 456 | ||
475 | ls->nr_acquire++; | 457 | ls->nr_acquire++; |
476 | seq->prev_event_time = timestamp; | 458 | seq->prev_event_time = sample->time; |
477 | end: | 459 | end: |
478 | return; | 460 | return 0; |
479 | } | 461 | } |
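[editor's note] The report handlers drive a small per-lock state machine keyed by SEQ_STATE_*: acquire moves a sequence to ACQUIRING (or READ_ACQUIRED for try/read locks), a contended event marks the start of the wait, and acquired charges the wait time. A heavily simplified, runnable model of that flow (read/try locks and the broken-sequence bookkeeping are omitted):

	#include <stdio.h>

	enum seq_state { UNINITIALIZED, ACQUIRING, CONTENDED, ACQUIRED };

	struct seq {
		enum seq_state state;
		unsigned long long prev_time;
		unsigned long long wait_total;
	};

	static void acquire(struct seq *s, unsigned long long ts)
	{
		s->state = ACQUIRING;
		s->prev_time = ts;
	}

	static void contended(struct seq *s, unsigned long long ts)
	{
		if (s->state != ACQUIRING)
			return;			/* orphan event, ignore */
		s->state = CONTENDED;
		s->prev_time = ts;
	}

	static void acquired(struct seq *s, unsigned long long ts)
	{
		if (s->state == CONTENDED)
			s->wait_total += ts - s->prev_time;	/* time spent waiting */
		s->state = ACQUIRED;
		s->prev_time = ts;
	}

	int main(void)
	{
		struct seq s = { .state = UNINITIALIZED };

		acquire(&s, 100);
		contended(&s, 120);
		acquired(&s, 180);
		printf("waited %llu\n", s.wait_total);	/* prints waited 60 */
		return 0;
	}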
480 | 462 | ||
481 | static void | 463 | static int report_lock_acquired_event(struct perf_evsel *evsel, |
482 | report_lock_acquired_event(struct trace_acquired_event *acquired_event, | 464 | struct perf_sample *sample) |
483 | struct event_format *__event __used, | ||
484 | int cpu __used, | ||
485 | u64 timestamp __used, | ||
486 | struct thread *thread __used) | ||
487 | { | 465 | { |
466 | void *addr; | ||
488 | struct lock_stat *ls; | 467 | struct lock_stat *ls; |
489 | struct thread_stat *ts; | 468 | struct thread_stat *ts; |
490 | struct lock_seq_stat *seq; | 469 | struct lock_seq_stat *seq; |
491 | u64 contended_term; | 470 | u64 contended_term; |
471 | const char *name = perf_evsel__strval(evsel, sample, "name"); | ||
472 | u64 tmp = perf_evsel__intval(evsel, sample, "lockdep_addr"); | ||
473 | |||
474 | memcpy(&addr, &tmp, sizeof(void *)); | ||
492 | 475 | ||
493 | ls = lock_stat_findnew(acquired_event->addr, acquired_event->name); | 476 | ls = lock_stat_findnew(addr, name); |
477 | if (!ls) | ||
478 | return -1; | ||
494 | if (ls->discard) | 479 | if (ls->discard) |
495 | return; | 480 | return 0; |
481 | |||
482 | ts = thread_stat_findnew(sample->tid); | ||
483 | if (!ts) | ||
484 | return -1; | ||
496 | 485 | ||
497 | ts = thread_stat_findnew(thread->pid); | 486 | seq = get_seq(ts, addr); |
498 | seq = get_seq(ts, acquired_event->addr); | 487 | if (!seq) |
488 | return -1; | ||
499 | 489 | ||
500 | switch (seq->state) { | 490 | switch (seq->state) { |
501 | case SEQ_STATE_UNINITIALIZED: | 491 | case SEQ_STATE_UNINITIALIZED: |
502 | /* orphan event, do nothing */ | 492 | /* orphan event, do nothing */ |
503 | return; | 493 | return 0; |
504 | case SEQ_STATE_ACQUIRING: | 494 | case SEQ_STATE_ACQUIRING: |
505 | break; | 495 | break; |
506 | case SEQ_STATE_CONTENDED: | 496 | case SEQ_STATE_CONTENDED: |
507 | contended_term = timestamp - seq->prev_event_time; | 497 | contended_term = sample->time - seq->prev_event_time; |
508 | ls->wait_time_total += contended_term; | 498 | ls->wait_time_total += contended_term; |
509 | if (contended_term < ls->wait_time_min) | 499 | if (contended_term < ls->wait_time_min) |
510 | ls->wait_time_min = contended_term; | 500 | ls->wait_time_min = contended_term; |
@@ -529,33 +519,41 @@ report_lock_acquired_event(struct trace_acquired_event *acquired_event, | |||
529 | 519 | ||
530 | seq->state = SEQ_STATE_ACQUIRED; | 520 | seq->state = SEQ_STATE_ACQUIRED; |
531 | ls->nr_acquired++; | 521 | ls->nr_acquired++; |
532 | seq->prev_event_time = timestamp; | 522 | seq->prev_event_time = sample->time; |
533 | end: | 523 | end: |
534 | return; | 524 | return 0; |
535 | } | 525 | } |
536 | 526 | ||
537 | static void | 527 | static int report_lock_contended_event(struct perf_evsel *evsel, |
538 | report_lock_contended_event(struct trace_contended_event *contended_event, | 528 | struct perf_sample *sample) |
539 | struct event_format *__event __used, | ||
540 | int cpu __used, | ||
541 | u64 timestamp __used, | ||
542 | struct thread *thread __used) | ||
543 | { | 529 | { |
530 | void *addr; | ||
544 | struct lock_stat *ls; | 531 | struct lock_stat *ls; |
545 | struct thread_stat *ts; | 532 | struct thread_stat *ts; |
546 | struct lock_seq_stat *seq; | 533 | struct lock_seq_stat *seq; |
534 | const char *name = perf_evsel__strval(evsel, sample, "name"); | ||
535 | u64 tmp = perf_evsel__intval(evsel, sample, "lockdep_addr"); | ||
547 | 536 | ||
548 | ls = lock_stat_findnew(contended_event->addr, contended_event->name); | 537 | memcpy(&addr, &tmp, sizeof(void *)); |
538 | |||
539 | ls = lock_stat_findnew(addr, name); | ||
540 | if (!ls) | ||
541 | return -1; | ||
549 | if (ls->discard) | 542 | if (ls->discard) |
550 | return; | 543 | return 0; |
544 | |||
545 | ts = thread_stat_findnew(sample->tid); | ||
546 | if (!ts) | ||
547 | return -1; | ||
551 | 548 | ||
552 | ts = thread_stat_findnew(thread->pid); | 549 | seq = get_seq(ts, addr); |
553 | seq = get_seq(ts, contended_event->addr); | 550 | if (!seq) |
551 | return -1; | ||
554 | 552 | ||
555 | switch (seq->state) { | 553 | switch (seq->state) { |
556 | case SEQ_STATE_UNINITIALIZED: | 554 | case SEQ_STATE_UNINITIALIZED: |
557 | /* orphan event, do nothing */ | 555 | /* orphan event, do nothing */ |
558 | return; | 556 | return 0; |
559 | case SEQ_STATE_ACQUIRING: | 557 | case SEQ_STATE_ACQUIRING: |
560 | break; | 558 | break; |
561 | case SEQ_STATE_RELEASED: | 559 | case SEQ_STATE_RELEASED: |
@@ -576,28 +574,36 @@ report_lock_contended_event(struct trace_contended_event *contended_event, | |||
576 | 574 | ||
577 | seq->state = SEQ_STATE_CONTENDED; | 575 | seq->state = SEQ_STATE_CONTENDED; |
578 | ls->nr_contended++; | 576 | ls->nr_contended++; |
579 | seq->prev_event_time = timestamp; | 577 | seq->prev_event_time = sample->time; |
580 | end: | 578 | end: |
581 | return; | 579 | return 0; |
582 | } | 580 | } |
583 | 581 | ||
584 | static void | 582 | static int report_lock_release_event(struct perf_evsel *evsel, |
585 | report_lock_release_event(struct trace_release_event *release_event, | 583 | struct perf_sample *sample) |
586 | struct event_format *__event __used, | ||
587 | int cpu __used, | ||
588 | u64 timestamp __used, | ||
589 | struct thread *thread __used) | ||
590 | { | 584 | { |
585 | void *addr; | ||
591 | struct lock_stat *ls; | 586 | struct lock_stat *ls; |
592 | struct thread_stat *ts; | 587 | struct thread_stat *ts; |
593 | struct lock_seq_stat *seq; | 588 | struct lock_seq_stat *seq; |
589 | const char *name = perf_evsel__strval(evsel, sample, "name"); | ||
590 | u64 tmp = perf_evsel__intval(evsel, sample, "lockdep_addr"); | ||
591 | |||
592 | memcpy(&addr, &tmp, sizeof(void *)); | ||
594 | 593 | ||
595 | ls = lock_stat_findnew(release_event->addr, release_event->name); | 594 | ls = lock_stat_findnew(addr, name); |
595 | if (!ls) | ||
596 | return -1; | ||
596 | if (ls->discard) | 597 | if (ls->discard) |
597 | return; | 598 | return 0; |
599 | |||
600 | ts = thread_stat_findnew(sample->tid); | ||
601 | if (!ts) | ||
602 | return -1; | ||
598 | 603 | ||
599 | ts = thread_stat_findnew(thread->pid); | 604 | seq = get_seq(ts, addr); |
600 | seq = get_seq(ts, release_event->addr); | 605 | if (!seq) |
606 | return -1; | ||
601 | 607 | ||
602 | switch (seq->state) { | 608 | switch (seq->state) { |
603 | case SEQ_STATE_UNINITIALIZED: | 609 | case SEQ_STATE_UNINITIALIZED: |
@@ -631,7 +637,7 @@ free_seq: | |||
631 | list_del(&seq->list); | 637 | list_del(&seq->list); |
632 | free(seq); | 638 | free(seq); |
633 | end: | 639 | end: |
634 | return; | 640 | return 0; |
635 | } | 641 | } |
636 | 642 | ||
637 | /* lock oriented handlers */ | 643 | /* lock oriented handlers */ |
@@ -645,96 +651,36 @@ static struct trace_lock_handler report_lock_ops = { | |||
645 | 651 | ||
646 | static struct trace_lock_handler *trace_handler; | 652 | static struct trace_lock_handler *trace_handler; |
647 | 653 | ||
648 | static void | 654 | static int perf_evsel__process_lock_acquire(struct perf_evsel *evsel, |
649 | process_lock_acquire_event(void *data, | 655 | struct perf_sample *sample) |
650 | struct event_format *event __used, | ||
651 | int cpu __used, | ||
652 | u64 timestamp __used, | ||
653 | struct thread *thread __used) | ||
654 | { | ||
655 | struct trace_acquire_event acquire_event; | ||
656 | u64 tmp; /* this is required for casting... */ | ||
657 | |||
658 | tmp = raw_field_value(event, "lockdep_addr", data); | ||
659 | memcpy(&acquire_event.addr, &tmp, sizeof(void *)); | ||
660 | acquire_event.name = (char *)raw_field_ptr(event, "name", data); | ||
661 | acquire_event.flag = (int)raw_field_value(event, "flag", data); | ||
662 | |||
663 | if (trace_handler->acquire_event) | ||
664 | trace_handler->acquire_event(&acquire_event, event, cpu, timestamp, thread); | ||
665 | } | ||
666 | |||
667 | static void | ||
668 | process_lock_acquired_event(void *data, | ||
669 | struct event_format *event __used, | ||
670 | int cpu __used, | ||
671 | u64 timestamp __used, | ||
672 | struct thread *thread __used) | ||
673 | { | 656 | { |
674 | struct trace_acquired_event acquired_event; | ||
675 | u64 tmp; /* this is required for casting... */ | ||
676 | |||
677 | tmp = raw_field_value(event, "lockdep_addr", data); | ||
678 | memcpy(&acquired_event.addr, &tmp, sizeof(void *)); | ||
679 | acquired_event.name = (char *)raw_field_ptr(event, "name", data); | ||
680 | |||
681 | if (trace_handler->acquire_event) | 657 | if (trace_handler->acquire_event) |
682 | trace_handler->acquired_event(&acquired_event, event, cpu, timestamp, thread); | 658 | return trace_handler->acquire_event(evsel, sample); |
659 | return 0; | ||
683 | } | 660 | } |
684 | 661 | ||
685 | static void | 662 | static int perf_evsel__process_lock_acquired(struct perf_evsel *evsel, |
686 | process_lock_contended_event(void *data, | 663 | struct perf_sample *sample) |
687 | struct event_format *event __used, | ||
688 | int cpu __used, | ||
689 | u64 timestamp __used, | ||
690 | struct thread *thread __used) | ||
691 | { | 664 | { |
692 | struct trace_contended_event contended_event; | 665 | if (trace_handler->acquired_event) |
693 | u64 tmp; /* this is required for casting... */ | 666 | return trace_handler->acquired_event(evsel, sample); |
694 | 667 | return 0; | |
695 | tmp = raw_field_value(event, "lockdep_addr", data); | ||
696 | memcpy(&contended_event.addr, &tmp, sizeof(void *)); | ||
697 | contended_event.name = (char *)raw_field_ptr(event, "name", data); | ||
698 | |||
699 | if (trace_handler->acquire_event) | ||
700 | trace_handler->contended_event(&contended_event, event, cpu, timestamp, thread); | ||
701 | } | 668 | } |
702 | 669 | ||
703 | static void | 670 | static int perf_evsel__process_lock_contended(struct perf_evsel *evsel, |
704 | process_lock_release_event(void *data, | 671 | struct perf_sample *sample) |
705 | struct event_format *event __used, | ||
706 | int cpu __used, | ||
707 | u64 timestamp __used, | ||
708 | struct thread *thread __used) | ||
709 | { | 672 | { |
710 | struct trace_release_event release_event; | 673 | if (trace_handler->contended_event) |
711 | u64 tmp; /* this is required for casting... */ | 674 | return trace_handler->contended_event(evsel, sample); |
712 | 675 | return 0; | |
713 | tmp = raw_field_value(event, "lockdep_addr", data); | ||
714 | memcpy(&release_event.addr, &tmp, sizeof(void *)); | ||
715 | release_event.name = (char *)raw_field_ptr(event, "name", data); | ||
716 | |||
717 | if (trace_handler->acquire_event) | ||
718 | trace_handler->release_event(&release_event, event, cpu, timestamp, thread); | ||
719 | } | 676 | } |
720 | 677 | ||
721 | static void | 678 | static int perf_evsel__process_lock_release(struct perf_evsel *evsel, |
722 | process_raw_event(void *data, int cpu, u64 timestamp, struct thread *thread) | 679 | struct perf_sample *sample) |
723 | { | 680 | { |
724 | struct event_format *event; | 681 | if (trace_handler->release_event) |
725 | int type; | 682 | return trace_handler->release_event(evsel, sample); |
726 | 683 | return 0; | |
727 | type = trace_parse_common_type(session->pevent, data); | ||
728 | event = pevent_find_event(session->pevent, type); | ||
729 | |||
730 | if (!strcmp(event->name, "lock_acquire")) | ||
731 | process_lock_acquire_event(data, event, cpu, timestamp, thread); | ||
732 | if (!strcmp(event->name, "lock_acquired")) | ||
733 | process_lock_acquired_event(data, event, cpu, timestamp, thread); | ||
734 | if (!strcmp(event->name, "lock_contended")) | ||
735 | process_lock_contended_event(data, event, cpu, timestamp, thread); | ||
736 | if (!strcmp(event->name, "lock_release")) | ||
737 | process_lock_release_event(data, event, cpu, timestamp, thread); | ||
738 | } | 684 | } |
739 | 685 | ||
740 | static void print_bad_events(int bad, int total) | 686 | static void print_bad_events(int bad, int total) |
@@ -836,20 +782,29 @@ static void dump_map(void) | |||
836 | } | 782 | } |
837 | } | 783 | } |
838 | 784 | ||
839 | static void dump_info(void) | 785 | static int dump_info(void) |
840 | { | 786 | { |
787 | int rc = 0; | ||
788 | |||
841 | if (info_threads) | 789 | if (info_threads) |
842 | dump_threads(); | 790 | dump_threads(); |
843 | else if (info_map) | 791 | else if (info_map) |
844 | dump_map(); | 792 | dump_map(); |
845 | else | 793 | else { |
846 | die("Unknown type of information\n"); | 794 | rc = -1; |
795 | pr_err("Unknown type of information\n"); | ||
796 | } | ||
797 | |||
798 | return rc; | ||
847 | } | 799 | } |
848 | 800 | ||
849 | static int process_sample_event(struct perf_tool *tool __used, | 801 | typedef int (*tracepoint_handler)(struct perf_evsel *evsel, |
802 | struct perf_sample *sample); | ||
803 | |||
804 | static int process_sample_event(struct perf_tool *tool __maybe_unused, | ||
850 | union perf_event *event, | 805 | union perf_event *event, |
851 | struct perf_sample *sample, | 806 | struct perf_sample *sample, |
852 | struct perf_evsel *evsel __used, | 807 | struct perf_evsel *evsel, |
853 | struct machine *machine) | 808 | struct machine *machine) |
854 | { | 809 | { |
855 | struct thread *thread = machine__findnew_thread(machine, sample->tid); | 810 | struct thread *thread = machine__findnew_thread(machine, sample->tid); |
@@ -860,7 +815,10 @@ static int process_sample_event(struct perf_tool *tool __used, | |||
860 | return -1; | 815 | return -1; |
861 | } | 816 | } |
862 | 817 | ||
863 | process_raw_event(sample->raw_data, sample->cpu, sample->time, thread); | 818 | if (evsel->handler.func != NULL) { |
819 | tracepoint_handler f = evsel->handler.func; | ||
820 | return f(evsel, sample); | ||
821 | } | ||
864 | 822 | ||
865 | return 0; | 823 | return 0; |
866 | } | 824 | } |
@@ -871,11 +829,25 @@ static struct perf_tool eops = { | |||
871 | .ordered_samples = true, | 829 | .ordered_samples = true, |
872 | }; | 830 | }; |
873 | 831 | ||
832 | static const struct perf_evsel_str_handler lock_tracepoints[] = { | ||
833 | { "lock:lock_acquire", perf_evsel__process_lock_acquire, }, /* CONFIG_LOCKDEP */ | ||
834 | { "lock:lock_acquired", perf_evsel__process_lock_acquired, }, /* CONFIG_LOCKDEP, CONFIG_LOCK_STAT */ | ||
835 | { "lock:lock_contended", perf_evsel__process_lock_contended, }, /* CONFIG_LOCKDEP, CONFIG_LOCK_STAT */ | ||
836 | { "lock:lock_release", perf_evsel__process_lock_release, }, /* CONFIG_LOCKDEP */ | ||
837 | }; | ||
838 | |||
874 | static int read_events(void) | 839 | static int read_events(void) |
875 | { | 840 | { |
876 | session = perf_session__new(input_name, O_RDONLY, 0, false, &eops); | 841 | session = perf_session__new(input_name, O_RDONLY, 0, false, &eops); |
877 | if (!session) | 842 | if (!session) { |
878 | die("Initializing perf session failed\n"); | 843 | pr_err("Initializing perf session failed\n"); |
844 | return -1; | ||
845 | } | ||
846 | |||
847 | if (perf_session__set_tracepoints_handlers(session, lock_tracepoints)) { | ||
848 | pr_err("Initializing perf session tracepoint handlers failed\n"); | ||
849 | return -1; | ||
850 | } | ||
879 | 851 | ||
880 | return perf_session__process_events(session, &eops); | 852 | return perf_session__process_events(session, &eops); |
881 | } | 853 | } |
@@ -892,13 +864,18 @@ static void sort_result(void) | |||
892 | } | 864 | } |
893 | } | 865 | } |
894 | 866 | ||
895 | static void __cmd_report(void) | 867 | static int __cmd_report(void) |
896 | { | 868 | { |
897 | setup_pager(); | 869 | setup_pager(); |
898 | select_key(); | 870 | |
899 | read_events(); | 871 | if ((select_key() != 0) || |
872 | (read_events() != 0)) | ||
873 | return -1; | ||
874 | |||
900 | sort_result(); | 875 | sort_result(); |
901 | print_result(); | 876 | print_result(); |
877 | |||
878 | return 0; | ||
902 | } | 879 | } |
903 | 880 | ||
904 | static const char * const report_usage[] = { | 881 | static const char * const report_usage[] = { |
@@ -944,10 +921,6 @@ static const char *record_args[] = { | |||
944 | "-f", | 921 | "-f", |
945 | "-m", "1024", | 922 | "-m", "1024", |
946 | "-c", "1", | 923 | "-c", "1", |
947 | "-e", "lock:lock_acquire", | ||
948 | "-e", "lock:lock_acquired", | ||
949 | "-e", "lock:lock_contended", | ||
950 | "-e", "lock:lock_release", | ||
951 | }; | 924 | }; |
952 | 925 | ||
953 | static int __cmd_record(int argc, const char **argv) | 926 | static int __cmd_record(int argc, const char **argv) |
@@ -955,15 +928,31 @@ static int __cmd_record(int argc, const char **argv) | |||
955 | unsigned int rec_argc, i, j; | 928 | unsigned int rec_argc, i, j; |
956 | const char **rec_argv; | 929 | const char **rec_argv; |
957 | 930 | ||
931 | for (i = 0; i < ARRAY_SIZE(lock_tracepoints); i++) { | ||
932 | if (!is_valid_tracepoint(lock_tracepoints[i].name)) { | ||
933 | pr_err("tracepoint %s is not enabled. " | ||
934 | "Are CONFIG_LOCKDEP and CONFIG_LOCK_STAT enabled?\n", | ||
935 | lock_tracepoints[i].name); | ||
936 | return 1; | ||
937 | } | ||
938 | } | ||
939 | |||
958 | rec_argc = ARRAY_SIZE(record_args) + argc - 1; | 940 | rec_argc = ARRAY_SIZE(record_args) + argc - 1; |
959 | rec_argv = calloc(rec_argc + 1, sizeof(char *)); | 941 | /* factor of 2 is for -e in front of each tracepoint */ |
942 | rec_argc += 2 * ARRAY_SIZE(lock_tracepoints); | ||
960 | 943 | ||
944 | rec_argv = calloc(rec_argc + 1, sizeof(char *)); | ||
961 | if (rec_argv == NULL) | 945 | if (rec_argv == NULL) |
962 | return -ENOMEM; | 946 | return -ENOMEM; |
963 | 947 | ||
964 | for (i = 0; i < ARRAY_SIZE(record_args); i++) | 948 | for (i = 0; i < ARRAY_SIZE(record_args); i++) |
965 | rec_argv[i] = strdup(record_args[i]); | 949 | rec_argv[i] = strdup(record_args[i]); |
966 | 950 | ||
951 | for (j = 0; j < ARRAY_SIZE(lock_tracepoints); j++) { | ||
952 | rec_argv[i++] = "-e"; | ||
953 | rec_argv[i++] = strdup(lock_tracepoints[j].name); | ||
954 | } | ||
955 | |||
967 | for (j = 1; j < (unsigned int)argc; j++, i++) | 956 | for (j = 1; j < (unsigned int)argc; j++, i++) |
968 | rec_argv[i] = argv[j]; | 957 | rec_argv[i] = argv[j]; |
969 | 958 | ||
@@ -972,9 +961,10 @@ static int __cmd_record(int argc, const char **argv) | |||
972 | return cmd_record(i, rec_argv, NULL); | 961 | return cmd_record(i, rec_argv, NULL); |
973 | } | 962 | } |
974 | 963 | ||
975 | int cmd_lock(int argc, const char **argv, const char *prefix __used) | 964 | int cmd_lock(int argc, const char **argv, const char *prefix __maybe_unused) |
976 | { | 965 | { |
977 | unsigned int i; | 966 | unsigned int i; |
967 | int rc = 0; | ||
978 | 968 | ||
979 | symbol__init(); | 969 | symbol__init(); |
980 | for (i = 0; i < LOCKHASH_SIZE; i++) | 970 | for (i = 0; i < LOCKHASH_SIZE; i++) |
@@ -1009,11 +999,13 @@ int cmd_lock(int argc, const char **argv, const char *prefix __used) | |||
1009 | /* recycling report_lock_ops */ | 999 | /* recycling report_lock_ops */ |
1010 | trace_handler = &report_lock_ops; | 1000 | trace_handler = &report_lock_ops; |
1011 | setup_pager(); | 1001 | setup_pager(); |
1012 | read_events(); | 1002 | if (read_events() != 0) |
1013 | dump_info(); | 1003 | rc = -1; |
1004 | else | ||
1005 | rc = dump_info(); | ||
1014 | } else { | 1006 | } else { |
1015 | usage_with_options(lock_usage, lock_options); | 1007 | usage_with_options(lock_usage, lock_options); |
1016 | } | 1008 | } |
1017 | 1009 | ||
1018 | return 0; | 1010 | return rc; |
1019 | } | 1011 | } |
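
The builtin-lock.c hunks above replace the strcmp() chain in process_raw_event() with the lock_tracepoints name-to-handler table, registered once through perf_session__set_tracepoints_handlers(), and convert the handlers to return int so errors propagate instead of calling die(). A self-contained sketch of that table-driven dispatch pattern follows; struct sample, dispatch() and the handler names here are hypothetical stand-ins rather than the perf API.

#include <stdio.h>
#include <string.h>

/* Hypothetical stand-in for a decoded sample carrying its tracepoint name. */
struct sample {
	const char *event_name;
};

typedef int (*tracepoint_handler)(const struct sample *sample);

static int handle_lock_acquire(const struct sample *sample)
{
	printf("acquire seen: %s\n", sample->event_name);
	return 0;
}

static int handle_lock_release(const struct sample *sample)
{
	printf("release seen: %s\n", sample->event_name);
	return 0;
}

/* Name -> handler table, set up once instead of strcmp() per event type. */
static const struct {
	const char *name;
	tracepoint_handler handler;
} handlers[] = {
	{ "lock:lock_acquire", handle_lock_acquire },
	{ "lock:lock_release", handle_lock_release },
};

static int dispatch(const struct sample *sample)
{
	size_t i;

	for (i = 0; i < sizeof(handlers) / sizeof(handlers[0]); i++) {
		if (!strcmp(handlers[i].name, sample->event_name))
			return handlers[i].handler(sample);
	}
	return 0;	/* unknown events are ignored; handlers return < 0 on error */
}

int main(void)
{
	struct sample s = { .event_name = "lock:lock_release" };

	return dispatch(&s) < 0 ? 1 : 0;
}
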
diff --git a/tools/perf/builtin-probe.c b/tools/perf/builtin-probe.c index e215ae61b2ae..118aa8946573 100644 --- a/tools/perf/builtin-probe.c +++ b/tools/perf/builtin-probe.c | |||
@@ -143,8 +143,8 @@ static int parse_probe_event_argv(int argc, const char **argv) | |||
143 | return ret; | 143 | return ret; |
144 | } | 144 | } |
145 | 145 | ||
146 | static int opt_add_probe_event(const struct option *opt __used, | 146 | static int opt_add_probe_event(const struct option *opt __maybe_unused, |
147 | const char *str, int unset __used) | 147 | const char *str, int unset __maybe_unused) |
148 | { | 148 | { |
149 | if (str) { | 149 | if (str) { |
150 | params.mod_events = true; | 150 | params.mod_events = true; |
@@ -153,8 +153,8 @@ static int opt_add_probe_event(const struct option *opt __used, | |||
153 | return 0; | 153 | return 0; |
154 | } | 154 | } |
155 | 155 | ||
156 | static int opt_del_probe_event(const struct option *opt __used, | 156 | static int opt_del_probe_event(const struct option *opt __maybe_unused, |
157 | const char *str, int unset __used) | 157 | const char *str, int unset __maybe_unused) |
158 | { | 158 | { |
159 | if (str) { | 159 | if (str) { |
160 | params.mod_events = true; | 160 | params.mod_events = true; |
@@ -166,7 +166,7 @@ static int opt_del_probe_event(const struct option *opt __used, | |||
166 | } | 166 | } |
167 | 167 | ||
168 | static int opt_set_target(const struct option *opt, const char *str, | 168 | static int opt_set_target(const struct option *opt, const char *str, |
169 | int unset __used) | 169 | int unset __maybe_unused) |
170 | { | 170 | { |
171 | int ret = -ENOENT; | 171 | int ret = -ENOENT; |
172 | 172 | ||
@@ -188,8 +188,8 @@ static int opt_set_target(const struct option *opt, const char *str, | |||
188 | } | 188 | } |
189 | 189 | ||
190 | #ifdef DWARF_SUPPORT | 190 | #ifdef DWARF_SUPPORT |
191 | static int opt_show_lines(const struct option *opt __used, | 191 | static int opt_show_lines(const struct option *opt __maybe_unused, |
192 | const char *str, int unset __used) | 192 | const char *str, int unset __maybe_unused) |
193 | { | 193 | { |
194 | int ret = 0; | 194 | int ret = 0; |
195 | 195 | ||
@@ -209,8 +209,8 @@ static int opt_show_lines(const struct option *opt __used, | |||
209 | return ret; | 209 | return ret; |
210 | } | 210 | } |
211 | 211 | ||
212 | static int opt_show_vars(const struct option *opt __used, | 212 | static int opt_show_vars(const struct option *opt __maybe_unused, |
213 | const char *str, int unset __used) | 213 | const char *str, int unset __maybe_unused) |
214 | { | 214 | { |
215 | struct perf_probe_event *pev = ¶ms.events[params.nevents]; | 215 | struct perf_probe_event *pev = ¶ms.events[params.nevents]; |
216 | int ret; | 216 | int ret; |
@@ -229,8 +229,8 @@ static int opt_show_vars(const struct option *opt __used, | |||
229 | } | 229 | } |
230 | #endif | 230 | #endif |
231 | 231 | ||
232 | static int opt_set_filter(const struct option *opt __used, | 232 | static int opt_set_filter(const struct option *opt __maybe_unused, |
233 | const char *str, int unset __used) | 233 | const char *str, int unset __maybe_unused) |
234 | { | 234 | { |
235 | const char *err; | 235 | const char *err; |
236 | 236 | ||
@@ -327,7 +327,7 @@ static const struct option options[] = { | |||
327 | OPT_END() | 327 | OPT_END() |
328 | }; | 328 | }; |
329 | 329 | ||
330 | int cmd_probe(int argc, const char **argv, const char *prefix __used) | 330 | int cmd_probe(int argc, const char **argv, const char *prefix __maybe_unused) |
331 | { | 331 | { |
332 | int ret; | 332 | int ret; |
333 | 333 | ||
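
The builtin-probe.c changes are purely mechanical: every __used annotation on an intentionally unused callback parameter becomes __maybe_unused. The sketch below shows why such an annotation is useful for callbacks that must keep a fixed signature; the macro definition given is the common GCC/Clang attribute spelling, assumed here rather than copied from the perf headers.

#include <stdio.h>

/* Usual GCC/Clang spelling; an assumption, not taken from tools/perf. */
#define __maybe_unused __attribute__((unused))

/* Option callbacks must match this signature even when they ignore 'unset'. */
typedef int (*opt_callback)(const char *str, int unset);

static int opt_print(const char *str, int unset __maybe_unused)
{
	/* Without the annotation, -Wunused-parameter would warn about 'unset'. */
	printf("option argument: %s\n", str ? str : "(none)");
	return 0;
}

int main(void)
{
	opt_callback cb = opt_print;

	return cb("example", 0);
}
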
diff --git a/tools/perf/builtin-record.c b/tools/perf/builtin-record.c index 4db6e1ba54e3..f14cb5fdb91f 100644 --- a/tools/perf/builtin-record.c +++ b/tools/perf/builtin-record.c | |||
@@ -31,6 +31,15 @@ | |||
31 | #include <sched.h> | 31 | #include <sched.h> |
32 | #include <sys/mman.h> | 32 | #include <sys/mman.h> |
33 | 33 | ||
34 | #define CALLCHAIN_HELP "do call-graph (stack chain/backtrace) recording: " | ||
35 | |||
36 | #ifdef NO_LIBUNWIND_SUPPORT | ||
37 | static char callchain_help[] = CALLCHAIN_HELP "[fp]"; | ||
38 | #else | ||
39 | static unsigned long default_stack_dump_size = 8192; | ||
40 | static char callchain_help[] = CALLCHAIN_HELP "[fp] dwarf"; | ||
41 | #endif | ||
42 | |||
34 | enum write_mode_t { | 43 | enum write_mode_t { |
35 | WRITE_FORCE, | 44 | WRITE_FORCE, |
36 | WRITE_APPEND | 45 | WRITE_APPEND |
@@ -62,32 +71,38 @@ static void advance_output(struct perf_record *rec, size_t size) | |||
62 | rec->bytes_written += size; | 71 | rec->bytes_written += size; |
63 | } | 72 | } |
64 | 73 | ||
65 | static void write_output(struct perf_record *rec, void *buf, size_t size) | 74 | static int write_output(struct perf_record *rec, void *buf, size_t size) |
66 | { | 75 | { |
67 | while (size) { | 76 | while (size) { |
68 | int ret = write(rec->output, buf, size); | 77 | int ret = write(rec->output, buf, size); |
69 | 78 | ||
70 | if (ret < 0) | 79 | if (ret < 0) { |
71 | die("failed to write"); | 80 | pr_err("failed to write\n"); |
81 | return -1; | ||
82 | } | ||
72 | 83 | ||
73 | size -= ret; | 84 | size -= ret; |
74 | buf += ret; | 85 | buf += ret; |
75 | 86 | ||
76 | rec->bytes_written += ret; | 87 | rec->bytes_written += ret; |
77 | } | 88 | } |
89 | |||
90 | return 0; | ||
78 | } | 91 | } |
79 | 92 | ||
80 | static int process_synthesized_event(struct perf_tool *tool, | 93 | static int process_synthesized_event(struct perf_tool *tool, |
81 | union perf_event *event, | 94 | union perf_event *event, |
82 | struct perf_sample *sample __used, | 95 | struct perf_sample *sample __maybe_unused, |
83 | struct machine *machine __used) | 96 | struct machine *machine __maybe_unused) |
84 | { | 97 | { |
85 | struct perf_record *rec = container_of(tool, struct perf_record, tool); | 98 | struct perf_record *rec = container_of(tool, struct perf_record, tool); |
86 | write_output(rec, event, event->header.size); | 99 | if (write_output(rec, event, event->header.size) < 0) |
100 | return -1; | ||
101 | |||
87 | return 0; | 102 | return 0; |
88 | } | 103 | } |
89 | 104 | ||
90 | static void perf_record__mmap_read(struct perf_record *rec, | 105 | static int perf_record__mmap_read(struct perf_record *rec, |
91 | struct perf_mmap *md) | 106 | struct perf_mmap *md) |
92 | { | 107 | { |
93 | unsigned int head = perf_mmap__read_head(md); | 108 | unsigned int head = perf_mmap__read_head(md); |
@@ -95,9 +110,10 @@ static void perf_record__mmap_read(struct perf_record *rec, | |||
95 | unsigned char *data = md->base + rec->page_size; | 110 | unsigned char *data = md->base + rec->page_size; |
96 | unsigned long size; | 111 | unsigned long size; |
97 | void *buf; | 112 | void *buf; |
113 | int rc = 0; | ||
98 | 114 | ||
99 | if (old == head) | 115 | if (old == head) |
100 | return; | 116 | return 0; |
101 | 117 | ||
102 | rec->samples++; | 118 | rec->samples++; |
103 | 119 | ||
@@ -108,17 +124,26 @@ static void perf_record__mmap_read(struct perf_record *rec, | |||
108 | size = md->mask + 1 - (old & md->mask); | 124 | size = md->mask + 1 - (old & md->mask); |
109 | old += size; | 125 | old += size; |
110 | 126 | ||
111 | write_output(rec, buf, size); | 127 | if (write_output(rec, buf, size) < 0) { |
128 | rc = -1; | ||
129 | goto out; | ||
130 | } | ||
112 | } | 131 | } |
113 | 132 | ||
114 | buf = &data[old & md->mask]; | 133 | buf = &data[old & md->mask]; |
115 | size = head - old; | 134 | size = head - old; |
116 | old += size; | 135 | old += size; |
117 | 136 | ||
118 | write_output(rec, buf, size); | 137 | if (write_output(rec, buf, size) < 0) { |
138 | rc = -1; | ||
139 | goto out; | ||
140 | } | ||
119 | 141 | ||
120 | md->prev = old; | 142 | md->prev = old; |
121 | perf_mmap__write_tail(md, old); | 143 | perf_mmap__write_tail(md, old); |
144 | |||
145 | out: | ||
146 | return rc; | ||
122 | } | 147 | } |
123 | 148 | ||
124 | static volatile int done = 0; | 149 | static volatile int done = 0; |
@@ -134,7 +159,7 @@ static void sig_handler(int sig) | |||
134 | signr = sig; | 159 | signr = sig; |
135 | } | 160 | } |
136 | 161 | ||
137 | static void perf_record__sig_exit(int exit_status __used, void *arg) | 162 | static void perf_record__sig_exit(int exit_status __maybe_unused, void *arg) |
138 | { | 163 | { |
139 | struct perf_record *rec = arg; | 164 | struct perf_record *rec = arg; |
140 | int status; | 165 | int status; |
@@ -163,31 +188,32 @@ static bool perf_evlist__equal(struct perf_evlist *evlist, | |||
163 | if (evlist->nr_entries != other->nr_entries) | 188 | if (evlist->nr_entries != other->nr_entries) |
164 | return false; | 189 | return false; |
165 | 190 | ||
166 | pair = list_entry(other->entries.next, struct perf_evsel, node); | 191 | pair = perf_evlist__first(other); |
167 | 192 | ||
168 | list_for_each_entry(pos, &evlist->entries, node) { | 193 | list_for_each_entry(pos, &evlist->entries, node) { |
169 | if (memcmp(&pos->attr, &pair->attr, sizeof(pos->attr) != 0)) | 194 | if (memcmp(&pos->attr, &pair->attr, sizeof(pos->attr) != 0)) |
170 | return false; | 195 | return false; |
171 | pair = list_entry(pair->node.next, struct perf_evsel, node); | 196 | pair = perf_evsel__next(pair); |
172 | } | 197 | } |
173 | 198 | ||
174 | return true; | 199 | return true; |
175 | } | 200 | } |
176 | 201 | ||
177 | static void perf_record__open(struct perf_record *rec) | 202 | static int perf_record__open(struct perf_record *rec) |
178 | { | 203 | { |
179 | struct perf_evsel *pos, *first; | 204 | struct perf_evsel *pos; |
180 | struct perf_evlist *evlist = rec->evlist; | 205 | struct perf_evlist *evlist = rec->evlist; |
181 | struct perf_session *session = rec->session; | 206 | struct perf_session *session = rec->session; |
182 | struct perf_record_opts *opts = &rec->opts; | 207 | struct perf_record_opts *opts = &rec->opts; |
183 | 208 | int rc = 0; | |
184 | first = list_entry(evlist->entries.next, struct perf_evsel, node); | ||
185 | 209 | ||
186 | perf_evlist__config_attrs(evlist, opts); | 210 | perf_evlist__config_attrs(evlist, opts); |
187 | 211 | ||
212 | if (opts->group) | ||
213 | perf_evlist__set_leader(evlist); | ||
214 | |||
188 | list_for_each_entry(pos, &evlist->entries, node) { | 215 | list_for_each_entry(pos, &evlist->entries, node) { |
189 | struct perf_event_attr *attr = &pos->attr; | 216 | struct perf_event_attr *attr = &pos->attr; |
190 | struct xyarray *group_fd = NULL; | ||
191 | /* | 217 | /* |
192 | * Check if parse_single_tracepoint_event has already asked for | 218 | * Check if parse_single_tracepoint_event has already asked for |
193 | * PERF_SAMPLE_TIME. | 219 | * PERF_SAMPLE_TIME. |
@@ -202,24 +228,24 @@ static void perf_record__open(struct perf_record *rec) | |||
202 | */ | 228 | */ |
203 | bool time_needed = attr->sample_type & PERF_SAMPLE_TIME; | 229 | bool time_needed = attr->sample_type & PERF_SAMPLE_TIME; |
204 | 230 | ||
205 | if (opts->group && pos != first) | ||
206 | group_fd = first->fd; | ||
207 | fallback_missing_features: | 231 | fallback_missing_features: |
208 | if (opts->exclude_guest_missing) | 232 | if (opts->exclude_guest_missing) |
209 | attr->exclude_guest = attr->exclude_host = 0; | 233 | attr->exclude_guest = attr->exclude_host = 0; |
210 | retry_sample_id: | 234 | retry_sample_id: |
211 | attr->sample_id_all = opts->sample_id_all_missing ? 0 : 1; | 235 | attr->sample_id_all = opts->sample_id_all_missing ? 0 : 1; |
212 | try_again: | 236 | try_again: |
213 | if (perf_evsel__open(pos, evlist->cpus, evlist->threads, | 237 | if (perf_evsel__open(pos, evlist->cpus, evlist->threads) < 0) { |
214 | opts->group, group_fd) < 0) { | ||
215 | int err = errno; | 238 | int err = errno; |
216 | 239 | ||
217 | if (err == EPERM || err == EACCES) { | 240 | if (err == EPERM || err == EACCES) { |
218 | ui__error_paranoid(); | 241 | ui__error_paranoid(); |
219 | exit(EXIT_FAILURE); | 242 | rc = -err; |
243 | goto out; | ||
220 | } else if (err == ENODEV && opts->target.cpu_list) { | 244 | } else if (err == ENODEV && opts->target.cpu_list) { |
221 | die("No such device - did you specify" | 245 | pr_err("No such device - did you specify" |
222 | " an out-of-range profile CPU?\n"); | 246 | " an out-of-range profile CPU?\n"); |
247 | rc = -err; | ||
248 | goto out; | ||
223 | } else if (err == EINVAL) { | 249 | } else if (err == EINVAL) { |
224 | if (!opts->exclude_guest_missing && | 250 | if (!opts->exclude_guest_missing && |
225 | (attr->exclude_guest || attr->exclude_host)) { | 251 | (attr->exclude_guest || attr->exclude_host)) { |
@@ -266,42 +292,57 @@ try_again: | |||
266 | if (err == ENOENT) { | 292 | if (err == ENOENT) { |
267 | ui__error("The %s event is not supported.\n", | 293 | ui__error("The %s event is not supported.\n", |
268 | perf_evsel__name(pos)); | 294 | perf_evsel__name(pos)); |
269 | exit(EXIT_FAILURE); | 295 | rc = -err; |
296 | goto out; | ||
270 | } | 297 | } |
271 | 298 | ||
272 | printf("\n"); | 299 | printf("\n"); |
273 | error("sys_perf_event_open() syscall returned with %d (%s). /bin/dmesg may provide additional information.\n", | 300 | error("sys_perf_event_open() syscall returned with %d " |
274 | err, strerror(err)); | 301 | "(%s) for event %s. /bin/dmesg may provide " |
302 | "additional information.\n", | ||
303 | err, strerror(err), perf_evsel__name(pos)); | ||
275 | 304 | ||
276 | #if defined(__i386__) || defined(__x86_64__) | 305 | #if defined(__i386__) || defined(__x86_64__) |
277 | if (attr->type == PERF_TYPE_HARDWARE && err == EOPNOTSUPP) | 306 | if (attr->type == PERF_TYPE_HARDWARE && |
278 | die("No hardware sampling interrupt available." | 307 | err == EOPNOTSUPP) { |
279 | " No APIC? If so then you can boot the kernel" | 308 | pr_err("No hardware sampling interrupt available." |
280 | " with the \"lapic\" boot parameter to" | 309 | " No APIC? If so then you can boot the kernel" |
281 | " force-enable it.\n"); | 310 | " with the \"lapic\" boot parameter to" |
311 | " force-enable it.\n"); | ||
312 | rc = -err; | ||
313 | goto out; | ||
314 | } | ||
282 | #endif | 315 | #endif |
283 | 316 | ||
284 | die("No CONFIG_PERF_EVENTS=y kernel support configured?\n"); | 317 | pr_err("No CONFIG_PERF_EVENTS=y kernel support configured?\n"); |
318 | rc = -err; | ||
319 | goto out; | ||
285 | } | 320 | } |
286 | } | 321 | } |
287 | 322 | ||
288 | if (perf_evlist__set_filters(evlist)) { | 323 | if (perf_evlist__apply_filters(evlist)) { |
289 | error("failed to set filter with %d (%s)\n", errno, | 324 | error("failed to set filter with %d (%s)\n", errno, |
290 | strerror(errno)); | 325 | strerror(errno)); |
291 | exit(-1); | 326 | rc = -1; |
327 | goto out; | ||
292 | } | 328 | } |
293 | 329 | ||
294 | if (perf_evlist__mmap(evlist, opts->mmap_pages, false) < 0) { | 330 | if (perf_evlist__mmap(evlist, opts->mmap_pages, false) < 0) { |
295 | if (errno == EPERM) | 331 | if (errno == EPERM) { |
296 | die("Permission error mapping pages.\n" | 332 | pr_err("Permission error mapping pages.\n" |
297 | "Consider increasing " | 333 | "Consider increasing " |
298 | "/proc/sys/kernel/perf_event_mlock_kb,\n" | 334 | "/proc/sys/kernel/perf_event_mlock_kb,\n" |
299 | "or try again with a smaller value of -m/--mmap_pages.\n" | 335 | "or try again with a smaller value of -m/--mmap_pages.\n" |
300 | "(current value: %d)\n", opts->mmap_pages); | 336 | "(current value: %d)\n", opts->mmap_pages); |
301 | else if (!is_power_of_2(opts->mmap_pages)) | 337 | rc = -errno; |
302 | die("--mmap_pages/-m value must be a power of two."); | 338 | } else if (!is_power_of_2(opts->mmap_pages)) { |
303 | 339 | pr_err("--mmap_pages/-m value must be a power of two."); | |
304 | die("failed to mmap with %d (%s)\n", errno, strerror(errno)); | 340 | rc = -EINVAL; |
341 | } else { | ||
342 | pr_err("failed to mmap with %d (%s)\n", errno, strerror(errno)); | ||
343 | rc = -errno; | ||
344 | } | ||
345 | goto out; | ||
305 | } | 346 | } |
306 | 347 | ||
307 | if (rec->file_new) | 348 | if (rec->file_new) |
@@ -309,11 +350,14 @@ try_again: | |||
309 | else { | 350 | else { |
310 | if (!perf_evlist__equal(session->evlist, evlist)) { | 351 | if (!perf_evlist__equal(session->evlist, evlist)) { |
311 | fprintf(stderr, "incompatible append\n"); | 352 | fprintf(stderr, "incompatible append\n"); |
312 | exit(-1); | 353 | rc = -1; |
354 | goto out; | ||
313 | } | 355 | } |
314 | } | 356 | } |
315 | 357 | ||
316 | perf_session__set_id_hdr_size(session); | 358 | perf_session__set_id_hdr_size(session); |
359 | out: | ||
360 | return rc; | ||
317 | } | 361 | } |
318 | 362 | ||
319 | static int process_buildids(struct perf_record *rec) | 363 | static int process_buildids(struct perf_record *rec) |
@@ -329,10 +373,13 @@ static int process_buildids(struct perf_record *rec) | |||
329 | size, &build_id__mark_dso_hit_ops); | 373 | size, &build_id__mark_dso_hit_ops); |
330 | } | 374 | } |
331 | 375 | ||
332 | static void perf_record__exit(int status __used, void *arg) | 376 | static void perf_record__exit(int status, void *arg) |
333 | { | 377 | { |
334 | struct perf_record *rec = arg; | 378 | struct perf_record *rec = arg; |
335 | 379 | ||
380 | if (status != 0) | ||
381 | return; | ||
382 | |||
336 | if (!rec->opts.pipe_output) { | 383 | if (!rec->opts.pipe_output) { |
337 | rec->session->header.data_size += rec->bytes_written; | 384 | rec->session->header.data_size += rec->bytes_written; |
338 | 385 | ||
@@ -387,17 +434,26 @@ static struct perf_event_header finished_round_event = { | |||
387 | .type = PERF_RECORD_FINISHED_ROUND, | 434 | .type = PERF_RECORD_FINISHED_ROUND, |
388 | }; | 435 | }; |
389 | 436 | ||
390 | static void perf_record__mmap_read_all(struct perf_record *rec) | 437 | static int perf_record__mmap_read_all(struct perf_record *rec) |
391 | { | 438 | { |
392 | int i; | 439 | int i; |
440 | int rc = 0; | ||
393 | 441 | ||
394 | for (i = 0; i < rec->evlist->nr_mmaps; i++) { | 442 | for (i = 0; i < rec->evlist->nr_mmaps; i++) { |
395 | if (rec->evlist->mmap[i].base) | 443 | if (rec->evlist->mmap[i].base) { |
396 | perf_record__mmap_read(rec, &rec->evlist->mmap[i]); | 444 | if (perf_record__mmap_read(rec, &rec->evlist->mmap[i]) != 0) { |
445 | rc = -1; | ||
446 | goto out; | ||
447 | } | ||
448 | } | ||
397 | } | 449 | } |
398 | 450 | ||
399 | if (perf_header__has_feat(&rec->session->header, HEADER_TRACING_DATA)) | 451 | if (perf_header__has_feat(&rec->session->header, HEADER_TRACING_DATA)) |
400 | write_output(rec, &finished_round_event, sizeof(finished_round_event)); | 452 | rc = write_output(rec, &finished_round_event, |
453 | sizeof(finished_round_event)); | ||
454 | |||
455 | out: | ||
456 | return rc; | ||
401 | } | 457 | } |
402 | 458 | ||
403 | static int __cmd_record(struct perf_record *rec, int argc, const char **argv) | 459 | static int __cmd_record(struct perf_record *rec, int argc, const char **argv) |
@@ -457,7 +513,7 @@ static int __cmd_record(struct perf_record *rec, int argc, const char **argv) | |||
457 | output = open(output_name, flags, S_IRUSR | S_IWUSR); | 513 | output = open(output_name, flags, S_IRUSR | S_IWUSR); |
458 | if (output < 0) { | 514 | if (output < 0) { |
459 | perror("failed to create output file"); | 515 | perror("failed to create output file"); |
460 | exit(-1); | 516 | return -1; |
461 | } | 517 | } |
462 | 518 | ||
463 | rec->output = output; | 519 | rec->output = output; |
@@ -497,7 +553,10 @@ static int __cmd_record(struct perf_record *rec, int argc, const char **argv) | |||
497 | } | 553 | } |
498 | } | 554 | } |
499 | 555 | ||
500 | perf_record__open(rec); | 556 | if (perf_record__open(rec) != 0) { |
557 | err = -1; | ||
558 | goto out_delete_session; | ||
559 | } | ||
501 | 560 | ||
502 | /* | 561 | /* |
503 | * perf_session__delete(session) will be called at perf_record__exit() | 562 | * perf_session__delete(session) will be called at perf_record__exit() |
@@ -507,19 +566,20 @@ static int __cmd_record(struct perf_record *rec, int argc, const char **argv) | |||
507 | if (opts->pipe_output) { | 566 | if (opts->pipe_output) { |
508 | err = perf_header__write_pipe(output); | 567 | err = perf_header__write_pipe(output); |
509 | if (err < 0) | 568 | if (err < 0) |
510 | return err; | 569 | goto out_delete_session; |
511 | } else if (rec->file_new) { | 570 | } else if (rec->file_new) { |
512 | err = perf_session__write_header(session, evsel_list, | 571 | err = perf_session__write_header(session, evsel_list, |
513 | output, false); | 572 | output, false); |
514 | if (err < 0) | 573 | if (err < 0) |
515 | return err; | 574 | goto out_delete_session; |
516 | } | 575 | } |
517 | 576 | ||
518 | if (!rec->no_buildid | 577 | if (!rec->no_buildid |
519 | && !perf_header__has_feat(&session->header, HEADER_BUILD_ID)) { | 578 | && !perf_header__has_feat(&session->header, HEADER_BUILD_ID)) { |
520 | pr_err("Couldn't generate buildids. " | 579 | pr_err("Couldn't generate buildids. " |
521 | "Use --no-buildid to profile anyway.\n"); | 580 | "Use --no-buildid to profile anyway.\n"); |
522 | return -1; | 581 | err = -1; |
582 | goto out_delete_session; | ||
523 | } | 583 | } |
524 | 584 | ||
525 | rec->post_processing_offset = lseek(output, 0, SEEK_CUR); | 585 | rec->post_processing_offset = lseek(output, 0, SEEK_CUR); |
@@ -527,7 +587,8 @@ static int __cmd_record(struct perf_record *rec, int argc, const char **argv) | |||
527 | machine = perf_session__find_host_machine(session); | 587 | machine = perf_session__find_host_machine(session); |
528 | if (!machine) { | 588 | if (!machine) { |
529 | pr_err("Couldn't find native kernel information.\n"); | 589 | pr_err("Couldn't find native kernel information.\n"); |
530 | return -1; | 590 | err = -1; |
591 | goto out_delete_session; | ||
531 | } | 592 | } |
532 | 593 | ||
533 | if (opts->pipe_output) { | 594 | if (opts->pipe_output) { |
@@ -535,14 +596,14 @@ static int __cmd_record(struct perf_record *rec, int argc, const char **argv) | |||
535 | process_synthesized_event); | 596 | process_synthesized_event); |
536 | if (err < 0) { | 597 | if (err < 0) { |
537 | pr_err("Couldn't synthesize attrs.\n"); | 598 | pr_err("Couldn't synthesize attrs.\n"); |
538 | return err; | 599 | goto out_delete_session; |
539 | } | 600 | } |
540 | 601 | ||
541 | err = perf_event__synthesize_event_types(tool, process_synthesized_event, | 602 | err = perf_event__synthesize_event_types(tool, process_synthesized_event, |
542 | machine); | 603 | machine); |
543 | if (err < 0) { | 604 | if (err < 0) { |
544 | pr_err("Couldn't synthesize event_types.\n"); | 605 | pr_err("Couldn't synthesize event_types.\n"); |
545 | return err; | 606 | goto out_delete_session; |
546 | } | 607 | } |
547 | 608 | ||
548 | if (have_tracepoints(&evsel_list->entries)) { | 609 | if (have_tracepoints(&evsel_list->entries)) { |
@@ -558,7 +619,7 @@ static int __cmd_record(struct perf_record *rec, int argc, const char **argv) | |||
558 | process_synthesized_event); | 619 | process_synthesized_event); |
559 | if (err <= 0) { | 620 | if (err <= 0) { |
560 | pr_err("Couldn't record tracing data.\n"); | 621 | pr_err("Couldn't record tracing data.\n"); |
561 | return err; | 622 | goto out_delete_session; |
562 | } | 623 | } |
563 | advance_output(rec, err); | 624 | advance_output(rec, err); |
564 | } | 625 | } |
@@ -586,20 +647,24 @@ static int __cmd_record(struct perf_record *rec, int argc, const char **argv) | |||
586 | perf_event__synthesize_guest_os); | 647 | perf_event__synthesize_guest_os); |
587 | 648 | ||
588 | if (!opts->target.system_wide) | 649 | if (!opts->target.system_wide) |
589 | perf_event__synthesize_thread_map(tool, evsel_list->threads, | 650 | err = perf_event__synthesize_thread_map(tool, evsel_list->threads, |
590 | process_synthesized_event, | 651 | process_synthesized_event, |
591 | machine); | 652 | machine); |
592 | else | 653 | else |
593 | perf_event__synthesize_threads(tool, process_synthesized_event, | 654 | err = perf_event__synthesize_threads(tool, process_synthesized_event, |
594 | machine); | 655 | machine); |
595 | 656 | ||
657 | if (err != 0) | ||
658 | goto out_delete_session; | ||
659 | |||
596 | if (rec->realtime_prio) { | 660 | if (rec->realtime_prio) { |
597 | struct sched_param param; | 661 | struct sched_param param; |
598 | 662 | ||
599 | param.sched_priority = rec->realtime_prio; | 663 | param.sched_priority = rec->realtime_prio; |
600 | if (sched_setscheduler(0, SCHED_FIFO, ¶m)) { | 664 | if (sched_setscheduler(0, SCHED_FIFO, ¶m)) { |
601 | pr_err("Could not set realtime priority.\n"); | 665 | pr_err("Could not set realtime priority.\n"); |
602 | exit(-1); | 666 | err = -1; |
667 | goto out_delete_session; | ||
603 | } | 668 | } |
604 | } | 669 | } |
605 | 670 | ||
@@ -614,7 +679,10 @@ static int __cmd_record(struct perf_record *rec, int argc, const char **argv) | |||
614 | for (;;) { | 679 | for (;;) { |
615 | int hits = rec->samples; | 680 | int hits = rec->samples; |
616 | 681 | ||
617 | perf_record__mmap_read_all(rec); | 682 | if (perf_record__mmap_read_all(rec) < 0) { |
683 | err = -1; | ||
684 | goto out_delete_session; | ||
685 | } | ||
618 | 686 | ||
619 | if (hits == rec->samples) { | 687 | if (hits == rec->samples) { |
620 | if (done) | 688 | if (done) |
@@ -732,6 +800,106 @@ error: | |||
732 | return ret; | 800 | return ret; |
733 | } | 801 | } |
734 | 802 | ||
803 | #ifndef NO_LIBUNWIND_SUPPORT | ||
804 | static int get_stack_size(char *str, unsigned long *_size) | ||
805 | { | ||
806 | char *endptr; | ||
807 | unsigned long size; | ||
808 | unsigned long max_size = round_down(USHRT_MAX, sizeof(u64)); | ||
809 | |||
810 | size = strtoul(str, &endptr, 0); | ||
811 | |||
812 | do { | ||
813 | if (*endptr) | ||
814 | break; | ||
815 | |||
816 | size = round_up(size, sizeof(u64)); | ||
817 | if (!size || size > max_size) | ||
818 | break; | ||
819 | |||
820 | *_size = size; | ||
821 | return 0; | ||
822 | |||
823 | } while (0); | ||
824 | |||
825 | pr_err("callchain: Incorrect stack dump size (max %ld): %s\n", | ||
826 | max_size, str); | ||
827 | return -1; | ||
828 | } | ||
829 | #endif /* !NO_LIBUNWIND_SUPPORT */ | ||
830 | |||
831 | static int | ||
832 | parse_callchain_opt(const struct option *opt __maybe_unused, const char *arg, | ||
833 | int unset) | ||
834 | { | ||
835 | struct perf_record *rec = (struct perf_record *)opt->value; | ||
836 | char *tok, *name, *saveptr = NULL; | ||
837 | char *buf; | ||
838 | int ret = -1; | ||
839 | |||
840 | /* --no-call-graph */ | ||
841 | if (unset) | ||
842 | return 0; | ||
843 | |||
844 | /* We specify a default option if none is provided. */ | ||
845 | BUG_ON(!arg); | ||
846 | |||
847 | /* We need a buffer that we know we can write to. */ | ||
848 | buf = malloc(strlen(arg) + 1); | ||
849 | if (!buf) | ||
850 | return -ENOMEM; | ||
851 | |||
852 | strcpy(buf, arg); | ||
853 | |||
854 | tok = strtok_r((char *)buf, ",", &saveptr); | ||
855 | name = tok ? : (char *)buf; | ||
856 | |||
857 | do { | ||
858 | /* Framepointer style */ | ||
859 | if (!strncmp(name, "fp", sizeof("fp"))) { | ||
860 | if (!strtok_r(NULL, ",", &saveptr)) { | ||
861 | rec->opts.call_graph = CALLCHAIN_FP; | ||
862 | ret = 0; | ||
863 | } else | ||
864 | pr_err("callchain: No more arguments " | ||
865 | "needed for -g fp\n"); | ||
866 | break; | ||
867 | |||
868 | #ifndef NO_LIBUNWIND_SUPPORT | ||
869 | /* Dwarf style */ | ||
870 | } else if (!strncmp(name, "dwarf", sizeof("dwarf"))) { | ||
871 | ret = 0; | ||
872 | rec->opts.call_graph = CALLCHAIN_DWARF; | ||
873 | rec->opts.stack_dump_size = default_stack_dump_size; | ||
874 | |||
875 | tok = strtok_r(NULL, ",", &saveptr); | ||
876 | if (tok) { | ||
877 | unsigned long size = 0; | ||
878 | |||
879 | ret = get_stack_size(tok, &size); | ||
880 | rec->opts.stack_dump_size = size; | ||
881 | } | ||
882 | |||
883 | if (!ret) | ||
884 | pr_debug("callchain: stack dump size %d\n", | ||
885 | rec->opts.stack_dump_size); | ||
886 | #endif /* !NO_LIBUNWIND_SUPPORT */ | ||
887 | } else { | ||
888 | pr_err("callchain: Unknown -g option " | ||
889 | "value: %s\n", arg); | ||
890 | break; | ||
891 | } | ||
892 | |||
893 | } while (0); | ||
894 | |||
895 | free(buf); | ||
896 | |||
897 | if (!ret) | ||
898 | pr_debug("callchain: type %d\n", rec->opts.call_graph); | ||
899 | |||
900 | return ret; | ||
901 | } | ||
902 | |||
735 | static const char * const record_usage[] = { | 903 | static const char * const record_usage[] = { |
736 | "perf record [<options>] [<command>]", | 904 | "perf record [<options>] [<command>]", |
737 | "perf record [<options>] -- <command> [<options>]", | 905 | "perf record [<options>] -- <command> [<options>]", |
@@ -803,8 +971,9 @@ const struct option record_options[] = { | |||
803 | "number of mmap data pages"), | 971 | "number of mmap data pages"), |
804 | OPT_BOOLEAN(0, "group", &record.opts.group, | 972 | OPT_BOOLEAN(0, "group", &record.opts.group, |
805 | "put the counters into a counter group"), | 973 | "put the counters into a counter group"), |
806 | OPT_BOOLEAN('g', "call-graph", &record.opts.call_graph, | 974 | OPT_CALLBACK_DEFAULT('g', "call-graph", &record, "mode[,dump_size]", |
807 | "do call-graph (stack chain/backtrace) recording"), | 975 | callchain_help, &parse_callchain_opt, |
976 | "fp"), | ||
808 | OPT_INCR('v', "verbose", &verbose, | 977 | OPT_INCR('v', "verbose", &verbose, |
809 | "be more verbose (show counter open errors, etc)"), | 978 | "be more verbose (show counter open errors, etc)"), |
810 | OPT_BOOLEAN('q', "quiet", &quiet, "don't print any message"), | 979 | OPT_BOOLEAN('q', "quiet", &quiet, "don't print any message"), |
@@ -836,7 +1005,7 @@ const struct option record_options[] = { | |||
836 | OPT_END() | 1005 | OPT_END() |
837 | }; | 1006 | }; |
838 | 1007 | ||
839 | int cmd_record(int argc, const char **argv, const char *prefix __used) | 1008 | int cmd_record(int argc, const char **argv, const char *prefix __maybe_unused) |
840 | { | 1009 | { |
841 | int err = -ENOMEM; | 1010 | int err = -ENOMEM; |
842 | struct perf_evsel *pos; | 1011 | struct perf_evsel *pos; |
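
The largest piece of new code in builtin-record.c is the -g/--call-graph callback, which accepts "fp" or "dwarf[,dump_size]" and rounds the DWARF stack-dump size up to a multiple of sizeof(u64), defaulting to the 8192-byte default_stack_dump_size. The sketch below reproduces only that parsing in a simplified, self-contained form; it omits the USHRT_MAX cap enforced by get_stack_size(), and its names are illustrative, not the perf_record_opts fields.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Parse "fp" or "dwarf[,dump_size]"; returns 0 on success, -1 otherwise. */
static int parse_callchain(const char *arg, unsigned long *dump_size)
{
	char *buf = strdup(arg);	/* strtok_r() needs a writable copy */
	char *saveptr = NULL;
	char *mode, *size_str;
	int ret = -1;

	if (!buf)
		return -1;

	mode = strtok_r(buf, ",", &saveptr);
	size_str = strtok_r(NULL, ",", &saveptr);

	if (mode && !strcmp(mode, "fp") && !size_str) {
		*dump_size = 0;			/* frame pointers need no stack dump */
		ret = 0;
	} else if (mode && !strcmp(mode, "dwarf")) {
		unsigned long size = size_str ? strtoul(size_str, NULL, 0) : 8192;

		*dump_size = (size + 7) & ~7UL;	/* round up to sizeof(u64) */
		ret = 0;
	}

	free(buf);
	return ret;
}

int main(void)
{
	unsigned long size;

	if (parse_callchain("dwarf,4000", &size) == 0)
		printf("dwarf stack dump size: %lu bytes\n", size);
	return 0;
}
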
diff --git a/tools/perf/builtin-report.c b/tools/perf/builtin-report.c index 7c88a243b5db..1da243dfbc3e 100644 --- a/tools/perf/builtin-report.c +++ b/tools/perf/builtin-report.c | |||
@@ -69,8 +69,8 @@ static int perf_report__add_branch_hist_entry(struct perf_tool *tool, | |||
69 | 69 | ||
70 | if ((sort__has_parent || symbol_conf.use_callchain) | 70 | if ((sort__has_parent || symbol_conf.use_callchain) |
71 | && sample->callchain) { | 71 | && sample->callchain) { |
72 | err = machine__resolve_callchain(machine, al->thread, | 72 | err = machine__resolve_callchain(machine, evsel, al->thread, |
73 | sample->callchain, &parent); | 73 | sample, &parent); |
74 | if (err) | 74 | if (err) |
75 | return err; | 75 | return err; |
76 | } | 76 | } |
@@ -93,7 +93,7 @@ static int perf_report__add_branch_hist_entry(struct perf_tool *tool, | |||
93 | struct annotation *notes; | 93 | struct annotation *notes; |
94 | err = -ENOMEM; | 94 | err = -ENOMEM; |
95 | bx = he->branch_info; | 95 | bx = he->branch_info; |
96 | if (bx->from.sym && use_browser > 0) { | 96 | if (bx->from.sym && use_browser == 1 && sort__has_sym) { |
97 | notes = symbol__annotation(bx->from.sym); | 97 | notes = symbol__annotation(bx->from.sym); |
98 | if (!notes->src | 98 | if (!notes->src |
99 | && symbol__alloc_hist(bx->from.sym) < 0) | 99 | && symbol__alloc_hist(bx->from.sym) < 0) |
@@ -107,7 +107,7 @@ static int perf_report__add_branch_hist_entry(struct perf_tool *tool, | |||
107 | goto out; | 107 | goto out; |
108 | } | 108 | } |
109 | 109 | ||
110 | if (bx->to.sym && use_browser > 0) { | 110 | if (bx->to.sym && use_browser == 1 && sort__has_sym) { |
111 | notes = symbol__annotation(bx->to.sym); | 111 | notes = symbol__annotation(bx->to.sym); |
112 | if (!notes->src | 112 | if (!notes->src |
113 | && symbol__alloc_hist(bx->to.sym) < 0) | 113 | && symbol__alloc_hist(bx->to.sym) < 0) |
@@ -140,8 +140,8 @@ static int perf_evsel__add_hist_entry(struct perf_evsel *evsel, | |||
140 | struct hist_entry *he; | 140 | struct hist_entry *he; |
141 | 141 | ||
142 | if ((sort__has_parent || symbol_conf.use_callchain) && sample->callchain) { | 142 | if ((sort__has_parent || symbol_conf.use_callchain) && sample->callchain) { |
143 | err = machine__resolve_callchain(machine, al->thread, | 143 | err = machine__resolve_callchain(machine, evsel, al->thread, |
144 | sample->callchain, &parent); | 144 | sample, &parent); |
145 | if (err) | 145 | if (err) |
146 | return err; | 146 | return err; |
147 | } | 147 | } |
@@ -162,7 +162,7 @@ static int perf_evsel__add_hist_entry(struct perf_evsel *evsel, | |||
162 | * so we don't allocated the extra space needed because the stdio | 162 | * so we don't allocated the extra space needed because the stdio |
163 | * code will not use it. | 163 | * code will not use it. |
164 | */ | 164 | */ |
165 | if (he->ms.sym != NULL && use_browser > 0) { | 165 | if (he->ms.sym != NULL && use_browser == 1 && sort__has_sym) { |
166 | struct annotation *notes = symbol__annotation(he->ms.sym); | 166 | struct annotation *notes = symbol__annotation(he->ms.sym); |
167 | 167 | ||
168 | assert(evsel != NULL); | 168 | assert(evsel != NULL); |
@@ -223,9 +223,9 @@ static int process_sample_event(struct perf_tool *tool, | |||
223 | 223 | ||
224 | static int process_read_event(struct perf_tool *tool, | 224 | static int process_read_event(struct perf_tool *tool, |
225 | union perf_event *event, | 225 | union perf_event *event, |
226 | struct perf_sample *sample __used, | 226 | struct perf_sample *sample __maybe_unused, |
227 | struct perf_evsel *evsel, | 227 | struct perf_evsel *evsel, |
228 | struct machine *machine __used) | 228 | struct machine *machine __maybe_unused) |
229 | { | 229 | { |
230 | struct perf_report *rep = container_of(tool, struct perf_report, tool); | 230 | struct perf_report *rep = container_of(tool, struct perf_report, tool); |
231 | 231 | ||
@@ -287,7 +287,7 @@ static int perf_report__setup_sample_type(struct perf_report *rep) | |||
287 | 287 | ||
288 | extern volatile int session_done; | 288 | extern volatile int session_done; |
289 | 289 | ||
290 | static void sig_handler(int sig __used) | 290 | static void sig_handler(int sig __maybe_unused) |
291 | { | 291 | { |
292 | session_done = 1; | 292 | session_done = 1; |
293 | } | 293 | } |
@@ -397,17 +397,17 @@ static int __cmd_report(struct perf_report *rep) | |||
397 | desc); | 397 | desc); |
398 | } | 398 | } |
399 | 399 | ||
400 | if (dump_trace) { | ||
401 | perf_session__fprintf_nr_events(session, stdout); | ||
402 | goto out_delete; | ||
403 | } | ||
404 | |||
405 | if (verbose > 3) | 400 | if (verbose > 3) |
406 | perf_session__fprintf(session, stdout); | 401 | perf_session__fprintf(session, stdout); |
407 | 402 | ||
408 | if (verbose > 2) | 403 | if (verbose > 2) |
409 | perf_session__fprintf_dsos(session, stdout); | 404 | perf_session__fprintf_dsos(session, stdout); |
410 | 405 | ||
406 | if (dump_trace) { | ||
407 | perf_session__fprintf_nr_events(session, stdout); | ||
408 | goto out_delete; | ||
409 | } | ||
410 | |||
411 | nr_samples = 0; | 411 | nr_samples = 0; |
412 | list_for_each_entry(pos, &session->evlist->entries, node) { | 412 | list_for_each_entry(pos, &session->evlist->entries, node) { |
413 | struct hists *hists = &pos->hists; | 413 | struct hists *hists = &pos->hists; |
@@ -533,13 +533,14 @@ setup: | |||
533 | } | 533 | } |
534 | 534 | ||
535 | static int | 535 | static int |
536 | parse_branch_mode(const struct option *opt __used, const char *str __used, int unset) | 536 | parse_branch_mode(const struct option *opt __maybe_unused, |
537 | const char *str __maybe_unused, int unset) | ||
537 | { | 538 | { |
538 | sort__branch_mode = !unset; | 539 | sort__branch_mode = !unset; |
539 | return 0; | 540 | return 0; |
540 | } | 541 | } |
541 | 542 | ||
542 | int cmd_report(int argc, const char **argv, const char *prefix __used) | 543 | int cmd_report(int argc, const char **argv, const char *prefix __maybe_unused) |
543 | { | 544 | { |
544 | struct perf_session *session; | 545 | struct perf_session *session; |
545 | struct stat st; | 546 | struct stat st; |
@@ -638,6 +639,8 @@ int cmd_report(int argc, const char **argv, const char *prefix __used) | |||
638 | "Show a column with the sum of periods"), | 639 | "Show a column with the sum of periods"), |
639 | OPT_CALLBACK_NOOPT('b', "branch-stack", &sort__branch_mode, "", | 640 | OPT_CALLBACK_NOOPT('b', "branch-stack", &sort__branch_mode, "", |
640 | "use branch records for histogram filling", parse_branch_mode), | 641 | "use branch records for histogram filling", parse_branch_mode), |
642 | OPT_STRING(0, "objdump", &objdump_path, "path", | ||
643 | "objdump binary to use for disassembly and annotations"), | ||
641 | OPT_END() | 644 | OPT_END() |
642 | }; | 645 | }; |
643 | 646 | ||
@@ -686,15 +689,19 @@ int cmd_report(int argc, const char **argv, const char *prefix __used) | |||
686 | 689 | ||
687 | if (strcmp(report.input_name, "-") != 0) | 690 | if (strcmp(report.input_name, "-") != 0) |
688 | setup_browser(true); | 691 | setup_browser(true); |
689 | else | 692 | else { |
690 | use_browser = 0; | 693 | use_browser = 0; |
694 | perf_hpp__init(false, false); | ||
695 | } | ||
696 | |||
697 | setup_sorting(report_usage, options); | ||
691 | 698 | ||
692 | /* | 699 | /* |
693 | * Only in the newt browser we are doing integrated annotation, | 700 | * Only in the newt browser we are doing integrated annotation, |
694 | * so don't allocate extra space that won't be used in the stdio | 701 | * so don't allocate extra space that won't be used in the stdio |
695 | * implementation. | 702 | * implementation. |
696 | */ | 703 | */ |
697 | if (use_browser > 0) { | 704 | if (use_browser == 1 && sort__has_sym) { |
698 | symbol_conf.priv_size = sizeof(struct annotation); | 705 | symbol_conf.priv_size = sizeof(struct annotation); |
699 | report.annotate_init = symbol__annotate_init; | 706 | report.annotate_init = symbol__annotate_init; |
700 | /* | 707 | /* |
@@ -717,8 +724,6 @@ int cmd_report(int argc, const char **argv, const char *prefix __used) | |||
717 | if (symbol__init() < 0) | 724 | if (symbol__init() < 0) |
718 | goto error; | 725 | goto error; |
719 | 726 | ||
720 | setup_sorting(report_usage, options); | ||
721 | |||
722 | if (parent_pattern != default_parent_pattern) { | 727 | if (parent_pattern != default_parent_pattern) { |
723 | if (sort_dimension__add("parent") < 0) | 728 | if (sort_dimension__add("parent") < 0) |
724 | goto error; | 729 | goto error; |
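
A convention running through this series, visible in the goto error paths of cmd_report() above, is that helpers report a failure and return a negative code for the caller to unwind, rather than exiting via die(). A minimal sketch of that shape, with illustrative names and plain stdio standing in for the perf session API:

#include <stdio.h>

/* Helper reports the failure and returns an error instead of exiting. */
static int open_session(const char *path, FILE **out)
{
	*out = fopen(path, "r");
	if (!*out) {
		fprintf(stderr, "Initializing session failed: %s\n", path);
		return -1;	/* previously: die("...") */
	}
	return 0;
}

static int run_report(const char *path)
{
	FILE *f = NULL;
	int err;

	err = open_session(path, &f);
	if (err)
		goto out;	/* nothing allocated yet, just unwind */

	/* ... process events here ... */

out:
	if (f)
		fclose(f);
	return err;
}

int main(int argc, char **argv)
{
	return run_report(argc > 1 ? argv[1] : "perf.data") ? 1 : 0;
}
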
diff --git a/tools/perf/builtin-sched.c b/tools/perf/builtin-sched.c index 7a9ad2b1ee76..9b9e32eaa805 100644 --- a/tools/perf/builtin-sched.c +++ b/tools/perf/builtin-sched.c | |||
@@ -23,31 +23,12 @@ | |||
23 | #include <pthread.h> | 23 | #include <pthread.h> |
24 | #include <math.h> | 24 | #include <math.h> |
25 | 25 | ||
26 | static const char *input_name; | ||
27 | |||
28 | static char default_sort_order[] = "avg, max, switch, runtime"; | ||
29 | static const char *sort_order = default_sort_order; | ||
30 | |||
31 | static int profile_cpu = -1; | ||
32 | |||
33 | #define PR_SET_NAME 15 /* Set process name */ | 26 | #define PR_SET_NAME 15 /* Set process name */ |
34 | #define MAX_CPUS 4096 | 27 | #define MAX_CPUS 4096 |
35 | |||
36 | static u64 run_measurement_overhead; | ||
37 | static u64 sleep_measurement_overhead; | ||
38 | |||
39 | #define COMM_LEN 20 | 28 | #define COMM_LEN 20 |
40 | #define SYM_LEN 129 | 29 | #define SYM_LEN 129 |
41 | |||
42 | #define MAX_PID 65536 | 30 | #define MAX_PID 65536 |
43 | 31 | ||
44 | static unsigned long nr_tasks; | ||
45 | |||
46 | struct perf_sched { | ||
47 | struct perf_tool tool; | ||
48 | struct perf_session *session; | ||
49 | }; | ||
50 | |||
51 | struct sched_atom; | 32 | struct sched_atom; |
52 | 33 | ||
53 | struct task_desc { | 34 | struct task_desc { |
@@ -85,44 +66,6 @@ struct sched_atom { | |||
85 | struct task_desc *wakee; | 66 | struct task_desc *wakee; |
86 | }; | 67 | }; |
87 | 68 | ||
88 | static struct task_desc *pid_to_task[MAX_PID]; | ||
89 | |||
90 | static struct task_desc **tasks; | ||
91 | |||
92 | static pthread_mutex_t start_work_mutex = PTHREAD_MUTEX_INITIALIZER; | ||
93 | static u64 start_time; | ||
94 | |||
95 | static pthread_mutex_t work_done_wait_mutex = PTHREAD_MUTEX_INITIALIZER; | ||
96 | |||
97 | static unsigned long nr_run_events; | ||
98 | static unsigned long nr_sleep_events; | ||
99 | static unsigned long nr_wakeup_events; | ||
100 | |||
101 | static unsigned long nr_sleep_corrections; | ||
102 | static unsigned long nr_run_events_optimized; | ||
103 | |||
104 | static unsigned long targetless_wakeups; | ||
105 | static unsigned long multitarget_wakeups; | ||
106 | |||
107 | static u64 cpu_usage; | ||
108 | static u64 runavg_cpu_usage; | ||
109 | static u64 parent_cpu_usage; | ||
110 | static u64 runavg_parent_cpu_usage; | ||
111 | |||
112 | static unsigned long nr_runs; | ||
113 | static u64 sum_runtime; | ||
114 | static u64 sum_fluct; | ||
115 | static u64 run_avg; | ||
116 | |||
117 | static unsigned int replay_repeat = 10; | ||
118 | static unsigned long nr_timestamps; | ||
119 | static unsigned long nr_unordered_timestamps; | ||
120 | static unsigned long nr_state_machine_bugs; | ||
121 | static unsigned long nr_context_switch_bugs; | ||
122 | static unsigned long nr_events; | ||
123 | static unsigned long nr_lost_chunks; | ||
124 | static unsigned long nr_lost_events; | ||
125 | |||
126 | #define TASK_STATE_TO_CHAR_STR "RSDTtZX" | 69 | #define TASK_STATE_TO_CHAR_STR "RSDTtZX" |
127 | 70 | ||
128 | enum thread_state { | 71 | enum thread_state { |
@@ -154,11 +97,79 @@ struct work_atoms { | |||
154 | 97 | ||
155 | typedef int (*sort_fn_t)(struct work_atoms *, struct work_atoms *); | 98 | typedef int (*sort_fn_t)(struct work_atoms *, struct work_atoms *); |
156 | 99 | ||
157 | static struct rb_root atom_root, sorted_atom_root; | 100 | struct perf_sched; |
101 | |||
102 | struct trace_sched_handler { | ||
103 | int (*switch_event)(struct perf_sched *sched, struct perf_evsel *evsel, | ||
104 | struct perf_sample *sample, struct machine *machine); | ||
158 | 105 | ||
159 | static u64 all_runtime; | 106 | int (*runtime_event)(struct perf_sched *sched, struct perf_evsel *evsel, |
160 | static u64 all_count; | 107 | struct perf_sample *sample, struct machine *machine); |
161 | 108 | ||
109 | int (*wakeup_event)(struct perf_sched *sched, struct perf_evsel *evsel, | ||
110 | struct perf_sample *sample, struct machine *machine); | ||
111 | |||
112 | int (*fork_event)(struct perf_sched *sched, struct perf_evsel *evsel, | ||
113 | struct perf_sample *sample); | ||
114 | |||
115 | int (*migrate_task_event)(struct perf_sched *sched, | ||
116 | struct perf_evsel *evsel, | ||
117 | struct perf_sample *sample, | ||
118 | struct machine *machine); | ||
119 | }; | ||
120 | |||
121 | struct perf_sched { | ||
122 | struct perf_tool tool; | ||
123 | const char *input_name; | ||
124 | const char *sort_order; | ||
125 | unsigned long nr_tasks; | ||
126 | struct task_desc *pid_to_task[MAX_PID]; | ||
127 | struct task_desc **tasks; | ||
128 | const struct trace_sched_handler *tp_handler; | ||
129 | pthread_mutex_t start_work_mutex; | ||
130 | pthread_mutex_t work_done_wait_mutex; | ||
131 | int profile_cpu; | ||
132 | /* | ||
133 | * Track the current task - that way we can know whether there's any | ||
134 | * weird events, such as a task being switched away that is not current. | ||
135 | */ | ||
136 | int max_cpu; | ||
137 | u32 curr_pid[MAX_CPUS]; | ||
138 | struct thread *curr_thread[MAX_CPUS]; | ||
139 | char next_shortname1; | ||
140 | char next_shortname2; | ||
141 | unsigned int replay_repeat; | ||
142 | unsigned long nr_run_events; | ||
143 | unsigned long nr_sleep_events; | ||
144 | unsigned long nr_wakeup_events; | ||
145 | unsigned long nr_sleep_corrections; | ||
146 | unsigned long nr_run_events_optimized; | ||
147 | unsigned long targetless_wakeups; | ||
148 | unsigned long multitarget_wakeups; | ||
149 | unsigned long nr_runs; | ||
150 | unsigned long nr_timestamps; | ||
151 | unsigned long nr_unordered_timestamps; | ||
152 | unsigned long nr_state_machine_bugs; | ||
153 | unsigned long nr_context_switch_bugs; | ||
154 | unsigned long nr_events; | ||
155 | unsigned long nr_lost_chunks; | ||
156 | unsigned long nr_lost_events; | ||
157 | u64 run_measurement_overhead; | ||
158 | u64 sleep_measurement_overhead; | ||
159 | u64 start_time; | ||
160 | u64 cpu_usage; | ||
161 | u64 runavg_cpu_usage; | ||
162 | u64 parent_cpu_usage; | ||
163 | u64 runavg_parent_cpu_usage; | ||
164 | u64 sum_runtime; | ||
165 | u64 sum_fluct; | ||
166 | u64 run_avg; | ||
167 | u64 all_runtime; | ||
168 | u64 all_count; | ||
169 | u64 cpu_last_switched[MAX_CPUS]; | ||
170 | struct rb_root atom_root, sorted_atom_root; | ||
171 | struct list_head sort_list, cmp_pid; | ||
172 | }; | ||
162 | 173 | ||
163 | static u64 get_nsecs(void) | 174 | static u64 get_nsecs(void) |
164 | { | 175 | { |
@@ -169,13 +180,13 @@ static u64 get_nsecs(void) | |||
169 | return ts.tv_sec * 1000000000ULL + ts.tv_nsec; | 180 | return ts.tv_sec * 1000000000ULL + ts.tv_nsec; |
170 | } | 181 | } |
171 | 182 | ||
172 | static void burn_nsecs(u64 nsecs) | 183 | static void burn_nsecs(struct perf_sched *sched, u64 nsecs) |
173 | { | 184 | { |
174 | u64 T0 = get_nsecs(), T1; | 185 | u64 T0 = get_nsecs(), T1; |
175 | 186 | ||
176 | do { | 187 | do { |
177 | T1 = get_nsecs(); | 188 | T1 = get_nsecs(); |
178 | } while (T1 + run_measurement_overhead < T0 + nsecs); | 189 | } while (T1 + sched->run_measurement_overhead < T0 + nsecs); |
179 | } | 190 | } |
180 | 191 | ||
181 | static void sleep_nsecs(u64 nsecs) | 192 | static void sleep_nsecs(u64 nsecs) |
@@ -188,24 +199,24 @@ static void sleep_nsecs(u64 nsecs) | |||
188 | nanosleep(&ts, NULL); | 199 | nanosleep(&ts, NULL); |
189 | } | 200 | } |
190 | 201 | ||
191 | static void calibrate_run_measurement_overhead(void) | 202 | static void calibrate_run_measurement_overhead(struct perf_sched *sched) |
192 | { | 203 | { |
193 | u64 T0, T1, delta, min_delta = 1000000000ULL; | 204 | u64 T0, T1, delta, min_delta = 1000000000ULL; |
194 | int i; | 205 | int i; |
195 | 206 | ||
196 | for (i = 0; i < 10; i++) { | 207 | for (i = 0; i < 10; i++) { |
197 | T0 = get_nsecs(); | 208 | T0 = get_nsecs(); |
198 | burn_nsecs(0); | 209 | burn_nsecs(sched, 0); |
199 | T1 = get_nsecs(); | 210 | T1 = get_nsecs(); |
200 | delta = T1-T0; | 211 | delta = T1-T0; |
201 | min_delta = min(min_delta, delta); | 212 | min_delta = min(min_delta, delta); |
202 | } | 213 | } |
203 | run_measurement_overhead = min_delta; | 214 | sched->run_measurement_overhead = min_delta; |
204 | 215 | ||
205 | printf("run measurement overhead: %" PRIu64 " nsecs\n", min_delta); | 216 | printf("run measurement overhead: %" PRIu64 " nsecs\n", min_delta); |
206 | } | 217 | } |
207 | 218 | ||
208 | static void calibrate_sleep_measurement_overhead(void) | 219 | static void calibrate_sleep_measurement_overhead(struct perf_sched *sched) |
209 | { | 220 | { |
210 | u64 T0, T1, delta, min_delta = 1000000000ULL; | 221 | u64 T0, T1, delta, min_delta = 1000000000ULL; |
211 | int i; | 222 | int i; |
@@ -218,7 +229,7 @@ static void calibrate_sleep_measurement_overhead(void) | |||
218 | min_delta = min(min_delta, delta); | 229 | min_delta = min(min_delta, delta); |
219 | } | 230 | } |
220 | min_delta -= 10000; | 231 | min_delta -= 10000; |
221 | sleep_measurement_overhead = min_delta; | 232 | sched->sleep_measurement_overhead = min_delta; |
222 | 233 | ||
223 | printf("sleep measurement overhead: %" PRIu64 " nsecs\n", min_delta); | 234 | printf("sleep measurement overhead: %" PRIu64 " nsecs\n", min_delta); |
224 | } | 235 | } |
@@ -251,8 +262,8 @@ static struct sched_atom *last_event(struct task_desc *task) | |||
251 | return task->atoms[task->nr_events - 1]; | 262 | return task->atoms[task->nr_events - 1]; |
252 | } | 263 | } |
253 | 264 | ||
254 | static void | 265 | static void add_sched_event_run(struct perf_sched *sched, struct task_desc *task, |
255 | add_sched_event_run(struct task_desc *task, u64 timestamp, u64 duration) | 266 | u64 timestamp, u64 duration) |
256 | { | 267 | { |
257 | struct sched_atom *event, *curr_event = last_event(task); | 268 | struct sched_atom *event, *curr_event = last_event(task); |
258 | 269 | ||
@@ -261,7 +272,7 @@ add_sched_event_run(struct task_desc *task, u64 timestamp, u64 duration) | |||
261 | * to it: | 272 | * to it: |
262 | */ | 273 | */ |
263 | if (curr_event && curr_event->type == SCHED_EVENT_RUN) { | 274 | if (curr_event && curr_event->type == SCHED_EVENT_RUN) { |
264 | nr_run_events_optimized++; | 275 | sched->nr_run_events_optimized++; |
265 | curr_event->duration += duration; | 276 | curr_event->duration += duration; |
266 | return; | 277 | return; |
267 | } | 278 | } |
@@ -271,12 +282,11 @@ add_sched_event_run(struct task_desc *task, u64 timestamp, u64 duration) | |||
271 | event->type = SCHED_EVENT_RUN; | 282 | event->type = SCHED_EVENT_RUN; |
272 | event->duration = duration; | 283 | event->duration = duration; |
273 | 284 | ||
274 | nr_run_events++; | 285 | sched->nr_run_events++; |
275 | } | 286 | } |
276 | 287 | ||
277 | static void | 288 | static void add_sched_event_wakeup(struct perf_sched *sched, struct task_desc *task, |
278 | add_sched_event_wakeup(struct task_desc *task, u64 timestamp, | 289 | u64 timestamp, struct task_desc *wakee) |
279 | struct task_desc *wakee) | ||
280 | { | 290 | { |
281 | struct sched_atom *event, *wakee_event; | 291 | struct sched_atom *event, *wakee_event; |
282 | 292 | ||
@@ -286,11 +296,11 @@ add_sched_event_wakeup(struct task_desc *task, u64 timestamp, | |||
286 | 296 | ||
287 | wakee_event = last_event(wakee); | 297 | wakee_event = last_event(wakee); |
288 | if (!wakee_event || wakee_event->type != SCHED_EVENT_SLEEP) { | 298 | if (!wakee_event || wakee_event->type != SCHED_EVENT_SLEEP) { |
289 | targetless_wakeups++; | 299 | sched->targetless_wakeups++; |
290 | return; | 300 | return; |
291 | } | 301 | } |
292 | if (wakee_event->wait_sem) { | 302 | if (wakee_event->wait_sem) { |
293 | multitarget_wakeups++; | 303 | sched->multitarget_wakeups++; |
294 | return; | 304 | return; |
295 | } | 305 | } |
296 | 306 | ||
@@ -299,89 +309,89 @@ add_sched_event_wakeup(struct task_desc *task, u64 timestamp, | |||
299 | wakee_event->specific_wait = 1; | 309 | wakee_event->specific_wait = 1; |
300 | event->wait_sem = wakee_event->wait_sem; | 310 | event->wait_sem = wakee_event->wait_sem; |
301 | 311 | ||
302 | nr_wakeup_events++; | 312 | sched->nr_wakeup_events++; |
303 | } | 313 | } |
304 | 314 | ||
305 | static void | 315 | static void add_sched_event_sleep(struct perf_sched *sched, struct task_desc *task, |
306 | add_sched_event_sleep(struct task_desc *task, u64 timestamp, | 316 | u64 timestamp, u64 task_state __maybe_unused) |
307 | u64 task_state __used) | ||
308 | { | 317 | { |
309 | struct sched_atom *event = get_new_event(task, timestamp); | 318 | struct sched_atom *event = get_new_event(task, timestamp); |
310 | 319 | ||
311 | event->type = SCHED_EVENT_SLEEP; | 320 | event->type = SCHED_EVENT_SLEEP; |
312 | 321 | ||
313 | nr_sleep_events++; | 322 | sched->nr_sleep_events++; |
314 | } | 323 | } |
315 | 324 | ||
316 | static struct task_desc *register_pid(unsigned long pid, const char *comm) | 325 | static struct task_desc *register_pid(struct perf_sched *sched, |
326 | unsigned long pid, const char *comm) | ||
317 | { | 327 | { |
318 | struct task_desc *task; | 328 | struct task_desc *task; |
319 | 329 | ||
320 | BUG_ON(pid >= MAX_PID); | 330 | BUG_ON(pid >= MAX_PID); |
321 | 331 | ||
322 | task = pid_to_task[pid]; | 332 | task = sched->pid_to_task[pid]; |
323 | 333 | ||
324 | if (task) | 334 | if (task) |
325 | return task; | 335 | return task; |
326 | 336 | ||
327 | task = zalloc(sizeof(*task)); | 337 | task = zalloc(sizeof(*task)); |
328 | task->pid = pid; | 338 | task->pid = pid; |
329 | task->nr = nr_tasks; | 339 | task->nr = sched->nr_tasks; |
330 | strcpy(task->comm, comm); | 340 | strcpy(task->comm, comm); |
331 | /* | 341 | /* |
332 | * every task starts in sleeping state - this gets ignored | 342 | * every task starts in sleeping state - this gets ignored |
333 | * if there's no wakeup pointing to this sleep state: | 343 | * if there's no wakeup pointing to this sleep state: |
334 | */ | 344 | */ |
335 | add_sched_event_sleep(task, 0, 0); | 345 | add_sched_event_sleep(sched, task, 0, 0); |
336 | 346 | ||
337 | pid_to_task[pid] = task; | 347 | sched->pid_to_task[pid] = task; |
338 | nr_tasks++; | 348 | sched->nr_tasks++; |
339 | tasks = realloc(tasks, nr_tasks*sizeof(struct task_task *)); | 349 | sched->tasks = realloc(sched->tasks, sched->nr_tasks * sizeof(struct task_task *)); |
340 | BUG_ON(!tasks); | 350 | BUG_ON(!sched->tasks); |
341 | tasks[task->nr] = task; | 351 | sched->tasks[task->nr] = task; |
342 | 352 | ||
343 | if (verbose) | 353 | if (verbose) |
344 | printf("registered task #%ld, PID %ld (%s)\n", nr_tasks, pid, comm); | 354 | printf("registered task #%ld, PID %ld (%s)\n", sched->nr_tasks, pid, comm); |
345 | 355 | ||
346 | return task; | 356 | return task; |
347 | } | 357 | } |
348 | 358 | ||
349 | 359 | ||
350 | static void print_task_traces(void) | 360 | static void print_task_traces(struct perf_sched *sched) |
351 | { | 361 | { |
352 | struct task_desc *task; | 362 | struct task_desc *task; |
353 | unsigned long i; | 363 | unsigned long i; |
354 | 364 | ||
355 | for (i = 0; i < nr_tasks; i++) { | 365 | for (i = 0; i < sched->nr_tasks; i++) { |
356 | task = tasks[i]; | 366 | task = sched->tasks[i]; |
357 | printf("task %6ld (%20s:%10ld), nr_events: %ld\n", | 367 | printf("task %6ld (%20s:%10ld), nr_events: %ld\n", |
358 | task->nr, task->comm, task->pid, task->nr_events); | 368 | task->nr, task->comm, task->pid, task->nr_events); |
359 | } | 369 | } |
360 | } | 370 | } |
361 | 371 | ||
362 | static void add_cross_task_wakeups(void) | 372 | static void add_cross_task_wakeups(struct perf_sched *sched) |
363 | { | 373 | { |
364 | struct task_desc *task1, *task2; | 374 | struct task_desc *task1, *task2; |
365 | unsigned long i, j; | 375 | unsigned long i, j; |
366 | 376 | ||
367 | for (i = 0; i < nr_tasks; i++) { | 377 | for (i = 0; i < sched->nr_tasks; i++) { |
368 | task1 = tasks[i]; | 378 | task1 = sched->tasks[i]; |
369 | j = i + 1; | 379 | j = i + 1; |
370 | if (j == nr_tasks) | 380 | if (j == sched->nr_tasks) |
371 | j = 0; | 381 | j = 0; |
372 | task2 = tasks[j]; | 382 | task2 = sched->tasks[j]; |
373 | add_sched_event_wakeup(task1, 0, task2); | 383 | add_sched_event_wakeup(sched, task1, 0, task2); |
374 | } | 384 | } |
375 | } | 385 | } |
376 | 386 | ||
377 | static void | 387 | static void perf_sched__process_event(struct perf_sched *sched, |
378 | process_sched_event(struct task_desc *this_task __used, struct sched_atom *atom) | 388 | struct sched_atom *atom) |
379 | { | 389 | { |
380 | int ret = 0; | 390 | int ret = 0; |
381 | 391 | ||
382 | switch (atom->type) { | 392 | switch (atom->type) { |
383 | case SCHED_EVENT_RUN: | 393 | case SCHED_EVENT_RUN: |
384 | burn_nsecs(atom->duration); | 394 | burn_nsecs(sched, atom->duration); |
385 | break; | 395 | break; |
386 | case SCHED_EVENT_SLEEP: | 396 | case SCHED_EVENT_SLEEP: |
387 | if (atom->wait_sem) | 397 | if (atom->wait_sem) |
@@ -428,8 +438,8 @@ static int self_open_counters(void) | |||
428 | fd = sys_perf_event_open(&attr, 0, -1, -1, 0); | 438 | fd = sys_perf_event_open(&attr, 0, -1, -1, 0); |
429 | 439 | ||
430 | if (fd < 0) | 440 | if (fd < 0) |
431 | die("Error: sys_perf_event_open() syscall returned" | 441 | pr_err("Error: sys_perf_event_open() syscall returned " |
432 | "with %d (%s)\n", fd, strerror(errno)); | 442 | "with %d (%s)\n", fd, strerror(errno)); |
433 | return fd; | 443 | return fd; |
434 | } | 444 | } |
435 | 445 | ||
@@ -444,31 +454,41 @@ static u64 get_cpu_usage_nsec_self(int fd) | |||
444 | return runtime; | 454 | return runtime; |
445 | } | 455 | } |
446 | 456 | ||
457 | struct sched_thread_parms { | ||
458 | struct task_desc *task; | ||
459 | struct perf_sched *sched; | ||
460 | }; | ||
461 | |||
447 | static void *thread_func(void *ctx) | 462 | static void *thread_func(void *ctx) |
448 | { | 463 | { |
449 | struct task_desc *this_task = ctx; | 464 | struct sched_thread_parms *parms = ctx; |
465 | struct task_desc *this_task = parms->task; | ||
466 | struct perf_sched *sched = parms->sched; | ||
450 | u64 cpu_usage_0, cpu_usage_1; | 467 | u64 cpu_usage_0, cpu_usage_1; |
451 | unsigned long i, ret; | 468 | unsigned long i, ret; |
452 | char comm2[22]; | 469 | char comm2[22]; |
453 | int fd; | 470 | int fd; |
454 | 471 | ||
472 | free(parms); | ||
473 | |||
455 | sprintf(comm2, ":%s", this_task->comm); | 474 | sprintf(comm2, ":%s", this_task->comm); |
456 | prctl(PR_SET_NAME, comm2); | 475 | prctl(PR_SET_NAME, comm2); |
457 | fd = self_open_counters(); | 476 | fd = self_open_counters(); |
458 | 477 | if (fd < 0) | |
478 | return NULL; | ||
459 | again: | 479 | again: |
460 | ret = sem_post(&this_task->ready_for_work); | 480 | ret = sem_post(&this_task->ready_for_work); |
461 | BUG_ON(ret); | 481 | BUG_ON(ret); |
462 | ret = pthread_mutex_lock(&start_work_mutex); | 482 | ret = pthread_mutex_lock(&sched->start_work_mutex); |
463 | BUG_ON(ret); | 483 | BUG_ON(ret); |
464 | ret = pthread_mutex_unlock(&start_work_mutex); | 484 | ret = pthread_mutex_unlock(&sched->start_work_mutex); |
465 | BUG_ON(ret); | 485 | BUG_ON(ret); |
466 | 486 | ||
467 | cpu_usage_0 = get_cpu_usage_nsec_self(fd); | 487 | cpu_usage_0 = get_cpu_usage_nsec_self(fd); |
468 | 488 | ||
469 | for (i = 0; i < this_task->nr_events; i++) { | 489 | for (i = 0; i < this_task->nr_events; i++) { |
470 | this_task->curr_event = i; | 490 | this_task->curr_event = i; |
471 | process_sched_event(this_task, this_task->atoms[i]); | 491 | perf_sched__process_event(sched, this_task->atoms[i]); |
472 | } | 492 | } |
473 | 493 | ||
474 | cpu_usage_1 = get_cpu_usage_nsec_self(fd); | 494 | cpu_usage_1 = get_cpu_usage_nsec_self(fd); |
@@ -476,15 +496,15 @@ again: | |||
476 | ret = sem_post(&this_task->work_done_sem); | 496 | ret = sem_post(&this_task->work_done_sem); |
477 | BUG_ON(ret); | 497 | BUG_ON(ret); |
478 | 498 | ||
479 | ret = pthread_mutex_lock(&work_done_wait_mutex); | 499 | ret = pthread_mutex_lock(&sched->work_done_wait_mutex); |
480 | BUG_ON(ret); | 500 | BUG_ON(ret); |
481 | ret = pthread_mutex_unlock(&work_done_wait_mutex); | 501 | ret = pthread_mutex_unlock(&sched->work_done_wait_mutex); |
482 | BUG_ON(ret); | 502 | BUG_ON(ret); |
483 | 503 | ||
484 | goto again; | 504 | goto again; |
485 | } | 505 | } |
486 | 506 | ||
487 | static void create_tasks(void) | 507 | static void create_tasks(struct perf_sched *sched) |
488 | { | 508 | { |
489 | struct task_desc *task; | 509 | struct task_desc *task; |
490 | pthread_attr_t attr; | 510 | pthread_attr_t attr; |
@@ -496,128 +516,129 @@ static void create_tasks(void) | |||
496 | err = pthread_attr_setstacksize(&attr, | 516 | err = pthread_attr_setstacksize(&attr, |
497 | (size_t) max(16 * 1024, PTHREAD_STACK_MIN)); | 517 | (size_t) max(16 * 1024, PTHREAD_STACK_MIN)); |
498 | BUG_ON(err); | 518 | BUG_ON(err); |
499 | err = pthread_mutex_lock(&start_work_mutex); | 519 | err = pthread_mutex_lock(&sched->start_work_mutex); |
500 | BUG_ON(err); | 520 | BUG_ON(err); |
501 | err = pthread_mutex_lock(&work_done_wait_mutex); | 521 | err = pthread_mutex_lock(&sched->work_done_wait_mutex); |
502 | BUG_ON(err); | 522 | BUG_ON(err); |
503 | for (i = 0; i < nr_tasks; i++) { | 523 | for (i = 0; i < sched->nr_tasks; i++) { |
504 | task = tasks[i]; | 524 | struct sched_thread_parms *parms = malloc(sizeof(*parms)); |
525 | BUG_ON(parms == NULL); | ||
526 | parms->task = task = sched->tasks[i]; | ||
527 | parms->sched = sched; | ||
505 | sem_init(&task->sleep_sem, 0, 0); | 528 | sem_init(&task->sleep_sem, 0, 0); |
506 | sem_init(&task->ready_for_work, 0, 0); | 529 | sem_init(&task->ready_for_work, 0, 0); |
507 | sem_init(&task->work_done_sem, 0, 0); | 530 | sem_init(&task->work_done_sem, 0, 0); |
508 | task->curr_event = 0; | 531 | task->curr_event = 0; |
509 | err = pthread_create(&task->thread, &attr, thread_func, task); | 532 | err = pthread_create(&task->thread, &attr, thread_func, parms); |
510 | BUG_ON(err); | 533 | BUG_ON(err); |
511 | } | 534 | } |
512 | } | 535 | } |
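
create_tasks() now heap-allocates a small sched_thread_parms block per thread so that thread_func() can receive both the task and the perf_sched pointer through pthread_create()'s single void * argument; the thread frees the block once it has read the fields. A minimal sketch of that ownership hand-off, with hypothetical names and none of the perf internals:

/*
 * Sketch only: the creator allocates a parameter block per thread,
 * the thread copies what it needs and then frees the block itself.
 */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct thread_parms {
	int id;
	const char *label;
};

static void *worker(void *ctx)
{
	struct thread_parms *parms = ctx;
	int id = parms->id;
	const char *label = parms->label;

	free(parms);		/* the thread owns the block from here on */
	printf("%s #%d running\n", label, id);
	return NULL;
}

int main(void)
{
	pthread_t tid[2];
	int i;

	for (i = 0; i < 2; i++) {
		struct thread_parms *parms = malloc(sizeof(*parms));

		if (!parms)
			return 1;
		parms->id = i;
		parms->label = "worker";
		if (pthread_create(&tid[i], NULL, worker, parms))
			return 1;
	}
	for (i = 0; i < 2; i++)
		pthread_join(tid[i], NULL);
	return 0;
}
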
513 | 536 | ||
514 | static void wait_for_tasks(void) | 537 | static void wait_for_tasks(struct perf_sched *sched) |
515 | { | 538 | { |
516 | u64 cpu_usage_0, cpu_usage_1; | 539 | u64 cpu_usage_0, cpu_usage_1; |
517 | struct task_desc *task; | 540 | struct task_desc *task; |
518 | unsigned long i, ret; | 541 | unsigned long i, ret; |
519 | 542 | ||
520 | start_time = get_nsecs(); | 543 | sched->start_time = get_nsecs(); |
521 | cpu_usage = 0; | 544 | sched->cpu_usage = 0; |
522 | pthread_mutex_unlock(&work_done_wait_mutex); | 545 | pthread_mutex_unlock(&sched->work_done_wait_mutex); |
523 | 546 | ||
524 | for (i = 0; i < nr_tasks; i++) { | 547 | for (i = 0; i < sched->nr_tasks; i++) { |
525 | task = tasks[i]; | 548 | task = sched->tasks[i]; |
526 | ret = sem_wait(&task->ready_for_work); | 549 | ret = sem_wait(&task->ready_for_work); |
527 | BUG_ON(ret); | 550 | BUG_ON(ret); |
528 | sem_init(&task->ready_for_work, 0, 0); | 551 | sem_init(&task->ready_for_work, 0, 0); |
529 | } | 552 | } |
530 | ret = pthread_mutex_lock(&work_done_wait_mutex); | 553 | ret = pthread_mutex_lock(&sched->work_done_wait_mutex); |
531 | BUG_ON(ret); | 554 | BUG_ON(ret); |
532 | 555 | ||
533 | cpu_usage_0 = get_cpu_usage_nsec_parent(); | 556 | cpu_usage_0 = get_cpu_usage_nsec_parent(); |
534 | 557 | ||
535 | pthread_mutex_unlock(&start_work_mutex); | 558 | pthread_mutex_unlock(&sched->start_work_mutex); |
536 | 559 | ||
537 | for (i = 0; i < nr_tasks; i++) { | 560 | for (i = 0; i < sched->nr_tasks; i++) { |
538 | task = tasks[i]; | 561 | task = sched->tasks[i]; |
539 | ret = sem_wait(&task->work_done_sem); | 562 | ret = sem_wait(&task->work_done_sem); |
540 | BUG_ON(ret); | 563 | BUG_ON(ret); |
541 | sem_init(&task->work_done_sem, 0, 0); | 564 | sem_init(&task->work_done_sem, 0, 0); |
542 | cpu_usage += task->cpu_usage; | 565 | sched->cpu_usage += task->cpu_usage; |
543 | task->cpu_usage = 0; | 566 | task->cpu_usage = 0; |
544 | } | 567 | } |
545 | 568 | ||
546 | cpu_usage_1 = get_cpu_usage_nsec_parent(); | 569 | cpu_usage_1 = get_cpu_usage_nsec_parent(); |
547 | if (!runavg_cpu_usage) | 570 | if (!sched->runavg_cpu_usage) |
548 | runavg_cpu_usage = cpu_usage; | 571 | sched->runavg_cpu_usage = sched->cpu_usage; |
549 | runavg_cpu_usage = (runavg_cpu_usage*9 + cpu_usage)/10; | 572 | sched->runavg_cpu_usage = (sched->runavg_cpu_usage * 9 + sched->cpu_usage) / 10; |
550 | 573 | ||
551 | parent_cpu_usage = cpu_usage_1 - cpu_usage_0; | 574 | sched->parent_cpu_usage = cpu_usage_1 - cpu_usage_0; |
552 | if (!runavg_parent_cpu_usage) | 575 | if (!sched->runavg_parent_cpu_usage) |
553 | runavg_parent_cpu_usage = parent_cpu_usage; | 576 | sched->runavg_parent_cpu_usage = sched->parent_cpu_usage; |
554 | runavg_parent_cpu_usage = (runavg_parent_cpu_usage*9 + | 577 | sched->runavg_parent_cpu_usage = (sched->runavg_parent_cpu_usage * 9 + |
555 | parent_cpu_usage)/10; | 578 | sched->parent_cpu_usage)/10; |
556 | 579 | ||
557 | ret = pthread_mutex_lock(&start_work_mutex); | 580 | ret = pthread_mutex_lock(&sched->start_work_mutex); |
558 | BUG_ON(ret); | 581 | BUG_ON(ret); |
559 | 582 | ||
560 | for (i = 0; i < nr_tasks; i++) { | 583 | for (i = 0; i < sched->nr_tasks; i++) { |
561 | task = tasks[i]; | 584 | task = sched->tasks[i]; |
562 | sem_init(&task->sleep_sem, 0, 0); | 585 | sem_init(&task->sleep_sem, 0, 0); |
563 | task->curr_event = 0; | 586 | task->curr_event = 0; |
564 | } | 587 | } |
565 | } | 588 | } |
566 | 589 | ||
567 | static void run_one_test(void) | 590 | static void run_one_test(struct perf_sched *sched) |
568 | { | 591 | { |
569 | u64 T0, T1, delta, avg_delta, fluct; | 592 | u64 T0, T1, delta, avg_delta, fluct; |
570 | 593 | ||
571 | T0 = get_nsecs(); | 594 | T0 = get_nsecs(); |
572 | wait_for_tasks(); | 595 | wait_for_tasks(sched); |
573 | T1 = get_nsecs(); | 596 | T1 = get_nsecs(); |
574 | 597 | ||
575 | delta = T1 - T0; | 598 | delta = T1 - T0; |
576 | sum_runtime += delta; | 599 | sched->sum_runtime += delta; |
577 | nr_runs++; | 600 | sched->nr_runs++; |
578 | 601 | ||
579 | avg_delta = sum_runtime / nr_runs; | 602 | avg_delta = sched->sum_runtime / sched->nr_runs; |
580 | if (delta < avg_delta) | 603 | if (delta < avg_delta) |
581 | fluct = avg_delta - delta; | 604 | fluct = avg_delta - delta; |
582 | else | 605 | else |
583 | fluct = delta - avg_delta; | 606 | fluct = delta - avg_delta; |
584 | sum_fluct += fluct; | 607 | sched->sum_fluct += fluct; |
585 | if (!run_avg) | 608 | if (!sched->run_avg) |
586 | run_avg = delta; | 609 | sched->run_avg = delta; |
587 | run_avg = (run_avg*9 + delta)/10; | 610 | sched->run_avg = (sched->run_avg * 9 + delta) / 10; |
588 | 611 | ||
589 | printf("#%-3ld: %0.3f, ", | 612 | printf("#%-3ld: %0.3f, ", sched->nr_runs, (double)delta / 1000000.0); |
590 | nr_runs, (double)delta/1000000.0); | ||
591 | 613 | ||
592 | printf("ravg: %0.2f, ", | 614 | printf("ravg: %0.2f, ", (double)sched->run_avg / 1e6); |
593 | (double)run_avg/1e6); | ||
594 | 615 | ||
595 | printf("cpu: %0.2f / %0.2f", | 616 | printf("cpu: %0.2f / %0.2f", |
596 | (double)cpu_usage/1e6, (double)runavg_cpu_usage/1e6); | 617 | (double)sched->cpu_usage / 1e6, (double)sched->runavg_cpu_usage / 1e6); |
597 | 618 | ||
598 | #if 0 | 619 | #if 0 |
599 | /* | 620 | /* |
600 | * rusage statistics done by the parent, these are less | 621 | * rusage statistics done by the parent, these are less |
601 | * accurate than the sum_exec_runtime based statistics: | 622 | * accurate than the sched->sum_exec_runtime based statistics: |
602 | */ | 623 | */ |
603 | printf(" [%0.2f / %0.2f]", | 624 | printf(" [%0.2f / %0.2f]", |
604 | (double)parent_cpu_usage/1e6, | 625 | (double)sched->parent_cpu_usage/1e6, |
605 | (double)runavg_parent_cpu_usage/1e6); | 626 | (double)sched->runavg_parent_cpu_usage/1e6); |
606 | #endif | 627 | #endif |
607 | 628 | ||
608 | printf("\n"); | 629 | printf("\n"); |
609 | 630 | ||
610 | if (nr_sleep_corrections) | 631 | if (sched->nr_sleep_corrections) |
611 | printf(" (%ld sleep corrections)\n", nr_sleep_corrections); | 632 | printf(" (%ld sleep corrections)\n", sched->nr_sleep_corrections); |
612 | nr_sleep_corrections = 0; | 633 | sched->nr_sleep_corrections = 0; |
613 | } | 634 | } |
614 | 635 | ||
615 | static void test_calibrations(void) | 636 | static void test_calibrations(struct perf_sched *sched) |
616 | { | 637 | { |
617 | u64 T0, T1; | 638 | u64 T0, T1; |
618 | 639 | ||
619 | T0 = get_nsecs(); | 640 | T0 = get_nsecs(); |
620 | burn_nsecs(1e6); | 641 | burn_nsecs(sched, 1e6); |
621 | T1 = get_nsecs(); | 642 | T1 = get_nsecs(); |
622 | 643 | ||
623 | printf("the run test took %" PRIu64 " nsecs\n", T1 - T0); | 644 | printf("the run test took %" PRIu64 " nsecs\n", T1 - T0); |
@@ -629,236 +650,92 @@ static void test_calibrations(void) | |||
629 | printf("the sleep test took %" PRIu64 " nsecs\n", T1 - T0); | 650 | printf("the sleep test took %" PRIu64 " nsecs\n", T1 - T0); |
630 | } | 651 | } |
631 | 652 | ||
632 | #define FILL_FIELD(ptr, field, event, data) \ | 653 | static int |
633 | ptr.field = (typeof(ptr.field)) raw_field_value(event, #field, data) | 654 | replay_wakeup_event(struct perf_sched *sched, |
634 | 655 | struct perf_evsel *evsel, struct perf_sample *sample, | |
635 | #define FILL_ARRAY(ptr, array, event, data) \ | 656 | struct machine *machine __maybe_unused) |
636 | do { \ | ||
637 | void *__array = raw_field_ptr(event, #array, data); \ | ||
638 | memcpy(ptr.array, __array, sizeof(ptr.array)); \ | ||
639 | } while(0) | ||
640 | |||
641 | #define FILL_COMMON_FIELDS(ptr, event, data) \ | ||
642 | do { \ | ||
643 | FILL_FIELD(ptr, common_type, event, data); \ | ||
644 | FILL_FIELD(ptr, common_flags, event, data); \ | ||
645 | FILL_FIELD(ptr, common_preempt_count, event, data); \ | ||
646 | FILL_FIELD(ptr, common_pid, event, data); \ | ||
647 | FILL_FIELD(ptr, common_tgid, event, data); \ | ||
648 | } while (0) | ||
649 | |||
650 | |||
651 | |||
652 | struct trace_switch_event { | ||
653 | u32 size; | ||
654 | |||
655 | u16 common_type; | ||
656 | u8 common_flags; | ||
657 | u8 common_preempt_count; | ||
658 | u32 common_pid; | ||
659 | u32 common_tgid; | ||
660 | |||
661 | char prev_comm[16]; | ||
662 | u32 prev_pid; | ||
663 | u32 prev_prio; | ||
664 | u64 prev_state; | ||
665 | char next_comm[16]; | ||
666 | u32 next_pid; | ||
667 | u32 next_prio; | ||
668 | }; | ||
669 | |||
670 | struct trace_runtime_event { | ||
671 | u32 size; | ||
672 | |||
673 | u16 common_type; | ||
674 | u8 common_flags; | ||
675 | u8 common_preempt_count; | ||
676 | u32 common_pid; | ||
677 | u32 common_tgid; | ||
678 | |||
679 | char comm[16]; | ||
680 | u32 pid; | ||
681 | u64 runtime; | ||
682 | u64 vruntime; | ||
683 | }; | ||
684 | |||
685 | struct trace_wakeup_event { | ||
686 | u32 size; | ||
687 | |||
688 | u16 common_type; | ||
689 | u8 common_flags; | ||
690 | u8 common_preempt_count; | ||
691 | u32 common_pid; | ||
692 | u32 common_tgid; | ||
693 | |||
694 | char comm[16]; | ||
695 | u32 pid; | ||
696 | |||
697 | u32 prio; | ||
698 | u32 success; | ||
699 | u32 cpu; | ||
700 | }; | ||
701 | |||
702 | struct trace_fork_event { | ||
703 | u32 size; | ||
704 | |||
705 | u16 common_type; | ||
706 | u8 common_flags; | ||
707 | u8 common_preempt_count; | ||
708 | u32 common_pid; | ||
709 | u32 common_tgid; | ||
710 | |||
711 | char parent_comm[16]; | ||
712 | u32 parent_pid; | ||
713 | char child_comm[16]; | ||
714 | u32 child_pid; | ||
715 | }; | ||
716 | |||
717 | struct trace_migrate_task_event { | ||
718 | u32 size; | ||
719 | |||
720 | u16 common_type; | ||
721 | u8 common_flags; | ||
722 | u8 common_preempt_count; | ||
723 | u32 common_pid; | ||
724 | u32 common_tgid; | ||
725 | |||
726 | char comm[16]; | ||
727 | u32 pid; | ||
728 | |||
729 | u32 prio; | ||
730 | u32 cpu; | ||
731 | }; | ||
732 | |||
733 | struct trace_sched_handler { | ||
734 | void (*switch_event)(struct trace_switch_event *, | ||
735 | struct machine *, | ||
736 | struct event_format *, | ||
737 | int cpu, | ||
738 | u64 timestamp, | ||
739 | struct thread *thread); | ||
740 | |||
741 | void (*runtime_event)(struct trace_runtime_event *, | ||
742 | struct machine *, | ||
743 | struct event_format *, | ||
744 | int cpu, | ||
745 | u64 timestamp, | ||
746 | struct thread *thread); | ||
747 | |||
748 | void (*wakeup_event)(struct trace_wakeup_event *, | ||
749 | struct machine *, | ||
750 | struct event_format *, | ||
751 | int cpu, | ||
752 | u64 timestamp, | ||
753 | struct thread *thread); | ||
754 | |||
755 | void (*fork_event)(struct trace_fork_event *, | ||
756 | struct event_format *, | ||
757 | int cpu, | ||
758 | u64 timestamp, | ||
759 | struct thread *thread); | ||
760 | |||
761 | void (*migrate_task_event)(struct trace_migrate_task_event *, | ||
762 | struct machine *machine, | ||
763 | struct event_format *, | ||
764 | int cpu, | ||
765 | u64 timestamp, | ||
766 | struct thread *thread); | ||
767 | }; | ||
768 | |||
769 | |||
770 | static void | ||
771 | replay_wakeup_event(struct trace_wakeup_event *wakeup_event, | ||
772 | struct machine *machine __used, | ||
773 | struct event_format *event, | ||
774 | int cpu __used, | ||
775 | u64 timestamp __used, | ||
776 | struct thread *thread __used) | ||
777 | { | 657 | { |
658 | const char *comm = perf_evsel__strval(evsel, sample, "comm"); | ||
659 | const u32 pid = perf_evsel__intval(evsel, sample, "pid"); | ||
778 | struct task_desc *waker, *wakee; | 660 | struct task_desc *waker, *wakee; |
779 | 661 | ||
780 | if (verbose) { | 662 | if (verbose) { |
781 | printf("sched_wakeup event %p\n", event); | 663 | printf("sched_wakeup event %p\n", evsel); |
782 | 664 | ||
783 | printf(" ... pid %d woke up %s/%d\n", | 665 | printf(" ... pid %d woke up %s/%d\n", sample->tid, comm, pid); |
784 | wakeup_event->common_pid, | ||
785 | wakeup_event->comm, | ||
786 | wakeup_event->pid); | ||
787 | } | 666 | } |
788 | 667 | ||
789 | waker = register_pid(wakeup_event->common_pid, "<unknown>"); | 668 | waker = register_pid(sched, sample->tid, "<unknown>"); |
790 | wakee = register_pid(wakeup_event->pid, wakeup_event->comm); | 669 | wakee = register_pid(sched, pid, comm); |
791 | 670 | ||
792 | add_sched_event_wakeup(waker, timestamp, wakee); | 671 | add_sched_event_wakeup(sched, waker, sample->time, wakee); |
672 | return 0; | ||
793 | } | 673 | } |
794 | 674 | ||
795 | static u64 cpu_last_switched[MAX_CPUS]; | 675 | static int replay_switch_event(struct perf_sched *sched, |
796 | 676 | struct perf_evsel *evsel, | |
797 | static void | 677 | struct perf_sample *sample, |
798 | replay_switch_event(struct trace_switch_event *switch_event, | 678 | struct machine *machine __maybe_unused) |
799 | struct machine *machine __used, | ||
800 | struct event_format *event, | ||
801 | int cpu, | ||
802 | u64 timestamp, | ||
803 | struct thread *thread __used) | ||
804 | { | 679 | { |
805 | struct task_desc *prev, __used *next; | 680 | const char *prev_comm = perf_evsel__strval(evsel, sample, "prev_comm"), |
806 | u64 timestamp0; | 681 | *next_comm = perf_evsel__strval(evsel, sample, "next_comm"); |
682 | const u32 prev_pid = perf_evsel__intval(evsel, sample, "prev_pid"), | ||
683 | next_pid = perf_evsel__intval(evsel, sample, "next_pid"); | ||
684 | const u64 prev_state = perf_evsel__intval(evsel, sample, "prev_state"); | ||
685 | struct task_desc *prev, __maybe_unused *next; | ||
686 | u64 timestamp0, timestamp = sample->time; | ||
687 | int cpu = sample->cpu; | ||
807 | s64 delta; | 688 | s64 delta; |
808 | 689 | ||
809 | if (verbose) | 690 | if (verbose) |
810 | printf("sched_switch event %p\n", event); | 691 | printf("sched_switch event %p\n", evsel); |
811 | 692 | ||
812 | if (cpu >= MAX_CPUS || cpu < 0) | 693 | if (cpu >= MAX_CPUS || cpu < 0) |
813 | return; | 694 | return 0; |
814 | 695 | ||
815 | timestamp0 = cpu_last_switched[cpu]; | 696 | timestamp0 = sched->cpu_last_switched[cpu]; |
816 | if (timestamp0) | 697 | if (timestamp0) |
817 | delta = timestamp - timestamp0; | 698 | delta = timestamp - timestamp0; |
818 | else | 699 | else |
819 | delta = 0; | 700 | delta = 0; |
820 | 701 | ||
821 | if (delta < 0) | 702 | if (delta < 0) { |
822 | die("hm, delta: %" PRIu64 " < 0 ?\n", delta); | 703 | pr_err("hm, delta: %" PRIu64 " < 0 ?\n", delta); |
823 | 704 | return -1; | |
824 | if (verbose) { | ||
825 | printf(" ... switch from %s/%d to %s/%d [ran %" PRIu64 " nsecs]\n", | ||
826 | switch_event->prev_comm, switch_event->prev_pid, | ||
827 | switch_event->next_comm, switch_event->next_pid, | ||
828 | delta); | ||
829 | } | 705 | } |
830 | 706 | ||
831 | prev = register_pid(switch_event->prev_pid, switch_event->prev_comm); | 707 | pr_debug(" ... switch from %s/%d to %s/%d [ran %" PRIu64 " nsecs]\n", |
832 | next = register_pid(switch_event->next_pid, switch_event->next_comm); | 708 | prev_comm, prev_pid, next_comm, next_pid, delta); |
833 | 709 | ||
834 | cpu_last_switched[cpu] = timestamp; | 710 | prev = register_pid(sched, prev_pid, prev_comm); |
711 | next = register_pid(sched, next_pid, next_comm); | ||
835 | 712 | ||
836 | add_sched_event_run(prev, timestamp, delta); | 713 | sched->cpu_last_switched[cpu] = timestamp; |
837 | add_sched_event_sleep(prev, timestamp, switch_event->prev_state); | ||
838 | } | ||
839 | 714 | ||
715 | add_sched_event_run(sched, prev, timestamp, delta); | ||
716 | add_sched_event_sleep(sched, prev, timestamp, prev_state); | ||
840 | 717 | ||
841 | static void | 718 | return 0; |
842 | replay_fork_event(struct trace_fork_event *fork_event, | 719 | } |
843 | struct event_format *event, | 720 | |
844 | int cpu __used, | 721 | static int replay_fork_event(struct perf_sched *sched, struct perf_evsel *evsel, |
845 | u64 timestamp __used, | 722 | struct perf_sample *sample) |
846 | struct thread *thread __used) | ||
847 | { | 723 | { |
724 | const char *parent_comm = perf_evsel__strval(evsel, sample, "parent_comm"), | ||
725 | *child_comm = perf_evsel__strval(evsel, sample, "child_comm"); | ||
726 | const u32 parent_pid = perf_evsel__intval(evsel, sample, "parent_pid"), | ||
727 | child_pid = perf_evsel__intval(evsel, sample, "child_pid"); | ||
728 | |||
848 | if (verbose) { | 729 | if (verbose) { |
849 | printf("sched_fork event %p\n", event); | 730 | printf("sched_fork event %p\n", evsel); |
850 | printf("... parent: %s/%d\n", fork_event->parent_comm, fork_event->parent_pid); | 731 | printf("... parent: %s/%d\n", parent_comm, parent_pid); |
851 | printf("... child: %s/%d\n", fork_event->child_comm, fork_event->child_pid); | 732 | printf("... child: %s/%d\n", child_comm, child_pid); |
852 | } | 733 | } |
853 | register_pid(fork_event->parent_pid, fork_event->parent_comm); | ||
854 | register_pid(fork_event->child_pid, fork_event->child_comm); | ||
855 | } | ||
856 | 734 | ||
857 | static struct trace_sched_handler replay_ops = { | 735 | register_pid(sched, parent_pid, parent_comm); |
858 | .wakeup_event = replay_wakeup_event, | 736 | register_pid(sched, child_pid, child_comm); |
859 | .switch_event = replay_switch_event, | 737 | return 0; |
860 | .fork_event = replay_fork_event, | 738 | } |
861 | }; | ||
862 | 739 | ||
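
The replay handlers above also switch from die()-ing on the spot to reporting with pr_err() and returning -1, which is why the static replay_ops table disappears and why the event-processing callbacks can now propagate failures upward. A small sketch of that conversion pattern, again with made-up names rather than the real helpers:

/*
 * Sketch only: report the error, return -1, and let every caller
 * decide how to unwind instead of exiting from deep inside a helper.
 */
#include <stdio.h>
#include <stdlib.h>

static int insert_atom(void **slot)
{
	void *atom = calloc(1, 64);

	if (!atom) {
		fprintf(stderr, "No memory at %s\n", __func__);
		return -1;	/* caller decides how to unwind */
	}
	*slot = atom;
	return 0;
}

static int handle_event(void **slot)
{
	if (insert_atom(slot))
		return -1;	/* propagate instead of exiting */
	return 0;
}

int main(void)
{
	void *slot = NULL;

	if (handle_event(&slot))
		return 1;
	free(slot);
	return 0;
}
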
863 | struct sort_dimension { | 740 | struct sort_dimension { |
864 | const char *name; | 741 | const char *name; |
@@ -866,8 +743,6 @@ struct sort_dimension { | |||
866 | struct list_head list; | 743 | struct list_head list; |
867 | }; | 744 | }; |
868 | 745 | ||
869 | static LIST_HEAD(cmp_pid); | ||
870 | |||
871 | static int | 746 | static int |
872 | thread_lat_cmp(struct list_head *list, struct work_atoms *l, struct work_atoms *r) | 747 | thread_lat_cmp(struct list_head *list, struct work_atoms *l, struct work_atoms *r) |
873 | { | 748 | { |
@@ -936,43 +811,45 @@ __thread_latency_insert(struct rb_root *root, struct work_atoms *data, | |||
936 | rb_insert_color(&data->node, root); | 811 | rb_insert_color(&data->node, root); |
937 | } | 812 | } |
938 | 813 | ||
939 | static void thread_atoms_insert(struct thread *thread) | 814 | static int thread_atoms_insert(struct perf_sched *sched, struct thread *thread) |
940 | { | 815 | { |
941 | struct work_atoms *atoms = zalloc(sizeof(*atoms)); | 816 | struct work_atoms *atoms = zalloc(sizeof(*atoms)); |
942 | if (!atoms) | 817 | if (!atoms) { |
943 | die("No memory"); | 818 | pr_err("No memory at %s\n", __func__); |
819 | return -1; | ||
820 | } | ||
944 | 821 | ||
945 | atoms->thread = thread; | 822 | atoms->thread = thread; |
946 | INIT_LIST_HEAD(&atoms->work_list); | 823 | INIT_LIST_HEAD(&atoms->work_list); |
947 | __thread_latency_insert(&atom_root, atoms, &cmp_pid); | 824 | __thread_latency_insert(&sched->atom_root, atoms, &sched->cmp_pid); |
825 | return 0; | ||
948 | } | 826 | } |
949 | 827 | ||
950 | static void | 828 | static int latency_fork_event(struct perf_sched *sched __maybe_unused, |
951 | latency_fork_event(struct trace_fork_event *fork_event __used, | 829 | struct perf_evsel *evsel __maybe_unused, |
952 | struct event_format *event __used, | 830 | struct perf_sample *sample __maybe_unused) |
953 | int cpu __used, | ||
954 | u64 timestamp __used, | ||
955 | struct thread *thread __used) | ||
956 | { | 831 | { |
957 | /* should insert the newcomer */ | 832 | /* should insert the newcomer */ |
833 | return 0; | ||
958 | } | 834 | } |
959 | 835 | ||
960 | __used | 836 | static char sched_out_state(u64 prev_state) |
961 | static char sched_out_state(struct trace_switch_event *switch_event) | ||
962 | { | 837 | { |
963 | const char *str = TASK_STATE_TO_CHAR_STR; | 838 | const char *str = TASK_STATE_TO_CHAR_STR; |
964 | 839 | ||
965 | return str[switch_event->prev_state]; | 840 | return str[prev_state]; |
966 | } | 841 | } |
967 | 842 | ||
968 | static void | 843 | static int |
969 | add_sched_out_event(struct work_atoms *atoms, | 844 | add_sched_out_event(struct work_atoms *atoms, |
970 | char run_state, | 845 | char run_state, |
971 | u64 timestamp) | 846 | u64 timestamp) |
972 | { | 847 | { |
973 | struct work_atom *atom = zalloc(sizeof(*atom)); | 848 | struct work_atom *atom = zalloc(sizeof(*atom)); |
974 | if (!atom) | 849 | if (!atom) { |
975 | die("Non memory"); | 850 | pr_err("Non memory at %s", __func__); |
851 | return -1; | ||
852 | } | ||
976 | 853 | ||
977 | atom->sched_out_time = timestamp; | 854 | atom->sched_out_time = timestamp; |
978 | 855 | ||
@@ -982,10 +859,12 @@ add_sched_out_event(struct work_atoms *atoms, | |||
982 | } | 859 | } |
983 | 860 | ||
984 | list_add_tail(&atom->list, &atoms->work_list); | 861 | list_add_tail(&atom->list, &atoms->work_list); |
862 | return 0; | ||
985 | } | 863 | } |
986 | 864 | ||
987 | static void | 865 | static void |
988 | add_runtime_event(struct work_atoms *atoms, u64 delta, u64 timestamp __used) | 866 | add_runtime_event(struct work_atoms *atoms, u64 delta, |
867 | u64 timestamp __maybe_unused) | ||
989 | { | 868 | { |
990 | struct work_atom *atom; | 869 | struct work_atom *atom; |
991 | 870 | ||
@@ -1028,106 +907,128 @@ add_sched_in_event(struct work_atoms *atoms, u64 timestamp) | |||
1028 | atoms->nb_atoms++; | 907 | atoms->nb_atoms++; |
1029 | } | 908 | } |
1030 | 909 | ||
1031 | static void | 910 | static int latency_switch_event(struct perf_sched *sched, |
1032 | latency_switch_event(struct trace_switch_event *switch_event, | 911 | struct perf_evsel *evsel, |
1033 | struct machine *machine, | 912 | struct perf_sample *sample, |
1034 | struct event_format *event __used, | 913 | struct machine *machine) |
1035 | int cpu, | ||
1036 | u64 timestamp, | ||
1037 | struct thread *thread __used) | ||
1038 | { | 914 | { |
915 | const u32 prev_pid = perf_evsel__intval(evsel, sample, "prev_pid"), | ||
916 | next_pid = perf_evsel__intval(evsel, sample, "next_pid"); | ||
917 | const u64 prev_state = perf_evsel__intval(evsel, sample, "prev_state"); | ||
1039 | struct work_atoms *out_events, *in_events; | 918 | struct work_atoms *out_events, *in_events; |
1040 | struct thread *sched_out, *sched_in; | 919 | struct thread *sched_out, *sched_in; |
1041 | u64 timestamp0; | 920 | u64 timestamp0, timestamp = sample->time; |
921 | int cpu = sample->cpu; | ||
1042 | s64 delta; | 922 | s64 delta; |
1043 | 923 | ||
1044 | BUG_ON(cpu >= MAX_CPUS || cpu < 0); | 924 | BUG_ON(cpu >= MAX_CPUS || cpu < 0); |
1045 | 925 | ||
1046 | timestamp0 = cpu_last_switched[cpu]; | 926 | timestamp0 = sched->cpu_last_switched[cpu]; |
1047 | cpu_last_switched[cpu] = timestamp; | 927 | sched->cpu_last_switched[cpu] = timestamp; |
1048 | if (timestamp0) | 928 | if (timestamp0) |
1049 | delta = timestamp - timestamp0; | 929 | delta = timestamp - timestamp0; |
1050 | else | 930 | else |
1051 | delta = 0; | 931 | delta = 0; |
1052 | 932 | ||
1053 | if (delta < 0) | 933 | if (delta < 0) { |
1054 | die("hm, delta: %" PRIu64 " < 0 ?\n", delta); | 934 | pr_err("hm, delta: %" PRIu64 " < 0 ?\n", delta); |
1055 | 935 | return -1; | |
936 | } | ||
1056 | 937 | ||
1057 | sched_out = machine__findnew_thread(machine, switch_event->prev_pid); | 938 | sched_out = machine__findnew_thread(machine, prev_pid); |
1058 | sched_in = machine__findnew_thread(machine, switch_event->next_pid); | 939 | sched_in = machine__findnew_thread(machine, next_pid); |
1059 | 940 | ||
1060 | out_events = thread_atoms_search(&atom_root, sched_out, &cmp_pid); | 941 | out_events = thread_atoms_search(&sched->atom_root, sched_out, &sched->cmp_pid); |
1061 | if (!out_events) { | 942 | if (!out_events) { |
1062 | thread_atoms_insert(sched_out); | 943 | if (thread_atoms_insert(sched, sched_out)) |
1063 | out_events = thread_atoms_search(&atom_root, sched_out, &cmp_pid); | 944 | return -1; |
1064 | if (!out_events) | 945 | out_events = thread_atoms_search(&sched->atom_root, sched_out, &sched->cmp_pid); |
1065 | die("out-event: Internal tree error"); | 946 | if (!out_events) { |
947 | pr_err("out-event: Internal tree error"); | ||
948 | return -1; | ||
949 | } | ||
1066 | } | 950 | } |
1067 | add_sched_out_event(out_events, sched_out_state(switch_event), timestamp); | 951 | if (add_sched_out_event(out_events, sched_out_state(prev_state), timestamp)) |
952 | return -1; | ||
1068 | 953 | ||
1069 | in_events = thread_atoms_search(&atom_root, sched_in, &cmp_pid); | 954 | in_events = thread_atoms_search(&sched->atom_root, sched_in, &sched->cmp_pid); |
1070 | if (!in_events) { | 955 | if (!in_events) { |
1071 | thread_atoms_insert(sched_in); | 956 | if (thread_atoms_insert(sched, sched_in)) |
1072 | in_events = thread_atoms_search(&atom_root, sched_in, &cmp_pid); | 957 | return -1; |
1073 | if (!in_events) | 958 | in_events = thread_atoms_search(&sched->atom_root, sched_in, &sched->cmp_pid); |
1074 | die("in-event: Internal tree error"); | 959 | if (!in_events) { |
960 | pr_err("in-event: Internal tree error"); | ||
961 | return -1; | ||
962 | } | ||
1075 | /* | 963 | /* |
1076 | * Take came in we have not heard about yet, | 964 | * Take came in we have not heard about yet, |
1077 | * add in an initial atom in runnable state: | 965 | * add in an initial atom in runnable state: |
1078 | */ | 966 | */ |
1079 | add_sched_out_event(in_events, 'R', timestamp); | 967 | if (add_sched_out_event(in_events, 'R', timestamp)) |
968 | return -1; | ||
1080 | } | 969 | } |
1081 | add_sched_in_event(in_events, timestamp); | 970 | add_sched_in_event(in_events, timestamp); |
971 | |||
972 | return 0; | ||
1082 | } | 973 | } |
1083 | 974 | ||
1084 | static void | 975 | static int latency_runtime_event(struct perf_sched *sched, |
1085 | latency_runtime_event(struct trace_runtime_event *runtime_event, | 976 | struct perf_evsel *evsel, |
1086 | struct machine *machine, | 977 | struct perf_sample *sample, |
1087 | struct event_format *event __used, | 978 | struct machine *machine) |
1088 | int cpu, | ||
1089 | u64 timestamp, | ||
1090 | struct thread *this_thread __used) | ||
1091 | { | 979 | { |
1092 | struct thread *thread = machine__findnew_thread(machine, runtime_event->pid); | 980 | const u32 pid = perf_evsel__intval(evsel, sample, "pid"); |
1093 | struct work_atoms *atoms = thread_atoms_search(&atom_root, thread, &cmp_pid); | 981 | const u64 runtime = perf_evsel__intval(evsel, sample, "runtime"); |
982 | struct thread *thread = machine__findnew_thread(machine, pid); | ||
983 | struct work_atoms *atoms = thread_atoms_search(&sched->atom_root, thread, &sched->cmp_pid); | ||
984 | u64 timestamp = sample->time; | ||
985 | int cpu = sample->cpu; | ||
1094 | 986 | ||
1095 | BUG_ON(cpu >= MAX_CPUS || cpu < 0); | 987 | BUG_ON(cpu >= MAX_CPUS || cpu < 0); |
1096 | if (!atoms) { | 988 | if (!atoms) { |
1097 | thread_atoms_insert(thread); | 989 | if (thread_atoms_insert(sched, thread)) |
1098 | atoms = thread_atoms_search(&atom_root, thread, &cmp_pid); | 990 | return -1; |
1099 | if (!atoms) | 991 | atoms = thread_atoms_search(&sched->atom_root, thread, &sched->cmp_pid); |
1100 | die("in-event: Internal tree error"); | 992 | if (!atoms) { |
1101 | add_sched_out_event(atoms, 'R', timestamp); | 993 | pr_err("in-event: Internal tree error"); |
994 | return -1; | ||
995 | } | ||
996 | if (add_sched_out_event(atoms, 'R', timestamp)) | ||
997 | return -1; | ||
1102 | } | 998 | } |
1103 | 999 | ||
1104 | add_runtime_event(atoms, runtime_event->runtime, timestamp); | 1000 | add_runtime_event(atoms, runtime, timestamp); |
1001 | return 0; | ||
1105 | } | 1002 | } |
1106 | 1003 | ||
1107 | static void | 1004 | static int latency_wakeup_event(struct perf_sched *sched, |
1108 | latency_wakeup_event(struct trace_wakeup_event *wakeup_event, | 1005 | struct perf_evsel *evsel, |
1109 | struct machine *machine, | 1006 | struct perf_sample *sample, |
1110 | struct event_format *__event __used, | 1007 | struct machine *machine) |
1111 | int cpu __used, | ||
1112 | u64 timestamp, | ||
1113 | struct thread *thread __used) | ||
1114 | { | 1008 | { |
1009 | const u32 pid = perf_evsel__intval(evsel, sample, "pid"), | ||
1010 | success = perf_evsel__intval(evsel, sample, "success"); | ||
1115 | struct work_atoms *atoms; | 1011 | struct work_atoms *atoms; |
1116 | struct work_atom *atom; | 1012 | struct work_atom *atom; |
1117 | struct thread *wakee; | 1013 | struct thread *wakee; |
1014 | u64 timestamp = sample->time; | ||
1118 | 1015 | ||
1119 | /* Note for later, it may be interesting to observe the failing cases */ | 1016 | /* Note for later, it may be interesting to observe the failing cases */ |
1120 | if (!wakeup_event->success) | 1017 | if (!success) |
1121 | return; | 1018 | return 0; |
1122 | 1019 | ||
1123 | wakee = machine__findnew_thread(machine, wakeup_event->pid); | 1020 | wakee = machine__findnew_thread(machine, pid); |
1124 | atoms = thread_atoms_search(&atom_root, wakee, &cmp_pid); | 1021 | atoms = thread_atoms_search(&sched->atom_root, wakee, &sched->cmp_pid); |
1125 | if (!atoms) { | 1022 | if (!atoms) { |
1126 | thread_atoms_insert(wakee); | 1023 | if (thread_atoms_insert(sched, wakee)) |
1127 | atoms = thread_atoms_search(&atom_root, wakee, &cmp_pid); | 1024 | return -1; |
1128 | if (!atoms) | 1025 | atoms = thread_atoms_search(&sched->atom_root, wakee, &sched->cmp_pid); |
1129 | die("wakeup-event: Internal tree error"); | 1026 | if (!atoms) { |
1130 | add_sched_out_event(atoms, 'S', timestamp); | 1027 | pr_err("wakeup-event: Internal tree error"); |
1028 | return -1; | ||
1029 | } | ||
1030 | if (add_sched_out_event(atoms, 'S', timestamp)) | ||
1031 | return -1; | ||
1131 | } | 1032 | } |
1132 | 1033 | ||
1133 | BUG_ON(list_empty(&atoms->work_list)); | 1034 | BUG_ON(list_empty(&atoms->work_list)); |
@@ -1139,27 +1040,27 @@ latency_wakeup_event(struct trace_wakeup_event *wakeup_event, | |||
1139 | * one CPU, or are only looking at only one, so don't | 1040 | * one CPU, or are only looking at only one, so don't |
1140 | * make useless noise. | 1041 | * make useless noise. |
1141 | */ | 1042 | */ |
1142 | if (profile_cpu == -1 && atom->state != THREAD_SLEEPING) | 1043 | if (sched->profile_cpu == -1 && atom->state != THREAD_SLEEPING) |
1143 | nr_state_machine_bugs++; | 1044 | sched->nr_state_machine_bugs++; |
1144 | 1045 | ||
1145 | nr_timestamps++; | 1046 | sched->nr_timestamps++; |
1146 | if (atom->sched_out_time > timestamp) { | 1047 | if (atom->sched_out_time > timestamp) { |
1147 | nr_unordered_timestamps++; | 1048 | sched->nr_unordered_timestamps++; |
1148 | return; | 1049 | return 0; |
1149 | } | 1050 | } |
1150 | 1051 | ||
1151 | atom->state = THREAD_WAIT_CPU; | 1052 | atom->state = THREAD_WAIT_CPU; |
1152 | atom->wake_up_time = timestamp; | 1053 | atom->wake_up_time = timestamp; |
1054 | return 0; | ||
1153 | } | 1055 | } |
1154 | 1056 | ||
1155 | static void | 1057 | static int latency_migrate_task_event(struct perf_sched *sched, |
1156 | latency_migrate_task_event(struct trace_migrate_task_event *migrate_task_event, | 1058 | struct perf_evsel *evsel, |
1157 | struct machine *machine, | 1059 | struct perf_sample *sample, |
1158 | struct event_format *__event __used, | 1060 | struct machine *machine) |
1159 | int cpu __used, | ||
1160 | u64 timestamp, | ||
1161 | struct thread *thread __used) | ||
1162 | { | 1061 | { |
1062 | const u32 pid = perf_evsel__intval(evsel, sample, "pid"); | ||
1063 | u64 timestamp = sample->time; | ||
1163 | struct work_atoms *atoms; | 1064 | struct work_atoms *atoms; |
1164 | struct work_atom *atom; | 1065 | struct work_atom *atom; |
1165 | struct thread *migrant; | 1066 | struct thread *migrant; |
@@ -1167,18 +1068,22 @@ latency_migrate_task_event(struct trace_migrate_task_event *migrate_task_event, | |||
1167 | /* | 1068 | /* |
1168 | * Only need to worry about migration when profiling one CPU. | 1069 | * Only need to worry about migration when profiling one CPU. |
1169 | */ | 1070 | */ |
1170 | if (profile_cpu == -1) | 1071 | if (sched->profile_cpu == -1) |
1171 | return; | 1072 | return 0; |
1172 | 1073 | ||
1173 | migrant = machine__findnew_thread(machine, migrate_task_event->pid); | 1074 | migrant = machine__findnew_thread(machine, pid); |
1174 | atoms = thread_atoms_search(&atom_root, migrant, &cmp_pid); | 1075 | atoms = thread_atoms_search(&sched->atom_root, migrant, &sched->cmp_pid); |
1175 | if (!atoms) { | 1076 | if (!atoms) { |
1176 | thread_atoms_insert(migrant); | 1077 | if (thread_atoms_insert(sched, migrant)) |
1177 | register_pid(migrant->pid, migrant->comm); | 1078 | return -1; |
1178 | atoms = thread_atoms_search(&atom_root, migrant, &cmp_pid); | 1079 | register_pid(sched, migrant->pid, migrant->comm); |
1179 | if (!atoms) | 1080 | atoms = thread_atoms_search(&sched->atom_root, migrant, &sched->cmp_pid); |
1180 | die("migration-event: Internal tree error"); | 1081 | if (!atoms) { |
1181 | add_sched_out_event(atoms, 'R', timestamp); | 1082 | pr_err("migration-event: Internal tree error"); |
1083 | return -1; | ||
1084 | } | ||
1085 | if (add_sched_out_event(atoms, 'R', timestamp)) | ||
1086 | return -1; | ||
1182 | } | 1087 | } |
1183 | 1088 | ||
1184 | BUG_ON(list_empty(&atoms->work_list)); | 1089 | BUG_ON(list_empty(&atoms->work_list)); |
@@ -1186,21 +1091,15 @@ latency_migrate_task_event(struct trace_migrate_task_event *migrate_task_event, | |||
1186 | atom = list_entry(atoms->work_list.prev, struct work_atom, list); | 1091 | atom = list_entry(atoms->work_list.prev, struct work_atom, list); |
1187 | atom->sched_in_time = atom->sched_out_time = atom->wake_up_time = timestamp; | 1092 | atom->sched_in_time = atom->sched_out_time = atom->wake_up_time = timestamp; |
1188 | 1093 | ||
1189 | nr_timestamps++; | 1094 | sched->nr_timestamps++; |
1190 | 1095 | ||
1191 | if (atom->sched_out_time > timestamp) | 1096 | if (atom->sched_out_time > timestamp) |
1192 | nr_unordered_timestamps++; | 1097 | sched->nr_unordered_timestamps++; |
1193 | } | ||
1194 | 1098 | ||
1195 | static struct trace_sched_handler lat_ops = { | 1099 | return 0; |
1196 | .wakeup_event = latency_wakeup_event, | 1100 | } |
1197 | .switch_event = latency_switch_event, | ||
1198 | .runtime_event = latency_runtime_event, | ||
1199 | .fork_event = latency_fork_event, | ||
1200 | .migrate_task_event = latency_migrate_task_event, | ||
1201 | }; | ||
1202 | 1101 | ||
1203 | static void output_lat_thread(struct work_atoms *work_list) | 1102 | static void output_lat_thread(struct perf_sched *sched, struct work_atoms *work_list) |
1204 | { | 1103 | { |
1205 | int i; | 1104 | int i; |
1206 | int ret; | 1105 | int ret; |
@@ -1214,8 +1113,8 @@ static void output_lat_thread(struct work_atoms *work_list) | |||
1214 | if (!strcmp(work_list->thread->comm, "swapper")) | 1113 | if (!strcmp(work_list->thread->comm, "swapper")) |
1215 | return; | 1114 | return; |
1216 | 1115 | ||
1217 | all_runtime += work_list->total_runtime; | 1116 | sched->all_runtime += work_list->total_runtime; |
1218 | all_count += work_list->nb_atoms; | 1117 | sched->all_count += work_list->nb_atoms; |
1219 | 1118 | ||
1220 | ret = printf(" %s:%d ", work_list->thread->comm, work_list->thread->pid); | 1119 | ret = printf(" %s:%d ", work_list->thread->comm, work_list->thread->pid); |
1221 | 1120 | ||
@@ -1241,11 +1140,6 @@ static int pid_cmp(struct work_atoms *l, struct work_atoms *r) | |||
1241 | return 0; | 1140 | return 0; |
1242 | } | 1141 | } |
1243 | 1142 | ||
1244 | static struct sort_dimension pid_sort_dimension = { | ||
1245 | .name = "pid", | ||
1246 | .cmp = pid_cmp, | ||
1247 | }; | ||
1248 | |||
1249 | static int avg_cmp(struct work_atoms *l, struct work_atoms *r) | 1143 | static int avg_cmp(struct work_atoms *l, struct work_atoms *r) |
1250 | { | 1144 | { |
1251 | u64 avgl, avgr; | 1145 | u64 avgl, avgr; |
@@ -1267,11 +1161,6 @@ static int avg_cmp(struct work_atoms *l, struct work_atoms *r) | |||
1267 | return 0; | 1161 | return 0; |
1268 | } | 1162 | } |
1269 | 1163 | ||
1270 | static struct sort_dimension avg_sort_dimension = { | ||
1271 | .name = "avg", | ||
1272 | .cmp = avg_cmp, | ||
1273 | }; | ||
1274 | |||
1275 | static int max_cmp(struct work_atoms *l, struct work_atoms *r) | 1164 | static int max_cmp(struct work_atoms *l, struct work_atoms *r) |
1276 | { | 1165 | { |
1277 | if (l->max_lat < r->max_lat) | 1166 | if (l->max_lat < r->max_lat) |
@@ -1282,11 +1171,6 @@ static int max_cmp(struct work_atoms *l, struct work_atoms *r) | |||
1282 | return 0; | 1171 | return 0; |
1283 | } | 1172 | } |
1284 | 1173 | ||
1285 | static struct sort_dimension max_sort_dimension = { | ||
1286 | .name = "max", | ||
1287 | .cmp = max_cmp, | ||
1288 | }; | ||
1289 | |||
1290 | static int switch_cmp(struct work_atoms *l, struct work_atoms *r) | 1174 | static int switch_cmp(struct work_atoms *l, struct work_atoms *r) |
1291 | { | 1175 | { |
1292 | if (l->nb_atoms < r->nb_atoms) | 1176 | if (l->nb_atoms < r->nb_atoms) |
@@ -1297,11 +1181,6 @@ static int switch_cmp(struct work_atoms *l, struct work_atoms *r) | |||
1297 | return 0; | 1181 | return 0; |
1298 | } | 1182 | } |
1299 | 1183 | ||
1300 | static struct sort_dimension switch_sort_dimension = { | ||
1301 | .name = "switch", | ||
1302 | .cmp = switch_cmp, | ||
1303 | }; | ||
1304 | |||
1305 | static int runtime_cmp(struct work_atoms *l, struct work_atoms *r) | 1184 | static int runtime_cmp(struct work_atoms *l, struct work_atoms *r) |
1306 | { | 1185 | { |
1307 | if (l->total_runtime < r->total_runtime) | 1186 | if (l->total_runtime < r->total_runtime) |
@@ -1312,28 +1191,38 @@ static int runtime_cmp(struct work_atoms *l, struct work_atoms *r) | |||
1312 | return 0; | 1191 | return 0; |
1313 | } | 1192 | } |
1314 | 1193 | ||
1315 | static struct sort_dimension runtime_sort_dimension = { | ||
1316 | .name = "runtime", | ||
1317 | .cmp = runtime_cmp, | ||
1318 | }; | ||
1319 | |||
1320 | static struct sort_dimension *available_sorts[] = { | ||
1321 | &pid_sort_dimension, | ||
1322 | &avg_sort_dimension, | ||
1323 | &max_sort_dimension, | ||
1324 | &switch_sort_dimension, | ||
1325 | &runtime_sort_dimension, | ||
1326 | }; | ||
1327 | |||
1328 | #define NB_AVAILABLE_SORTS (int)(sizeof(available_sorts) / sizeof(struct sort_dimension *)) | ||
1329 | |||
1330 | static LIST_HEAD(sort_list); | ||
1331 | |||
1332 | static int sort_dimension__add(const char *tok, struct list_head *list) | 1194 | static int sort_dimension__add(const char *tok, struct list_head *list) |
1333 | { | 1195 | { |
1334 | int i; | 1196 | size_t i; |
1197 | static struct sort_dimension avg_sort_dimension = { | ||
1198 | .name = "avg", | ||
1199 | .cmp = avg_cmp, | ||
1200 | }; | ||
1201 | static struct sort_dimension max_sort_dimension = { | ||
1202 | .name = "max", | ||
1203 | .cmp = max_cmp, | ||
1204 | }; | ||
1205 | static struct sort_dimension pid_sort_dimension = { | ||
1206 | .name = "pid", | ||
1207 | .cmp = pid_cmp, | ||
1208 | }; | ||
1209 | static struct sort_dimension runtime_sort_dimension = { | ||
1210 | .name = "runtime", | ||
1211 | .cmp = runtime_cmp, | ||
1212 | }; | ||
1213 | static struct sort_dimension switch_sort_dimension = { | ||
1214 | .name = "switch", | ||
1215 | .cmp = switch_cmp, | ||
1216 | }; | ||
1217 | struct sort_dimension *available_sorts[] = { | ||
1218 | &pid_sort_dimension, | ||
1219 | &avg_sort_dimension, | ||
1220 | &max_sort_dimension, | ||
1221 | &switch_sort_dimension, | ||
1222 | &runtime_sort_dimension, | ||
1223 | }; | ||
1335 | 1224 | ||
1336 | for (i = 0; i < NB_AVAILABLE_SORTS; i++) { | 1225 | for (i = 0; i < ARRAY_SIZE(available_sorts); i++) { |
1337 | if (!strcmp(available_sorts[i]->name, tok)) { | 1226 | if (!strcmp(available_sorts[i]->name, tok)) { |
1338 | list_add_tail(&available_sorts[i]->list, list); | 1227 | list_add_tail(&available_sorts[i]->list, list); |
1339 | 1228 | ||
@@ -1344,126 +1233,97 @@ static int sort_dimension__add(const char *tok, struct list_head *list) | |||
1344 | return -1; | 1233 | return -1; |
1345 | } | 1234 | } |
1346 | 1235 | ||
1347 | static void setup_sorting(void); | 1236 | static void perf_sched__sort_lat(struct perf_sched *sched) |
1348 | |||
1349 | static void sort_lat(void) | ||
1350 | { | 1237 | { |
1351 | struct rb_node *node; | 1238 | struct rb_node *node; |
1352 | 1239 | ||
1353 | for (;;) { | 1240 | for (;;) { |
1354 | struct work_atoms *data; | 1241 | struct work_atoms *data; |
1355 | node = rb_first(&atom_root); | 1242 | node = rb_first(&sched->atom_root); |
1356 | if (!node) | 1243 | if (!node) |
1357 | break; | 1244 | break; |
1358 | 1245 | ||
1359 | rb_erase(node, &atom_root); | 1246 | rb_erase(node, &sched->atom_root); |
1360 | data = rb_entry(node, struct work_atoms, node); | 1247 | data = rb_entry(node, struct work_atoms, node); |
1361 | __thread_latency_insert(&sorted_atom_root, data, &sort_list); | 1248 | __thread_latency_insert(&sched->sorted_atom_root, data, &sched->sort_list); |
1362 | } | 1249 | } |
1363 | } | 1250 | } |
1364 | 1251 | ||
1365 | static struct trace_sched_handler *trace_handler; | 1252 | static int process_sched_wakeup_event(struct perf_tool *tool, |
1366 | 1253 | struct perf_evsel *evsel, | |
1367 | static void | 1254 | struct perf_sample *sample, |
1368 | process_sched_wakeup_event(struct perf_tool *tool __used, | 1255 | struct machine *machine) |
1369 | struct event_format *event, | ||
1370 | struct perf_sample *sample, | ||
1371 | struct machine *machine, | ||
1372 | struct thread *thread) | ||
1373 | { | 1256 | { |
1374 | void *data = sample->raw_data; | 1257 | struct perf_sched *sched = container_of(tool, struct perf_sched, tool); |
1375 | struct trace_wakeup_event wakeup_event; | ||
1376 | |||
1377 | FILL_COMMON_FIELDS(wakeup_event, event, data); | ||
1378 | 1258 | ||
1379 | FILL_ARRAY(wakeup_event, comm, event, data); | 1259 | if (sched->tp_handler->wakeup_event) |
1380 | FILL_FIELD(wakeup_event, pid, event, data); | 1260 | return sched->tp_handler->wakeup_event(sched, evsel, sample, machine); |
1381 | FILL_FIELD(wakeup_event, prio, event, data); | ||
1382 | FILL_FIELD(wakeup_event, success, event, data); | ||
1383 | FILL_FIELD(wakeup_event, cpu, event, data); | ||
1384 | 1261 | ||
1385 | if (trace_handler->wakeup_event) | 1262 | return 0; |
1386 | trace_handler->wakeup_event(&wakeup_event, machine, event, | ||
1387 | sample->cpu, sample->time, thread); | ||
1388 | } | 1263 | } |
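
process_sched_wakeup_event() now receives only the struct perf_tool pointer and recovers the enclosing perf_sched with container_of(), which works because the tool is embedded as a member of struct perf_sched. A self-contained sketch of that recovery, with a local container_of definition and hypothetical struct names:

/*
 * Sketch only: a callback that is handed just the embedded "tool"
 * member can climb back to the outer state object.
 */
#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct tool {
	int (*callback)(struct tool *tool, int value);
};

struct sched_state {
	struct tool tool;	/* must be embedded, not a pointer */
	long nr_events;
};

static int count_event(struct tool *tool, int value)
{
	struct sched_state *sched = container_of(tool, struct sched_state, tool);

	sched->nr_events += value;
	return 0;
}

int main(void)
{
	struct sched_state sched = { .tool = { .callback = count_event } };

	sched.tool.callback(&sched.tool, 3);
	printf("nr_events=%ld\n", sched.nr_events);
	return 0;
}
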
1389 | 1264 | ||
1390 | /* | 1265 | static int map_switch_event(struct perf_sched *sched, struct perf_evsel *evsel, |
1391 | * Track the current task - that way we can know whether there's any | 1266 | struct perf_sample *sample, struct machine *machine) |
1392 | * weird events, such as a task being switched away that is not current. | ||
1393 | */ | ||
1394 | static int max_cpu; | ||
1395 | |||
1396 | static u32 curr_pid[MAX_CPUS] = { [0 ... MAX_CPUS-1] = -1 }; | ||
1397 | |||
1398 | static struct thread *curr_thread[MAX_CPUS]; | ||
1399 | |||
1400 | static char next_shortname1 = 'A'; | ||
1401 | static char next_shortname2 = '0'; | ||
1402 | |||
1403 | static void | ||
1404 | map_switch_event(struct trace_switch_event *switch_event, | ||
1405 | struct machine *machine, | ||
1406 | struct event_format *event __used, | ||
1407 | int this_cpu, | ||
1408 | u64 timestamp, | ||
1409 | struct thread *thread __used) | ||
1410 | { | 1267 | { |
1411 | struct thread *sched_out __used, *sched_in; | 1268 | const u32 prev_pid = perf_evsel__intval(evsel, sample, "prev_pid"), |
1269 | next_pid = perf_evsel__intval(evsel, sample, "next_pid"); | ||
1270 | struct thread *sched_out __maybe_unused, *sched_in; | ||
1412 | int new_shortname; | 1271 | int new_shortname; |
1413 | u64 timestamp0; | 1272 | u64 timestamp0, timestamp = sample->time; |
1414 | s64 delta; | 1273 | s64 delta; |
1415 | int cpu; | 1274 | int cpu, this_cpu = sample->cpu; |
1416 | 1275 | ||
1417 | BUG_ON(this_cpu >= MAX_CPUS || this_cpu < 0); | 1276 | BUG_ON(this_cpu >= MAX_CPUS || this_cpu < 0); |
1418 | 1277 | ||
1419 | if (this_cpu > max_cpu) | 1278 | if (this_cpu > sched->max_cpu) |
1420 | max_cpu = this_cpu; | 1279 | sched->max_cpu = this_cpu; |
1421 | 1280 | ||
1422 | timestamp0 = cpu_last_switched[this_cpu]; | 1281 | timestamp0 = sched->cpu_last_switched[this_cpu]; |
1423 | cpu_last_switched[this_cpu] = timestamp; | 1282 | sched->cpu_last_switched[this_cpu] = timestamp; |
1424 | if (timestamp0) | 1283 | if (timestamp0) |
1425 | delta = timestamp - timestamp0; | 1284 | delta = timestamp - timestamp0; |
1426 | else | 1285 | else |
1427 | delta = 0; | 1286 | delta = 0; |
1428 | 1287 | ||
1429 | if (delta < 0) | 1288 | if (delta < 0) { |
1430 | die("hm, delta: %" PRIu64 " < 0 ?\n", delta); | 1289 | pr_err("hm, delta: %" PRIu64 " < 0 ?\n", delta); |
1431 | 1290 | return -1; | |
1291 | } | ||
1432 | 1292 | ||
1433 | sched_out = machine__findnew_thread(machine, switch_event->prev_pid); | 1293 | sched_out = machine__findnew_thread(machine, prev_pid); |
1434 | sched_in = machine__findnew_thread(machine, switch_event->next_pid); | 1294 | sched_in = machine__findnew_thread(machine, next_pid); |
1435 | 1295 | ||
1436 | curr_thread[this_cpu] = sched_in; | 1296 | sched->curr_thread[this_cpu] = sched_in; |
1437 | 1297 | ||
1438 | printf(" "); | 1298 | printf(" "); |
1439 | 1299 | ||
1440 | new_shortname = 0; | 1300 | new_shortname = 0; |
1441 | if (!sched_in->shortname[0]) { | 1301 | if (!sched_in->shortname[0]) { |
1442 | sched_in->shortname[0] = next_shortname1; | 1302 | sched_in->shortname[0] = sched->next_shortname1; |
1443 | sched_in->shortname[1] = next_shortname2; | 1303 | sched_in->shortname[1] = sched->next_shortname2; |
1444 | 1304 | ||
1445 | if (next_shortname1 < 'Z') { | 1305 | if (sched->next_shortname1 < 'Z') { |
1446 | next_shortname1++; | 1306 | sched->next_shortname1++; |
1447 | } else { | 1307 | } else { |
1448 | next_shortname1='A'; | 1308 | sched->next_shortname1='A'; |
1449 | if (next_shortname2 < '9') { | 1309 | if (sched->next_shortname2 < '9') { |
1450 | next_shortname2++; | 1310 | sched->next_shortname2++; |
1451 | } else { | 1311 | } else { |
1452 | next_shortname2='0'; | 1312 | sched->next_shortname2='0'; |
1453 | } | 1313 | } |
1454 | } | 1314 | } |
1455 | new_shortname = 1; | 1315 | new_shortname = 1; |
1456 | } | 1316 | } |
1457 | 1317 | ||
1458 | for (cpu = 0; cpu <= max_cpu; cpu++) { | 1318 | for (cpu = 0; cpu <= sched->max_cpu; cpu++) { |
1459 | if (cpu != this_cpu) | 1319 | if (cpu != this_cpu) |
1460 | printf(" "); | 1320 | printf(" "); |
1461 | else | 1321 | else |
1462 | printf("*"); | 1322 | printf("*"); |
1463 | 1323 | ||
1464 | if (curr_thread[cpu]) { | 1324 | if (sched->curr_thread[cpu]) { |
1465 | if (curr_thread[cpu]->pid) | 1325 | if (sched->curr_thread[cpu]->pid) |
1466 | printf("%2s ", curr_thread[cpu]->shortname); | 1326 | printf("%2s ", sched->curr_thread[cpu]->shortname); |
1467 | else | 1327 | else |
1468 | printf(". "); | 1328 | printf(". "); |
1469 | } else | 1329 | } else |
@@ -1477,134 +1337,97 @@ map_switch_event(struct trace_switch_event *switch_event, | |||
1477 | } else { | 1337 | } else { |
1478 | printf("\n"); | 1338 | printf("\n"); |
1479 | } | 1339 | } |
1340 | |||
1341 | return 0; | ||
1480 | } | 1342 | } |
1481 | 1343 | ||
1482 | static void | 1344 | static int process_sched_switch_event(struct perf_tool *tool, |
1483 | process_sched_switch_event(struct perf_tool *tool __used, | 1345 | struct perf_evsel *evsel, |
1484 | struct event_format *event, | 1346 | struct perf_sample *sample, |
1485 | struct perf_sample *sample, | 1347 | struct machine *machine) |
1486 | struct machine *machine, | ||
1487 | struct thread *thread) | ||
1488 | { | 1348 | { |
1489 | int this_cpu = sample->cpu; | 1349 | struct perf_sched *sched = container_of(tool, struct perf_sched, tool); |
1490 | void *data = sample->raw_data; | 1350 | int this_cpu = sample->cpu, err = 0; |
1491 | struct trace_switch_event switch_event; | 1351 | u32 prev_pid = perf_evsel__intval(evsel, sample, "prev_pid"), |
1492 | 1352 | next_pid = perf_evsel__intval(evsel, sample, "next_pid"); | |
1493 | FILL_COMMON_FIELDS(switch_event, event, data); | ||
1494 | |||
1495 | FILL_ARRAY(switch_event, prev_comm, event, data); | ||
1496 | FILL_FIELD(switch_event, prev_pid, event, data); | ||
1497 | FILL_FIELD(switch_event, prev_prio, event, data); | ||
1498 | FILL_FIELD(switch_event, prev_state, event, data); | ||
1499 | FILL_ARRAY(switch_event, next_comm, event, data); | ||
1500 | FILL_FIELD(switch_event, next_pid, event, data); | ||
1501 | FILL_FIELD(switch_event, next_prio, event, data); | ||
1502 | 1353 | ||
1503 | if (curr_pid[this_cpu] != (u32)-1) { | 1354 | if (sched->curr_pid[this_cpu] != (u32)-1) { |
1504 | /* | 1355 | /* |
1505 | * Are we trying to switch away a PID that is | 1356 | * Are we trying to switch away a PID that is |
1506 | * not current? | 1357 | * not current? |
1507 | */ | 1358 | */ |
1508 | if (curr_pid[this_cpu] != switch_event.prev_pid) | 1359 | if (sched->curr_pid[this_cpu] != prev_pid) |
1509 | nr_context_switch_bugs++; | 1360 | sched->nr_context_switch_bugs++; |
1510 | } | 1361 | } |
1511 | if (trace_handler->switch_event) | ||
1512 | trace_handler->switch_event(&switch_event, machine, event, | ||
1513 | this_cpu, sample->time, thread); | ||
1514 | 1362 | ||
1515 | curr_pid[this_cpu] = switch_event.next_pid; | 1363 | if (sched->tp_handler->switch_event) |
1364 | err = sched->tp_handler->switch_event(sched, evsel, sample, machine); | ||
1365 | |||
1366 | sched->curr_pid[this_cpu] = next_pid; | ||
1367 | return err; | ||
1516 | } | 1368 | } |
1517 | 1369 | ||
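The hunk above keeps a per-CPU record of which pid was last switched in, and flags any later sched_switch whose prev_pid disagrees with that record by bumping nr_context_switch_bugs. A stripped-down sketch of that bookkeeping, with stand-alone names and u32/MAX_CPUS redeclared locally purely for illustration:

typedef unsigned int u32;

#define MAX_CPUS	4096	/* illustrative; the real bound comes from perf headers */

static u32 last_pid[MAX_CPUS] = { [0 ... MAX_CPUS - 1] = (u32)-1 };

/* Returns 1 when the reported prev_pid contradicts the recorded state. */
static int note_switch(int cpu, u32 prev_pid, u32 next_pid)
{
	int bug = last_pid[cpu] != (u32)-1 && last_pid[cpu] != prev_pid;

	last_pid[cpu] = next_pid;	/* next_pid is current on this CPU from now on */
	return bug;
}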
1518 | static void | 1370 | static int process_sched_runtime_event(struct perf_tool *tool, |
1519 | process_sched_runtime_event(struct perf_tool *tool __used, | 1371 | struct perf_evsel *evsel, |
1520 | struct event_format *event, | 1372 | struct perf_sample *sample, |
1521 | struct perf_sample *sample, | 1373 | struct machine *machine) |
1522 | struct machine *machine, | ||
1523 | struct thread *thread) | ||
1524 | { | 1374 | { |
1525 | void *data = sample->raw_data; | 1375 | struct perf_sched *sched = container_of(tool, struct perf_sched, tool); |
1526 | struct trace_runtime_event runtime_event; | ||
1527 | 1376 | ||
1528 | FILL_ARRAY(runtime_event, comm, event, data); | 1377 | if (sched->tp_handler->runtime_event) |
1529 | FILL_FIELD(runtime_event, pid, event, data); | 1378 | return sched->tp_handler->runtime_event(sched, evsel, sample, machine); |
1530 | FILL_FIELD(runtime_event, runtime, event, data); | ||
1531 | FILL_FIELD(runtime_event, vruntime, event, data); | ||
1532 | 1379 | ||
1533 | if (trace_handler->runtime_event) | 1380 | return 0; |
1534 | trace_handler->runtime_event(&runtime_event, machine, event, | ||
1535 | sample->cpu, sample->time, thread); | ||
1536 | } | 1381 | } |
1537 | 1382 | ||
1538 | static void | 1383 | static int process_sched_fork_event(struct perf_tool *tool, |
1539 | process_sched_fork_event(struct perf_tool *tool __used, | 1384 | struct perf_evsel *evsel, |
1540 | struct event_format *event, | 1385 | struct perf_sample *sample, |
1541 | struct perf_sample *sample, | 1386 | struct machine *machine __maybe_unused) |
1542 | struct machine *machine __used, | ||
1543 | struct thread *thread) | ||
1544 | { | 1387 | { |
1545 | void *data = sample->raw_data; | 1388 | struct perf_sched *sched = container_of(tool, struct perf_sched, tool); |
1546 | struct trace_fork_event fork_event; | ||
1547 | |||
1548 | FILL_COMMON_FIELDS(fork_event, event, data); | ||
1549 | 1389 | ||
1550 | FILL_ARRAY(fork_event, parent_comm, event, data); | 1390 | if (sched->tp_handler->fork_event) |
1551 | FILL_FIELD(fork_event, parent_pid, event, data); | 1391 | return sched->tp_handler->fork_event(sched, evsel, sample); |
1552 | FILL_ARRAY(fork_event, child_comm, event, data); | ||
1553 | FILL_FIELD(fork_event, child_pid, event, data); | ||
1554 | 1392 | ||
1555 | if (trace_handler->fork_event) | 1393 | return 0; |
1556 | trace_handler->fork_event(&fork_event, event, | ||
1557 | sample->cpu, sample->time, thread); | ||
1558 | } | 1394 | } |
1559 | 1395 | ||
1560 | static void | 1396 | static int process_sched_exit_event(struct perf_tool *tool __maybe_unused, |
1561 | process_sched_exit_event(struct perf_tool *tool __used, | 1397 | struct perf_evsel *evsel, |
1562 | struct event_format *event, | 1398 | struct perf_sample *sample __maybe_unused, |
1563 | struct perf_sample *sample __used, | 1399 | struct machine *machine __maybe_unused) |
1564 | struct machine *machine __used, | ||
1565 | struct thread *thread __used) | ||
1566 | { | 1400 | { |
1567 | if (verbose) | 1401 | pr_debug("sched_exit event %p\n", evsel); |
1568 | printf("sched_exit event %p\n", event); | 1402 | return 0; |
1569 | } | 1403 | } |
1570 | 1404 | ||
1571 | static void | 1405 | static int process_sched_migrate_task_event(struct perf_tool *tool, |
1572 | process_sched_migrate_task_event(struct perf_tool *tool __used, | 1406 | struct perf_evsel *evsel, |
1573 | struct event_format *event, | 1407 | struct perf_sample *sample, |
1574 | struct perf_sample *sample, | 1408 | struct machine *machine) |
1575 | struct machine *machine, | ||
1576 | struct thread *thread) | ||
1577 | { | 1409 | { |
1578 | void *data = sample->raw_data; | 1410 | struct perf_sched *sched = container_of(tool, struct perf_sched, tool); |
1579 | struct trace_migrate_task_event migrate_task_event; | ||
1580 | |||
1581 | FILL_COMMON_FIELDS(migrate_task_event, event, data); | ||
1582 | 1411 | ||
1583 | FILL_ARRAY(migrate_task_event, comm, event, data); | 1412 | if (sched->tp_handler->migrate_task_event) |
1584 | FILL_FIELD(migrate_task_event, pid, event, data); | 1413 | return sched->tp_handler->migrate_task_event(sched, evsel, sample, machine); |
1585 | FILL_FIELD(migrate_task_event, prio, event, data); | ||
1586 | FILL_FIELD(migrate_task_event, cpu, event, data); | ||
1587 | 1414 | ||
1588 | if (trace_handler->migrate_task_event) | 1415 | return 0; |
1589 | trace_handler->migrate_task_event(&migrate_task_event, machine, | ||
1590 | event, sample->cpu, | ||
1591 | sample->time, thread); | ||
1592 | } | 1416 | } |
1593 | 1417 | ||
1594 | typedef void (*tracepoint_handler)(struct perf_tool *tool, struct event_format *event, | 1418 | typedef int (*tracepoint_handler)(struct perf_tool *tool, |
1595 | struct perf_sample *sample, | 1419 | struct perf_evsel *evsel, |
1596 | struct machine *machine, | 1420 | struct perf_sample *sample, |
1597 | struct thread *thread); | 1421 | struct machine *machine); |
1598 | 1422 | ||
1599 | static int perf_sched__process_tracepoint_sample(struct perf_tool *tool, | 1423 | static int perf_sched__process_tracepoint_sample(struct perf_tool *tool __maybe_unused, |
1600 | union perf_event *event __used, | 1424 | union perf_event *event __maybe_unused, |
1601 | struct perf_sample *sample, | 1425 | struct perf_sample *sample, |
1602 | struct perf_evsel *evsel, | 1426 | struct perf_evsel *evsel, |
1603 | struct machine *machine) | 1427 | struct machine *machine) |
1604 | { | 1428 | { |
1605 | struct perf_sched *sched = container_of(tool, struct perf_sched, tool); | ||
1606 | struct pevent *pevent = sched->session->pevent; | ||
1607 | struct thread *thread = machine__findnew_thread(machine, sample->pid); | 1429 | struct thread *thread = machine__findnew_thread(machine, sample->pid); |
1430 | int err = 0; | ||
1608 | 1431 | ||
1609 | if (thread == NULL) { | 1432 | if (thread == NULL) { |
1610 | pr_debug("problem processing %s event, skipping it.\n", | 1433 | pr_debug("problem processing %s event, skipping it.\n", |
@@ -1617,30 +1440,15 @@ static int perf_sched__process_tracepoint_sample(struct perf_tool *tool, | |||
1617 | 1440 | ||
1618 | if (evsel->handler.func != NULL) { | 1441 | if (evsel->handler.func != NULL) { |
1619 | tracepoint_handler f = evsel->handler.func; | 1442 | tracepoint_handler f = evsel->handler.func; |
1620 | 1443 | err = f(tool, evsel, sample, machine); | |
1621 | if (evsel->handler.data == NULL) | ||
1622 | evsel->handler.data = pevent_find_event(pevent, | ||
1623 | evsel->attr.config); | ||
1624 | |||
1625 | f(tool, evsel->handler.data, sample, machine, thread); | ||
1626 | } | 1444 | } |
1627 | 1445 | ||
1628 | return 0; | 1446 | return err; |
1629 | } | 1447 | } |
1630 | 1448 | ||
1631 | static struct perf_sched perf_sched = { | 1449 | static int perf_sched__read_events(struct perf_sched *sched, bool destroy, |
1632 | .tool = { | 1450 | struct perf_session **psession) |
1633 | .sample = perf_sched__process_tracepoint_sample, | ||
1634 | .comm = perf_event__process_comm, | ||
1635 | .lost = perf_event__process_lost, | ||
1636 | .fork = perf_event__process_task, | ||
1637 | .ordered_samples = true, | ||
1638 | }, | ||
1639 | }; | ||
1640 | |||
1641 | static void read_events(bool destroy, struct perf_session **psession) | ||
1642 | { | 1451 | { |
1643 | int err = -EINVAL; | ||
1644 | const struct perf_evsel_str_handler handlers[] = { | 1452 | const struct perf_evsel_str_handler handlers[] = { |
1645 | { "sched:sched_switch", process_sched_switch_event, }, | 1453 | { "sched:sched_switch", process_sched_switch_event, }, |
1646 | { "sched:sched_stat_runtime", process_sched_runtime_event, }, | 1454 | { "sched:sched_stat_runtime", process_sched_runtime_event, }, |
@@ -1652,24 +1460,25 @@ static void read_events(bool destroy, struct perf_session **psession) | |||
1652 | }; | 1460 | }; |
1653 | struct perf_session *session; | 1461 | struct perf_session *session; |
1654 | 1462 | ||
1655 | session = perf_session__new(input_name, O_RDONLY, 0, false, | 1463 | session = perf_session__new(sched->input_name, O_RDONLY, 0, false, &sched->tool); |
1656 | &perf_sched.tool); | 1464 | if (session == NULL) { |
1657 | if (session == NULL) | 1465 | pr_debug("No Memory for session\n"); |
1658 | die("No Memory"); | 1466 | return -1; |
1659 | 1467 | } | |
1660 | perf_sched.session = session; | ||
1661 | 1468 | ||
1662 | err = perf_session__set_tracepoints_handlers(session, handlers); | 1469 | if (perf_session__set_tracepoints_handlers(session, handlers)) |
1663 | assert(err == 0); | 1470 | goto out_delete; |
1664 | 1471 | ||
1665 | if (perf_session__has_traces(session, "record -R")) { | 1472 | if (perf_session__has_traces(session, "record -R")) { |
1666 | err = perf_session__process_events(session, &perf_sched.tool); | 1473 | int err = perf_session__process_events(session, &sched->tool); |
1667 | if (err) | 1474 | if (err) { |
1668 | die("Failed to process events, error %d", err); | 1475 | pr_err("Failed to process events, error %d", err); |
1476 | goto out_delete; | ||
1477 | } | ||
1669 | 1478 | ||
1670 | nr_events = session->hists.stats.nr_events[0]; | 1479 | sched->nr_events = session->hists.stats.nr_events[0]; |
1671 | nr_lost_events = session->hists.stats.total_lost; | 1480 | sched->nr_lost_events = session->hists.stats.total_lost; |
1672 | nr_lost_chunks = session->hists.stats.nr_events[PERF_RECORD_LOST]; | 1481 | sched->nr_lost_chunks = session->hists.stats.nr_events[PERF_RECORD_LOST]; |
1673 | } | 1482 | } |
1674 | 1483 | ||
1675 | if (destroy) | 1484 | if (destroy) |
@@ -1677,208 +1486,166 @@ static void read_events(bool destroy, struct perf_session **psession) | |||
1677 | 1486 | ||
1678 | if (psession) | 1487 | if (psession) |
1679 | *psession = session; | 1488 | *psession = session; |
1489 | |||
1490 | return 0; | ||
1491 | |||
1492 | out_delete: | ||
1493 | perf_session__delete(session); | ||
1494 | return -1; | ||
1680 | } | 1495 | } |
1681 | 1496 | ||
1682 | static void print_bad_events(void) | 1497 | static void print_bad_events(struct perf_sched *sched) |
1683 | { | 1498 | { |
1684 | if (nr_unordered_timestamps && nr_timestamps) { | 1499 | if (sched->nr_unordered_timestamps && sched->nr_timestamps) { |
1685 | printf(" INFO: %.3f%% unordered timestamps (%ld out of %ld)\n", | 1500 | printf(" INFO: %.3f%% unordered timestamps (%ld out of %ld)\n", |
1686 | (double)nr_unordered_timestamps/(double)nr_timestamps*100.0, | 1501 | (double)sched->nr_unordered_timestamps/(double)sched->nr_timestamps*100.0, |
1687 | nr_unordered_timestamps, nr_timestamps); | 1502 | sched->nr_unordered_timestamps, sched->nr_timestamps); |
1688 | } | 1503 | } |
1689 | if (nr_lost_events && nr_events) { | 1504 | if (sched->nr_lost_events && sched->nr_events) { |
1690 | printf(" INFO: %.3f%% lost events (%ld out of %ld, in %ld chunks)\n", | 1505 | printf(" INFO: %.3f%% lost events (%ld out of %ld, in %ld chunks)\n", |
1691 | (double)nr_lost_events/(double)nr_events*100.0, | 1506 | (double)sched->nr_lost_events/(double)sched->nr_events * 100.0, |
1692 | nr_lost_events, nr_events, nr_lost_chunks); | 1507 | sched->nr_lost_events, sched->nr_events, sched->nr_lost_chunks); |
1693 | } | 1508 | } |
1694 | if (nr_state_machine_bugs && nr_timestamps) { | 1509 | if (sched->nr_state_machine_bugs && sched->nr_timestamps) { |
1695 | printf(" INFO: %.3f%% state machine bugs (%ld out of %ld)", | 1510 | printf(" INFO: %.3f%% state machine bugs (%ld out of %ld)", |
1696 | (double)nr_state_machine_bugs/(double)nr_timestamps*100.0, | 1511 | (double)sched->nr_state_machine_bugs/(double)sched->nr_timestamps*100.0, |
1697 | nr_state_machine_bugs, nr_timestamps); | 1512 | sched->nr_state_machine_bugs, sched->nr_timestamps); |
1698 | if (nr_lost_events) | 1513 | if (sched->nr_lost_events) |
1699 | printf(" (due to lost events?)"); | 1514 | printf(" (due to lost events?)"); |
1700 | printf("\n"); | 1515 | printf("\n"); |
1701 | } | 1516 | } |
1702 | if (nr_context_switch_bugs && nr_timestamps) { | 1517 | if (sched->nr_context_switch_bugs && sched->nr_timestamps) { |
1703 | printf(" INFO: %.3f%% context switch bugs (%ld out of %ld)", | 1518 | printf(" INFO: %.3f%% context switch bugs (%ld out of %ld)", |
1704 | (double)nr_context_switch_bugs/(double)nr_timestamps*100.0, | 1519 | (double)sched->nr_context_switch_bugs/(double)sched->nr_timestamps*100.0, |
1705 | nr_context_switch_bugs, nr_timestamps); | 1520 | sched->nr_context_switch_bugs, sched->nr_timestamps); |
1706 | if (nr_lost_events) | 1521 | if (sched->nr_lost_events) |
1707 | printf(" (due to lost events?)"); | 1522 | printf(" (due to lost events?)"); |
1708 | printf("\n"); | 1523 | printf("\n"); |
1709 | } | 1524 | } |
1710 | } | 1525 | } |
1711 | 1526 | ||
1712 | static void __cmd_lat(void) | 1527 | static int perf_sched__lat(struct perf_sched *sched) |
1713 | { | 1528 | { |
1714 | struct rb_node *next; | 1529 | struct rb_node *next; |
1715 | struct perf_session *session; | 1530 | struct perf_session *session; |
1716 | 1531 | ||
1717 | setup_pager(); | 1532 | setup_pager(); |
1718 | read_events(false, &session); | 1533 | if (perf_sched__read_events(sched, false, &session)) |
1719 | sort_lat(); | 1534 | return -1; |
1535 | perf_sched__sort_lat(sched); | ||
1720 | 1536 | ||
1721 | printf("\n ---------------------------------------------------------------------------------------------------------------\n"); | 1537 | printf("\n ---------------------------------------------------------------------------------------------------------------\n"); |
1722 | printf(" Task | Runtime ms | Switches | Average delay ms | Maximum delay ms | Maximum delay at |\n"); | 1538 | printf(" Task | Runtime ms | Switches | Average delay ms | Maximum delay ms | Maximum delay at |\n"); |
1723 | printf(" ---------------------------------------------------------------------------------------------------------------\n"); | 1539 | printf(" ---------------------------------------------------------------------------------------------------------------\n"); |
1724 | 1540 | ||
1725 | next = rb_first(&sorted_atom_root); | 1541 | next = rb_first(&sched->sorted_atom_root); |
1726 | 1542 | ||
1727 | while (next) { | 1543 | while (next) { |
1728 | struct work_atoms *work_list; | 1544 | struct work_atoms *work_list; |
1729 | 1545 | ||
1730 | work_list = rb_entry(next, struct work_atoms, node); | 1546 | work_list = rb_entry(next, struct work_atoms, node); |
1731 | output_lat_thread(work_list); | 1547 | output_lat_thread(sched, work_list); |
1732 | next = rb_next(next); | 1548 | next = rb_next(next); |
1733 | } | 1549 | } |
1734 | 1550 | ||
1735 | printf(" -----------------------------------------------------------------------------------------\n"); | 1551 | printf(" -----------------------------------------------------------------------------------------\n"); |
1736 | printf(" TOTAL: |%11.3f ms |%9" PRIu64 " |\n", | 1552 | printf(" TOTAL: |%11.3f ms |%9" PRIu64 " |\n", |
1737 | (double)all_runtime/1e6, all_count); | 1553 | (double)sched->all_runtime / 1e6, sched->all_count); |
1738 | 1554 | ||
1739 | printf(" ---------------------------------------------------\n"); | 1555 | printf(" ---------------------------------------------------\n"); |
1740 | 1556 | ||
1741 | print_bad_events(); | 1557 | print_bad_events(sched); |
1742 | printf("\n"); | 1558 | printf("\n"); |
1743 | 1559 | ||
1744 | perf_session__delete(session); | 1560 | perf_session__delete(session); |
1561 | return 0; | ||
1745 | } | 1562 | } |
1746 | 1563 | ||
1747 | static struct trace_sched_handler map_ops = { | 1564 | static int perf_sched__map(struct perf_sched *sched) |
1748 | .wakeup_event = NULL, | ||
1749 | .switch_event = map_switch_event, | ||
1750 | .runtime_event = NULL, | ||
1751 | .fork_event = NULL, | ||
1752 | }; | ||
1753 | |||
1754 | static void __cmd_map(void) | ||
1755 | { | 1565 | { |
1756 | max_cpu = sysconf(_SC_NPROCESSORS_CONF); | 1566 | sched->max_cpu = sysconf(_SC_NPROCESSORS_CONF); |
1757 | 1567 | ||
1758 | setup_pager(); | 1568 | setup_pager(); |
1759 | read_events(true, NULL); | 1569 | if (perf_sched__read_events(sched, true, NULL)) |
1760 | print_bad_events(); | 1570 | return -1; |
1571 | print_bad_events(sched); | ||
1572 | return 0; | ||
1761 | } | 1573 | } |
1762 | 1574 | ||
1763 | static void __cmd_replay(void) | 1575 | static int perf_sched__replay(struct perf_sched *sched) |
1764 | { | 1576 | { |
1765 | unsigned long i; | 1577 | unsigned long i; |
1766 | 1578 | ||
1767 | calibrate_run_measurement_overhead(); | 1579 | calibrate_run_measurement_overhead(sched); |
1768 | calibrate_sleep_measurement_overhead(); | 1580 | calibrate_sleep_measurement_overhead(sched); |
1769 | 1581 | ||
1770 | test_calibrations(); | 1582 | test_calibrations(sched); |
1771 | 1583 | ||
1772 | read_events(true, NULL); | 1584 | if (perf_sched__read_events(sched, true, NULL)) |
1585 | return -1; | ||
1773 | 1586 | ||
1774 | printf("nr_run_events: %ld\n", nr_run_events); | 1587 | printf("nr_run_events: %ld\n", sched->nr_run_events); |
1775 | printf("nr_sleep_events: %ld\n", nr_sleep_events); | 1588 | printf("nr_sleep_events: %ld\n", sched->nr_sleep_events); |
1776 | printf("nr_wakeup_events: %ld\n", nr_wakeup_events); | 1589 | printf("nr_wakeup_events: %ld\n", sched->nr_wakeup_events); |
1777 | 1590 | ||
1778 | if (targetless_wakeups) | 1591 | if (sched->targetless_wakeups) |
1779 | printf("target-less wakeups: %ld\n", targetless_wakeups); | 1592 | printf("target-less wakeups: %ld\n", sched->targetless_wakeups); |
1780 | if (multitarget_wakeups) | 1593 | if (sched->multitarget_wakeups) |
1781 | printf("multi-target wakeups: %ld\n", multitarget_wakeups); | 1594 | printf("multi-target wakeups: %ld\n", sched->multitarget_wakeups); |
1782 | if (nr_run_events_optimized) | 1595 | if (sched->nr_run_events_optimized) |
1783 | printf("run atoms optimized: %ld\n", | 1596 | printf("run atoms optimized: %ld\n", |
1784 | nr_run_events_optimized); | 1597 | sched->nr_run_events_optimized); |
1785 | 1598 | ||
1786 | print_task_traces(); | 1599 | print_task_traces(sched); |
1787 | add_cross_task_wakeups(); | 1600 | add_cross_task_wakeups(sched); |
1788 | 1601 | ||
1789 | create_tasks(); | 1602 | create_tasks(sched); |
1790 | printf("------------------------------------------------------------\n"); | 1603 | printf("------------------------------------------------------------\n"); |
1791 | for (i = 0; i < replay_repeat; i++) | 1604 | for (i = 0; i < sched->replay_repeat; i++) |
1792 | run_one_test(); | 1605 | run_one_test(sched); |
1793 | } | ||
1794 | |||
1795 | |||
1796 | static const char * const sched_usage[] = { | ||
1797 | "perf sched [<options>] {record|latency|map|replay|script}", | ||
1798 | NULL | ||
1799 | }; | ||
1800 | |||
1801 | static const struct option sched_options[] = { | ||
1802 | OPT_STRING('i', "input", &input_name, "file", | ||
1803 | "input file name"), | ||
1804 | OPT_INCR('v', "verbose", &verbose, | ||
1805 | "be more verbose (show symbol address, etc)"), | ||
1806 | OPT_BOOLEAN('D', "dump-raw-trace", &dump_trace, | ||
1807 | "dump raw trace in ASCII"), | ||
1808 | OPT_END() | ||
1809 | }; | ||
1810 | |||
1811 | static const char * const latency_usage[] = { | ||
1812 | "perf sched latency [<options>]", | ||
1813 | NULL | ||
1814 | }; | ||
1815 | |||
1816 | static const struct option latency_options[] = { | ||
1817 | OPT_STRING('s', "sort", &sort_order, "key[,key2...]", | ||
1818 | "sort by key(s): runtime, switch, avg, max"), | ||
1819 | OPT_INCR('v', "verbose", &verbose, | ||
1820 | "be more verbose (show symbol address, etc)"), | ||
1821 | OPT_INTEGER('C', "CPU", &profile_cpu, | ||
1822 | "CPU to profile on"), | ||
1823 | OPT_BOOLEAN('D', "dump-raw-trace", &dump_trace, | ||
1824 | "dump raw trace in ASCII"), | ||
1825 | OPT_END() | ||
1826 | }; | ||
1827 | |||
1828 | static const char * const replay_usage[] = { | ||
1829 | "perf sched replay [<options>]", | ||
1830 | NULL | ||
1831 | }; | ||
1832 | 1606 | ||
1833 | static const struct option replay_options[] = { | 1607 | return 0; |
1834 | OPT_UINTEGER('r', "repeat", &replay_repeat, | 1608 | } |
1835 | "repeat the workload replay N times (-1: infinite)"), | ||
1836 | OPT_INCR('v', "verbose", &verbose, | ||
1837 | "be more verbose (show symbol address, etc)"), | ||
1838 | OPT_BOOLEAN('D', "dump-raw-trace", &dump_trace, | ||
1839 | "dump raw trace in ASCII"), | ||
1840 | OPT_END() | ||
1841 | }; | ||
1842 | 1609 | ||
1843 | static void setup_sorting(void) | 1610 | static void setup_sorting(struct perf_sched *sched, const struct option *options, |
1611 | const char * const usage_msg[]) | ||
1844 | { | 1612 | { |
1845 | char *tmp, *tok, *str = strdup(sort_order); | 1613 | char *tmp, *tok, *str = strdup(sched->sort_order); |
1846 | 1614 | ||
1847 | for (tok = strtok_r(str, ", ", &tmp); | 1615 | for (tok = strtok_r(str, ", ", &tmp); |
1848 | tok; tok = strtok_r(NULL, ", ", &tmp)) { | 1616 | tok; tok = strtok_r(NULL, ", ", &tmp)) { |
1849 | if (sort_dimension__add(tok, &sort_list) < 0) { | 1617 | if (sort_dimension__add(tok, &sched->sort_list) < 0) { |
1850 | error("Unknown --sort key: `%s'", tok); | 1618 | error("Unknown --sort key: `%s'", tok); |
1851 | usage_with_options(latency_usage, latency_options); | 1619 | usage_with_options(usage_msg, options); |
1852 | } | 1620 | } |
1853 | } | 1621 | } |
1854 | 1622 | ||
1855 | free(str); | 1623 | free(str); |
1856 | 1624 | ||
1857 | sort_dimension__add("pid", &cmp_pid); | 1625 | sort_dimension__add("pid", &sched->cmp_pid); |
1858 | } | 1626 | } |
1859 | 1627 | ||
1860 | static const char *record_args[] = { | ||
1861 | "record", | ||
1862 | "-a", | ||
1863 | "-R", | ||
1864 | "-f", | ||
1865 | "-m", "1024", | ||
1866 | "-c", "1", | ||
1867 | "-e", "sched:sched_switch", | ||
1868 | "-e", "sched:sched_stat_wait", | ||
1869 | "-e", "sched:sched_stat_sleep", | ||
1870 | "-e", "sched:sched_stat_iowait", | ||
1871 | "-e", "sched:sched_stat_runtime", | ||
1872 | "-e", "sched:sched_process_exit", | ||
1873 | "-e", "sched:sched_process_fork", | ||
1874 | "-e", "sched:sched_wakeup", | ||
1875 | "-e", "sched:sched_migrate_task", | ||
1876 | }; | ||
1877 | |||
1878 | static int __cmd_record(int argc, const char **argv) | 1628 | static int __cmd_record(int argc, const char **argv) |
1879 | { | 1629 | { |
1880 | unsigned int rec_argc, i, j; | 1630 | unsigned int rec_argc, i, j; |
1881 | const char **rec_argv; | 1631 | const char **rec_argv; |
1632 | const char * const record_args[] = { | ||
1633 | "record", | ||
1634 | "-a", | ||
1635 | "-R", | ||
1636 | "-f", | ||
1637 | "-m", "1024", | ||
1638 | "-c", "1", | ||
1639 | "-e", "sched:sched_switch", | ||
1640 | "-e", "sched:sched_stat_wait", | ||
1641 | "-e", "sched:sched_stat_sleep", | ||
1642 | "-e", "sched:sched_stat_iowait", | ||
1643 | "-e", "sched:sched_stat_runtime", | ||
1644 | "-e", "sched:sched_process_exit", | ||
1645 | "-e", "sched:sched_process_fork", | ||
1646 | "-e", "sched:sched_wakeup", | ||
1647 | "-e", "sched:sched_migrate_task", | ||
1648 | }; | ||
1882 | 1649 | ||
1883 | rec_argc = ARRAY_SIZE(record_args) + argc - 1; | 1650 | rec_argc = ARRAY_SIZE(record_args) + argc - 1; |
1884 | rec_argv = calloc(rec_argc + 1, sizeof(char *)); | 1651 | rec_argv = calloc(rec_argc + 1, sizeof(char *)); |
@@ -1897,8 +1664,85 @@ static int __cmd_record(int argc, const char **argv) | |||
1897 | return cmd_record(i, rec_argv, NULL); | 1664 | return cmd_record(i, rec_argv, NULL); |
1898 | } | 1665 | } |
1899 | 1666 | ||
1900 | int cmd_sched(int argc, const char **argv, const char *prefix __used) | 1667 | int cmd_sched(int argc, const char **argv, const char *prefix __maybe_unused) |
1901 | { | 1668 | { |
1669 | const char default_sort_order[] = "avg, max, switch, runtime"; | ||
1670 | struct perf_sched sched = { | ||
1671 | .tool = { | ||
1672 | .sample = perf_sched__process_tracepoint_sample, | ||
1673 | .comm = perf_event__process_comm, | ||
1674 | .lost = perf_event__process_lost, | ||
1675 | .fork = perf_event__process_task, | ||
1676 | .ordered_samples = true, | ||
1677 | }, | ||
1678 | .cmp_pid = LIST_HEAD_INIT(sched.cmp_pid), | ||
1679 | .sort_list = LIST_HEAD_INIT(sched.sort_list), | ||
1680 | .start_work_mutex = PTHREAD_MUTEX_INITIALIZER, | ||
1681 | .work_done_wait_mutex = PTHREAD_MUTEX_INITIALIZER, | ||
1682 | .curr_pid = { [0 ... MAX_CPUS - 1] = -1 }, | ||
1683 | .sort_order = default_sort_order, | ||
1684 | .replay_repeat = 10, | ||
1685 | .profile_cpu = -1, | ||
1686 | .next_shortname1 = 'A', | ||
1687 | .next_shortname2 = '0', | ||
1688 | }; | ||
1689 | const struct option latency_options[] = { | ||
1690 | OPT_STRING('s', "sort", &sched.sort_order, "key[,key2...]", | ||
1691 | "sort by key(s): runtime, switch, avg, max"), | ||
1692 | OPT_INCR('v', "verbose", &verbose, | ||
1693 | "be more verbose (show symbol address, etc)"), | ||
1694 | OPT_INTEGER('C', "CPU", &sched.profile_cpu, | ||
1695 | "CPU to profile on"), | ||
1696 | OPT_BOOLEAN('D', "dump-raw-trace", &dump_trace, | ||
1697 | "dump raw trace in ASCII"), | ||
1698 | OPT_END() | ||
1699 | }; | ||
1700 | const struct option replay_options[] = { | ||
1701 | OPT_UINTEGER('r', "repeat", &sched.replay_repeat, | ||
1702 | "repeat the workload replay N times (-1: infinite)"), | ||
1703 | OPT_INCR('v', "verbose", &verbose, | ||
1704 | "be more verbose (show symbol address, etc)"), | ||
1705 | OPT_BOOLEAN('D', "dump-raw-trace", &dump_trace, | ||
1706 | "dump raw trace in ASCII"), | ||
1707 | OPT_END() | ||
1708 | }; | ||
1709 | const struct option sched_options[] = { | ||
1710 | OPT_STRING('i', "input", &sched.input_name, "file", | ||
1711 | "input file name"), | ||
1712 | OPT_INCR('v', "verbose", &verbose, | ||
1713 | "be more verbose (show symbol address, etc)"), | ||
1714 | OPT_BOOLEAN('D', "dump-raw-trace", &dump_trace, | ||
1715 | "dump raw trace in ASCII"), | ||
1716 | OPT_END() | ||
1717 | }; | ||
1718 | const char * const latency_usage[] = { | ||
1719 | "perf sched latency [<options>]", | ||
1720 | NULL | ||
1721 | }; | ||
1722 | const char * const replay_usage[] = { | ||
1723 | "perf sched replay [<options>]", | ||
1724 | NULL | ||
1725 | }; | ||
1726 | const char * const sched_usage[] = { | ||
1727 | "perf sched [<options>] {record|latency|map|replay|script}", | ||
1728 | NULL | ||
1729 | }; | ||
1730 | struct trace_sched_handler lat_ops = { | ||
1731 | .wakeup_event = latency_wakeup_event, | ||
1732 | .switch_event = latency_switch_event, | ||
1733 | .runtime_event = latency_runtime_event, | ||
1734 | .fork_event = latency_fork_event, | ||
1735 | .migrate_task_event = latency_migrate_task_event, | ||
1736 | }; | ||
1737 | struct trace_sched_handler map_ops = { | ||
1738 | .switch_event = map_switch_event, | ||
1739 | }; | ||
1740 | struct trace_sched_handler replay_ops = { | ||
1741 | .wakeup_event = replay_wakeup_event, | ||
1742 | .switch_event = replay_switch_event, | ||
1743 | .fork_event = replay_fork_event, | ||
1744 | }; | ||
1745 | |||
1902 | argc = parse_options(argc, argv, sched_options, sched_usage, | 1746 | argc = parse_options(argc, argv, sched_options, sched_usage, |
1903 | PARSE_OPT_STOP_AT_NON_OPTION); | 1747 | PARSE_OPT_STOP_AT_NON_OPTION); |
1904 | if (!argc) | 1748 | if (!argc) |
@@ -1914,26 +1758,26 @@ int cmd_sched(int argc, const char **argv, const char *prefix __used) | |||
1914 | if (!strncmp(argv[0], "rec", 3)) { | 1758 | if (!strncmp(argv[0], "rec", 3)) { |
1915 | return __cmd_record(argc, argv); | 1759 | return __cmd_record(argc, argv); |
1916 | } else if (!strncmp(argv[0], "lat", 3)) { | 1760 | } else if (!strncmp(argv[0], "lat", 3)) { |
1917 | trace_handler = &lat_ops; | 1761 | sched.tp_handler = &lat_ops; |
1918 | if (argc > 1) { | 1762 | if (argc > 1) { |
1919 | argc = parse_options(argc, argv, latency_options, latency_usage, 0); | 1763 | argc = parse_options(argc, argv, latency_options, latency_usage, 0); |
1920 | if (argc) | 1764 | if (argc) |
1921 | usage_with_options(latency_usage, latency_options); | 1765 | usage_with_options(latency_usage, latency_options); |
1922 | } | 1766 | } |
1923 | setup_sorting(); | 1767 | setup_sorting(&sched, latency_options, latency_usage); |
1924 | __cmd_lat(); | 1768 | return perf_sched__lat(&sched); |
1925 | } else if (!strcmp(argv[0], "map")) { | 1769 | } else if (!strcmp(argv[0], "map")) { |
1926 | trace_handler = &map_ops; | 1770 | sched.tp_handler = &map_ops; |
1927 | setup_sorting(); | 1771 | setup_sorting(&sched, latency_options, latency_usage); |
1928 | __cmd_map(); | 1772 | return perf_sched__map(&sched); |
1929 | } else if (!strncmp(argv[0], "rep", 3)) { | 1773 | } else if (!strncmp(argv[0], "rep", 3)) { |
1930 | trace_handler = &replay_ops; | 1774 | sched.tp_handler = &replay_ops; |
1931 | if (argc) { | 1775 | if (argc) { |
1932 | argc = parse_options(argc, argv, replay_options, replay_usage, 0); | 1776 | argc = parse_options(argc, argv, replay_options, replay_usage, 0); |
1933 | if (argc) | 1777 | if (argc) |
1934 | usage_with_options(replay_usage, replay_options); | 1778 | usage_with_options(replay_usage, replay_options); |
1935 | } | 1779 | } |
1936 | __cmd_replay(); | 1780 | return perf_sched__replay(&sched); |
1937 | } else { | 1781 | } else { |
1938 | usage_with_options(sched_usage, sched_options); | 1782 | usage_with_options(sched_usage, sched_options); |
1939 | } | 1783 | } |
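A pattern worth calling out across the builtin-sched.c hunks: the former file-scope globals now live in a struct perf_sched that embeds the struct perf_tool handed to every callback, and each handler recovers its state with container_of(tool, struct perf_sched, tool). A minimal, self-contained sketch of that embed-and-recover idiom, using stub struct names rather than the real perf definitions:

#include <stddef.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct tool_stub { int dummy; };	/* stand-in for struct perf_tool */

struct sched_stub {			/* stand-in for struct perf_sched */
	struct tool_stub tool;		/* embedded, not pointed to */
	int max_cpu;
};

/* A callback only sees the embedded tool pointer ... */
static int callback(struct tool_stub *tool)
{
	/* ... and climbs back to the enclosing state it was registered with. */
	struct sched_stub *sched = container_of(tool, struct sched_stub, tool);

	return sched->max_cpu;
}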
diff --git a/tools/perf/builtin-script.c b/tools/perf/builtin-script.c index 1e60ab70b2b1..1be843aa1546 100644 --- a/tools/perf/builtin-script.c +++ b/tools/perf/builtin-script.c | |||
@@ -14,6 +14,7 @@ | |||
14 | #include "util/util.h" | 14 | #include "util/util.h" |
15 | #include "util/evlist.h" | 15 | #include "util/evlist.h" |
16 | #include "util/evsel.h" | 16 | #include "util/evsel.h" |
17 | #include "util/sort.h" | ||
17 | #include <linux/bitmap.h> | 18 | #include <linux/bitmap.h> |
18 | 19 | ||
19 | static char const *script_name; | 20 | static char const *script_name; |
@@ -28,11 +29,6 @@ static bool system_wide; | |||
28 | static const char *cpu_list; | 29 | static const char *cpu_list; |
29 | static DECLARE_BITMAP(cpu_bitmap, MAX_NR_CPUS); | 30 | static DECLARE_BITMAP(cpu_bitmap, MAX_NR_CPUS); |
30 | 31 | ||
31 | struct perf_script { | ||
32 | struct perf_tool tool; | ||
33 | struct perf_session *session; | ||
34 | }; | ||
35 | |||
36 | enum perf_output_field { | 32 | enum perf_output_field { |
37 | PERF_OUTPUT_COMM = 1U << 0, | 33 | PERF_OUTPUT_COMM = 1U << 0, |
38 | PERF_OUTPUT_TID = 1U << 1, | 34 | PERF_OUTPUT_TID = 1U << 1, |
@@ -262,14 +258,11 @@ static int perf_session__check_output_opt(struct perf_session *session) | |||
262 | return 0; | 258 | return 0; |
263 | } | 259 | } |
264 | 260 | ||
265 | static void print_sample_start(struct pevent *pevent, | 261 | static void print_sample_start(struct perf_sample *sample, |
266 | struct perf_sample *sample, | ||
267 | struct thread *thread, | 262 | struct thread *thread, |
268 | struct perf_evsel *evsel) | 263 | struct perf_evsel *evsel) |
269 | { | 264 | { |
270 | int type; | ||
271 | struct perf_event_attr *attr = &evsel->attr; | 265 | struct perf_event_attr *attr = &evsel->attr; |
272 | struct event_format *event; | ||
273 | const char *evname = NULL; | 266 | const char *evname = NULL; |
274 | unsigned long secs; | 267 | unsigned long secs; |
275 | unsigned long usecs; | 268 | unsigned long usecs; |
@@ -307,20 +300,7 @@ static void print_sample_start(struct pevent *pevent, | |||
307 | } | 300 | } |
308 | 301 | ||
309 | if (PRINT_FIELD(EVNAME)) { | 302 | if (PRINT_FIELD(EVNAME)) { |
310 | if (attr->type == PERF_TYPE_TRACEPOINT) { | 303 | evname = perf_evsel__name(evsel); |
311 | /* | ||
312 | * XXX Do we really need this here? | ||
313 | * perf_evlist__set_tracepoint_names should have done | ||
314 | * this already | ||
315 | */ | ||
316 | type = trace_parse_common_type(pevent, | ||
317 | sample->raw_data); | ||
318 | event = pevent_find_event(pevent, type); | ||
319 | if (event) | ||
320 | evname = event->name; | ||
321 | } else | ||
322 | evname = perf_evsel__name(evsel); | ||
323 | |||
324 | printf("%s: ", evname ? evname : "[unknown]"); | 304 | printf("%s: ", evname ? evname : "[unknown]"); |
325 | } | 305 | } |
326 | } | 306 | } |
@@ -401,7 +381,7 @@ static void print_sample_bts(union perf_event *event, | |||
401 | printf(" "); | 381 | printf(" "); |
402 | else | 382 | else |
403 | printf("\n"); | 383 | printf("\n"); |
404 | perf_event__print_ip(event, sample, machine, | 384 | perf_evsel__print_ip(evsel, event, sample, machine, |
405 | PRINT_FIELD(SYM), PRINT_FIELD(DSO), | 385 | PRINT_FIELD(SYM), PRINT_FIELD(DSO), |
406 | PRINT_FIELD(SYMOFFSET)); | 386 | PRINT_FIELD(SYMOFFSET)); |
407 | } | 387 | } |
@@ -415,19 +395,17 @@ static void print_sample_bts(union perf_event *event, | |||
415 | printf("\n"); | 395 | printf("\n"); |
416 | } | 396 | } |
417 | 397 | ||
418 | static void process_event(union perf_event *event __unused, | 398 | static void process_event(union perf_event *event, struct perf_sample *sample, |
419 | struct pevent *pevent, | 399 | struct perf_evsel *evsel, struct machine *machine, |
420 | struct perf_sample *sample, | 400 | struct addr_location *al) |
421 | struct perf_evsel *evsel, | ||
422 | struct machine *machine, | ||
423 | struct thread *thread) | ||
424 | { | 401 | { |
425 | struct perf_event_attr *attr = &evsel->attr; | 402 | struct perf_event_attr *attr = &evsel->attr; |
403 | struct thread *thread = al->thread; | ||
426 | 404 | ||
427 | if (output[attr->type].fields == 0) | 405 | if (output[attr->type].fields == 0) |
428 | return; | 406 | return; |
429 | 407 | ||
430 | print_sample_start(pevent, sample, thread, evsel); | 408 | print_sample_start(sample, thread, evsel); |
431 | 409 | ||
432 | if (is_bts_event(attr)) { | 410 | if (is_bts_event(attr)) { |
433 | print_sample_bts(event, sample, evsel, machine, thread); | 411 | print_sample_bts(event, sample, evsel, machine, thread); |
@@ -435,9 +413,8 @@ static void process_event(union perf_event *event __unused, | |||
435 | } | 413 | } |
436 | 414 | ||
437 | if (PRINT_FIELD(TRACE)) | 415 | if (PRINT_FIELD(TRACE)) |
438 | print_trace_event(pevent, sample->cpu, sample->raw_data, | 416 | event_format__print(evsel->tp_format, sample->cpu, |
439 | sample->raw_size); | 417 | sample->raw_data, sample->raw_size); |
440 | |||
441 | if (PRINT_FIELD(ADDR)) | 418 | if (PRINT_FIELD(ADDR)) |
442 | print_sample_addr(event, sample, machine, thread, attr); | 419 | print_sample_addr(event, sample, machine, thread, attr); |
443 | 420 | ||
@@ -446,7 +423,7 @@ static void process_event(union perf_event *event __unused, | |||
446 | printf(" "); | 423 | printf(" "); |
447 | else | 424 | else |
448 | printf("\n"); | 425 | printf("\n"); |
449 | perf_event__print_ip(event, sample, machine, | 426 | perf_evsel__print_ip(evsel, event, sample, machine, |
450 | PRINT_FIELD(SYM), PRINT_FIELD(DSO), | 427 | PRINT_FIELD(SYM), PRINT_FIELD(DSO), |
451 | PRINT_FIELD(SYMOFFSET)); | 428 | PRINT_FIELD(SYMOFFSET)); |
452 | } | 429 | } |
@@ -454,9 +431,9 @@ static void process_event(union perf_event *event __unused, | |||
454 | printf("\n"); | 431 | printf("\n"); |
455 | } | 432 | } |
456 | 433 | ||
457 | static int default_start_script(const char *script __unused, | 434 | static int default_start_script(const char *script __maybe_unused, |
458 | int argc __unused, | 435 | int argc __maybe_unused, |
459 | const char **argv __unused) | 436 | const char **argv __maybe_unused) |
460 | { | 437 | { |
461 | return 0; | 438 | return 0; |
462 | } | 439 | } |
@@ -466,8 +443,8 @@ static int default_stop_script(void) | |||
466 | return 0; | 443 | return 0; |
467 | } | 444 | } |
468 | 445 | ||
469 | static int default_generate_script(struct pevent *pevent __unused, | 446 | static int default_generate_script(struct pevent *pevent __maybe_unused, |
470 | const char *outfile __unused) | 447 | const char *outfile __maybe_unused) |
471 | { | 448 | { |
472 | return 0; | 449 | return 0; |
473 | } | 450 | } |
@@ -498,14 +475,13 @@ static int cleanup_scripting(void) | |||
498 | 475 | ||
499 | static const char *input_name; | 476 | static const char *input_name; |
500 | 477 | ||
501 | static int process_sample_event(struct perf_tool *tool __used, | 478 | static int process_sample_event(struct perf_tool *tool __maybe_unused, |
502 | union perf_event *event, | 479 | union perf_event *event, |
503 | struct perf_sample *sample, | 480 | struct perf_sample *sample, |
504 | struct perf_evsel *evsel, | 481 | struct perf_evsel *evsel, |
505 | struct machine *machine) | 482 | struct machine *machine) |
506 | { | 483 | { |
507 | struct addr_location al; | 484 | struct addr_location al; |
508 | struct perf_script *scr = container_of(tool, struct perf_script, tool); | ||
509 | struct thread *thread = machine__findnew_thread(machine, event->ip.tid); | 485 | struct thread *thread = machine__findnew_thread(machine, event->ip.tid); |
510 | 486 | ||
511 | if (thread == NULL) { | 487 | if (thread == NULL) { |
@@ -537,32 +513,29 @@ static int process_sample_event(struct perf_tool *tool __used, | |||
537 | if (cpu_list && !test_bit(sample->cpu, cpu_bitmap)) | 513 | if (cpu_list && !test_bit(sample->cpu, cpu_bitmap)) |
538 | return 0; | 514 | return 0; |
539 | 515 | ||
540 | scripting_ops->process_event(event, scr->session->pevent, | 516 | scripting_ops->process_event(event, sample, evsel, machine, &al); |
541 | sample, evsel, machine, thread); | ||
542 | 517 | ||
543 | evsel->hists.stats.total_period += sample->period; | 518 | evsel->hists.stats.total_period += sample->period; |
544 | return 0; | 519 | return 0; |
545 | } | 520 | } |
546 | 521 | ||
547 | static struct perf_script perf_script = { | 522 | static struct perf_tool perf_script = { |
548 | .tool = { | 523 | .sample = process_sample_event, |
549 | .sample = process_sample_event, | 524 | .mmap = perf_event__process_mmap, |
550 | .mmap = perf_event__process_mmap, | 525 | .comm = perf_event__process_comm, |
551 | .comm = perf_event__process_comm, | 526 | .exit = perf_event__process_task, |
552 | .exit = perf_event__process_task, | 527 | .fork = perf_event__process_task, |
553 | .fork = perf_event__process_task, | 528 | .attr = perf_event__process_attr, |
554 | .attr = perf_event__process_attr, | 529 | .event_type = perf_event__process_event_type, |
555 | .event_type = perf_event__process_event_type, | 530 | .tracing_data = perf_event__process_tracing_data, |
556 | .tracing_data = perf_event__process_tracing_data, | 531 | .build_id = perf_event__process_build_id, |
557 | .build_id = perf_event__process_build_id, | 532 | .ordered_samples = true, |
558 | .ordered_samples = true, | 533 | .ordering_requires_timestamps = true, |
559 | .ordering_requires_timestamps = true, | ||
560 | }, | ||
561 | }; | 534 | }; |
562 | 535 | ||
563 | extern volatile int session_done; | 536 | extern volatile int session_done; |
564 | 537 | ||
565 | static void sig_handler(int sig __unused) | 538 | static void sig_handler(int sig __maybe_unused) |
566 | { | 539 | { |
567 | session_done = 1; | 540 | session_done = 1; |
568 | } | 541 | } |
@@ -573,7 +546,7 @@ static int __cmd_script(struct perf_session *session) | |||
573 | 546 | ||
574 | signal(SIGINT, sig_handler); | 547 | signal(SIGINT, sig_handler); |
575 | 548 | ||
576 | ret = perf_session__process_events(session, &perf_script.tool); | 549 | ret = perf_session__process_events(session, &perf_script); |
577 | 550 | ||
578 | if (debug_mode) | 551 | if (debug_mode) |
579 | pr_err("Misordered timestamps: %" PRIu64 "\n", nr_unordered); | 552 | pr_err("Misordered timestamps: %" PRIu64 "\n", nr_unordered); |
@@ -672,8 +645,8 @@ static void list_available_languages(void) | |||
672 | fprintf(stderr, "\n"); | 645 | fprintf(stderr, "\n"); |
673 | } | 646 | } |
674 | 647 | ||
675 | static int parse_scriptname(const struct option *opt __used, | 648 | static int parse_scriptname(const struct option *opt __maybe_unused, |
676 | const char *str, int unset __used) | 649 | const char *str, int unset __maybe_unused) |
677 | { | 650 | { |
678 | char spec[PATH_MAX]; | 651 | char spec[PATH_MAX]; |
679 | const char *script, *ext; | 652 | const char *script, *ext; |
@@ -718,8 +691,8 @@ static int parse_scriptname(const struct option *opt __used, | |||
718 | return 0; | 691 | return 0; |
719 | } | 692 | } |
720 | 693 | ||
721 | static int parse_output_fields(const struct option *opt __used, | 694 | static int parse_output_fields(const struct option *opt __maybe_unused, |
722 | const char *arg, int unset __used) | 695 | const char *arg, int unset __maybe_unused) |
723 | { | 696 | { |
724 | char *tok; | 697 | char *tok; |
725 | int i, imax = sizeof(all_output_options) / sizeof(struct output_option); | 698 | int i, imax = sizeof(all_output_options) / sizeof(struct output_option); |
@@ -1010,8 +983,9 @@ static char *get_script_root(struct dirent *script_dirent, const char *suffix) | |||
1010 | return script_root; | 983 | return script_root; |
1011 | } | 984 | } |
1012 | 985 | ||
1013 | static int list_available_scripts(const struct option *opt __used, | 986 | static int list_available_scripts(const struct option *opt __maybe_unused, |
1014 | const char *s __used, int unset __used) | 987 | const char *s __maybe_unused, |
988 | int unset __maybe_unused) | ||
1015 | { | 989 | { |
1016 | struct dirent *script_next, *lang_next, script_dirent, lang_dirent; | 990 | struct dirent *script_next, *lang_next, script_dirent, lang_dirent; |
1017 | char scripts_path[MAXPATHLEN]; | 991 | char scripts_path[MAXPATHLEN]; |
@@ -1058,6 +1032,61 @@ static int list_available_scripts(const struct option *opt __used, | |||
1058 | exit(0); | 1032 | exit(0); |
1059 | } | 1033 | } |
1060 | 1034 | ||
1035 | /* | ||
1036 | * Return -1 if none is found, otherwise the actual scripts number. | ||
1037 | * | ||
1038 | * Currently the only user of this function is the script browser, which | ||
1039 | * will list all statically runnable scripts, select one, execute it and | ||
1040 | * show the output in a perf browser. | ||
1041 | */ | ||
1042 | int find_scripts(char **scripts_array, char **scripts_path_array) | ||
1043 | { | ||
1044 | struct dirent *script_next, *lang_next, script_dirent, lang_dirent; | ||
1045 | char scripts_path[MAXPATHLEN]; | ||
1046 | DIR *scripts_dir, *lang_dir; | ||
1047 | char lang_path[MAXPATHLEN]; | ||
1048 | char *temp; | ||
1049 | int i = 0; | ||
1050 | |||
1051 | snprintf(scripts_path, MAXPATHLEN, "%s/scripts", perf_exec_path()); | ||
1052 | |||
1053 | scripts_dir = opendir(scripts_path); | ||
1054 | if (!scripts_dir) | ||
1055 | return -1; | ||
1056 | |||
1057 | for_each_lang(scripts_path, scripts_dir, lang_dirent, lang_next) { | ||
1058 | snprintf(lang_path, MAXPATHLEN, "%s/%s", scripts_path, | ||
1059 | lang_dirent.d_name); | ||
1060 | #ifdef NO_LIBPERL | ||
1061 | if (strstr(lang_path, "perl")) | ||
1062 | continue; | ||
1063 | #endif | ||
1064 | #ifdef NO_LIBPYTHON | ||
1065 | if (strstr(lang_path, "python")) | ||
1066 | continue; | ||
1067 | #endif | ||
1068 | |||
1069 | lang_dir = opendir(lang_path); | ||
1070 | if (!lang_dir) | ||
1071 | continue; | ||
1072 | |||
1073 | for_each_script(lang_path, lang_dir, script_dirent, script_next) { | ||
1074 | /* Skip those real time scripts: xxxtop.p[yl] */ | ||
1075 | if (strstr(script_dirent.d_name, "top.")) | ||
1076 | continue; | ||
1077 | sprintf(scripts_path_array[i], "%s/%s", lang_path, | ||
1078 | script_dirent.d_name); | ||
1079 | temp = strchr(script_dirent.d_name, '.'); | ||
1080 | snprintf(scripts_array[i], | ||
1081 | (temp - script_dirent.d_name) + 1, | ||
1082 | "%s", script_dirent.d_name); | ||
1083 | i++; | ||
1084 | } | ||
1085 | } | ||
1086 | |||
1087 | return i; | ||
1088 | } | ||
1089 | |||
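find_scripts() above writes into caller-provided storage: it sprintf()s the full path into scripts_path_array[i] and snprintf()s the basename (up to the first '.') into scripts_array[i], without bounding i itself, so the caller must size both arrays for every installed script. A hypothetical caller sketch, not the actual script browser; it assumes find_scripts() is declared in scope, and MAX_SCRIPTS and the 128-byte name buffers are illustrative guesses:

#include <stdio.h>
#include <stdlib.h>
#include <sys/param.h>		/* MAXPATHLEN */

#define MAX_SCRIPTS 128		/* assumed upper bound, not from perf */

static void list_scripts_example(void)
{
	char *names[MAX_SCRIPTS], *paths[MAX_SCRIPTS];
	int i, n;

	for (i = 0; i < MAX_SCRIPTS; i++) {
		names[i] = malloc(128);		/* script name without suffix */
		paths[i] = malloc(MAXPATHLEN);	/* full path under .../scripts/<lang>/ */
	}

	n = find_scripts(names, paths);
	if (n < 0)
		fprintf(stderr, "no scripts installed\n");
	else
		for (i = 0; i < n; i++)
			printf("%-24s %s\n", names[i], paths[i]);

	for (i = 0; i < MAX_SCRIPTS; i++) {
		free(names[i]);
		free(paths[i]);
	}
}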
1061 | static char *get_script_path(const char *script_root, const char *suffix) | 1090 | static char *get_script_path(const char *script_root, const char *suffix) |
1062 | { | 1091 | { |
1063 | struct dirent *script_next, *lang_next, script_dirent, lang_dirent; | 1092 | struct dirent *script_next, *lang_next, script_dirent, lang_dirent; |
@@ -1170,6 +1199,8 @@ static const struct option options[] = { | |||
1170 | parse_output_fields), | 1199 | parse_output_fields), |
1171 | OPT_BOOLEAN('a', "all-cpus", &system_wide, | 1200 | OPT_BOOLEAN('a', "all-cpus", &system_wide, |
1172 | "system-wide collection from all CPUs"), | 1201 | "system-wide collection from all CPUs"), |
1202 | OPT_STRING('S', "symbols", &symbol_conf.sym_list_str, "symbol[,symbol...]", | ||
1203 | "only consider these symbols"), | ||
1173 | OPT_STRING('C', "cpu", &cpu_list, "cpu", "list of cpus to profile"), | 1204 | OPT_STRING('C', "cpu", &cpu_list, "cpu", "list of cpus to profile"), |
1174 | OPT_STRING('c', "comms", &symbol_conf.comm_list_str, "comm[,comm...]", | 1205 | OPT_STRING('c', "comms", &symbol_conf.comm_list_str, "comm[,comm...]", |
1175 | "only display events for these comms"), | 1206 | "only display events for these comms"), |
@@ -1181,21 +1212,26 @@ static const struct option options[] = { | |||
1181 | OPT_END() | 1212 | OPT_END() |
1182 | }; | 1213 | }; |
1183 | 1214 | ||
1184 | static bool have_cmd(int argc, const char **argv) | 1215 | static int have_cmd(int argc, const char **argv) |
1185 | { | 1216 | { |
1186 | char **__argv = malloc(sizeof(const char *) * argc); | 1217 | char **__argv = malloc(sizeof(const char *) * argc); |
1187 | 1218 | ||
1188 | if (!__argv) | 1219 | if (!__argv) { |
1189 | die("malloc"); | 1220 | pr_err("malloc failed\n"); |
1221 | return -1; | ||
1222 | } | ||
1223 | |||
1190 | memcpy(__argv, argv, sizeof(const char *) * argc); | 1224 | memcpy(__argv, argv, sizeof(const char *) * argc); |
1191 | argc = parse_options(argc, (const char **)__argv, record_options, | 1225 | argc = parse_options(argc, (const char **)__argv, record_options, |
1192 | NULL, PARSE_OPT_STOP_AT_NON_OPTION); | 1226 | NULL, PARSE_OPT_STOP_AT_NON_OPTION); |
1193 | free(__argv); | 1227 | free(__argv); |
1194 | 1228 | ||
1195 | return argc != 0; | 1229 | system_wide = (argc == 0); |
1230 | |||
1231 | return 0; | ||
1196 | } | 1232 | } |
1197 | 1233 | ||
1198 | int cmd_script(int argc, const char **argv, const char *prefix __used) | 1234 | int cmd_script(int argc, const char **argv, const char *prefix __maybe_unused) |
1199 | { | 1235 | { |
1200 | char *rec_script_path = NULL; | 1236 | char *rec_script_path = NULL; |
1201 | char *rep_script_path = NULL; | 1237 | char *rep_script_path = NULL; |
@@ -1259,13 +1295,13 @@ int cmd_script(int argc, const char **argv, const char *prefix __used) | |||
1259 | 1295 | ||
1260 | if (pipe(live_pipe) < 0) { | 1296 | if (pipe(live_pipe) < 0) { |
1261 | perror("failed to create pipe"); | 1297 | perror("failed to create pipe"); |
1262 | exit(-1); | 1298 | return -1; |
1263 | } | 1299 | } |
1264 | 1300 | ||
1265 | pid = fork(); | 1301 | pid = fork(); |
1266 | if (pid < 0) { | 1302 | if (pid < 0) { |
1267 | perror("failed to fork"); | 1303 | perror("failed to fork"); |
1268 | exit(-1); | 1304 | return -1; |
1269 | } | 1305 | } |
1270 | 1306 | ||
1271 | if (!pid) { | 1307 | if (!pid) { |
@@ -1277,13 +1313,18 @@ int cmd_script(int argc, const char **argv, const char *prefix __used) | |||
1277 | if (is_top_script(argv[0])) { | 1313 | if (is_top_script(argv[0])) { |
1278 | system_wide = true; | 1314 | system_wide = true; |
1279 | } else if (!system_wide) { | 1315 | } else if (!system_wide) { |
1280 | system_wide = !have_cmd(argc - rep_args, | 1316 | if (have_cmd(argc - rep_args, &argv[rep_args]) != 0) { |
1281 | &argv[rep_args]); | 1317 | err = -1; |
1318 | goto out; | ||
1319 | } | ||
1282 | } | 1320 | } |
1283 | 1321 | ||
1284 | __argv = malloc((argc + 6) * sizeof(const char *)); | 1322 | __argv = malloc((argc + 6) * sizeof(const char *)); |
1285 | if (!__argv) | 1323 | if (!__argv) { |
1286 | die("malloc"); | 1324 | pr_err("malloc failed\n"); |
1325 | err = -ENOMEM; | ||
1326 | goto out; | ||
1327 | } | ||
1287 | 1328 | ||
1288 | __argv[j++] = "/bin/sh"; | 1329 | __argv[j++] = "/bin/sh"; |
1289 | __argv[j++] = rec_script_path; | 1330 | __argv[j++] = rec_script_path; |
@@ -1305,8 +1346,12 @@ int cmd_script(int argc, const char **argv, const char *prefix __used) | |||
1305 | close(live_pipe[1]); | 1346 | close(live_pipe[1]); |
1306 | 1347 | ||
1307 | __argv = malloc((argc + 4) * sizeof(const char *)); | 1348 | __argv = malloc((argc + 4) * sizeof(const char *)); |
1308 | if (!__argv) | 1349 | if (!__argv) { |
1309 | die("malloc"); | 1350 | pr_err("malloc failed\n"); |
1351 | err = -ENOMEM; | ||
1352 | goto out; | ||
1353 | } | ||
1354 | |||
1310 | j = 0; | 1355 | j = 0; |
1311 | __argv[j++] = "/bin/sh"; | 1356 | __argv[j++] = "/bin/sh"; |
1312 | __argv[j++] = rep_script_path; | 1357 | __argv[j++] = rep_script_path; |
@@ -1331,12 +1376,20 @@ int cmd_script(int argc, const char **argv, const char *prefix __used) | |||
1331 | 1376 | ||
1332 | if (!rec_script_path) | 1377 | if (!rec_script_path) |
1333 | system_wide = false; | 1378 | system_wide = false; |
1334 | else if (!system_wide) | 1379 | else if (!system_wide) { |
1335 | system_wide = !have_cmd(argc - 1, &argv[1]); | 1380 | if (have_cmd(argc - 1, &argv[1]) != 0) { |
1381 | err = -1; | ||
1382 | goto out; | ||
1383 | } | ||
1384 | } | ||
1336 | 1385 | ||
1337 | __argv = malloc((argc + 2) * sizeof(const char *)); | 1386 | __argv = malloc((argc + 2) * sizeof(const char *)); |
1338 | if (!__argv) | 1387 | if (!__argv) { |
1339 | die("malloc"); | 1388 | pr_err("malloc failed\n"); |
1389 | err = -ENOMEM; | ||
1390 | goto out; | ||
1391 | } | ||
1392 | |||
1340 | __argv[j++] = "/bin/sh"; | 1393 | __argv[j++] = "/bin/sh"; |
1341 | __argv[j++] = script_path; | 1394 | __argv[j++] = script_path; |
1342 | if (system_wide) | 1395 | if (system_wide) |
@@ -1356,12 +1409,10 @@ int cmd_script(int argc, const char **argv, const char *prefix __used) | |||
1356 | setup_pager(); | 1409 | setup_pager(); |
1357 | 1410 | ||
1358 | session = perf_session__new(input_name, O_RDONLY, 0, false, | 1411 | session = perf_session__new(input_name, O_RDONLY, 0, false, |
1359 | &perf_script.tool); | 1412 | &perf_script); |
1360 | if (session == NULL) | 1413 | if (session == NULL) |
1361 | return -ENOMEM; | 1414 | return -ENOMEM; |
1362 | 1415 | ||
1363 | perf_script.session = session; | ||
1364 | |||
1365 | if (cpu_list) { | 1416 | if (cpu_list) { |
1366 | if (perf_session__cpu_bitmap(session, cpu_list, cpu_bitmap)) | 1417 | if (perf_session__cpu_bitmap(session, cpu_list, cpu_bitmap)) |
1367 | return -1; | 1418 | return -1; |
@@ -1387,18 +1438,18 @@ int cmd_script(int argc, const char **argv, const char *prefix __used) | |||
1387 | input = open(session->filename, O_RDONLY); /* input_name */ | 1438 | input = open(session->filename, O_RDONLY); /* input_name */ |
1388 | if (input < 0) { | 1439 | if (input < 0) { |
1389 | perror("failed to open file"); | 1440 | perror("failed to open file"); |
1390 | exit(-1); | 1441 | return -1; |
1391 | } | 1442 | } |
1392 | 1443 | ||
1393 | err = fstat(input, &perf_stat); | 1444 | err = fstat(input, &perf_stat); |
1394 | if (err < 0) { | 1445 | if (err < 0) { |
1395 | perror("failed to stat file"); | 1446 | perror("failed to stat file"); |
1396 | exit(-1); | 1447 | return -1; |
1397 | } | 1448 | } |
1398 | 1449 | ||
1399 | if (!perf_stat.st_size) { | 1450 | if (!perf_stat.st_size) { |
1400 | fprintf(stderr, "zero-sized file, nothing to do!\n"); | 1451 | fprintf(stderr, "zero-sized file, nothing to do!\n"); |
1401 | exit(0); | 1452 | return 0; |
1402 | } | 1453 | } |
1403 | 1454 | ||
1404 | scripting_ops = script_spec__lookup(generate_script_lang); | 1455 | scripting_ops = script_spec__lookup(generate_script_lang); |
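Note: the cmd_script() hunks above follow a pattern repeated throughout this series: builtin code stops calling die() on failure and instead reports the error (pr_err()/perror()) and propagates a return code through a single cleanup path. A minimal sketch of that shape, with illustrative names only (build_argv() is not a real perf helper):

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

/* Illustrative only: before, a failed allocation would die(); now the
 * error is reported and returned so the caller can clean up. */
static int build_argv(int argc, const char *script_path, const char ***argvp)
{
	/* room for "/bin/sh", the script, argc caller args and a NULL */
	const char **argv = malloc((argc + 3) * sizeof(*argv));
	int j = 0;

	if (!argv) {
		fprintf(stderr, "malloc failed\n");
		return -ENOMEM;	/* was: die("malloc") */
	}

	argv[j++] = "/bin/sh";
	argv[j++] = script_path;
	argv[j] = NULL;

	*argvp = argv;
	return 0;
}

int main(void)
{
	const char **argv;
	int err = build_argv(0, "report-script.sh", &argv);

	if (err)
		return 1;
	printf("%s %s\n", argv[0], argv[1]);
	free(argv);
	return 0;
}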
diff --git a/tools/perf/builtin-stat.c b/tools/perf/builtin-stat.c index 861f0aec77ae..e8cd4d81b06e 100644 --- a/tools/perf/builtin-stat.c +++ b/tools/perf/builtin-stat.c | |||
@@ -51,13 +51,13 @@ | |||
51 | #include "util/evsel.h" | 51 | #include "util/evsel.h" |
52 | #include "util/debug.h" | 52 | #include "util/debug.h" |
53 | #include "util/color.h" | 53 | #include "util/color.h" |
54 | #include "util/stat.h" | ||
54 | #include "util/header.h" | 55 | #include "util/header.h" |
55 | #include "util/cpumap.h" | 56 | #include "util/cpumap.h" |
56 | #include "util/thread.h" | 57 | #include "util/thread.h" |
57 | #include "util/thread_map.h" | 58 | #include "util/thread_map.h" |
58 | 59 | ||
59 | #include <sys/prctl.h> | 60 | #include <sys/prctl.h> |
60 | #include <math.h> | ||
61 | #include <locale.h> | 61 | #include <locale.h> |
62 | 62 | ||
63 | #define DEFAULT_SEPARATOR " " | 63 | #define DEFAULT_SEPARATOR " " |
@@ -199,11 +199,6 @@ static int output_fd; | |||
199 | 199 | ||
200 | static volatile int done = 0; | 200 | static volatile int done = 0; |
201 | 201 | ||
202 | struct stats | ||
203 | { | ||
204 | double n, mean, M2; | ||
205 | }; | ||
206 | |||
207 | struct perf_stat { | 202 | struct perf_stat { |
208 | struct stats res_stats[3]; | 203 | struct stats res_stats[3]; |
209 | }; | 204 | }; |
@@ -220,48 +215,14 @@ static void perf_evsel__free_stat_priv(struct perf_evsel *evsel) | |||
220 | evsel->priv = NULL; | 215 | evsel->priv = NULL; |
221 | } | 216 | } |
222 | 217 | ||
223 | static void update_stats(struct stats *stats, u64 val) | 218 | static inline struct cpu_map *perf_evsel__cpus(struct perf_evsel *evsel) |
224 | { | ||
225 | double delta; | ||
226 | |||
227 | stats->n++; | ||
228 | delta = val - stats->mean; | ||
229 | stats->mean += delta / stats->n; | ||
230 | stats->M2 += delta*(val - stats->mean); | ||
231 | } | ||
232 | |||
233 | static double avg_stats(struct stats *stats) | ||
234 | { | 219 | { |
235 | return stats->mean; | 220 | return (evsel->cpus && !target.cpu_list) ? evsel->cpus : evsel_list->cpus; |
236 | } | 221 | } |
237 | 222 | ||
238 | /* | 223 | static inline int perf_evsel__nr_cpus(struct perf_evsel *evsel) |
239 | * http://en.wikipedia.org/wiki/Algorithms_for_calculating_variance | ||
240 | * | ||
241 | * (\Sum n_i^2) - ((\Sum n_i)^2)/n | ||
242 | * s^2 = ------------------------------- | ||
243 | * n - 1 | ||
244 | * | ||
245 | * http://en.wikipedia.org/wiki/Stddev | ||
246 | * | ||
247 | * The std dev of the mean is related to the std dev by: | ||
248 | * | ||
249 | * s | ||
250 | * s_mean = ------- | ||
251 | * sqrt(n) | ||
252 | * | ||
253 | */ | ||
254 | static double stddev_stats(struct stats *stats) | ||
255 | { | 224 | { |
256 | double variance, variance_mean; | 225 | return perf_evsel__cpus(evsel)->nr; |
257 | |||
258 | if (!stats->n) | ||
259 | return 0.0; | ||
260 | |||
261 | variance = stats->M2 / (stats->n - 1); | ||
262 | variance_mean = variance / stats->n; | ||
263 | |||
264 | return sqrt(variance_mean); | ||
265 | } | 226 | } |
266 | 227 | ||
267 | static struct stats runtime_nsecs_stats[MAX_NR_CPUS]; | 228 | static struct stats runtime_nsecs_stats[MAX_NR_CPUS]; |
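Note: the running-statistics helpers deleted here (Welford's online mean/variance plus the standard deviation of the mean) are not lost; the new #include "util/stat.h" at the top of the file suggests they now live in a shared stat module. Reproduced below as a self-contained sketch taken directly from the deleted lines, with a small usage example added:

#include <math.h>
#include <stdio.h>

typedef unsigned long long u64;

struct stats {
	double n, mean, M2;
};

/* Welford's online update: single pass, numerically stable. */
static void update_stats(struct stats *stats, u64 val)
{
	double delta;

	stats->n++;
	delta = val - stats->mean;
	stats->mean += delta / stats->n;
	stats->M2 += delta * (val - stats->mean);
}

static double avg_stats(struct stats *stats)
{
	return stats->mean;
}

/* std dev of the mean: s / sqrt(n), with s^2 = M2 / (n - 1) */
static double stddev_stats(struct stats *stats)
{
	double variance, variance_mean;

	if (!stats->n)
		return 0.0;

	variance = stats->M2 / (stats->n - 1);
	variance_mean = variance / stats->n;

	return sqrt(variance_mean);
}

int main(void)
{
	struct stats s = { 0 };
	u64 samples[] = { 100, 104, 98, 101, 97 };

	for (unsigned int i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
		update_stats(&s, samples[i]);

	printf("avg %.2f +- %.2f\n", avg_stats(&s), stddev_stats(&s));
	return 0;
}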
@@ -281,13 +242,9 @@ static int create_perf_stat_counter(struct perf_evsel *evsel, | |||
281 | struct perf_evsel *first) | 242 | struct perf_evsel *first) |
282 | { | 243 | { |
283 | struct perf_event_attr *attr = &evsel->attr; | 244 | struct perf_event_attr *attr = &evsel->attr; |
284 | struct xyarray *group_fd = NULL; | ||
285 | bool exclude_guest_missing = false; | 245 | bool exclude_guest_missing = false; |
286 | int ret; | 246 | int ret; |
287 | 247 | ||
288 | if (group && evsel != first) | ||
289 | group_fd = first->fd; | ||
290 | |||
291 | if (scale) | 248 | if (scale) |
292 | attr->read_format = PERF_FORMAT_TOTAL_TIME_ENABLED | | 249 | attr->read_format = PERF_FORMAT_TOTAL_TIME_ENABLED | |
293 | PERF_FORMAT_TOTAL_TIME_RUNNING; | 250 | PERF_FORMAT_TOTAL_TIME_RUNNING; |
@@ -299,8 +256,7 @@ retry: | |||
299 | evsel->attr.exclude_guest = evsel->attr.exclude_host = 0; | 256 | evsel->attr.exclude_guest = evsel->attr.exclude_host = 0; |
300 | 257 | ||
301 | if (perf_target__has_cpu(&target)) { | 258 | if (perf_target__has_cpu(&target)) { |
302 | ret = perf_evsel__open_per_cpu(evsel, evsel_list->cpus, | 259 | ret = perf_evsel__open_per_cpu(evsel, perf_evsel__cpus(evsel)); |
303 | group, group_fd); | ||
304 | if (ret) | 260 | if (ret) |
305 | goto check_ret; | 261 | goto check_ret; |
306 | return 0; | 262 | return 0; |
@@ -311,8 +267,7 @@ retry: | |||
311 | attr->enable_on_exec = 1; | 267 | attr->enable_on_exec = 1; |
312 | } | 268 | } |
313 | 269 | ||
314 | ret = perf_evsel__open_per_thread(evsel, evsel_list->threads, | 270 | ret = perf_evsel__open_per_thread(evsel, evsel_list->threads); |
315 | group, group_fd); | ||
316 | if (!ret) | 271 | if (!ret) |
317 | return 0; | 272 | return 0; |
318 | /* fall through */ | 273 | /* fall through */ |
@@ -382,7 +337,7 @@ static int read_counter_aggr(struct perf_evsel *counter) | |||
382 | u64 *count = counter->counts->aggr.values; | 337 | u64 *count = counter->counts->aggr.values; |
383 | int i; | 338 | int i; |
384 | 339 | ||
385 | if (__perf_evsel__read(counter, evsel_list->cpus->nr, | 340 | if (__perf_evsel__read(counter, perf_evsel__nr_cpus(counter), |
386 | evsel_list->threads->nr, scale) < 0) | 341 | evsel_list->threads->nr, scale) < 0) |
387 | return -1; | 342 | return -1; |
388 | 343 | ||
@@ -411,7 +366,7 @@ static int read_counter(struct perf_evsel *counter) | |||
411 | u64 *count; | 366 | u64 *count; |
412 | int cpu; | 367 | int cpu; |
413 | 368 | ||
414 | for (cpu = 0; cpu < evsel_list->cpus->nr; cpu++) { | 369 | for (cpu = 0; cpu < perf_evsel__nr_cpus(counter); cpu++) { |
415 | if (__perf_evsel__read_on_cpu(counter, cpu, 0, scale) < 0) | 370 | if (__perf_evsel__read_on_cpu(counter, cpu, 0, scale) < 0) |
416 | return -1; | 371 | return -1; |
417 | 372 | ||
@@ -423,7 +378,7 @@ static int read_counter(struct perf_evsel *counter) | |||
423 | return 0; | 378 | return 0; |
424 | } | 379 | } |
425 | 380 | ||
426 | static int run_perf_stat(int argc __used, const char **argv) | 381 | static int run_perf_stat(int argc __maybe_unused, const char **argv) |
427 | { | 382 | { |
428 | unsigned long long t0, t1; | 383 | unsigned long long t0, t1; |
429 | struct perf_evsel *counter, *first; | 384 | struct perf_evsel *counter, *first; |
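Note: another recurring change in this series is renaming __used parameter annotations to __maybe_unused, presumably to match kernel convention, where __used means attribute((used)) (force symbol emission) while a possibly-unreferenced argument wants attribute((unused)). A minimal sketch of the intent; the macro definition below is an assumption, not quoted from the tree:

#include <stdio.h>

/* Assumed definition; in the tools tree this lives in a compiler.h header. */
#define __maybe_unused __attribute__((unused))

/* Silences -Wunused-parameter without forcing anything to be emitted. */
static int run(int argc __maybe_unused, const char **argv)
{
	printf("running %s\n", argv[0]);
	return 0;
}

int main(void)
{
	const char *argv[] = { "perf-stat-sketch", NULL };
	return run(1, argv);
}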
@@ -434,7 +389,7 @@ static int run_perf_stat(int argc __used, const char **argv) | |||
434 | 389 | ||
435 | if (forks && (pipe(child_ready_pipe) < 0 || pipe(go_pipe) < 0)) { | 390 | if (forks && (pipe(child_ready_pipe) < 0 || pipe(go_pipe) < 0)) { |
436 | perror("failed to create pipes"); | 391 | perror("failed to create pipes"); |
437 | exit(1); | 392 | return -1; |
438 | } | 393 | } |
439 | 394 | ||
440 | if (forks) { | 395 | if (forks) { |
@@ -483,7 +438,10 @@ static int run_perf_stat(int argc __used, const char **argv) | |||
483 | close(child_ready_pipe[0]); | 438 | close(child_ready_pipe[0]); |
484 | } | 439 | } |
485 | 440 | ||
486 | first = list_entry(evsel_list->entries.next, struct perf_evsel, node); | 441 | if (group) |
442 | perf_evlist__set_leader(evsel_list); | ||
443 | |||
444 | first = perf_evlist__first(evsel_list); | ||
487 | 445 | ||
488 | list_for_each_entry(counter, &evsel_list->entries, node) { | 446 | list_for_each_entry(counter, &evsel_list->entries, node) { |
489 | if (create_perf_stat_counter(counter, first) < 0) { | 447 | if (create_perf_stat_counter(counter, first) < 0) { |
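Note: event grouping also changes shape here. Instead of threading the leader's fd array (group_fd) into every perf_evsel__open() call, the evlist marks a leader up front with perf_evlist__set_leader() and the open path presumably consults it internally. A hedged sketch of what such a set-leader pass could look like; the struct layout and helper names below are assumptions, not the real evlist API:

#include <stdio.h>

/* Simplified stand-ins for the evlist/evsel types. */
struct evsel {
	const char *name;
	struct evsel *leader;	/* assumed: points at the group leader */
};

struct evlist {
	struct evsel *entries;
	int nr_entries;
};

/* Plausible shape: the first event leads, every entry points at it. */
static void evlist_set_leader(struct evlist *evlist)
{
	struct evsel *leader = &evlist->entries[0];

	for (int i = 0; i < evlist->nr_entries; i++)
		evlist->entries[i].leader = leader;
}

int main(void)
{
	struct evsel evs[] = { { "cycles", NULL }, { "instructions", NULL } };
	struct evlist evlist = { evs, 2 };

	evlist_set_leader(&evlist);
	printf("%s is led by %s\n", evs[1].name, evs[1].leader->name);
	return 0;
}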
@@ -513,13 +471,14 @@ static int run_perf_stat(int argc __used, const char **argv) | |||
513 | } | 471 | } |
514 | if (child_pid != -1) | 472 | if (child_pid != -1) |
515 | kill(child_pid, SIGTERM); | 473 | kill(child_pid, SIGTERM); |
516 | die("Not all events could be opened.\n"); | 474 | |
475 | pr_err("Not all events could be opened.\n"); | ||
517 | return -1; | 476 | return -1; |
518 | } | 477 | } |
519 | counter->supported = true; | 478 | counter->supported = true; |
520 | } | 479 | } |
521 | 480 | ||
522 | if (perf_evlist__set_filters(evsel_list)) { | 481 | if (perf_evlist__apply_filters(evsel_list)) { |
523 | error("failed to set filter with %d (%s)\n", errno, | 482 | error("failed to set filter with %d (%s)\n", errno, |
524 | strerror(errno)); | 483 | strerror(errno)); |
525 | return -1; | 484 | return -1; |
@@ -546,12 +505,12 @@ static int run_perf_stat(int argc __used, const char **argv) | |||
546 | if (no_aggr) { | 505 | if (no_aggr) { |
547 | list_for_each_entry(counter, &evsel_list->entries, node) { | 506 | list_for_each_entry(counter, &evsel_list->entries, node) { |
548 | read_counter(counter); | 507 | read_counter(counter); |
549 | perf_evsel__close_fd(counter, evsel_list->cpus->nr, 1); | 508 | perf_evsel__close_fd(counter, perf_evsel__nr_cpus(counter), 1); |
550 | } | 509 | } |
551 | } else { | 510 | } else { |
552 | list_for_each_entry(counter, &evsel_list->entries, node) { | 511 | list_for_each_entry(counter, &evsel_list->entries, node) { |
553 | read_counter_aggr(counter); | 512 | read_counter_aggr(counter); |
554 | perf_evsel__close_fd(counter, evsel_list->cpus->nr, | 513 | perf_evsel__close_fd(counter, perf_evsel__nr_cpus(counter), |
555 | evsel_list->threads->nr); | 514 | evsel_list->threads->nr); |
556 | } | 515 | } |
557 | } | 516 | } |
@@ -561,10 +520,7 @@ static int run_perf_stat(int argc __used, const char **argv) | |||
561 | 520 | ||
562 | static void print_noise_pct(double total, double avg) | 521 | static void print_noise_pct(double total, double avg) |
563 | { | 522 | { |
564 | double pct = 0.0; | 523 | double pct = rel_stddev_stats(total, avg); |
565 | |||
566 | if (avg) | ||
567 | pct = 100.0*total/avg; | ||
568 | 524 | ||
569 | if (csv_output) | 525 | if (csv_output) |
570 | fprintf(output, "%s%.2f%%", csv_sep, pct); | 526 | fprintf(output, "%s%.2f%%", csv_sep, pct); |
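Note: print_noise_pct() now delegates the percentage computation to rel_stddev_stats(). Judging by the deleted lines, the helper computes the relative standard deviation, i.e. 100 * total / avg with a zero-average guard, and presumably sits next to the other stats code in util/stat. A standalone sketch of that behaviour:

#include <stdio.h>

/* Relative stddev in percent, guarded against a zero average. */
static double rel_stddev_stats(double stddev, double avg)
{
	double pct = 0.0;

	if (avg)
		pct = 100.0 * stddev / avg;

	return pct;
}

int main(void)
{
	printf("%.2f%%\n", rel_stddev_stats(1.5, 120.0));	/* ~1.25% */
	printf("%.2f%%\n", rel_stddev_stats(1.5, 0.0));		/* guarded: 0.00% */
	return 0;
}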
@@ -592,7 +548,7 @@ static void nsec_printout(int cpu, struct perf_evsel *evsel, double avg) | |||
592 | if (no_aggr) | 548 | if (no_aggr) |
593 | sprintf(cpustr, "CPU%*d%s", | 549 | sprintf(cpustr, "CPU%*d%s", |
594 | csv_output ? 0 : -4, | 550 | csv_output ? 0 : -4, |
595 | evsel_list->cpus->map[cpu], csv_sep); | 551 | perf_evsel__cpus(evsel)->map[cpu], csv_sep); |
596 | 552 | ||
597 | fprintf(output, fmt, cpustr, msecs, csv_sep, perf_evsel__name(evsel)); | 553 | fprintf(output, fmt, cpustr, msecs, csv_sep, perf_evsel__name(evsel)); |
598 | 554 | ||
@@ -636,7 +592,9 @@ static const char *get_ratio_color(enum grc_type type, double ratio) | |||
636 | return color; | 592 | return color; |
637 | } | 593 | } |
638 | 594 | ||
639 | static void print_stalled_cycles_frontend(int cpu, struct perf_evsel *evsel __used, double avg) | 595 | static void print_stalled_cycles_frontend(int cpu, |
596 | struct perf_evsel *evsel | ||
597 | __maybe_unused, double avg) | ||
640 | { | 598 | { |
641 | double total, ratio = 0.0; | 599 | double total, ratio = 0.0; |
642 | const char *color; | 600 | const char *color; |
@@ -653,7 +611,9 @@ static void print_stalled_cycles_frontend(int cpu, struct perf_evsel *evsel __us | |||
653 | fprintf(output, " frontend cycles idle "); | 611 | fprintf(output, " frontend cycles idle "); |
654 | } | 612 | } |
655 | 613 | ||
656 | static void print_stalled_cycles_backend(int cpu, struct perf_evsel *evsel __used, double avg) | 614 | static void print_stalled_cycles_backend(int cpu, |
615 | struct perf_evsel *evsel | ||
616 | __maybe_unused, double avg) | ||
657 | { | 617 | { |
658 | double total, ratio = 0.0; | 618 | double total, ratio = 0.0; |
659 | const char *color; | 619 | const char *color; |
@@ -670,7 +630,9 @@ static void print_stalled_cycles_backend(int cpu, struct perf_evsel *evsel __use | |||
670 | fprintf(output, " backend cycles idle "); | 630 | fprintf(output, " backend cycles idle "); |
671 | } | 631 | } |
672 | 632 | ||
673 | static void print_branch_misses(int cpu, struct perf_evsel *evsel __used, double avg) | 633 | static void print_branch_misses(int cpu, |
634 | struct perf_evsel *evsel __maybe_unused, | ||
635 | double avg) | ||
674 | { | 636 | { |
675 | double total, ratio = 0.0; | 637 | double total, ratio = 0.0; |
676 | const char *color; | 638 | const char *color; |
@@ -687,7 +649,9 @@ static void print_branch_misses(int cpu, struct perf_evsel *evsel __used, double | |||
687 | fprintf(output, " of all branches "); | 649 | fprintf(output, " of all branches "); |
688 | } | 650 | } |
689 | 651 | ||
690 | static void print_l1_dcache_misses(int cpu, struct perf_evsel *evsel __used, double avg) | 652 | static void print_l1_dcache_misses(int cpu, |
653 | struct perf_evsel *evsel __maybe_unused, | ||
654 | double avg) | ||
691 | { | 655 | { |
692 | double total, ratio = 0.0; | 656 | double total, ratio = 0.0; |
693 | const char *color; | 657 | const char *color; |
@@ -704,7 +668,9 @@ static void print_l1_dcache_misses(int cpu, struct perf_evsel *evsel __used, dou | |||
704 | fprintf(output, " of all L1-dcache hits "); | 668 | fprintf(output, " of all L1-dcache hits "); |
705 | } | 669 | } |
706 | 670 | ||
707 | static void print_l1_icache_misses(int cpu, struct perf_evsel *evsel __used, double avg) | 671 | static void print_l1_icache_misses(int cpu, |
672 | struct perf_evsel *evsel __maybe_unused, | ||
673 | double avg) | ||
708 | { | 674 | { |
709 | double total, ratio = 0.0; | 675 | double total, ratio = 0.0; |
710 | const char *color; | 676 | const char *color; |
@@ -721,7 +687,9 @@ static void print_l1_icache_misses(int cpu, struct perf_evsel *evsel __used, dou | |||
721 | fprintf(output, " of all L1-icache hits "); | 687 | fprintf(output, " of all L1-icache hits "); |
722 | } | 688 | } |
723 | 689 | ||
724 | static void print_dtlb_cache_misses(int cpu, struct perf_evsel *evsel __used, double avg) | 690 | static void print_dtlb_cache_misses(int cpu, |
691 | struct perf_evsel *evsel __maybe_unused, | ||
692 | double avg) | ||
725 | { | 693 | { |
726 | double total, ratio = 0.0; | 694 | double total, ratio = 0.0; |
727 | const char *color; | 695 | const char *color; |
@@ -738,7 +706,9 @@ static void print_dtlb_cache_misses(int cpu, struct perf_evsel *evsel __used, do | |||
738 | fprintf(output, " of all dTLB cache hits "); | 706 | fprintf(output, " of all dTLB cache hits "); |
739 | } | 707 | } |
740 | 708 | ||
741 | static void print_itlb_cache_misses(int cpu, struct perf_evsel *evsel __used, double avg) | 709 | static void print_itlb_cache_misses(int cpu, |
710 | struct perf_evsel *evsel __maybe_unused, | ||
711 | double avg) | ||
742 | { | 712 | { |
743 | double total, ratio = 0.0; | 713 | double total, ratio = 0.0; |
744 | const char *color; | 714 | const char *color; |
@@ -755,7 +725,9 @@ static void print_itlb_cache_misses(int cpu, struct perf_evsel *evsel __used, do | |||
755 | fprintf(output, " of all iTLB cache hits "); | 725 | fprintf(output, " of all iTLB cache hits "); |
756 | } | 726 | } |
757 | 727 | ||
758 | static void print_ll_cache_misses(int cpu, struct perf_evsel *evsel __used, double avg) | 728 | static void print_ll_cache_misses(int cpu, |
729 | struct perf_evsel *evsel __maybe_unused, | ||
730 | double avg) | ||
759 | { | 731 | { |
760 | double total, ratio = 0.0; | 732 | double total, ratio = 0.0; |
761 | const char *color; | 733 | const char *color; |
@@ -788,7 +760,7 @@ static void abs_printout(int cpu, struct perf_evsel *evsel, double avg) | |||
788 | if (no_aggr) | 760 | if (no_aggr) |
789 | sprintf(cpustr, "CPU%*d%s", | 761 | sprintf(cpustr, "CPU%*d%s", |
790 | csv_output ? 0 : -4, | 762 | csv_output ? 0 : -4, |
791 | evsel_list->cpus->map[cpu], csv_sep); | 763 | perf_evsel__cpus(evsel)->map[cpu], csv_sep); |
792 | else | 764 | else |
793 | cpu = 0; | 765 | cpu = 0; |
794 | 766 | ||
@@ -949,14 +921,14 @@ static void print_counter(struct perf_evsel *counter) | |||
949 | u64 ena, run, val; | 921 | u64 ena, run, val; |
950 | int cpu; | 922 | int cpu; |
951 | 923 | ||
952 | for (cpu = 0; cpu < evsel_list->cpus->nr; cpu++) { | 924 | for (cpu = 0; cpu < perf_evsel__nr_cpus(counter); cpu++) { |
953 | val = counter->counts->cpu[cpu].val; | 925 | val = counter->counts->cpu[cpu].val; |
954 | ena = counter->counts->cpu[cpu].ena; | 926 | ena = counter->counts->cpu[cpu].ena; |
955 | run = counter->counts->cpu[cpu].run; | 927 | run = counter->counts->cpu[cpu].run; |
956 | if (run == 0 || ena == 0) { | 928 | if (run == 0 || ena == 0) { |
957 | fprintf(output, "CPU%*d%s%*s%s%*s", | 929 | fprintf(output, "CPU%*d%s%*s%s%*s", |
958 | csv_output ? 0 : -4, | 930 | csv_output ? 0 : -4, |
959 | evsel_list->cpus->map[cpu], csv_sep, | 931 | perf_evsel__cpus(counter)->map[cpu], csv_sep, |
960 | csv_output ? 0 : 18, | 932 | csv_output ? 0 : 18, |
961 | counter->supported ? CNTR_NOT_COUNTED : CNTR_NOT_SUPPORTED, | 933 | counter->supported ? CNTR_NOT_COUNTED : CNTR_NOT_SUPPORTED, |
962 | csv_sep, | 934 | csv_sep, |
@@ -1061,8 +1033,8 @@ static const char * const stat_usage[] = { | |||
1061 | NULL | 1033 | NULL |
1062 | }; | 1034 | }; |
1063 | 1035 | ||
1064 | static int stat__set_big_num(const struct option *opt __used, | 1036 | static int stat__set_big_num(const struct option *opt __maybe_unused, |
1065 | const char *s __used, int unset) | 1037 | const char *s __maybe_unused, int unset) |
1066 | { | 1038 | { |
1067 | big_num_opt = unset ? 0 : 1; | 1039 | big_num_opt = unset ? 0 : 1; |
1068 | return 0; | 1040 | return 0; |
@@ -1156,7 +1128,7 @@ static int add_default_attributes(void) | |||
1156 | return perf_evlist__add_default_attrs(evsel_list, very_very_detailed_attrs); | 1128 | return perf_evlist__add_default_attrs(evsel_list, very_very_detailed_attrs); |
1157 | } | 1129 | } |
1158 | 1130 | ||
1159 | int cmd_stat(int argc, const char **argv, const char *prefix __used) | 1131 | int cmd_stat(int argc, const char **argv, const char *prefix __maybe_unused) |
1160 | { | 1132 | { |
1161 | struct perf_evsel *pos; | 1133 | struct perf_evsel *pos; |
1162 | int status = -ENOMEM; | 1134 | int status = -ENOMEM; |
@@ -1192,7 +1164,7 @@ int cmd_stat(int argc, const char **argv, const char *prefix __used) | |||
1192 | output = fopen(output_name, mode); | 1164 | output = fopen(output_name, mode); |
1193 | if (!output) { | 1165 | if (!output) { |
1194 | perror("failed to create output file"); | 1166 | perror("failed to create output file"); |
1195 | exit(-1); | 1167 | return -1; |
1196 | } | 1168 | } |
1197 | clock_gettime(CLOCK_REALTIME, &tm); | 1169 | clock_gettime(CLOCK_REALTIME, &tm); |
1198 | fprintf(output, "# started on %s\n", ctime(&tm.tv_sec)); | 1170 | fprintf(output, "# started on %s\n", ctime(&tm.tv_sec)); |
@@ -1255,7 +1227,7 @@ int cmd_stat(int argc, const char **argv, const char *prefix __used) | |||
1255 | 1227 | ||
1256 | list_for_each_entry(pos, &evsel_list->entries, node) { | 1228 | list_for_each_entry(pos, &evsel_list->entries, node) { |
1257 | if (perf_evsel__alloc_stat_priv(pos) < 0 || | 1229 | if (perf_evsel__alloc_stat_priv(pos) < 0 || |
1258 | perf_evsel__alloc_counts(pos, evsel_list->cpus->nr) < 0) | 1230 | perf_evsel__alloc_counts(pos, perf_evsel__nr_cpus(pos)) < 0) |
1259 | goto out_free_fd; | 1231 | goto out_free_fd; |
1260 | } | 1232 | } |
1261 | 1233 | ||
diff --git a/tools/perf/builtin-test.c b/tools/perf/builtin-test.c index 1d592f5cbea9..484f26cc0c00 100644 --- a/tools/perf/builtin-test.c +++ b/tools/perf/builtin-test.c | |||
@@ -14,11 +14,13 @@ | |||
14 | #include "util/symbol.h" | 14 | #include "util/symbol.h" |
15 | #include "util/thread_map.h" | 15 | #include "util/thread_map.h" |
16 | #include "util/pmu.h" | 16 | #include "util/pmu.h" |
17 | #include "event-parse.h" | ||
17 | #include "../../include/linux/hw_breakpoint.h" | 18 | #include "../../include/linux/hw_breakpoint.h" |
18 | 19 | ||
19 | #include <sys/mman.h> | 20 | #include <sys/mman.h> |
20 | 21 | ||
21 | static int vmlinux_matches_kallsyms_filter(struct map *map __used, struct symbol *sym) | 22 | static int vmlinux_matches_kallsyms_filter(struct map *map __maybe_unused, |
23 | struct symbol *sym) | ||
22 | { | 24 | { |
23 | bool *visited = symbol__priv(sym); | 25 | bool *visited = symbol__priv(sym); |
24 | *visited = true; | 26 | *visited = true; |
@@ -294,7 +296,7 @@ static int test__open_syscall_event(void) | |||
294 | goto out_thread_map_delete; | 296 | goto out_thread_map_delete; |
295 | } | 297 | } |
296 | 298 | ||
297 | if (perf_evsel__open_per_thread(evsel, threads, false, NULL) < 0) { | 299 | if (perf_evsel__open_per_thread(evsel, threads) < 0) { |
298 | pr_debug("failed to open counter: %s, " | 300 | pr_debug("failed to open counter: %s, " |
299 | "tweak /proc/sys/kernel/perf_event_paranoid?\n", | 301 | "tweak /proc/sys/kernel/perf_event_paranoid?\n", |
300 | strerror(errno)); | 302 | strerror(errno)); |
@@ -369,7 +371,7 @@ static int test__open_syscall_event_on_all_cpus(void) | |||
369 | goto out_thread_map_delete; | 371 | goto out_thread_map_delete; |
370 | } | 372 | } |
371 | 373 | ||
372 | if (perf_evsel__open(evsel, cpus, threads, false, NULL) < 0) { | 374 | if (perf_evsel__open(evsel, cpus, threads) < 0) { |
373 | pr_debug("failed to open counter: %s, " | 375 | pr_debug("failed to open counter: %s, " |
374 | "tweak /proc/sys/kernel/perf_event_paranoid?\n", | 376 | "tweak /proc/sys/kernel/perf_event_paranoid?\n", |
375 | strerror(errno)); | 377 | strerror(errno)); |
@@ -533,7 +535,7 @@ static int test__basic_mmap(void) | |||
533 | 535 | ||
534 | perf_evlist__add(evlist, evsels[i]); | 536 | perf_evlist__add(evlist, evsels[i]); |
535 | 537 | ||
536 | if (perf_evsel__open(evsels[i], cpus, threads, false, NULL) < 0) { | 538 | if (perf_evsel__open(evsels[i], cpus, threads) < 0) { |
537 | pr_debug("failed to open counter: %s, " | 539 | pr_debug("failed to open counter: %s, " |
538 | "tweak /proc/sys/kernel/perf_event_paranoid?\n", | 540 | "tweak /proc/sys/kernel/perf_event_paranoid?\n", |
539 | strerror(errno)); | 541 | strerror(errno)); |
@@ -562,7 +564,7 @@ static int test__basic_mmap(void) | |||
562 | goto out_munmap; | 564 | goto out_munmap; |
563 | } | 565 | } |
564 | 566 | ||
565 | err = perf_evlist__parse_sample(evlist, event, &sample, false); | 567 | err = perf_evlist__parse_sample(evlist, event, &sample); |
566 | if (err) { | 568 | if (err) { |
567 | pr_err("Can't parse sample, err = %d\n", err); | 569 | pr_err("Can't parse sample, err = %d\n", err); |
568 | goto out_munmap; | 570 | goto out_munmap; |
@@ -710,7 +712,7 @@ static int test__PERF_RECORD(void) | |||
710 | /* | 712 | /* |
711 | * Config the evsels, setting attr->comm on the first one, etc. | 713 | * Config the evsels, setting attr->comm on the first one, etc. |
712 | */ | 714 | */ |
713 | evsel = list_entry(evlist->entries.next, struct perf_evsel, node); | 715 | evsel = perf_evlist__first(evlist); |
714 | evsel->attr.sample_type |= PERF_SAMPLE_CPU; | 716 | evsel->attr.sample_type |= PERF_SAMPLE_CPU; |
715 | evsel->attr.sample_type |= PERF_SAMPLE_TID; | 717 | evsel->attr.sample_type |= PERF_SAMPLE_TID; |
716 | evsel->attr.sample_type |= PERF_SAMPLE_TIME; | 718 | evsel->attr.sample_type |= PERF_SAMPLE_TIME; |
@@ -737,7 +739,7 @@ static int test__PERF_RECORD(void) | |||
737 | * Call sys_perf_event_open on all the fds on all the evsels, | 739 | * Call sys_perf_event_open on all the fds on all the evsels, |
738 | * grouping them if asked to. | 740 | * grouping them if asked to. |
739 | */ | 741 | */ |
740 | err = perf_evlist__open(evlist, opts.group); | 742 | err = perf_evlist__open(evlist); |
741 | if (err < 0) { | 743 | if (err < 0) { |
742 | pr_debug("perf_evlist__open: %s\n", strerror(errno)); | 744 | pr_debug("perf_evlist__open: %s\n", strerror(errno)); |
743 | goto out_delete_evlist; | 745 | goto out_delete_evlist; |
@@ -779,7 +781,7 @@ static int test__PERF_RECORD(void) | |||
779 | if (type < PERF_RECORD_MAX) | 781 | if (type < PERF_RECORD_MAX) |
780 | nr_events[type]++; | 782 | nr_events[type]++; |
781 | 783 | ||
782 | err = perf_evlist__parse_sample(evlist, event, &sample, false); | 784 | err = perf_evlist__parse_sample(evlist, event, &sample); |
783 | if (err < 0) { | 785 | if (err < 0) { |
784 | if (verbose) | 786 | if (verbose) |
785 | perf_event__fprintf(event, stderr); | 787 | perf_event__fprintf(event, stderr); |
@@ -996,7 +998,9 @@ static u64 mmap_read_self(void *addr) | |||
996 | /* | 998 | /* |
997 | * If the RDPMC instruction faults then signal this back to the test parent task: | 999 | * If the RDPMC instruction faults then signal this back to the test parent task: |
998 | */ | 1000 | */ |
999 | static void segfault_handler(int sig __used, siginfo_t *info __used, void *uc __used) | 1001 | static void segfault_handler(int sig __maybe_unused, |
1002 | siginfo_t *info __maybe_unused, | ||
1003 | void *uc __maybe_unused) | ||
1000 | { | 1004 | { |
1001 | exit(-1); | 1005 | exit(-1); |
1002 | } | 1006 | } |
@@ -1023,14 +1027,16 @@ static int __test__rdpmc(void) | |||
1023 | 1027 | ||
1024 | fd = sys_perf_event_open(&attr, 0, -1, -1, 0); | 1028 | fd = sys_perf_event_open(&attr, 0, -1, -1, 0); |
1025 | if (fd < 0) { | 1029 | if (fd < 0) { |
1026 | die("Error: sys_perf_event_open() syscall returned " | 1030 | pr_err("Error: sys_perf_event_open() syscall returned " |
1027 | "with %d (%s)\n", fd, strerror(errno)); | 1031 | "with %d (%s)\n", fd, strerror(errno)); |
1032 | return -1; | ||
1028 | } | 1033 | } |
1029 | 1034 | ||
1030 | addr = mmap(NULL, page_size, PROT_READ, MAP_SHARED, fd, 0); | 1035 | addr = mmap(NULL, page_size, PROT_READ, MAP_SHARED, fd, 0); |
1031 | if (addr == (void *)(-1)) { | 1036 | if (addr == (void *)(-1)) { |
1032 | die("Error: mmap() syscall returned " | 1037 | pr_err("Error: mmap() syscall returned with (%s)\n", |
1033 | "with (%s)\n", strerror(errno)); | 1038 | strerror(errno)); |
1039 | goto out_close; | ||
1034 | } | 1040 | } |
1035 | 1041 | ||
1036 | for (n = 0; n < 6; n++) { | 1042 | for (n = 0; n < 6; n++) { |
@@ -1051,9 +1057,9 @@ static int __test__rdpmc(void) | |||
1051 | } | 1057 | } |
1052 | 1058 | ||
1053 | munmap(addr, page_size); | 1059 | munmap(addr, page_size); |
1054 | close(fd); | ||
1055 | |||
1056 | pr_debug(" "); | 1060 | pr_debug(" "); |
1061 | out_close: | ||
1062 | close(fd); | ||
1057 | 1063 | ||
1058 | if (!delta_sum) | 1064 | if (!delta_sum) |
1059 | return -1; | 1065 | return -1; |
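Note: __test__rdpmc() gets the same die()-removal treatment, with one detail worth calling out: because the function now returns on error instead of exiting, the mmap() failure path has to jump to an out_close label so the event fd is still closed. A compact sketch of that cleanup pattern (map_and_use() is an illustrative name, not perf code):

#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

/* Open + map a resource; on partial failure, unwind via a label instead of exiting. */
static int map_and_use(const char *path, size_t len)
{
	int ret = -1;
	void *addr;
	int fd = open(path, O_RDONLY);

	if (fd < 0) {
		fprintf(stderr, "open: %s\n", strerror(errno));
		return -1;
	}

	addr = mmap(NULL, len, PROT_READ, MAP_PRIVATE, fd, 0);
	if (addr == MAP_FAILED) {
		fprintf(stderr, "mmap: %s\n", strerror(errno));
		goto out_close;	/* fd still gets closed */
	}

	ret = 0;
	munmap(addr, len);
out_close:
	close(fd);
	return ret;
}

int main(void)
{
	return map_and_use("/etc/passwd", 4096) ? 1 : 0;
}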
@@ -1092,6 +1098,309 @@ static int test__perf_pmu(void) | |||
1092 | return perf_pmu__test(); | 1098 | return perf_pmu__test(); |
1093 | } | 1099 | } |
1094 | 1100 | ||
1101 | static int perf_evsel__roundtrip_cache_name_test(void) | ||
1102 | { | ||
1103 | char name[128]; | ||
1104 | int type, op, err = 0, ret = 0, i, idx; | ||
1105 | struct perf_evsel *evsel; | ||
1106 | struct perf_evlist *evlist = perf_evlist__new(NULL, NULL); | ||
1107 | |||
1108 | if (evlist == NULL) | ||
1109 | return -ENOMEM; | ||
1110 | |||
1111 | for (type = 0; type < PERF_COUNT_HW_CACHE_MAX; type++) { | ||
1112 | for (op = 0; op < PERF_COUNT_HW_CACHE_OP_MAX; op++) { | ||
1113 | /* skip invalid cache type */ | ||
1114 | if (!perf_evsel__is_cache_op_valid(type, op)) | ||
1115 | continue; | ||
1116 | |||
1117 | for (i = 0; i < PERF_COUNT_HW_CACHE_RESULT_MAX; i++) { | ||
1118 | __perf_evsel__hw_cache_type_op_res_name(type, op, i, | ||
1119 | name, sizeof(name)); | ||
1120 | err = parse_events(evlist, name, 0); | ||
1121 | if (err) | ||
1122 | ret = err; | ||
1123 | } | ||
1124 | } | ||
1125 | } | ||
1126 | |||
1127 | idx = 0; | ||
1128 | evsel = perf_evlist__first(evlist); | ||
1129 | |||
1130 | for (type = 0; type < PERF_COUNT_HW_CACHE_MAX; type++) { | ||
1131 | for (op = 0; op < PERF_COUNT_HW_CACHE_OP_MAX; op++) { | ||
1132 | /* skip invalid cache type */ | ||
1133 | if (!perf_evsel__is_cache_op_valid(type, op)) | ||
1134 | continue; | ||
1135 | |||
1136 | for (i = 0; i < PERF_COUNT_HW_CACHE_RESULT_MAX; i++) { | ||
1137 | __perf_evsel__hw_cache_type_op_res_name(type, op, i, | ||
1138 | name, sizeof(name)); | ||
1139 | if (evsel->idx != idx) | ||
1140 | continue; | ||
1141 | |||
1142 | ++idx; | ||
1143 | |||
1144 | if (strcmp(perf_evsel__name(evsel), name)) { | ||
1145 | pr_debug("%s != %s\n", perf_evsel__name(evsel), name); | ||
1146 | ret = -1; | ||
1147 | } | ||
1148 | |||
1149 | evsel = perf_evsel__next(evsel); | ||
1150 | } | ||
1151 | } | ||
1152 | } | ||
1153 | |||
1154 | perf_evlist__delete(evlist); | ||
1155 | return ret; | ||
1156 | } | ||
1157 | |||
1158 | static int __perf_evsel__name_array_test(const char *names[], int nr_names) | ||
1159 | { | ||
1160 | int i, err; | ||
1161 | struct perf_evsel *evsel; | ||
1162 | struct perf_evlist *evlist = perf_evlist__new(NULL, NULL); | ||
1163 | |||
1164 | if (evlist == NULL) | ||
1165 | return -ENOMEM; | ||
1166 | |||
1167 | for (i = 0; i < nr_names; ++i) { | ||
1168 | err = parse_events(evlist, names[i], 0); | ||
1169 | if (err) { | ||
1170 | pr_debug("failed to parse event '%s', err %d\n", | ||
1171 | names[i], err); | ||
1172 | goto out_delete_evlist; | ||
1173 | } | ||
1174 | } | ||
1175 | |||
1176 | err = 0; | ||
1177 | list_for_each_entry(evsel, &evlist->entries, node) { | ||
1178 | if (strcmp(perf_evsel__name(evsel), names[evsel->idx])) { | ||
1179 | --err; | ||
1180 | pr_debug("%s != %s\n", perf_evsel__name(evsel), names[evsel->idx]); | ||
1181 | } | ||
1182 | } | ||
1183 | |||
1184 | out_delete_evlist: | ||
1185 | perf_evlist__delete(evlist); | ||
1186 | return err; | ||
1187 | } | ||
1188 | |||
1189 | #define perf_evsel__name_array_test(names) \ | ||
1190 | __perf_evsel__name_array_test(names, ARRAY_SIZE(names)) | ||
1191 | |||
1192 | static int perf_evsel__roundtrip_name_test(void) | ||
1193 | { | ||
1194 | int err = 0, ret = 0; | ||
1195 | |||
1196 | err = perf_evsel__name_array_test(perf_evsel__hw_names); | ||
1197 | if (err) | ||
1198 | ret = err; | ||
1199 | |||
1200 | err = perf_evsel__name_array_test(perf_evsel__sw_names); | ||
1201 | if (err) | ||
1202 | ret = err; | ||
1203 | |||
1204 | err = perf_evsel__roundtrip_cache_name_test(); | ||
1205 | if (err) | ||
1206 | ret = err; | ||
1207 | |||
1208 | return ret; | ||
1209 | } | ||
1210 | |||
1211 | static int perf_evsel__test_field(struct perf_evsel *evsel, const char *name, | ||
1212 | int size, bool should_be_signed) | ||
1213 | { | ||
1214 | struct format_field *field = perf_evsel__field(evsel, name); | ||
1215 | int is_signed; | ||
1216 | int ret = 0; | ||
1217 | |||
1218 | if (field == NULL) { | ||
1219 | pr_debug("%s: \"%s\" field not found!\n", evsel->name, name); | ||
1220 | return -1; | ||
1221 | } | ||
1222 | |||
1223 | is_signed = !!(field->flags & FIELD_IS_SIGNED); | ||
1224 | if (should_be_signed && !is_signed) { | ||
1225 | pr_debug("%s: \"%s\" signedness(%d) is wrong, should be %d\n", | ||
1226 | evsel->name, name, is_signed, should_be_signed); | ||
1227 | ret = -1; | ||
1228 | } | ||
1229 | |||
1230 | if (field->size != size) { | ||
1231 | pr_debug("%s: \"%s\" size (%d) should be %d!\n", | ||
1232 | evsel->name, name, field->size, size); | ||
1233 | ret = -1; | ||
1234 | } | ||
1235 | |||
1236 | return ret; | ||
1237 | } | ||
1238 | |||
1239 | static int perf_evsel__tp_sched_test(void) | ||
1240 | { | ||
1241 | struct perf_evsel *evsel = perf_evsel__newtp("sched", "sched_switch", 0); | ||
1242 | int ret = 0; | ||
1243 | |||
1244 | if (evsel == NULL) { | ||
1245 | pr_debug("perf_evsel__new\n"); | ||
1246 | return -1; | ||
1247 | } | ||
1248 | |||
1249 | if (perf_evsel__test_field(evsel, "prev_comm", 16, true)) | ||
1250 | ret = -1; | ||
1251 | |||
1252 | if (perf_evsel__test_field(evsel, "prev_pid", 4, true)) | ||
1253 | ret = -1; | ||
1254 | |||
1255 | if (perf_evsel__test_field(evsel, "prev_prio", 4, true)) | ||
1256 | ret = -1; | ||
1257 | |||
1258 | if (perf_evsel__test_field(evsel, "prev_state", 8, true)) | ||
1259 | ret = -1; | ||
1260 | |||
1261 | if (perf_evsel__test_field(evsel, "next_comm", 16, true)) | ||
1262 | ret = -1; | ||
1263 | |||
1264 | if (perf_evsel__test_field(evsel, "next_pid", 4, true)) | ||
1265 | ret = -1; | ||
1266 | |||
1267 | if (perf_evsel__test_field(evsel, "next_prio", 4, true)) | ||
1268 | ret = -1; | ||
1269 | |||
1270 | perf_evsel__delete(evsel); | ||
1271 | |||
1272 | evsel = perf_evsel__newtp("sched", "sched_wakeup", 0); | ||
1273 | |||
1274 | if (perf_evsel__test_field(evsel, "comm", 16, true)) | ||
1275 | ret = -1; | ||
1276 | |||
1277 | if (perf_evsel__test_field(evsel, "pid", 4, true)) | ||
1278 | ret = -1; | ||
1279 | |||
1280 | if (perf_evsel__test_field(evsel, "prio", 4, true)) | ||
1281 | ret = -1; | ||
1282 | |||
1283 | if (perf_evsel__test_field(evsel, "success", 4, true)) | ||
1284 | ret = -1; | ||
1285 | |||
1286 | if (perf_evsel__test_field(evsel, "target_cpu", 4, true)) | ||
1287 | ret = -1; | ||
1288 | |||
1289 | return ret; | ||
1290 | } | ||
1291 | |||
1292 | static int test__syscall_open_tp_fields(void) | ||
1293 | { | ||
1294 | struct perf_record_opts opts = { | ||
1295 | .target = { | ||
1296 | .uid = UINT_MAX, | ||
1297 | .uses_mmap = true, | ||
1298 | }, | ||
1299 | .no_delay = true, | ||
1300 | .freq = 1, | ||
1301 | .mmap_pages = 256, | ||
1302 | .raw_samples = true, | ||
1303 | }; | ||
1304 | const char *filename = "/etc/passwd"; | ||
1305 | int flags = O_RDONLY | O_DIRECTORY; | ||
1306 | struct perf_evlist *evlist = perf_evlist__new(NULL, NULL); | ||
1307 | struct perf_evsel *evsel; | ||
1308 | int err = -1, i, nr_events = 0, nr_polls = 0; | ||
1309 | |||
1310 | if (evlist == NULL) { | ||
1311 | pr_debug("%s: perf_evlist__new\n", __func__); | ||
1312 | goto out; | ||
1313 | } | ||
1314 | |||
1315 | evsel = perf_evsel__newtp("syscalls", "sys_enter_open", 0); | ||
1316 | if (evsel == NULL) { | ||
1317 | pr_debug("%s: perf_evsel__newtp\n", __func__); | ||
1318 | goto out_delete_evlist; | ||
1319 | } | ||
1320 | |||
1321 | perf_evlist__add(evlist, evsel); | ||
1322 | |||
1323 | err = perf_evlist__create_maps(evlist, &opts.target); | ||
1324 | if (err < 0) { | ||
1325 | pr_debug("%s: perf_evlist__create_maps\n", __func__); | ||
1326 | goto out_delete_evlist; | ||
1327 | } | ||
1328 | |||
1329 | perf_evsel__config(evsel, &opts, evsel); | ||
1330 | |||
1331 | evlist->threads->map[0] = getpid(); | ||
1332 | |||
1333 | err = perf_evlist__open(evlist); | ||
1334 | if (err < 0) { | ||
1335 | pr_debug("perf_evlist__open: %s\n", strerror(errno)); | ||
1336 | goto out_delete_evlist; | ||
1337 | } | ||
1338 | |||
1339 | err = perf_evlist__mmap(evlist, UINT_MAX, false); | ||
1340 | if (err < 0) { | ||
1341 | pr_debug("perf_evlist__mmap: %s\n", strerror(errno)); | ||
1342 | goto out_delete_evlist; | ||
1343 | } | ||
1344 | |||
1345 | perf_evlist__enable(evlist); | ||
1346 | |||
1347 | /* | ||
1348 | * Generate the event: | ||
1349 | */ | ||
1350 | open(filename, flags); | ||
1351 | |||
1352 | while (1) { | ||
1353 | int before = nr_events; | ||
1354 | |||
1355 | for (i = 0; i < evlist->nr_mmaps; i++) { | ||
1356 | union perf_event *event; | ||
1357 | |||
1358 | while ((event = perf_evlist__mmap_read(evlist, i)) != NULL) { | ||
1359 | const u32 type = event->header.type; | ||
1360 | int tp_flags; | ||
1361 | struct perf_sample sample; | ||
1362 | |||
1363 | ++nr_events; | ||
1364 | |||
1365 | if (type != PERF_RECORD_SAMPLE) | ||
1366 | continue; | ||
1367 | |||
1368 | err = perf_evsel__parse_sample(evsel, event, &sample); | ||
1369 | if (err) { | ||
1370 | pr_err("Can't parse sample, err = %d\n", err); | ||
1371 | goto out_munmap; | ||
1372 | } | ||
1373 | |||
1374 | tp_flags = perf_evsel__intval(evsel, &sample, "flags"); | ||
1375 | |||
1376 | if (flags != tp_flags) { | ||
1377 | pr_debug("%s: Expected flags=%#x, got %#x\n", | ||
1378 | __func__, flags, tp_flags); | ||
1379 | goto out_munmap; | ||
1380 | } | ||
1381 | |||
1382 | goto out_ok; | ||
1383 | } | ||
1384 | } | ||
1385 | |||
1386 | if (nr_events == before) | ||
1387 | poll(evlist->pollfd, evlist->nr_fds, 10); | ||
1388 | |||
1389 | if (++nr_polls > 5) { | ||
1390 | pr_debug("%s: no events!\n", __func__); | ||
1391 | goto out_munmap; | ||
1392 | } | ||
1393 | } | ||
1394 | out_ok: | ||
1395 | err = 0; | ||
1396 | out_munmap: | ||
1397 | perf_evlist__munmap(evlist); | ||
1398 | out_delete_evlist: | ||
1399 | perf_evlist__delete(evlist); | ||
1400 | out: | ||
1401 | return err; | ||
1402 | } | ||
1403 | |||
1095 | static struct test { | 1404 | static struct test { |
1096 | const char *desc; | 1405 | const char *desc; |
1097 | int (*func)(void); | 1406 | int (*func)(void); |
@@ -1135,6 +1444,18 @@ static struct test { | |||
1135 | .func = dso__test_data, | 1444 | .func = dso__test_data, |
1136 | }, | 1445 | }, |
1137 | { | 1446 | { |
1447 | .desc = "roundtrip evsel->name check", | ||
1448 | .func = perf_evsel__roundtrip_name_test, | ||
1449 | }, | ||
1450 | { | ||
1451 | .desc = "Check parsing of sched tracepoints fields", | ||
1452 | .func = perf_evsel__tp_sched_test, | ||
1453 | }, | ||
1454 | { | ||
1455 | .desc = "Generate and check syscalls:sys_enter_open event fields", | ||
1456 | .func = test__syscall_open_tp_fields, | ||
1457 | }, | ||
1458 | { | ||
1138 | .func = NULL, | 1459 | .func = NULL, |
1139 | }, | 1460 | }, |
1140 | }; | 1461 | }; |
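Note: the three new entries slot into the NULL-terminated tests[] table above; the runner (not shown in this hunk) presumably just walks the table until it hits the sentinel entry whose .func is NULL. A minimal sketch of that dispatch style, with illustrative test content:

#include <stdio.h>

struct test {
	const char *desc;
	int (*func)(void);
};

static int always_ok(void) { return 0; }

static struct test tests[] = {
	{ .desc = "example test", .func = always_ok },
	{ .func = NULL },	/* sentinel terminates the table */
};

int main(void)
{
	for (int i = 0; tests[i].func; i++) {
		int err = tests[i].func();

		printf("%2d: %-40s: %s\n", i + 1, tests[i].desc,
		       err ? "FAILED" : "Ok");
	}
	return 0;
}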
@@ -1199,7 +1520,7 @@ static int perf_test__list(int argc, const char **argv) | |||
1199 | return 0; | 1520 | return 0; |
1200 | } | 1521 | } |
1201 | 1522 | ||
1202 | int cmd_test(int argc, const char **argv, const char *prefix __used) | 1523 | int cmd_test(int argc, const char **argv, const char *prefix __maybe_unused) |
1203 | { | 1524 | { |
1204 | const char * const test_usage[] = { | 1525 | const char * const test_usage[] = { |
1205 | "perf test [<options>] [{list <test-name-fragment>|[<test-name-fragments>|<test-numbers>]}]", | 1526 | "perf test [<options>] [{list <test-name-fragment>|[<test-name-fragments>|<test-numbers>]}]", |
diff --git a/tools/perf/builtin-timechart.c b/tools/perf/builtin-timechart.c index 3b75b2e21ea5..b1a8a3b841cc 100644 --- a/tools/perf/builtin-timechart.c +++ b/tools/perf/builtin-timechart.c | |||
@@ -168,9 +168,8 @@ static struct per_pid *find_create_pid(int pid) | |||
168 | return cursor; | 168 | return cursor; |
169 | cursor = cursor->next; | 169 | cursor = cursor->next; |
170 | } | 170 | } |
171 | cursor = malloc(sizeof(struct per_pid)); | 171 | cursor = zalloc(sizeof(*cursor)); |
172 | assert(cursor != NULL); | 172 | assert(cursor != NULL); |
173 | memset(cursor, 0, sizeof(struct per_pid)); | ||
174 | cursor->pid = pid; | 173 | cursor->pid = pid; |
175 | cursor->next = all_data; | 174 | cursor->next = all_data; |
176 | all_data = cursor; | 175 | all_data = cursor; |
@@ -195,9 +194,8 @@ static void pid_set_comm(int pid, char *comm) | |||
195 | } | 194 | } |
196 | c = c->next; | 195 | c = c->next; |
197 | } | 196 | } |
198 | c = malloc(sizeof(struct per_pidcomm)); | 197 | c = zalloc(sizeof(*c)); |
199 | assert(c != NULL); | 198 | assert(c != NULL); |
200 | memset(c, 0, sizeof(struct per_pidcomm)); | ||
201 | c->comm = strdup(comm); | 199 | c->comm = strdup(comm); |
202 | p->current = c; | 200 | p->current = c; |
203 | c->next = p->all; | 201 | c->next = p->all; |
@@ -239,17 +237,15 @@ pid_put_sample(int pid, int type, unsigned int cpu, u64 start, u64 end) | |||
239 | p = find_create_pid(pid); | 237 | p = find_create_pid(pid); |
240 | c = p->current; | 238 | c = p->current; |
241 | if (!c) { | 239 | if (!c) { |
242 | c = malloc(sizeof(struct per_pidcomm)); | 240 | c = zalloc(sizeof(*c)); |
243 | assert(c != NULL); | 241 | assert(c != NULL); |
244 | memset(c, 0, sizeof(struct per_pidcomm)); | ||
245 | p->current = c; | 242 | p->current = c; |
246 | c->next = p->all; | 243 | c->next = p->all; |
247 | p->all = c; | 244 | p->all = c; |
248 | } | 245 | } |
249 | 246 | ||
250 | sample = malloc(sizeof(struct cpu_sample)); | 247 | sample = zalloc(sizeof(*sample)); |
251 | assert(sample != NULL); | 248 | assert(sample != NULL); |
252 | memset(sample, 0, sizeof(struct cpu_sample)); | ||
253 | sample->start_time = start; | 249 | sample->start_time = start; |
254 | sample->end_time = end; | 250 | sample->end_time = end; |
255 | sample->type = type; | 251 | sample->type = type; |
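Note: the timechart conversions replace each malloc()+memset() pair with zalloc(sizeof(*ptr)). zalloc() is a zero-initializing allocator (commonly a thin wrapper over calloc()), and sizing by the pointed-to object keeps the allocation correct if the pointer's type ever changes. A sketch under those assumptions; the struct name below is illustrative:

#include <stdio.h>
#include <stdlib.h>

/* Assumed shape of the helper: allocate and zero in one call. */
static inline void *zalloc(size_t size)
{
	return calloc(1, size);
}

struct per_pid_sketch {
	int pid;
	struct per_pid_sketch *next;
};

int main(void)
{
	/* sizeof(*cursor) follows the type of cursor, not a hard-coded struct name. */
	struct per_pid_sketch *cursor = zalloc(sizeof(*cursor));

	if (!cursor)
		return 1;

	printf("pid=%d next=%p\n", cursor->pid, (void *)cursor->next);
	free(cursor);
	return 0;
}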
@@ -275,28 +271,28 @@ static int cpus_cstate_state[MAX_CPUS]; | |||
275 | static u64 cpus_pstate_start_times[MAX_CPUS]; | 271 | static u64 cpus_pstate_start_times[MAX_CPUS]; |
276 | static u64 cpus_pstate_state[MAX_CPUS]; | 272 | static u64 cpus_pstate_state[MAX_CPUS]; |
277 | 273 | ||
278 | static int process_comm_event(struct perf_tool *tool __used, | 274 | static int process_comm_event(struct perf_tool *tool __maybe_unused, |
279 | union perf_event *event, | 275 | union perf_event *event, |
280 | struct perf_sample *sample __used, | 276 | struct perf_sample *sample __maybe_unused, |
281 | struct machine *machine __used) | 277 | struct machine *machine __maybe_unused) |
282 | { | 278 | { |
283 | pid_set_comm(event->comm.tid, event->comm.comm); | 279 | pid_set_comm(event->comm.tid, event->comm.comm); |
284 | return 0; | 280 | return 0; |
285 | } | 281 | } |
286 | 282 | ||
287 | static int process_fork_event(struct perf_tool *tool __used, | 283 | static int process_fork_event(struct perf_tool *tool __maybe_unused, |
288 | union perf_event *event, | 284 | union perf_event *event, |
289 | struct perf_sample *sample __used, | 285 | struct perf_sample *sample __maybe_unused, |
290 | struct machine *machine __used) | 286 | struct machine *machine __maybe_unused) |
291 | { | 287 | { |
292 | pid_fork(event->fork.pid, event->fork.ppid, event->fork.time); | 288 | pid_fork(event->fork.pid, event->fork.ppid, event->fork.time); |
293 | return 0; | 289 | return 0; |
294 | } | 290 | } |
295 | 291 | ||
296 | static int process_exit_event(struct perf_tool *tool __used, | 292 | static int process_exit_event(struct perf_tool *tool __maybe_unused, |
297 | union perf_event *event, | 293 | union perf_event *event, |
298 | struct perf_sample *sample __used, | 294 | struct perf_sample *sample __maybe_unused, |
299 | struct machine *machine __used) | 295 | struct machine *machine __maybe_unused) |
300 | { | 296 | { |
301 | pid_exit(event->fork.pid, event->fork.time); | 297 | pid_exit(event->fork.pid, event->fork.time); |
302 | return 0; | 298 | return 0; |
@@ -373,11 +369,10 @@ static void c_state_start(int cpu, u64 timestamp, int state) | |||
373 | 369 | ||
374 | static void c_state_end(int cpu, u64 timestamp) | 370 | static void c_state_end(int cpu, u64 timestamp) |
375 | { | 371 | { |
376 | struct power_event *pwr; | 372 | struct power_event *pwr = zalloc(sizeof(*pwr)); |
377 | pwr = malloc(sizeof(struct power_event)); | 373 | |
378 | if (!pwr) | 374 | if (!pwr) |
379 | return; | 375 | return; |
380 | memset(pwr, 0, sizeof(struct power_event)); | ||
381 | 376 | ||
382 | pwr->state = cpus_cstate_state[cpu]; | 377 | pwr->state = cpus_cstate_state[cpu]; |
383 | pwr->start_time = cpus_cstate_start_times[cpu]; | 378 | pwr->start_time = cpus_cstate_start_times[cpu]; |
@@ -392,14 +387,13 @@ static void c_state_end(int cpu, u64 timestamp) | |||
392 | static void p_state_change(int cpu, u64 timestamp, u64 new_freq) | 387 | static void p_state_change(int cpu, u64 timestamp, u64 new_freq) |
393 | { | 388 | { |
394 | struct power_event *pwr; | 389 | struct power_event *pwr; |
395 | pwr = malloc(sizeof(struct power_event)); | ||
396 | 390 | ||
397 | if (new_freq > 8000000) /* detect invalid data */ | 391 | if (new_freq > 8000000) /* detect invalid data */ |
398 | return; | 392 | return; |
399 | 393 | ||
394 | pwr = zalloc(sizeof(*pwr)); | ||
400 | if (!pwr) | 395 | if (!pwr) |
401 | return; | 396 | return; |
402 | memset(pwr, 0, sizeof(struct power_event)); | ||
403 | 397 | ||
404 | pwr->state = cpus_pstate_state[cpu]; | 398 | pwr->state = cpus_pstate_state[cpu]; |
405 | pwr->start_time = cpus_pstate_start_times[cpu]; | 399 | pwr->start_time = cpus_pstate_start_times[cpu]; |
@@ -429,15 +423,13 @@ static void p_state_change(int cpu, u64 timestamp, u64 new_freq) | |||
429 | static void | 423 | static void |
430 | sched_wakeup(int cpu, u64 timestamp, int pid, struct trace_entry *te) | 424 | sched_wakeup(int cpu, u64 timestamp, int pid, struct trace_entry *te) |
431 | { | 425 | { |
432 | struct wake_event *we; | ||
433 | struct per_pid *p; | 426 | struct per_pid *p; |
434 | struct wakeup_entry *wake = (void *)te; | 427 | struct wakeup_entry *wake = (void *)te; |
428 | struct wake_event *we = zalloc(sizeof(*we)); | ||
435 | 429 | ||
436 | we = malloc(sizeof(struct wake_event)); | ||
437 | if (!we) | 430 | if (!we) |
438 | return; | 431 | return; |
439 | 432 | ||
440 | memset(we, 0, sizeof(struct wake_event)); | ||
441 | we->time = timestamp; | 433 | we->time = timestamp; |
442 | we->waker = pid; | 434 | we->waker = pid; |
443 | 435 | ||
@@ -491,11 +483,11 @@ static void sched_switch(int cpu, u64 timestamp, struct trace_entry *te) | |||
491 | } | 483 | } |
492 | 484 | ||
493 | 485 | ||
494 | static int process_sample_event(struct perf_tool *tool __used, | 486 | static int process_sample_event(struct perf_tool *tool __maybe_unused, |
495 | union perf_event *event __used, | 487 | union perf_event *event __maybe_unused, |
496 | struct perf_sample *sample, | 488 | struct perf_sample *sample, |
497 | struct perf_evsel *evsel, | 489 | struct perf_evsel *evsel, |
498 | struct machine *machine __used) | 490 | struct machine *machine __maybe_unused) |
499 | { | 491 | { |
500 | struct trace_entry *te; | 492 | struct trace_entry *te; |
501 | 493 | ||
@@ -579,13 +571,12 @@ static void end_sample_processing(void) | |||
579 | struct power_event *pwr; | 571 | struct power_event *pwr; |
580 | 572 | ||
581 | for (cpu = 0; cpu <= numcpus; cpu++) { | 573 | for (cpu = 0; cpu <= numcpus; cpu++) { |
582 | pwr = malloc(sizeof(struct power_event)); | 574 | /* C state */ |
575 | #if 0 | ||
576 | pwr = zalloc(sizeof(*pwr)); | ||
583 | if (!pwr) | 577 | if (!pwr) |
584 | return; | 578 | return; |
585 | memset(pwr, 0, sizeof(struct power_event)); | ||
586 | 579 | ||
587 | /* C state */ | ||
588 | #if 0 | ||
589 | pwr->state = cpus_cstate_state[cpu]; | 580 | pwr->state = cpus_cstate_state[cpu]; |
590 | pwr->start_time = cpus_cstate_start_times[cpu]; | 581 | pwr->start_time = cpus_cstate_start_times[cpu]; |
591 | pwr->end_time = last_time; | 582 | pwr->end_time = last_time; |
@@ -597,10 +588,9 @@ static void end_sample_processing(void) | |||
597 | #endif | 588 | #endif |
598 | /* P state */ | 589 | /* P state */ |
599 | 590 | ||
600 | pwr = malloc(sizeof(struct power_event)); | 591 | pwr = zalloc(sizeof(*pwr)); |
601 | if (!pwr) | 592 | if (!pwr) |
602 | return; | 593 | return; |
603 | memset(pwr, 0, sizeof(struct power_event)); | ||
604 | 594 | ||
605 | pwr->state = cpus_pstate_state[cpu]; | 595 | pwr->state = cpus_pstate_state[cpu]; |
606 | pwr->start_time = cpus_pstate_start_times[cpu]; | 596 | pwr->start_time = cpus_pstate_start_times[cpu]; |
@@ -830,11 +820,9 @@ static void draw_process_bars(void) | |||
830 | 820 | ||
831 | static void add_process_filter(const char *string) | 821 | static void add_process_filter(const char *string) |
832 | { | 822 | { |
833 | struct process_filter *filt; | 823 | int pid = strtoull(string, NULL, 10); |
834 | int pid; | 824 | struct process_filter *filt = malloc(sizeof(*filt)); |
835 | 825 | ||
836 | pid = strtoull(string, NULL, 10); | ||
837 | filt = malloc(sizeof(struct process_filter)); | ||
838 | if (!filt) | 826 | if (!filt) |
839 | return; | 827 | return; |
840 | 828 | ||
@@ -1081,7 +1069,8 @@ static int __cmd_record(int argc, const char **argv) | |||
1081 | } | 1069 | } |
1082 | 1070 | ||
1083 | static int | 1071 | static int |
1084 | parse_process(const struct option *opt __used, const char *arg, int __used unset) | 1072 | parse_process(const struct option *opt __maybe_unused, const char *arg, |
1073 | int __maybe_unused unset) | ||
1085 | { | 1074 | { |
1086 | if (arg) | 1075 | if (arg) |
1087 | add_process_filter(arg); | 1076 | add_process_filter(arg); |
@@ -1106,7 +1095,8 @@ static const struct option options[] = { | |||
1106 | }; | 1095 | }; |
1107 | 1096 | ||
1108 | 1097 | ||
1109 | int cmd_timechart(int argc, const char **argv, const char *prefix __used) | 1098 | int cmd_timechart(int argc, const char **argv, |
1099 | const char *prefix __maybe_unused) | ||
1110 | { | 1100 | { |
1111 | argc = parse_options(argc, argv, options, timechart_usage, | 1101 | argc = parse_options(argc, argv, options, timechart_usage, |
1112 | PARSE_OPT_STOP_AT_NON_OPTION); | 1102 | PARSE_OPT_STOP_AT_NON_OPTION); |
diff --git a/tools/perf/builtin-top.c b/tools/perf/builtin-top.c index 68cd61ef6ac5..e434a16bb5ac 100644 --- a/tools/perf/builtin-top.c +++ b/tools/perf/builtin-top.c | |||
@@ -95,7 +95,8 @@ static void perf_top__update_print_entries(struct perf_top *top) | |||
95 | top->print_entries -= 9; | 95 | top->print_entries -= 9; |
96 | } | 96 | } |
97 | 97 | ||
98 | static void perf_top__sig_winch(int sig __used, siginfo_t *info __used, void *arg) | 98 | static void perf_top__sig_winch(int sig __maybe_unused, |
99 | siginfo_t *info __maybe_unused, void *arg) | ||
99 | { | 100 | { |
100 | struct perf_top *top = arg; | 101 | struct perf_top *top = arg; |
101 | 102 | ||
@@ -509,7 +510,7 @@ static void perf_top__handle_keypress(struct perf_top *top, int c) | |||
509 | prompt_integer(&counter, "Enter details event counter"); | 510 | prompt_integer(&counter, "Enter details event counter"); |
510 | 511 | ||
511 | if (counter >= top->evlist->nr_entries) { | 512 | if (counter >= top->evlist->nr_entries) { |
512 | top->sym_evsel = list_entry(top->evlist->entries.next, struct perf_evsel, node); | 513 | top->sym_evsel = perf_evlist__first(top->evlist); |
513 | fprintf(stderr, "Sorry, no such event, using %s.\n", perf_evsel__name(top->sym_evsel)); | 514 | fprintf(stderr, "Sorry, no such event, using %s.\n", perf_evsel__name(top->sym_evsel)); |
514 | sleep(1); | 515 | sleep(1); |
515 | break; | 516 | break; |
@@ -518,7 +519,7 @@ static void perf_top__handle_keypress(struct perf_top *top, int c) | |||
518 | if (top->sym_evsel->idx == counter) | 519 | if (top->sym_evsel->idx == counter) |
519 | break; | 520 | break; |
520 | } else | 521 | } else |
521 | top->sym_evsel = list_entry(top->evlist->entries.next, struct perf_evsel, node); | 522 | top->sym_evsel = perf_evlist__first(top->evlist); |
522 | break; | 523 | break; |
523 | case 'f': | 524 | case 'f': |
524 | prompt_integer(&top->count_filter, "Enter display event count filter"); | 525 | prompt_integer(&top->count_filter, "Enter display event count filter"); |
@@ -663,7 +664,7 @@ static const char *skip_symbols[] = { | |||
663 | NULL | 664 | NULL |
664 | }; | 665 | }; |
665 | 666 | ||
666 | static int symbol_filter(struct map *map __used, struct symbol *sym) | 667 | static int symbol_filter(struct map *map __maybe_unused, struct symbol *sym) |
667 | { | 668 | { |
668 | const char *name = sym->name; | 669 | const char *name = sym->name; |
669 | int i; | 670 | int i; |
@@ -783,8 +784,10 @@ static void perf_event__process_sample(struct perf_tool *tool, | |||
783 | 784 | ||
784 | if ((sort__has_parent || symbol_conf.use_callchain) && | 785 | if ((sort__has_parent || symbol_conf.use_callchain) && |
785 | sample->callchain) { | 786 | sample->callchain) { |
786 | err = machine__resolve_callchain(machine, al.thread, | 787 | err = machine__resolve_callchain(machine, evsel, |
787 | sample->callchain, &parent); | 788 | al.thread, sample, |
789 | &parent); | ||
790 | |||
788 | if (err) | 791 | if (err) |
789 | return; | 792 | return; |
790 | } | 793 | } |
@@ -820,7 +823,7 @@ static void perf_top__mmap_read_idx(struct perf_top *top, int idx) | |||
820 | int ret; | 823 | int ret; |
821 | 824 | ||
822 | while ((event = perf_evlist__mmap_read(top->evlist, idx)) != NULL) { | 825 | while ((event = perf_evlist__mmap_read(top->evlist, idx)) != NULL) { |
823 | ret = perf_evlist__parse_sample(top->evlist, event, &sample, false); | 826 | ret = perf_evlist__parse_sample(top->evlist, event, &sample); |
824 | if (ret) { | 827 | if (ret) { |
825 | pr_err("Can't parse sample, err = %d\n", ret); | 828 | pr_err("Can't parse sample, err = %d\n", ret); |
826 | continue; | 829 | continue; |
@@ -884,17 +887,14 @@ static void perf_top__mmap_read(struct perf_top *top) | |||
884 | 887 | ||
885 | static void perf_top__start_counters(struct perf_top *top) | 888 | static void perf_top__start_counters(struct perf_top *top) |
886 | { | 889 | { |
887 | struct perf_evsel *counter, *first; | 890 | struct perf_evsel *counter; |
888 | struct perf_evlist *evlist = top->evlist; | 891 | struct perf_evlist *evlist = top->evlist; |
889 | 892 | ||
890 | first = list_entry(evlist->entries.next, struct perf_evsel, node); | 893 | if (top->group) |
894 | perf_evlist__set_leader(evlist); | ||
891 | 895 | ||
892 | list_for_each_entry(counter, &evlist->entries, node) { | 896 | list_for_each_entry(counter, &evlist->entries, node) { |
893 | struct perf_event_attr *attr = &counter->attr; | 897 | struct perf_event_attr *attr = &counter->attr; |
894 | struct xyarray *group_fd = NULL; | ||
895 | |||
896 | if (top->group && counter != first) | ||
897 | group_fd = first->fd; | ||
898 | 898 | ||
899 | attr->sample_type = PERF_SAMPLE_IP | PERF_SAMPLE_TID; | 899 | attr->sample_type = PERF_SAMPLE_IP | PERF_SAMPLE_TID; |
900 | 900 | ||
@@ -925,8 +925,7 @@ retry_sample_id: | |||
925 | attr->sample_id_all = top->sample_id_all_missing ? 0 : 1; | 925 | attr->sample_id_all = top->sample_id_all_missing ? 0 : 1; |
926 | try_again: | 926 | try_again: |
927 | if (perf_evsel__open(counter, top->evlist->cpus, | 927 | if (perf_evsel__open(counter, top->evlist->cpus, |
928 | top->evlist->threads, top->group, | 928 | top->evlist->threads) < 0) { |
929 | group_fd) < 0) { | ||
930 | int err = errno; | 929 | int err = errno; |
931 | 930 | ||
932 | if (err == EPERM || err == EACCES) { | 931 | if (err == EPERM || err == EACCES) { |
@@ -1165,7 +1164,7 @@ static const char * const top_usage[] = { | |||
1165 | NULL | 1164 | NULL |
1166 | }; | 1165 | }; |
1167 | 1166 | ||
1168 | int cmd_top(int argc, const char **argv, const char *prefix __used) | 1167 | int cmd_top(int argc, const char **argv, const char *prefix __maybe_unused) |
1169 | { | 1168 | { |
1170 | struct perf_evsel *pos; | 1169 | struct perf_evsel *pos; |
1171 | int status; | 1170 | int status; |
@@ -1328,7 +1327,7 @@ int cmd_top(int argc, const char **argv, const char *prefix __used) | |||
1328 | pos->attr.sample_period = top.default_interval; | 1327 | pos->attr.sample_period = top.default_interval; |
1329 | } | 1328 | } |
1330 | 1329 | ||
1331 | top.sym_evsel = list_entry(top.evlist->entries.next, struct perf_evsel, node); | 1330 | top.sym_evsel = perf_evlist__first(top.evlist); |
1332 | 1331 | ||
1333 | symbol_conf.priv_size = sizeof(struct annotation); | 1332 | symbol_conf.priv_size = sizeof(struct annotation); |
1334 | 1333 | ||
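Note: builtin-top.c repeatedly replaces the open-coded list_entry(evlist->entries.next, struct perf_evsel, node) with perf_evlist__first(evlist). The helper itself is not in these hunks, but from the call sites it is presumably just that list_entry wrapped in an inline. A sketch of the idea using a generic container_of; all type and function names below are stand-ins:

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct list_head {
	struct list_head *next, *prev;
};

struct evsel_sketch {
	const char *name;
	struct list_head node;
};

struct evlist_sketch {
	struct list_head entries;
};

/* Presumed shape of the helper: first entry of the evlist, found via its list node. */
static inline struct evsel_sketch *evlist_first(struct evlist_sketch *evlist)
{
	return container_of(evlist->entries.next, struct evsel_sketch, node);
}

int main(void)
{
	struct evlist_sketch evlist;
	struct evsel_sketch ev = { .name = "cycles" };

	/* one-element circular list: head <-> ev.node */
	evlist.entries.next = &ev.node;
	evlist.entries.prev = &ev.node;
	ev.node.next = &evlist.entries;
	ev.node.prev = &evlist.entries;

	printf("first event: %s\n", evlist_first(&evlist)->name);
	return 0;
}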
diff --git a/tools/perf/builtin-trace.c b/tools/perf/builtin-trace.c new file mode 100644 index 000000000000..8f113dab8bf1 --- /dev/null +++ b/tools/perf/builtin-trace.c | |||
@@ -0,0 +1,310 @@ | |||
1 | #include "builtin.h" | ||
2 | #include "util/evlist.h" | ||
3 | #include "util/parse-options.h" | ||
4 | #include "util/thread_map.h" | ||
5 | #include "event-parse.h" | ||
6 | |||
7 | #include <libaudit.h> | ||
8 | #include <stdlib.h> | ||
9 | |||
10 | static struct syscall_fmt { | ||
11 | const char *name; | ||
12 | const char *alias; | ||
13 | bool errmsg; | ||
14 | bool timeout; | ||
15 | } syscall_fmts[] = { | ||
16 | { .name = "arch_prctl", .errmsg = true, .alias = "prctl", }, | ||
17 | { .name = "fstat", .errmsg = true, .alias = "newfstat", }, | ||
18 | { .name = "fstatat", .errmsg = true, .alias = "newfstatat", }, | ||
19 | { .name = "futex", .errmsg = true, }, | ||
20 | { .name = "poll", .errmsg = true, .timeout = true, }, | ||
21 | { .name = "ppoll", .errmsg = true, .timeout = true, }, | ||
22 | { .name = "read", .errmsg = true, }, | ||
23 | { .name = "recvfrom", .errmsg = true, }, | ||
24 | { .name = "select", .errmsg = true, .timeout = true, }, | ||
25 | { .name = "stat", .errmsg = true, .alias = "newstat", }, | ||
26 | }; | ||
27 | |||
28 | static int syscall_fmt__cmp(const void *name, const void *fmtp) | ||
29 | { | ||
30 | const struct syscall_fmt *fmt = fmtp; | ||
31 | return strcmp(name, fmt->name); | ||
32 | } | ||
33 | |||
34 | static struct syscall_fmt *syscall_fmt__find(const char *name) | ||
35 | { | ||
36 | const int nmemb = ARRAY_SIZE(syscall_fmts); | ||
37 | return bsearch(name, syscall_fmts, nmemb, sizeof(struct syscall_fmt), syscall_fmt__cmp); | ||
38 | } | ||
39 | |||
40 | struct syscall { | ||
41 | struct event_format *tp_format; | ||
42 | const char *name; | ||
43 | struct syscall_fmt *fmt; | ||
44 | }; | ||
45 | |||
46 | struct trace { | ||
47 | int audit_machine; | ||
48 | struct { | ||
49 | int max; | ||
50 | struct syscall *table; | ||
51 | } syscalls; | ||
52 | struct perf_record_opts opts; | ||
53 | }; | ||
54 | |||
55 | static int trace__read_syscall_info(struct trace *trace, int id) | ||
56 | { | ||
57 | char tp_name[128]; | ||
58 | struct syscall *sc; | ||
59 | |||
60 | if (id > trace->syscalls.max) { | ||
61 | struct syscall *nsyscalls = realloc(trace->syscalls.table, (id + 1) * sizeof(*sc)); | ||
62 | |||
63 | if (nsyscalls == NULL) | ||
64 | return -1; | ||
65 | |||
66 | if (trace->syscalls.max != -1) { | ||
67 | memset(nsyscalls + trace->syscalls.max + 1, 0, | ||
68 | (id - trace->syscalls.max) * sizeof(*sc)); | ||
69 | } else { | ||
70 | memset(nsyscalls, 0, (id + 1) * sizeof(*sc)); | ||
71 | } | ||
72 | |||
73 | trace->syscalls.table = nsyscalls; | ||
74 | trace->syscalls.max = id; | ||
75 | } | ||
76 | |||
77 | sc = trace->syscalls.table + id; | ||
78 | sc->name = audit_syscall_to_name(id, trace->audit_machine); | ||
79 | if (sc->name == NULL) | ||
80 | return -1; | ||
81 | |||
82 | sc->fmt = syscall_fmt__find(sc->name); | ||
83 | |||
84 | snprintf(tp_name, sizeof(tp_name), "sys_enter_%s", sc->name); | ||
85 | sc->tp_format = event_format__new("syscalls", tp_name); | ||
86 | |||
87 | if (sc->tp_format == NULL && sc->fmt && sc->fmt->alias) { | ||
88 | snprintf(tp_name, sizeof(tp_name), "sys_enter_%s", sc->fmt->alias); | ||
89 | sc->tp_format = event_format__new("syscalls", tp_name); | ||
90 | } | ||
91 | |||
92 | return sc->tp_format != NULL ? 0 : -1; | ||
93 | } | ||
94 | |||
95 | static size_t syscall__fprintf_args(struct syscall *sc, unsigned long *args, FILE *fp) | ||
96 | { | ||
97 | int i = 0; | ||
98 | size_t printed = 0; | ||
99 | |||
100 | if (sc->tp_format != NULL) { | ||
101 | struct format_field *field; | ||
102 | |||
103 | for (field = sc->tp_format->format.fields->next; field; field = field->next) { | ||
104 | printed += fprintf(fp, "%s%s: %ld", printed ? ", " : "", | ||
105 | field->name, args[i++]); | ||
106 | } | ||
107 | } else { | ||
108 | while (i < 6) { | ||
109 | printed += fprintf(fp, "%sarg%d: %ld", printed ? ", " : "", i, args[i]); | ||
110 | ++i; | ||
111 | } | ||
112 | } | ||
113 | |||
114 | return printed; | ||
115 | } | ||
116 | |||
117 | static int trace__run(struct trace *trace) | ||
118 | { | ||
119 | struct perf_evlist *evlist = perf_evlist__new(NULL, NULL); | ||
120 | struct perf_evsel *evsel, *evsel_enter, *evsel_exit; | ||
121 | int err = -1, i, nr_events = 0, before; | ||
122 | |||
123 | if (evlist == NULL) { | ||
124 | printf("Not enough memory to run!\n"); | ||
125 | goto out; | ||
126 | } | ||
127 | |||
128 | evsel_enter = perf_evsel__newtp("raw_syscalls", "sys_enter", 0); | ||
129 | if (evsel_enter == NULL) { | ||
130 | printf("Couldn't read the raw_syscalls:sys_enter tracepoint information!\n"); | ||
131 | goto out_delete_evlist; | ||
132 | } | ||
133 | |||
134 | perf_evlist__add(evlist, evsel_enter); | ||
135 | |||
136 | evsel_exit = perf_evsel__newtp("raw_syscalls", "sys_exit", 1); | ||
137 | if (evsel_exit == NULL) { | ||
138 | printf("Couldn't read the raw_syscalls:sys_exit tracepoint information!\n"); | ||
139 | goto out_delete_evlist; | ||
140 | } | ||
141 | |||
142 | perf_evlist__add(evlist, evsel_exit); | ||
143 | |||
144 | err = perf_evlist__create_maps(evlist, &trace->opts.target); | ||
145 | if (err < 0) { | ||
146 | printf("Problems parsing the target to trace, check your options!\n"); | ||
147 | goto out_delete_evlist; | ||
148 | } | ||
149 | |||
150 | perf_evlist__config_attrs(evlist, &trace->opts); | ||
151 | |||
152 | err = perf_evlist__open(evlist); | ||
153 | if (err < 0) { | ||
154 | printf("Couldn't create the events: %s\n", strerror(errno)); | ||
155 | goto out_delete_evlist; | ||
156 | } | ||
157 | |||
158 | err = perf_evlist__mmap(evlist, UINT_MAX, false); | ||
159 | if (err < 0) { | ||
160 | printf("Couldn't mmap the events: %s\n", strerror(errno)); | ||
161 | goto out_delete_evlist; | ||
162 | } | ||
163 | |||
164 | perf_evlist__enable(evlist); | ||
165 | again: | ||
166 | before = nr_events; | ||
167 | |||
168 | for (i = 0; i < evlist->nr_mmaps; i++) { | ||
169 | union perf_event *event; | ||
170 | |||
171 | while ((event = perf_evlist__mmap_read(evlist, i)) != NULL) { | ||
172 | const u32 type = event->header.type; | ||
173 | struct syscall *sc; | ||
174 | struct perf_sample sample; | ||
175 | int id; | ||
176 | |||
177 | ++nr_events; | ||
178 | |||
179 | switch (type) { | ||
180 | case PERF_RECORD_SAMPLE: | ||
181 | break; | ||
182 | case PERF_RECORD_LOST: | ||
183 | printf("LOST %" PRIu64 " events!\n", event->lost.lost); | ||
184 | continue; | ||
185 | default: | ||
186 | printf("Unexpected %s event, skipping...\n", | ||
187 | perf_event__name(type)); | ||
188 | continue; | ||
189 | } | ||
190 | |||
191 | err = perf_evlist__parse_sample(evlist, event, &sample); | ||
192 | if (err) { | ||
193 | printf("Can't parse sample, err = %d, skipping...\n", err); | ||
194 | continue; | ||
195 | } | ||
196 | |||
197 | evsel = perf_evlist__id2evsel(evlist, sample.id); | ||
198 | if (evsel == NULL) { | ||
199 | printf("Unknown tp ID %" PRIu64 ", skipping...\n", sample.id); | ||
200 | continue; | ||
201 | } | ||
202 | |||
203 | id = perf_evsel__intval(evsel, &sample, "id"); | ||
204 | if (id < 0) { | ||
205 | printf("Invalid syscall %d id, skipping...\n", id); | ||
206 | continue; | ||
207 | } | ||
208 | |||
209 | if ((id > trace->syscalls.max || trace->syscalls.table[id].name == NULL) && | ||
210 | trace__read_syscall_info(trace, id)) | ||
211 | continue; | ||
212 | |||
213 | if ((id > trace->syscalls.max || trace->syscalls.table[id].name == NULL)) | ||
214 | continue; | ||
215 | |||
216 | sc = &trace->syscalls.table[id]; | ||
217 | |||
218 | if (evlist->threads->map[0] == -1 || evlist->threads->nr > 1) | ||
219 | printf("%d ", sample.tid); | ||
220 | |||
221 | if (evsel == evsel_enter) { | ||
222 | void *args = perf_evsel__rawptr(evsel, &sample, "args"); | ||
223 | |||
224 | printf("%s(", sc->name); | ||
225 | syscall__fprintf_args(sc, args, stdout); | ||
226 | } else if (evsel == evsel_exit) { | ||
227 | int ret = perf_evsel__intval(evsel, &sample, "ret"); | ||
228 | |||
229 | if (ret < 0 && sc->fmt && sc->fmt->errmsg) { | ||
230 | char bf[256]; | ||
231 | const char *emsg = strerror_r(-ret, bf, sizeof(bf)), | ||
232 | *e = audit_errno_to_name(-ret); | ||
233 | |||
234 | printf(") = -1 %s %s", e, emsg); | ||
235 | } else if (ret == 0 && sc->fmt && sc->fmt->timeout) | ||
236 | printf(") = 0 Timeout"); | ||
237 | else | ||
238 | printf(") = %d", ret); | ||
239 | |||
240 | putchar('\n'); | ||
241 | } | ||
242 | } | ||
243 | } | ||
244 | |||
245 | if (nr_events == before) | ||
246 | poll(evlist->pollfd, evlist->nr_fds, -1); | ||
247 | |||
248 | goto again; | ||
249 | |||
250 | out_delete_evlist: | ||
251 | perf_evlist__delete(evlist); | ||
252 | out: | ||
253 | return err; | ||
254 | } | ||
255 | |||
256 | int cmd_trace(int argc, const char **argv, const char *prefix __maybe_unused) | ||
257 | { | ||
258 | const char * const trace_usage[] = { | ||
259 | "perf trace [<options>]", | ||
260 | NULL | ||
261 | }; | ||
262 | struct trace trace = { | ||
263 | .audit_machine = audit_detect_machine(), | ||
264 | .syscalls = { | ||
265 | .max = -1, | ||
266 | }, | ||
267 | .opts = { | ||
268 | .target = { | ||
269 | .uid = UINT_MAX, | ||
270 | .uses_mmap = true, | ||
271 | }, | ||
272 | .user_freq = UINT_MAX, | ||
273 | .user_interval = ULLONG_MAX, | ||
274 | .no_delay = true, | ||
275 | .mmap_pages = 1024, | ||
276 | }, | ||
277 | }; | ||
278 | const struct option trace_options[] = { | ||
279 | OPT_STRING('p', "pid", &trace.opts.target.pid, "pid", | ||
280 | "trace events on existing process id"), | ||
281 | OPT_STRING(0, "tid", &trace.opts.target.tid, "tid", | ||
282 | "trace events on existing thread id"), | ||
283 | OPT_BOOLEAN(0, "all-cpus", &trace.opts.target.system_wide, | ||
284 | "system-wide collection from all CPUs"), | ||
285 | OPT_STRING(0, "cpu", &trace.opts.target.cpu_list, "cpu", | ||
286 | "list of cpus to monitor"), | ||
287 | OPT_BOOLEAN(0, "no-inherit", &trace.opts.no_inherit, | ||
288 | "child tasks do not inherit counters"), | ||
289 | OPT_UINTEGER(0, "mmap-pages", &trace.opts.mmap_pages, | ||
290 | "number of mmap data pages"), | ||
291 | OPT_STRING(0, "uid", &trace.opts.target.uid_str, "user", | ||
292 | "user to profile"), | ||
293 | OPT_END() | ||
294 | }; | ||
295 | int err; | ||
296 | |||
297 | argc = parse_options(argc, argv, trace_options, trace_usage, 0); | ||
298 | if (argc) | ||
299 | usage_with_options(trace_usage, trace_options); | ||
300 | |||
301 | err = perf_target__parse_uid(&trace.opts.target); | ||
302 | if (err) { | ||
303 | char bf[BUFSIZ]; | ||
304 | perf_target__strerror(&trace.opts.target, err, bf, sizeof(bf)); | ||
305 | printf("%s", bf); | ||
306 | return err; | ||
307 | } | ||
308 | |||
309 | return trace__run(&trace); | ||
310 | } | ||
diff --git a/tools/perf/builtin.h b/tools/perf/builtin.h index b382bd551aac..08143bd854c7 100644 --- a/tools/perf/builtin.h +++ b/tools/perf/builtin.h | |||
@@ -34,6 +34,8 @@ extern int cmd_kmem(int argc, const char **argv, const char *prefix); | |||
34 | extern int cmd_lock(int argc, const char **argv, const char *prefix); | 34 | extern int cmd_lock(int argc, const char **argv, const char *prefix); |
35 | extern int cmd_kvm(int argc, const char **argv, const char *prefix); | 35 | extern int cmd_kvm(int argc, const char **argv, const char *prefix); |
36 | extern int cmd_test(int argc, const char **argv, const char *prefix); | 36 | extern int cmd_test(int argc, const char **argv, const char *prefix); |
37 | extern int cmd_trace(int argc, const char **argv, const char *prefix); | ||
37 | extern int cmd_inject(int argc, const char **argv, const char *prefix); | 38 | extern int cmd_inject(int argc, const char **argv, const char *prefix); |
38 | 39 | ||
40 | extern int find_scripts(char **scripts_array, char **scripts_path_array); | ||
39 | #endif | 41 | #endif |
diff --git a/tools/perf/command-list.txt b/tools/perf/command-list.txt index d695fe40fbff..3e86bbd8c2d5 100644 --- a/tools/perf/command-list.txt +++ b/tools/perf/command-list.txt | |||
@@ -17,8 +17,9 @@ perf-report mainporcelain common | |||
17 | perf-stat mainporcelain common | 17 | perf-stat mainporcelain common |
18 | perf-timechart mainporcelain common | 18 | perf-timechart mainporcelain common |
19 | perf-top mainporcelain common | 19 | perf-top mainporcelain common |
20 | perf-trace mainporcelain common | ||
20 | perf-script mainporcelain common | 21 | perf-script mainporcelain common |
21 | perf-probe mainporcelain common | 22 | perf-probe mainporcelain full |
22 | perf-kmem mainporcelain common | 23 | perf-kmem mainporcelain common |
23 | perf-lock mainporcelain common | 24 | perf-lock mainporcelain common |
24 | perf-kvm mainporcelain common | 25 | perf-kvm mainporcelain common |
diff --git a/tools/perf/config/feature-tests.mak b/tools/perf/config/feature-tests.mak index 6c18785a6417..4add41bb0c7e 100644 --- a/tools/perf/config/feature-tests.mak +++ b/tools/perf/config/feature-tests.mak | |||
@@ -154,3 +154,53 @@ int main(void) | |||
154 | return 0; | 154 | return 0; |
155 | } | 155 | } |
156 | endef | 156 | endef |
157 | |||
158 | ifndef NO_LIBUNWIND | ||
159 | define SOURCE_LIBUNWIND | ||
160 | #include <libunwind.h> | ||
161 | #include <stdlib.h> | ||
162 | |||
163 | extern int UNW_OBJ(dwarf_search_unwind_table) (unw_addr_space_t as, | ||
164 | unw_word_t ip, | ||
165 | unw_dyn_info_t *di, | ||
166 | unw_proc_info_t *pi, | ||
167 | int need_unwind_info, void *arg); | ||
168 | |||
169 | |||
170 | #define dwarf_search_unwind_table UNW_OBJ(dwarf_search_unwind_table) | ||
171 | |||
172 | int main(void) | ||
173 | { | ||
174 | unw_addr_space_t addr_space; | ||
175 | addr_space = unw_create_addr_space(NULL, 0); | ||
176 | unw_init_remote(NULL, addr_space, NULL); | ||
177 | dwarf_search_unwind_table(addr_space, 0, NULL, NULL, 0, NULL); | ||
178 | return 0; | ||
179 | } | ||
180 | endef | ||
181 | endif | ||
182 | |||
183 | ifndef NO_BACKTRACE | ||
184 | define SOURCE_BACKTRACE | ||
185 | #include <execinfo.h> | ||
186 | #include <stdio.h> | ||
187 | |||
188 | int main(void) | ||
189 | { | ||
190 | backtrace(NULL, 0); | ||
191 | backtrace_symbols(NULL, 0); | ||
192 | return 0; | ||
193 | } | ||
194 | endef | ||
195 | endif | ||
196 | |||
197 | ifndef NO_LIBAUDIT | ||
198 | define SOURCE_LIBAUDIT | ||
199 | #include <libaudit.h> | ||
200 | |||
201 | int main(void) | ||
202 | { | ||
203 | return audit_open(); | ||
204 | } | ||
205 | endef | ||
206 | endif | ||
\ No newline at end of file
diff --git a/tools/perf/perf-archive.sh b/tools/perf/perf-archive.sh index 95b6f8b6177a..e91930620269 100644 --- a/tools/perf/perf-archive.sh +++ b/tools/perf/perf-archive.sh | |||
@@ -24,7 +24,7 @@ NOBUILDID=0000000000000000000000000000000000000000 | |||
24 | perf buildid-list -i $PERF_DATA --with-hits | grep -v "^$NOBUILDID " > $BUILDIDS | 24 | perf buildid-list -i $PERF_DATA --with-hits | grep -v "^$NOBUILDID " > $BUILDIDS |
25 | if [ ! -s $BUILDIDS ] ; then | 25 | if [ ! -s $BUILDIDS ] ; then |
26 | echo "perf archive: no build-ids found" | 26 | echo "perf archive: no build-ids found" |
27 | rm -f $BUILDIDS | 27 | rm $BUILDIDS || true |
28 | exit 1 | 28 | exit 1 |
29 | fi | 29 | fi |
30 | 30 | ||
@@ -39,8 +39,8 @@ while read build_id ; do | |||
39 | echo ${filename#$PERF_BUILDID_LINKDIR} >> $MANIFEST | 39 | echo ${filename#$PERF_BUILDID_LINKDIR} >> $MANIFEST |
40 | done | 40 | done |
41 | 41 | ||
42 | tar cfj $PERF_DATA.tar.bz2 -C $PERF_BUILDID_DIR -T $MANIFEST | 42 | tar cjf $PERF_DATA.tar.bz2 -C $PERF_BUILDID_DIR -T $MANIFEST |
43 | rm -f $MANIFEST $BUILDIDS | 43 | rm $MANIFEST $BUILDIDS || true |
44 | echo -e "Now please run:\n" | 44 | echo -e "Now please run:\n" |
45 | echo -e "$ tar xvf $PERF_DATA.tar.bz2 -C ~/.debug\n" | 45 | echo -e "$ tar xvf $PERF_DATA.tar.bz2 -C ~/.debug\n" |
46 | echo "wherever you need to run 'perf report' on." | 46 | echo "wherever you need to run 'perf report' on." |
diff --git a/tools/perf/perf.c b/tools/perf/perf.c index 2b2e225a4d4c..fc2f770e3027 100644 --- a/tools/perf/perf.c +++ b/tools/perf/perf.c | |||
@@ -14,6 +14,7 @@ | |||
14 | #include "util/run-command.h" | 14 | #include "util/run-command.h" |
15 | #include "util/parse-events.h" | 15 | #include "util/parse-events.h" |
16 | #include "util/debugfs.h" | 16 | #include "util/debugfs.h" |
17 | #include <pthread.h> | ||
17 | 18 | ||
18 | const char perf_usage_string[] = | 19 | const char perf_usage_string[] = |
19 | "perf [--version] [--help] COMMAND [ARGS]"; | 20 | "perf [--version] [--help] COMMAND [ARGS]"; |
@@ -24,6 +25,42 @@ const char perf_more_info_string[] = | |||
24 | int use_browser = -1; | 25 | int use_browser = -1; |
25 | static int use_pager = -1; | 26 | static int use_pager = -1; |
26 | 27 | ||
28 | struct cmd_struct { | ||
29 | const char *cmd; | ||
30 | int (*fn)(int, const char **, const char *); | ||
31 | int option; | ||
32 | }; | ||
33 | |||
34 | static struct cmd_struct commands[] = { | ||
35 | { "buildid-cache", cmd_buildid_cache, 0 }, | ||
36 | { "buildid-list", cmd_buildid_list, 0 }, | ||
37 | { "diff", cmd_diff, 0 }, | ||
38 | { "evlist", cmd_evlist, 0 }, | ||
39 | { "help", cmd_help, 0 }, | ||
40 | { "list", cmd_list, 0 }, | ||
41 | { "record", cmd_record, 0 }, | ||
42 | { "report", cmd_report, 0 }, | ||
43 | { "bench", cmd_bench, 0 }, | ||
44 | { "stat", cmd_stat, 0 }, | ||
45 | { "timechart", cmd_timechart, 0 }, | ||
46 | { "top", cmd_top, 0 }, | ||
47 | { "annotate", cmd_annotate, 0 }, | ||
48 | { "version", cmd_version, 0 }, | ||
49 | { "script", cmd_script, 0 }, | ||
50 | { "sched", cmd_sched, 0 }, | ||
51 | #ifndef NO_LIBELF_SUPPORT | ||
52 | { "probe", cmd_probe, 0 }, | ||
53 | #endif | ||
54 | { "kmem", cmd_kmem, 0 }, | ||
55 | { "lock", cmd_lock, 0 }, | ||
56 | { "kvm", cmd_kvm, 0 }, | ||
57 | { "test", cmd_test, 0 }, | ||
58 | #ifndef NO_LIBAUDIT_SUPPORT | ||
59 | { "trace", cmd_trace, 0 }, | ||
60 | #endif | ||
61 | { "inject", cmd_inject, 0 }, | ||
62 | }; | ||
63 | |||
27 | struct pager_config { | 64 | struct pager_config { |
28 | const char *cmd; | 65 | const char *cmd; |
29 | int val; | 66 | int val; |
@@ -160,6 +197,14 @@ static int handle_options(const char ***argv, int *argc, int *envchanged) | |||
160 | fprintf(stderr, "dir: %s\n", debugfs_mountpoint); | 197 | fprintf(stderr, "dir: %s\n", debugfs_mountpoint); |
161 | if (envchanged) | 198 | if (envchanged) |
162 | *envchanged = 1; | 199 | *envchanged = 1; |
200 | } else if (!strcmp(cmd, "--list-cmds")) { | ||
201 | unsigned int i; | ||
202 | |||
203 | for (i = 0; i < ARRAY_SIZE(commands); i++) { | ||
204 | struct cmd_struct *p = commands+i; | ||
205 | printf("%s ", p->cmd); | ||
206 | } | ||
207 | exit(0); | ||
163 | } else { | 208 | } else { |
164 | fprintf(stderr, "Unknown option: %s\n", cmd); | 209 | fprintf(stderr, "Unknown option: %s\n", cmd); |
165 | usage(perf_usage_string); | 210 | usage(perf_usage_string); |
@@ -245,12 +290,6 @@ const char perf_version_string[] = PERF_VERSION; | |||
245 | */ | 290 | */ |
246 | #define NEED_WORK_TREE (1<<2) | 291 | #define NEED_WORK_TREE (1<<2) |
247 | 292 | ||
248 | struct cmd_struct { | ||
249 | const char *cmd; | ||
250 | int (*fn)(int, const char **, const char *); | ||
251 | int option; | ||
252 | }; | ||
253 | |||
254 | static int run_builtin(struct cmd_struct *p, int argc, const char **argv) | 293 | static int run_builtin(struct cmd_struct *p, int argc, const char **argv) |
255 | { | 294 | { |
256 | int status; | 295 | int status; |
@@ -296,30 +335,6 @@ static int run_builtin(struct cmd_struct *p, int argc, const char **argv) | |||
296 | static void handle_internal_command(int argc, const char **argv) | 335 | static void handle_internal_command(int argc, const char **argv) |
297 | { | 336 | { |
298 | const char *cmd = argv[0]; | 337 | const char *cmd = argv[0]; |
299 | static struct cmd_struct commands[] = { | ||
300 | { "buildid-cache", cmd_buildid_cache, 0 }, | ||
301 | { "buildid-list", cmd_buildid_list, 0 }, | ||
302 | { "diff", cmd_diff, 0 }, | ||
303 | { "evlist", cmd_evlist, 0 }, | ||
304 | { "help", cmd_help, 0 }, | ||
305 | { "list", cmd_list, 0 }, | ||
306 | { "record", cmd_record, 0 }, | ||
307 | { "report", cmd_report, 0 }, | ||
308 | { "bench", cmd_bench, 0 }, | ||
309 | { "stat", cmd_stat, 0 }, | ||
310 | { "timechart", cmd_timechart, 0 }, | ||
311 | { "top", cmd_top, 0 }, | ||
312 | { "annotate", cmd_annotate, 0 }, | ||
313 | { "version", cmd_version, 0 }, | ||
314 | { "script", cmd_script, 0 }, | ||
315 | { "sched", cmd_sched, 0 }, | ||
316 | { "probe", cmd_probe, 0 }, | ||
317 | { "kmem", cmd_kmem, 0 }, | ||
318 | { "lock", cmd_lock, 0 }, | ||
319 | { "kvm", cmd_kvm, 0 }, | ||
320 | { "test", cmd_test, 0 }, | ||
321 | { "inject", cmd_inject, 0 }, | ||
322 | }; | ||
323 | unsigned int i; | 338 | unsigned int i; |
324 | static const char ext[] = STRIP_EXTENSION; | 339 | static const char ext[] = STRIP_EXTENSION; |
325 | 340 | ||
diff --git a/tools/perf/perf.h b/tools/perf/perf.h index f960ccb2edc6..a89cbbb61801 100644 --- a/tools/perf/perf.h +++ b/tools/perf/perf.h | |||
@@ -88,6 +88,12 @@ void get_term_dimensions(struct winsize *ws); | |||
88 | #define CPUINFO_PROC "Processor" | 88 | #define CPUINFO_PROC "Processor" |
89 | #endif | 89 | #endif |
90 | 90 | ||
91 | #ifdef __aarch64__ | ||
92 | #include "../../arch/arm64/include/asm/unistd.h" | ||
93 | #define rmb() asm volatile("dmb ld" ::: "memory") | ||
94 | #define cpu_relax() asm volatile("yield" ::: "memory") | ||
95 | #endif | ||
96 | |||
91 | #ifdef __mips__ | 97 | #ifdef __mips__ |
92 | #include "../../arch/mips/include/asm/unistd.h" | 98 | #include "../../arch/mips/include/asm/unistd.h" |
93 | #define rmb() asm volatile( \ | 99 | #define rmb() asm volatile( \ |
@@ -209,9 +215,15 @@ void pthread__unblock_sigwinch(void); | |||
209 | 215 | ||
210 | #include "util/target.h" | 216 | #include "util/target.h" |
211 | 217 | ||
218 | enum perf_call_graph_mode { | ||
219 | CALLCHAIN_NONE, | ||
220 | CALLCHAIN_FP, | ||
221 | CALLCHAIN_DWARF | ||
222 | }; | ||
223 | |||
212 | struct perf_record_opts { | 224 | struct perf_record_opts { |
213 | struct perf_target target; | 225 | struct perf_target target; |
214 | bool call_graph; | 226 | int call_graph; |
215 | bool group; | 227 | bool group; |
216 | bool inherit_stat; | 228 | bool inherit_stat; |
217 | bool no_delay; | 229 | bool no_delay; |
@@ -230,6 +242,7 @@ struct perf_record_opts { | |||
230 | u64 branch_stack; | 242 | u64 branch_stack; |
231 | u64 default_interval; | 243 | u64 default_interval; |
232 | u64 user_interval; | 244 | u64 user_interval; |
245 | u16 stack_dump_size; | ||
233 | }; | 246 | }; |
234 | 247 | ||
235 | #endif | 248 | #endif |
diff --git a/tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/EventClass.py b/tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/EventClass.py new file mode 100755 index 000000000000..9e0985794e20 --- /dev/null +++ b/tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/EventClass.py | |||
@@ -0,0 +1,94 @@ | |||
1 | # EventClass.py | ||
2 | # | ||
3 | # This is a library defining some event type classes, which can | ||
4 | # be used by other scripts to analyze perf samples. | ||
5 | # | ||
6 | # Currently just a few classes are defined as examples: | ||
7 | # PerfEvent is the base class for all perf event samples, PebsEvent | ||
8 | # is a HW-based Intel x86 PEBS event, and users can add more SW/HW | ||
9 | # event classes as needed. | ||
10 | |||
11 | import struct | ||
12 | |||
13 | # Event types; users can add more here | ||
14 | EVTYPE_GENERIC = 0 | ||
15 | EVTYPE_PEBS = 1 # Basic PEBS event | ||
16 | EVTYPE_PEBS_LL = 2 # PEBS event with load latency info | ||
17 | EVTYPE_IBS = 3 | ||
18 | |||
19 | # | ||
20 | # Currently we don't have a good way to tell the event type other | ||
21 | # than by the size of the raw buffer: a raw PEBS event with load | ||
22 | # latency data is 176 bytes, while a plain PEBS event is 144 bytes. | ||
23 | # | ||
24 | def create_event(name, comm, dso, symbol, raw_buf): | ||
25 | if (len(raw_buf) == 144): | ||
26 | event = PebsEvent(name, comm, dso, symbol, raw_buf) | ||
27 | elif (len(raw_buf) == 176): | ||
28 | event = PebsNHM(name, comm, dso, symbol, raw_buf) | ||
29 | else: | ||
30 | event = PerfEvent(name, comm, dso, symbol, raw_buf) | ||
31 | |||
32 | return event | ||
33 | |||
34 | class PerfEvent(object): | ||
35 | event_num = 0 | ||
36 | def __init__(self, name, comm, dso, symbol, raw_buf, ev_type=EVTYPE_GENERIC): | ||
37 | self.name = name | ||
38 | self.comm = comm | ||
39 | self.dso = dso | ||
40 | self.symbol = symbol | ||
41 | self.raw_buf = raw_buf | ||
42 | self.ev_type = ev_type | ||
43 | PerfEvent.event_num += 1 | ||
44 | |||
45 | def show(self): | ||
46 | print "PMU event: name=%12s, symbol=%24s, comm=%8s, dso=%12s" % (self.name, self.symbol, self.comm, self.dso) | ||
47 | |||
48 | # | ||
49 | # Basic Intel PEBS (Precise Event-based Sampling) event, whose raw buffer | ||
50 | # contains the context info when that event happened: the EFLAGS and | ||
51 | # linear IP info, as well as all the registers. | ||
52 | # | ||
53 | class PebsEvent(PerfEvent): | ||
54 | pebs_num = 0 | ||
55 | def __init__(self, name, comm, dso, symbol, raw_buf, ev_type=EVTYPE_PEBS): | ||
56 | tmp_buf=raw_buf[0:80] | ||
57 | flags, ip, ax, bx, cx, dx, si, di, bp, sp = struct.unpack('QQQQQQQQQQ', tmp_buf) | ||
58 | self.flags = flags | ||
59 | self.ip = ip | ||
60 | self.ax = ax | ||
61 | self.bx = bx | ||
62 | self.cx = cx | ||
63 | self.dx = dx | ||
64 | self.si = si | ||
65 | self.di = di | ||
66 | self.bp = bp | ||
67 | self.sp = sp | ||
68 | |||
69 | PerfEvent.__init__(self, name, comm, dso, symbol, raw_buf, ev_type) | ||
70 | PebsEvent.pebs_num += 1 | ||
71 | del tmp_buf | ||
72 | |||
73 | # | ||
74 | # Intel Nehalem and Westmere support PEBS plus Load Latency info, which lies | ||
75 | # in the four 64-bit words written after the PEBS data: | ||
76 | # Status: records the IA32_PERF_GLOBAL_STATUS register value | ||
77 | # DLA: Data Linear Address (EIP) | ||
78 | # DSE: Data Source Encoding, where the latency happens, hit or miss | ||
79 | # in L1/L2/L3 or IO operations | ||
80 | # LAT: the actual latency in cycles | ||
81 | # | ||
82 | class PebsNHM(PebsEvent): | ||
83 | pebs_nhm_num = 0 | ||
84 | def __init__(self, name, comm, dso, symbol, raw_buf, ev_type=EVTYPE_PEBS_LL): | ||
85 | tmp_buf=raw_buf[144:176] | ||
86 | status, dla, dse, lat = struct.unpack('QQQQ', tmp_buf) | ||
87 | self.status = status | ||
88 | self.dla = dla | ||
89 | self.dse = dse | ||
90 | self.lat = lat | ||
91 | |||
92 | PebsEvent.__init__(self, name, comm, dso, symbol, raw_buf, ev_type) | ||
93 | PebsNHM.pebs_nhm_num += 1 | ||
94 | del tmp_buf | ||
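To illustrate the buffer-size heuristic described in the header comments of EventClass.py, here is a minimal sketch of driving these classes by hand, outside of perf script. It assumes the Python 2 environment these scripts target and that EventClass.py is importable; the field values and names ("cycles", "bash", "do_sys_open") are made up purely for illustration. A synthetic 144-byte raw buffer makes create_event() return a PebsEvent, whose first 80 bytes are unpacked into flags, ip and the general-purpose registers:

    import struct
    from EventClass import create_event, PebsEvent, EVTYPE_PEBS

    # Synthetic 144-byte PEBS raw buffer: ten leading u64 fields
    # (flags, ip, ax, bx, cx, dx, si, di, bp, sp) followed by zero padding.
    regs = struct.pack('QQQQQQQQQQ', 0x246, 0xffffffff81000000,
                       1, 2, 3, 4, 5, 6, 7, 8)
    raw_buf = regs + '\0' * (144 - len(regs))

    # 144 bytes -> PebsEvent, 176 bytes -> PebsNHM, anything else -> PerfEvent.
    event = create_event("cycles", "bash", "[kernel]", "do_sys_open", raw_buf)
    assert isinstance(event, PebsEvent) and event.ev_type == EVTYPE_PEBS
    print("ip=%#x flags=%#x sp=%#x" % (event.ip, event.flags, event.sp))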
diff --git a/tools/perf/scripts/python/bin/event_analyzing_sample-record b/tools/perf/scripts/python/bin/event_analyzing_sample-record new file mode 100644 index 000000000000..5ce652dabd02 --- /dev/null +++ b/tools/perf/scripts/python/bin/event_analyzing_sample-record | |||
@@ -0,0 +1,8 @@ | |||
1 | #!/bin/bash | ||
2 | |||
3 | # | ||
4 | # event_analyzing_sample.py can cover all types of perf samples, including | ||
5 | # tracepoints, so there are no special record requirements; just record | ||
6 | # what you want to analyze. | ||
7 | # | ||
8 | perf record $@ | ||
diff --git a/tools/perf/scripts/python/bin/event_analyzing_sample-report b/tools/perf/scripts/python/bin/event_analyzing_sample-report new file mode 100644 index 000000000000..0941fc94e158 --- /dev/null +++ b/tools/perf/scripts/python/bin/event_analyzing_sample-report | |||
@@ -0,0 +1,3 @@ | |||
1 | #!/bin/bash | ||
2 | # description: analyze all perf samples | ||
3 | perf script $@ -s "$PERF_EXEC_PATH"/scripts/python/event_analyzing_sample.py | ||
diff --git a/tools/perf/scripts/python/event_analyzing_sample.py b/tools/perf/scripts/python/event_analyzing_sample.py new file mode 100644 index 000000000000..163c39fa12d9 --- /dev/null +++ b/tools/perf/scripts/python/event_analyzing_sample.py | |||
@@ -0,0 +1,189 @@ | |||
1 | # event_analyzing_sample.py: general event handler in python | ||
2 | # | ||
3 | # perf report is already very powerful with its integrated annotation, | ||
4 | # and this script is not trying to be as powerful as perf report, but to | ||
5 | # provide end users/developers a flexible way to analyze events other | ||
6 | # than tracepoints. | ||
7 | # | ||
8 | # The two database-related functions in this script just show how to gather | ||
9 | # the basic information; users can modify them or write their own functions | ||
10 | # according to their specific requirements. | ||
11 | # | ||
12 | # The first function, "show_general_events", just does a basic grouping of all | ||
13 | # generic events with the help of sqlite, and the second one, "show_pebs_ll", | ||
14 | # is for an x86 HW PMU event: PEBS with load latency data. | ||
15 | # | ||
16 | |||
17 | import os | ||
18 | import sys | ||
19 | import math | ||
20 | import struct | ||
21 | import sqlite3 | ||
22 | |||
23 | sys.path.append(os.environ['PERF_EXEC_PATH'] + \ | ||
24 | '/scripts/python/Perf-Trace-Util/lib/Perf/Trace') | ||
25 | |||
26 | from perf_trace_context import * | ||
27 | from EventClass import * | ||
28 | |||
29 | # | ||
30 | # If the perf.data file has a large number of samples, the insert operations | ||
31 | # will be very time consuming (about 10+ minutes for 10000 samples) if the | ||
32 | # .db database is on disk. Moving the .db file to a RAM-based FS speeds up | ||
33 | # the handling and cuts the time down to several seconds. | ||
34 | # | ||
35 | con = sqlite3.connect("/dev/shm/perf.db") | ||
36 | con.isolation_level = None | ||
37 | |||
38 | def trace_begin(): | ||
39 | print "In trace_begin:\n" | ||
40 | |||
41 | # | ||
42 | # Create several tables at the start: pebs_ll is for PEBS data with | ||
43 | # load latency info, while gen_events is for general events. | ||
44 | # | ||
45 | con.execute(""" | ||
46 | create table if not exists gen_events ( | ||
47 | name text, | ||
48 | symbol text, | ||
49 | comm text, | ||
50 | dso text | ||
51 | );""") | ||
52 | con.execute(""" | ||
53 | create table if not exists pebs_ll ( | ||
54 | name text, | ||
55 | symbol text, | ||
56 | comm text, | ||
57 | dso text, | ||
58 | flags integer, | ||
59 | ip integer, | ||
60 | status integer, | ||
61 | dse integer, | ||
62 | dla integer, | ||
63 | lat integer | ||
64 | );""") | ||
65 | |||
66 | # | ||
67 | # Create an event object and insert it into the database so that users | ||
68 | # can do more analysis with simple database commands. | ||
69 | # | ||
70 | def process_event(param_dict): | ||
71 | event_attr = param_dict["attr"] | ||
72 | sample = param_dict["sample"] | ||
73 | raw_buf = param_dict["raw_buf"] | ||
74 | comm = param_dict["comm"] | ||
75 | name = param_dict["ev_name"] | ||
76 | |||
77 | # Symbol and dso info are not always resolved | ||
78 | if (param_dict.has_key("dso")): | ||
79 | dso = param_dict["dso"] | ||
80 | else: | ||
81 | dso = "Unknown_dso" | ||
82 | |||
83 | if (param_dict.has_key("symbol")): | ||
84 | symbol = param_dict["symbol"] | ||
85 | else: | ||
86 | symbol = "Unknown_symbol" | ||
87 | |||
88 | # Create the event object and insert it to the right table in database | ||
89 | event = create_event(name, comm, dso, symbol, raw_buf) | ||
90 | insert_db(event) | ||
91 | |||
92 | def insert_db(event): | ||
93 | if event.ev_type == EVTYPE_GENERIC: | ||
94 | con.execute("insert into gen_events values(?, ?, ?, ?)", | ||
95 | (event.name, event.symbol, event.comm, event.dso)) | ||
96 | elif event.ev_type == EVTYPE_PEBS_LL: | ||
97 | event.ip &= 0x7fffffffffffffff | ||
98 | event.dla &= 0x7fffffffffffffff | ||
99 | con.execute("insert into pebs_ll values (?, ?, ?, ?, ?, ?, ?, ?, ?, ?)", | ||
100 | (event.name, event.symbol, event.comm, event.dso, event.flags, | ||
101 | event.ip, event.status, event.dse, event.dla, event.lat)) | ||
102 | |||
103 | def trace_end(): | ||
104 | print "In trace_end:\n" | ||
105 | # We show the basic info for the 2 type of event classes | ||
106 | show_general_events() | ||
107 | show_pebs_ll() | ||
108 | con.close() | ||
109 | |||
110 | # | ||
111 | # As the event count may be very large, we can't show the histogram | ||
112 | # linearly with real numbers, so use a log2 algorithm instead. | ||
113 | # | ||
114 | |||
115 | def num2sym(num): | ||
116 | # Each number will have at least one '#' | ||
117 | snum = '#' * (int)(math.log(num, 2) + 1) | ||
118 | return snum | ||
119 | |||
120 | def show_general_events(): | ||
121 | |||
122 | # Check the total record number in the table | ||
123 | count = con.execute("select count(*) from gen_events") | ||
124 | for t in count: | ||
125 | print "There is %d records in gen_events table" % t[0] | ||
126 | if t[0] == 0: | ||
127 | return | ||
128 | |||
129 | print "Statistics about the general events grouped by thread/symbol/dso: \n" | ||
130 | |||
131 | # Group by thread | ||
132 | commq = con.execute("select comm, count(comm) from gen_events group by comm order by -count(comm)") | ||
133 | print "\n%16s %8s %16s\n%s" % ("comm", "number", "histogram", "="*42) | ||
134 | for row in commq: | ||
135 | print "%16s %8d %s" % (row[0], row[1], num2sym(row[1])) | ||
136 | |||
137 | # Group by symbol | ||
138 | print "\n%32s %8s %16s\n%s" % ("symbol", "number", "histogram", "="*58) | ||
139 | symbolq = con.execute("select symbol, count(symbol) from gen_events group by symbol order by -count(symbol)") | ||
140 | for row in symbolq: | ||
141 | print "%32s %8d %s" % (row[0], row[1], num2sym(row[1])) | ||
142 | |||
143 | # Group by dso | ||
144 | print "\n%40s %8s %16s\n%s" % ("dso", "number", "histogram", "="*74) | ||
145 | dsoq = con.execute("select dso, count(dso) from gen_events group by dso order by -count(dso)") | ||
146 | for row in dsoq: | ||
147 | print "%40s %8d %s" % (row[0], row[1], num2sym(row[1])) | ||
148 | |||
149 | # | ||
150 | # This function just shows the basic info; we could do more with the | ||
151 | # data in the tables, like checking the function parameters when | ||
152 | # big-latency events happen. | ||
153 | # | ||
154 | def show_pebs_ll(): | ||
155 | |||
156 | count = con.execute("select count(*) from pebs_ll") | ||
157 | for t in count: | ||
158 | print "There is %d records in pebs_ll table" % t[0] | ||
159 | if t[0] == 0: | ||
160 | return | ||
161 | |||
162 | print "Statistics about the PEBS Load Latency events grouped by thread/symbol/dse/latency: \n" | ||
163 | |||
164 | # Group by thread | ||
165 | commq = con.execute("select comm, count(comm) from pebs_ll group by comm order by -count(comm)") | ||
166 | print "\n%16s %8s %16s\n%s" % ("comm", "number", "histogram", "="*42) | ||
167 | for row in commq: | ||
168 | print "%16s %8d %s" % (row[0], row[1], num2sym(row[1])) | ||
169 | |||
170 | # Group by symbol | ||
171 | print "\n%32s %8s %16s\n%s" % ("symbol", "number", "histogram", "="*58) | ||
172 | symbolq = con.execute("select symbol, count(symbol) from pebs_ll group by symbol order by -count(symbol)") | ||
173 | for row in symbolq: | ||
174 | print "%32s %8d %s" % (row[0], row[1], num2sym(row[1])) | ||
175 | |||
176 | # Group by dse | ||
177 | dseq = con.execute("select dse, count(dse) from pebs_ll group by dse order by -count(dse)") | ||
178 | print "\n%32s %8s %16s\n%s" % ("dse", "number", "histogram", "="*58) | ||
179 | for row in dseq: | ||
180 | print "%32s %8d %s" % (row[0], row[1], num2sym(row[1])) | ||
181 | |||
182 | # Group by latency | ||
183 | latq = con.execute("select lat, count(lat) from pebs_ll group by lat order by lat") | ||
184 | print "\n%32s %8s %16s\n%s" % ("latency", "number", "histogram", "="*58) | ||
185 | for row in latq: | ||
186 | print "%32s %8d %s" % (row[0], row[1], num2sym(row[1])) | ||
187 | |||
188 | def trace_unhandled(event_name, context, event_fields_dict): | ||
189 | print ' '.join(['%s=%s'%(k,str(v))for k,v in sorted(event_fields_dict.items())]) | ||
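Since the handler above leaves its tables behind in /dev/shm/perf.db, further analysis can be done with plain SQL once perf script finishes. A small sketch along those lines (the query, column choice and limit are only illustrative, not part of the script above):

    import sqlite3

    # Open the database written by event_analyzing_sample.py.
    con = sqlite3.connect("/dev/shm/perf.db")

    # Ten symbols with the worst load latency seen in the pebs_ll table.
    rows = con.execute("""
        select symbol, dso, count(*), max(lat)
        from pebs_ll
        group by symbol, dso
        order by max(lat) desc
        limit 10
    """)

    for symbol, dso, nr, worst in rows:
        print("%32s %24s samples=%6d worst_lat=%d" % (symbol, dso, nr, worst))

    con.close()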
diff --git a/tools/perf/ui/browser.c b/tools/perf/ui/browser.c index 1818a531f1d3..4aeb7d5df939 100644 --- a/tools/perf/ui/browser.c +++ b/tools/perf/ui/browser.c | |||
@@ -269,7 +269,7 @@ int ui_browser__show(struct ui_browser *browser, const char *title, | |||
269 | return err ? 0 : -1; | 269 | return err ? 0 : -1; |
270 | } | 270 | } |
271 | 271 | ||
272 | void ui_browser__hide(struct ui_browser *browser __used) | 272 | void ui_browser__hide(struct ui_browser *browser __maybe_unused) |
273 | { | 273 | { |
274 | pthread_mutex_lock(&ui__lock); | 274 | pthread_mutex_lock(&ui__lock); |
275 | ui_helpline__pop(); | 275 | ui_helpline__pop(); |
@@ -518,7 +518,7 @@ static struct ui_browser__colorset { | |||
518 | 518 | ||
519 | 519 | ||
520 | static int ui_browser__color_config(const char *var, const char *value, | 520 | static int ui_browser__color_config(const char *var, const char *value, |
521 | void *data __used) | 521 | void *data __maybe_unused) |
522 | { | 522 | { |
523 | char *fg = NULL, *bg; | 523 | char *fg = NULL, *bg; |
524 | int i; | 524 | int i; |
@@ -602,7 +602,8 @@ void __ui_browser__vline(struct ui_browser *browser, unsigned int column, | |||
602 | SLsmg_set_char_set(0); | 602 | SLsmg_set_char_set(0); |
603 | } | 603 | } |
604 | 604 | ||
605 | void ui_browser__write_graph(struct ui_browser *browser __used, int graph) | 605 | void ui_browser__write_graph(struct ui_browser *browser __maybe_unused, |
606 | int graph) | ||
606 | { | 607 | { |
607 | SLsmg_set_char_set(1); | 608 | SLsmg_set_char_set(1); |
608 | SLsmg_write_char(graph); | 609 | SLsmg_write_char(graph); |
diff --git a/tools/perf/ui/browsers/annotate.c b/tools/perf/ui/browsers/annotate.c index 67a2703e666a..8f8cd2d73b3b 100644 --- a/tools/perf/ui/browsers/annotate.c +++ b/tools/perf/ui/browsers/annotate.c | |||
@@ -54,7 +54,8 @@ static inline struct browser_disasm_line *disasm_line__browser(struct disasm_lin | |||
54 | return (struct browser_disasm_line *)(dl + 1); | 54 | return (struct browser_disasm_line *)(dl + 1); |
55 | } | 55 | } |
56 | 56 | ||
57 | static bool disasm_line__filter(struct ui_browser *browser __used, void *entry) | 57 | static bool disasm_line__filter(struct ui_browser *browser __maybe_unused, |
58 | void *entry) | ||
58 | { | 59 | { |
59 | if (annotate_browser__opts.hide_src_code) { | 60 | if (annotate_browser__opts.hide_src_code) { |
60 | struct disasm_line *dl = list_entry(entry, struct disasm_line, node); | 61 | struct disasm_line *dl = list_entry(entry, struct disasm_line, node); |
@@ -928,7 +929,8 @@ static int annotate_config__cmp(const void *name, const void *cfgp) | |||
928 | return strcmp(name, cfg->name); | 929 | return strcmp(name, cfg->name); |
929 | } | 930 | } |
930 | 931 | ||
931 | static int annotate__config(const char *var, const char *value, void *data __used) | 932 | static int annotate__config(const char *var, const char *value, |
933 | void *data __maybe_unused) | ||
932 | { | 934 | { |
933 | struct annotate__config *cfg; | 935 | struct annotate__config *cfg; |
934 | const char *name; | 936 | const char *name; |
diff --git a/tools/perf/ui/browsers/hists.c b/tools/perf/ui/browsers/hists.c index 413bd62eedb1..a21f40bebbac 100644 --- a/tools/perf/ui/browsers/hists.c +++ b/tools/perf/ui/browsers/hists.c | |||
@@ -24,9 +24,12 @@ struct hist_browser { | |||
24 | struct hist_entry *he_selection; | 24 | struct hist_entry *he_selection; |
25 | struct map_symbol *selection; | 25 | struct map_symbol *selection; |
26 | int print_seq; | 26 | int print_seq; |
27 | bool show_dso; | ||
27 | bool has_symbols; | 28 | bool has_symbols; |
28 | }; | 29 | }; |
29 | 30 | ||
31 | extern void hist_browser__init_hpp(void); | ||
32 | |||
30 | static int hists__browser_title(struct hists *hists, char *bf, size_t size, | 33 | static int hists__browser_title(struct hists *hists, char *bf, size_t size, |
31 | const char *ev_name); | 34 | const char *ev_name); |
32 | 35 | ||
@@ -376,12 +379,19 @@ out: | |||
376 | } | 379 | } |
377 | 380 | ||
378 | static char *callchain_list__sym_name(struct callchain_list *cl, | 381 | static char *callchain_list__sym_name(struct callchain_list *cl, |
379 | char *bf, size_t bfsize) | 382 | char *bf, size_t bfsize, bool show_dso) |
380 | { | 383 | { |
384 | int printed; | ||
385 | |||
381 | if (cl->ms.sym) | 386 | if (cl->ms.sym) |
382 | return cl->ms.sym->name; | 387 | printed = scnprintf(bf, bfsize, "%s", cl->ms.sym->name); |
388 | else | ||
389 | printed = scnprintf(bf, bfsize, "%#" PRIx64, cl->ip); | ||
390 | |||
391 | if (show_dso) | ||
392 | scnprintf(bf + printed, bfsize - printed, " %s", | ||
393 | cl->ms.map ? cl->ms.map->dso->short_name : "unknown"); | ||
383 | 394 | ||
384 | snprintf(bf, bfsize, "%#" PRIx64, cl->ip); | ||
385 | return bf; | 395 | return bf; |
386 | } | 396 | } |
387 | 397 | ||
@@ -417,7 +427,7 @@ static int hist_browser__show_callchain_node_rb_tree(struct hist_browser *browse | |||
417 | remaining -= cumul; | 427 | remaining -= cumul; |
418 | 428 | ||
419 | list_for_each_entry(chain, &child->val, list) { | 429 | list_for_each_entry(chain, &child->val, list) { |
420 | char ipstr[BITS_PER_LONG / 4 + 1], *alloc_str; | 430 | char bf[1024], *alloc_str; |
421 | const char *str; | 431 | const char *str; |
422 | int color; | 432 | int color; |
423 | bool was_first = first; | 433 | bool was_first = first; |
@@ -434,7 +444,8 @@ static int hist_browser__show_callchain_node_rb_tree(struct hist_browser *browse | |||
434 | } | 444 | } |
435 | 445 | ||
436 | alloc_str = NULL; | 446 | alloc_str = NULL; |
437 | str = callchain_list__sym_name(chain, ipstr, sizeof(ipstr)); | 447 | str = callchain_list__sym_name(chain, bf, sizeof(bf), |
448 | browser->show_dso); | ||
438 | if (was_first) { | 449 | if (was_first) { |
439 | double percent = cumul * 100.0 / new_total; | 450 | double percent = cumul * 100.0 / new_total; |
440 | 451 | ||
@@ -493,7 +504,7 @@ static int hist_browser__show_callchain_node(struct hist_browser *browser, | |||
493 | char folded_sign = ' '; | 504 | char folded_sign = ' '; |
494 | 505 | ||
495 | list_for_each_entry(chain, &node->val, list) { | 506 | list_for_each_entry(chain, &node->val, list) { |
496 | char ipstr[BITS_PER_LONG / 4 + 1], *s; | 507 | char bf[1024], *s; |
497 | int color; | 508 | int color; |
498 | 509 | ||
499 | folded_sign = callchain_list__folded(chain); | 510 | folded_sign = callchain_list__folded(chain); |
@@ -510,7 +521,8 @@ static int hist_browser__show_callchain_node(struct hist_browser *browser, | |||
510 | *is_current_entry = true; | 521 | *is_current_entry = true; |
511 | } | 522 | } |
512 | 523 | ||
513 | s = callchain_list__sym_name(chain, ipstr, sizeof(ipstr)); | 524 | s = callchain_list__sym_name(chain, bf, sizeof(bf), |
525 | browser->show_dso); | ||
514 | ui_browser__gotorc(&browser->b, row, 0); | 526 | ui_browser__gotorc(&browser->b, row, 0); |
515 | ui_browser__set_color(&browser->b, color); | 527 | ui_browser__set_color(&browser->b, color); |
516 | slsmg_write_nstring(" ", offset); | 528 | slsmg_write_nstring(" ", offset); |
@@ -553,14 +565,47 @@ static int hist_browser__show_callchain(struct hist_browser *browser, | |||
553 | return row - first_row; | 565 | return row - first_row; |
554 | } | 566 | } |
555 | 567 | ||
568 | #define HPP__COLOR_FN(_name, _field) \ | ||
569 | static int hist_browser__hpp_color_ ## _name(struct perf_hpp *hpp, \ | ||
570 | struct hist_entry *he) \ | ||
571 | { \ | ||
572 | double percent = 100.0 * he->_field / hpp->total_period; \ | ||
573 | *(double *)hpp->ptr = percent; \ | ||
574 | return scnprintf(hpp->buf, hpp->size, "%6.2f%%", percent); \ | ||
575 | } | ||
576 | |||
577 | HPP__COLOR_FN(overhead, period) | ||
578 | HPP__COLOR_FN(overhead_sys, period_sys) | ||
579 | HPP__COLOR_FN(overhead_us, period_us) | ||
580 | HPP__COLOR_FN(overhead_guest_sys, period_guest_sys) | ||
581 | HPP__COLOR_FN(overhead_guest_us, period_guest_us) | ||
582 | |||
583 | #undef HPP__COLOR_FN | ||
584 | |||
585 | void hist_browser__init_hpp(void) | ||
586 | { | ||
587 | perf_hpp__init(false, false); | ||
588 | |||
589 | perf_hpp__format[PERF_HPP__OVERHEAD].color = | ||
590 | hist_browser__hpp_color_overhead; | ||
591 | perf_hpp__format[PERF_HPP__OVERHEAD_SYS].color = | ||
592 | hist_browser__hpp_color_overhead_sys; | ||
593 | perf_hpp__format[PERF_HPP__OVERHEAD_US].color = | ||
594 | hist_browser__hpp_color_overhead_us; | ||
595 | perf_hpp__format[PERF_HPP__OVERHEAD_GUEST_SYS].color = | ||
596 | hist_browser__hpp_color_overhead_guest_sys; | ||
597 | perf_hpp__format[PERF_HPP__OVERHEAD_GUEST_US].color = | ||
598 | hist_browser__hpp_color_overhead_guest_us; | ||
599 | } | ||
600 | |||
556 | static int hist_browser__show_entry(struct hist_browser *browser, | 601 | static int hist_browser__show_entry(struct hist_browser *browser, |
557 | struct hist_entry *entry, | 602 | struct hist_entry *entry, |
558 | unsigned short row) | 603 | unsigned short row) |
559 | { | 604 | { |
560 | char s[256]; | 605 | char s[256]; |
561 | double percent; | 606 | double percent; |
562 | int printed = 0; | 607 | int i, printed = 0; |
563 | int width = browser->b.width - 6; /* The percentage */ | 608 | int width = browser->b.width; |
564 | char folded_sign = ' '; | 609 | char folded_sign = ' '; |
565 | bool current_entry = ui_browser__is_current_entry(&browser->b, row); | 610 | bool current_entry = ui_browser__is_current_entry(&browser->b, row); |
566 | off_t row_offset = entry->row_offset; | 611 | off_t row_offset = entry->row_offset; |
@@ -576,35 +621,50 @@ static int hist_browser__show_entry(struct hist_browser *browser, | |||
576 | } | 621 | } |
577 | 622 | ||
578 | if (row_offset == 0) { | 623 | if (row_offset == 0) { |
579 | hist_entry__snprintf(entry, s, sizeof(s), browser->hists); | 624 | struct perf_hpp hpp = { |
580 | percent = (entry->period * 100.0) / browser->hists->stats.total_period; | 625 | .buf = s, |
626 | .size = sizeof(s), | ||
627 | .total_period = browser->hists->stats.total_period, | ||
628 | }; | ||
581 | 629 | ||
582 | ui_browser__set_percent_color(&browser->b, percent, current_entry); | ||
583 | ui_browser__gotorc(&browser->b, row, 0); | 630 | ui_browser__gotorc(&browser->b, row, 0); |
584 | if (symbol_conf.use_callchain) { | ||
585 | slsmg_printf("%c ", folded_sign); | ||
586 | width -= 2; | ||
587 | } | ||
588 | 631 | ||
589 | slsmg_printf(" %5.2f%%", percent); | 632 | for (i = 0; i < PERF_HPP__MAX_INDEX; i++) { |
633 | if (!perf_hpp__format[i].cond) | ||
634 | continue; | ||
590 | 635 | ||
591 | /* The scroll bar isn't being used */ | 636 | if (i) { |
592 | if (!browser->b.navkeypressed) | 637 | slsmg_printf(" "); |
593 | width += 1; | 638 | width -= 2; |
639 | } | ||
594 | 640 | ||
595 | if (!current_entry || !browser->b.navkeypressed) | 641 | if (perf_hpp__format[i].color) { |
596 | ui_browser__set_color(&browser->b, HE_COLORSET_NORMAL); | 642 | hpp.ptr = &percent; |
643 | /* It will set percent for us. See HPP__COLOR_FN above. */ | ||
644 | width -= perf_hpp__format[i].color(&hpp, entry); | ||
597 | 645 | ||
598 | if (symbol_conf.show_nr_samples) { | 646 | ui_browser__set_percent_color(&browser->b, percent, current_entry); |
599 | slsmg_printf(" %11u", entry->nr_events); | 647 | |
600 | width -= 12; | 648 | if (i == 0 && symbol_conf.use_callchain) { |
601 | } | 649 | slsmg_printf("%c ", folded_sign); |
650 | width -= 2; | ||
651 | } | ||
652 | |||
653 | slsmg_printf("%s", s); | ||
602 | 654 | ||
603 | if (symbol_conf.show_total_period) { | 655 | if (!current_entry || !browser->b.navkeypressed) |
604 | slsmg_printf(" %12" PRIu64, entry->period); | 656 | ui_browser__set_color(&browser->b, HE_COLORSET_NORMAL); |
605 | width -= 13; | 657 | } else { |
658 | width -= perf_hpp__format[i].entry(&hpp, entry); | ||
659 | slsmg_printf("%s", s); | ||
660 | } | ||
606 | } | 661 | } |
607 | 662 | ||
663 | /* The scroll bar isn't being used */ | ||
664 | if (!browser->b.navkeypressed) | ||
665 | width += 1; | ||
666 | |||
667 | hist_entry__sort_snprintf(entry, s, sizeof(s), browser->hists); | ||
608 | slsmg_write_nstring(s, width); | 668 | slsmg_write_nstring(s, width); |
609 | ++row; | 669 | ++row; |
610 | ++printed; | 670 | ++printed; |
@@ -830,7 +890,7 @@ static int hist_browser__fprintf_callchain_node_rb_tree(struct hist_browser *bro | |||
830 | remaining -= cumul; | 890 | remaining -= cumul; |
831 | 891 | ||
832 | list_for_each_entry(chain, &child->val, list) { | 892 | list_for_each_entry(chain, &child->val, list) { |
833 | char ipstr[BITS_PER_LONG / 4 + 1], *alloc_str; | 893 | char bf[1024], *alloc_str; |
834 | const char *str; | 894 | const char *str; |
835 | bool was_first = first; | 895 | bool was_first = first; |
836 | 896 | ||
@@ -842,7 +902,8 @@ static int hist_browser__fprintf_callchain_node_rb_tree(struct hist_browser *bro | |||
842 | folded_sign = callchain_list__folded(chain); | 902 | folded_sign = callchain_list__folded(chain); |
843 | 903 | ||
844 | alloc_str = NULL; | 904 | alloc_str = NULL; |
845 | str = callchain_list__sym_name(chain, ipstr, sizeof(ipstr)); | 905 | str = callchain_list__sym_name(chain, bf, sizeof(bf), |
906 | browser->show_dso); | ||
846 | if (was_first) { | 907 | if (was_first) { |
847 | double percent = cumul * 100.0 / new_total; | 908 | double percent = cumul * 100.0 / new_total; |
848 | 909 | ||
@@ -880,10 +941,10 @@ static int hist_browser__fprintf_callchain_node(struct hist_browser *browser, | |||
880 | int printed = 0; | 941 | int printed = 0; |
881 | 942 | ||
882 | list_for_each_entry(chain, &node->val, list) { | 943 | list_for_each_entry(chain, &node->val, list) { |
883 | char ipstr[BITS_PER_LONG / 4 + 1], *s; | 944 | char bf[1024], *s; |
884 | 945 | ||
885 | folded_sign = callchain_list__folded(chain); | 946 | folded_sign = callchain_list__folded(chain); |
886 | s = callchain_list__sym_name(chain, ipstr, sizeof(ipstr)); | 947 | s = callchain_list__sym_name(chain, bf, sizeof(bf), browser->show_dso); |
887 | printed += fprintf(fp, "%*s%c %s\n", offset, " ", folded_sign, s); | 948 | printed += fprintf(fp, "%*s%c %s\n", offset, " ", folded_sign, s); |
888 | } | 949 | } |
889 | 950 | ||
@@ -920,7 +981,7 @@ static int hist_browser__fprintf_entry(struct hist_browser *browser, | |||
920 | if (symbol_conf.use_callchain) | 981 | if (symbol_conf.use_callchain) |
921 | folded_sign = hist_entry__folded(he); | 982 | folded_sign = hist_entry__folded(he); |
922 | 983 | ||
923 | hist_entry__snprintf(he, s, sizeof(s), browser->hists); | 984 | hist_entry__sort_snprintf(he, s, sizeof(s), browser->hists); |
924 | percent = (he->period * 100.0) / browser->hists->stats.total_period; | 985 | percent = (he->period * 100.0) / browser->hists->stats.total_period; |
925 | 986 | ||
926 | if (symbol_conf.use_callchain) | 987 | if (symbol_conf.use_callchain) |
@@ -1133,6 +1194,9 @@ static int perf_evsel__hists_browse(struct perf_evsel *evsel, int nr_events, | |||
1133 | continue; | 1194 | continue; |
1134 | case 'd': | 1195 | case 'd': |
1135 | goto zoom_dso; | 1196 | goto zoom_dso; |
1197 | case 'V': | ||
1198 | browser->show_dso = !browser->show_dso; | ||
1199 | continue; | ||
1136 | case 't': | 1200 | case 't': |
1137 | goto zoom_thread; | 1201 | goto zoom_thread; |
1138 | case '/': | 1202 | case '/': |
@@ -1164,6 +1228,7 @@ static int perf_evsel__hists_browse(struct perf_evsel *evsel, int nr_events, | |||
1164 | "d Zoom into current DSO\n" | 1228 | "d Zoom into current DSO\n" |
1165 | "t Zoom into current Thread\n" | 1229 | "t Zoom into current Thread\n" |
1166 | "P Print histograms to perf.hist.N\n" | 1230 | "P Print histograms to perf.hist.N\n" |
1231 | "V Verbose (DSO names in callchains, etc)\n" | ||
1167 | "/ Filter symbol by name"); | 1232 | "/ Filter symbol by name"); |
1168 | continue; | 1233 | continue; |
1169 | case K_ENTER: | 1234 | case K_ENTER: |
diff --git a/tools/perf/ui/gtk/browser.c b/tools/perf/ui/gtk/browser.c index ec12e0b4ded6..7ff99ec1d95e 100644 --- a/tools/perf/ui/gtk/browser.c +++ b/tools/perf/ui/gtk/browser.c | |||
@@ -3,6 +3,7 @@ | |||
3 | #include "../evsel.h" | 3 | #include "../evsel.h" |
4 | #include "../sort.h" | 4 | #include "../sort.h" |
5 | #include "../hist.h" | 5 | #include "../hist.h" |
6 | #include "../helpline.h" | ||
6 | #include "gtk.h" | 7 | #include "gtk.h" |
7 | 8 | ||
8 | #include <signal.h> | 9 | #include <signal.h> |
@@ -35,6 +36,57 @@ static void perf_gtk__resize_window(GtkWidget *window) | |||
35 | gtk_window_resize(GTK_WINDOW(window), width, height); | 36 | gtk_window_resize(GTK_WINDOW(window), width, height); |
36 | } | 37 | } |
37 | 38 | ||
39 | static const char *perf_gtk__get_percent_color(double percent) | ||
40 | { | ||
41 | if (percent >= MIN_RED) | ||
42 | return "<span fgcolor='red'>"; | ||
43 | if (percent >= MIN_GREEN) | ||
44 | return "<span fgcolor='dark green'>"; | ||
45 | return NULL; | ||
46 | } | ||
47 | |||
48 | #define HPP__COLOR_FN(_name, _field) \ | ||
49 | static int perf_gtk__hpp_color_ ## _name(struct perf_hpp *hpp, \ | ||
50 | struct hist_entry *he) \ | ||
51 | { \ | ||
52 | double percent = 100.0 * he->_field / hpp->total_period; \ | ||
53 | const char *markup; \ | ||
54 | int ret = 0; \ | ||
55 | \ | ||
56 | markup = perf_gtk__get_percent_color(percent); \ | ||
57 | if (markup) \ | ||
58 | ret += scnprintf(hpp->buf, hpp->size, "%s", markup); \ | ||
59 | ret += scnprintf(hpp->buf + ret, hpp->size - ret, "%6.2f%%", percent); \ | ||
60 | if (markup) \ | ||
61 | ret += scnprintf(hpp->buf + ret, hpp->size - ret, "</span>"); \ | ||
62 | \ | ||
63 | return ret; \ | ||
64 | } | ||
65 | |||
66 | HPP__COLOR_FN(overhead, period) | ||
67 | HPP__COLOR_FN(overhead_sys, period_sys) | ||
68 | HPP__COLOR_FN(overhead_us, period_us) | ||
69 | HPP__COLOR_FN(overhead_guest_sys, period_guest_sys) | ||
70 | HPP__COLOR_FN(overhead_guest_us, period_guest_us) | ||
71 | |||
72 | #undef HPP__COLOR_FN | ||
73 | |||
74 | void perf_gtk__init_hpp(void) | ||
75 | { | ||
76 | perf_hpp__init(false, false); | ||
77 | |||
78 | perf_hpp__format[PERF_HPP__OVERHEAD].color = | ||
79 | perf_gtk__hpp_color_overhead; | ||
80 | perf_hpp__format[PERF_HPP__OVERHEAD_SYS].color = | ||
81 | perf_gtk__hpp_color_overhead_sys; | ||
82 | perf_hpp__format[PERF_HPP__OVERHEAD_US].color = | ||
83 | perf_gtk__hpp_color_overhead_us; | ||
84 | perf_hpp__format[PERF_HPP__OVERHEAD_GUEST_SYS].color = | ||
85 | perf_gtk__hpp_color_overhead_guest_sys; | ||
86 | perf_hpp__format[PERF_HPP__OVERHEAD_GUEST_US].color = | ||
87 | perf_gtk__hpp_color_overhead_guest_us; | ||
88 | } | ||
89 | |||
38 | static void perf_gtk__show_hists(GtkWidget *window, struct hists *hists) | 90 | static void perf_gtk__show_hists(GtkWidget *window, struct hists *hists) |
39 | { | 91 | { |
40 | GType col_types[MAX_COLUMNS]; | 92 | GType col_types[MAX_COLUMNS]; |
@@ -42,15 +94,25 @@ static void perf_gtk__show_hists(GtkWidget *window, struct hists *hists) | |||
42 | struct sort_entry *se; | 94 | struct sort_entry *se; |
43 | GtkListStore *store; | 95 | GtkListStore *store; |
44 | struct rb_node *nd; | 96 | struct rb_node *nd; |
45 | u64 total_period; | ||
46 | GtkWidget *view; | 97 | GtkWidget *view; |
47 | int col_idx; | 98 | int i, col_idx; |
48 | int nr_cols; | 99 | int nr_cols; |
100 | char s[512]; | ||
101 | |||
102 | struct perf_hpp hpp = { | ||
103 | .buf = s, | ||
104 | .size = sizeof(s), | ||
105 | .total_period = hists->stats.total_period, | ||
106 | }; | ||
49 | 107 | ||
50 | nr_cols = 0; | 108 | nr_cols = 0; |
51 | 109 | ||
52 | /* The percentage column */ | 110 | for (i = 0; i < PERF_HPP__MAX_INDEX; i++) { |
53 | col_types[nr_cols++] = G_TYPE_STRING; | 111 | if (!perf_hpp__format[i].cond) |
112 | continue; | ||
113 | |||
114 | col_types[nr_cols++] = G_TYPE_STRING; | ||
115 | } | ||
54 | 116 | ||
55 | list_for_each_entry(se, &hist_entry__sort_list, list) { | 117 | list_for_each_entry(se, &hist_entry__sort_list, list) { |
56 | if (se->elide) | 118 | if (se->elide) |
@@ -67,11 +129,17 @@ static void perf_gtk__show_hists(GtkWidget *window, struct hists *hists) | |||
67 | 129 | ||
68 | col_idx = 0; | 130 | col_idx = 0; |
69 | 131 | ||
70 | /* The percentage column */ | 132 | for (i = 0; i < PERF_HPP__MAX_INDEX; i++) { |
71 | gtk_tree_view_insert_column_with_attributes(GTK_TREE_VIEW(view), | 133 | if (!perf_hpp__format[i].cond) |
72 | -1, "Overhead (%)", | 134 | continue; |
73 | renderer, "text", | 135 | |
74 | col_idx++, NULL); | 136 | perf_hpp__format[i].header(&hpp); |
137 | |||
138 | gtk_tree_view_insert_column_with_attributes(GTK_TREE_VIEW(view), | ||
139 | -1, s, | ||
140 | renderer, "markup", | ||
141 | col_idx++, NULL); | ||
142 | } | ||
75 | 143 | ||
76 | list_for_each_entry(se, &hist_entry__sort_list, list) { | 144 | list_for_each_entry(se, &hist_entry__sort_list, list) { |
77 | if (se->elide) | 145 | if (se->elide) |
@@ -87,13 +155,9 @@ static void perf_gtk__show_hists(GtkWidget *window, struct hists *hists) | |||
87 | 155 | ||
88 | g_object_unref(GTK_TREE_MODEL(store)); | 156 | g_object_unref(GTK_TREE_MODEL(store)); |
89 | 157 | ||
90 | total_period = hists->stats.total_period; | ||
91 | |||
92 | for (nd = rb_first(&hists->entries); nd; nd = rb_next(nd)) { | 158 | for (nd = rb_first(&hists->entries); nd; nd = rb_next(nd)) { |
93 | struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node); | 159 | struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node); |
94 | GtkTreeIter iter; | 160 | GtkTreeIter iter; |
95 | double percent; | ||
96 | char s[512]; | ||
97 | 161 | ||
98 | if (h->filtered) | 162 | if (h->filtered) |
99 | continue; | 163 | continue; |
@@ -102,11 +166,17 @@ static void perf_gtk__show_hists(GtkWidget *window, struct hists *hists) | |||
102 | 166 | ||
103 | col_idx = 0; | 167 | col_idx = 0; |
104 | 168 | ||
105 | percent = (h->period * 100.0) / total_period; | 169 | for (i = 0; i < PERF_HPP__MAX_INDEX; i++) { |
170 | if (!perf_hpp__format[i].cond) | ||
171 | continue; | ||
106 | 172 | ||
107 | snprintf(s, ARRAY_SIZE(s), "%.2f", percent); | 173 | if (perf_hpp__format[i].color) |
174 | perf_hpp__format[i].color(&hpp, h); | ||
175 | else | ||
176 | perf_hpp__format[i].entry(&hpp, h); | ||
108 | 177 | ||
109 | gtk_list_store_set(store, &iter, col_idx++, s, -1); | 178 | gtk_list_store_set(store, &iter, col_idx++, s, -1); |
179 | } | ||
110 | 180 | ||
111 | list_for_each_entry(se, &hist_entry__sort_list, list) { | 181 | list_for_each_entry(se, &hist_entry__sort_list, list) { |
112 | if (se->elide) | 182 | if (se->elide) |
@@ -166,9 +236,10 @@ static GtkWidget *perf_gtk__setup_statusbar(void) | |||
166 | } | 236 | } |
167 | 237 | ||
168 | int perf_evlist__gtk_browse_hists(struct perf_evlist *evlist, | 238 | int perf_evlist__gtk_browse_hists(struct perf_evlist *evlist, |
169 | const char *help __used, | 239 | const char *help, |
170 | void (*timer) (void *arg)__used, | 240 | void (*timer) (void *arg)__maybe_unused, |
171 | void *arg __used, int delay_secs __used) | 241 | void *arg __maybe_unused, |
242 | int delay_secs __maybe_unused) | ||
172 | { | 243 | { |
173 | struct perf_evsel *pos; | 244 | struct perf_evsel *pos; |
174 | GtkWidget *vbox; | 245 | GtkWidget *vbox; |
@@ -233,6 +304,8 @@ int perf_evlist__gtk_browse_hists(struct perf_evlist *evlist, | |||
233 | 304 | ||
234 | gtk_window_set_position(GTK_WINDOW(window), GTK_WIN_POS_CENTER); | 305 | gtk_window_set_position(GTK_WINDOW(window), GTK_WIN_POS_CENTER); |
235 | 306 | ||
307 | ui_helpline__push(help); | ||
308 | |||
236 | gtk_main(); | 309 | gtk_main(); |
237 | 310 | ||
238 | perf_gtk__deactivate_context(&pgctx); | 311 | perf_gtk__deactivate_context(&pgctx); |
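The loops added above replace the single hard-coded "Overhead (%)" column with one GTK column per enabled entry in perf_hpp__format[]. The struct definitions behind that table live in util/hist.h and are not part of these hunks; the declarations below only restate the shape the loops assume (field and callback names are taken from the hunks in this series, everything else is an illustrative sketch, not a verbatim excerpt):

/* Sketch of the formatting context and column descriptor assumed above.
 * u64 and struct hist_entry come from perf's util headers. */
struct perf_hpp {
	char *buf;		/* output buffer for the current cell */
	size_t size;		/* space left in buf */
	u64 total_period;	/* denominator for the percentage columns */
	long displacement;	/* used by the "Displ." column in diff mode */
	void *ptr;		/* optional pair hists for baseline/delta */
};

struct perf_hpp_fmt {
	bool cond;				/* is this column enabled? */
	int (*header)(struct perf_hpp *hpp);	/* column title */
	int (*width)(struct perf_hpp *hpp);	/* column width in characters */
	int (*color)(struct perf_hpp *hpp, struct hist_entry *he);
	int (*entry)(struct perf_hpp *hpp, struct hist_entry *he);
};

Each front end (GTK here, stdio and TUI elsewhere in the series) walks the same table and skips entries whose cond is false, so enabling a column in perf_hpp__init() makes it appear everywhere.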
diff --git a/tools/perf/ui/gtk/gtk.h b/tools/perf/ui/gtk/gtk.h index a4d0f2b4a2dc..687af0bba187 100644 --- a/tools/perf/ui/gtk/gtk.h +++ b/tools/perf/ui/gtk/gtk.h | |||
@@ -29,6 +29,9 @@ static inline bool perf_gtk__is_active_context(struct perf_gtk_context *ctx) | |||
29 | struct perf_gtk_context *perf_gtk__activate_context(GtkWidget *window); | 29 | struct perf_gtk_context *perf_gtk__activate_context(GtkWidget *window); |
30 | int perf_gtk__deactivate_context(struct perf_gtk_context **ctx); | 30 | int perf_gtk__deactivate_context(struct perf_gtk_context **ctx); |
31 | 31 | ||
32 | void perf_gtk__init_helpline(void); | ||
33 | void perf_gtk__init_hpp(void); | ||
34 | |||
32 | #ifndef HAVE_GTK_INFO_BAR | 35 | #ifndef HAVE_GTK_INFO_BAR |
33 | static inline GtkWidget *perf_gtk__setup_info_bar(void) | 36 | static inline GtkWidget *perf_gtk__setup_info_bar(void) |
34 | { | 37 | { |
diff --git a/tools/perf/ui/gtk/helpline.c b/tools/perf/ui/gtk/helpline.c new file mode 100644 index 000000000000..5db4432ff12a --- /dev/null +++ b/tools/perf/ui/gtk/helpline.c | |||
@@ -0,0 +1,56 @@ | |||
1 | #include <stdio.h> | ||
2 | #include <string.h> | ||
3 | |||
4 | #include "gtk.h" | ||
5 | #include "../ui.h" | ||
6 | #include "../helpline.h" | ||
7 | #include "../../util/debug.h" | ||
8 | |||
9 | static void gtk_helpline_pop(void) | ||
10 | { | ||
11 | if (!perf_gtk__is_active_context(pgctx)) | ||
12 | return; | ||
13 | |||
14 | gtk_statusbar_pop(GTK_STATUSBAR(pgctx->statbar), | ||
15 | pgctx->statbar_ctx_id); | ||
16 | } | ||
17 | |||
18 | static void gtk_helpline_push(const char *msg) | ||
19 | { | ||
20 | if (!perf_gtk__is_active_context(pgctx)) | ||
21 | return; | ||
22 | |||
23 | gtk_statusbar_push(GTK_STATUSBAR(pgctx->statbar), | ||
24 | pgctx->statbar_ctx_id, msg); | ||
25 | } | ||
26 | |||
27 | static struct ui_helpline gtk_helpline_fns = { | ||
28 | .pop = gtk_helpline_pop, | ||
29 | .push = gtk_helpline_push, | ||
30 | }; | ||
31 | |||
32 | void perf_gtk__init_helpline(void) | ||
33 | { | ||
34 | helpline_fns = &gtk_helpline_fns; | ||
35 | } | ||
36 | |||
37 | int perf_gtk__show_helpline(const char *fmt, va_list ap) | ||
38 | { | ||
39 | int ret; | ||
40 | char *ptr; | ||
41 | static int backlog; | ||
42 | |||
43 | ret = vscnprintf(ui_helpline__current + backlog, | ||
44 | sizeof(ui_helpline__current) - backlog, fmt, ap); | ||
45 | backlog += ret; | ||
46 | |||
47 | /* only the first line can be displayed */ | ||
48 | ptr = strchr(ui_helpline__current, '\n'); | ||
49 | if (ptr && (ptr - ui_helpline__current) <= backlog) { | ||
50 | *ptr = '\0'; | ||
51 | ui_helpline__puts(ui_helpline__current); | ||
52 | backlog = 0; | ||
53 | } | ||
54 | |||
55 | return ret; | ||
56 | } | ||
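The new GTK helpline registers a pop/push pair behind a function-pointer table; the generic ui_helpline__push()/pop() entry points (reworked in ui/helpline.c further down) then dispatch through helpline_fns, so callers never need to know which front end is active. Below is a minimal, self-contained illustration of that registration pattern; the names in it are invented for the example and are not perf symbols:

#include <stdio.h>

struct helpline_ops {
	void (*push)(const char *msg);
	void (*pop)(void);
};

/* do-nothing defaults, safe before any UI is initialized */
static void nop_push(const char *msg) { (void)msg; }
static void nop_pop(void) { }
static struct helpline_ops nop_ops = { .push = nop_push, .pop = nop_pop };

static struct helpline_ops *ops = &nop_ops;

/* a "real" backend, analogous to the GTK statusbar push/pop above */
static void ui_push(const char *msg) { printf("helpline: %s\n", msg); }
static void ui_pop(void) { printf("helpline cleared\n"); }
static struct helpline_ops ui_ops = { .push = ui_push, .pop = ui_pop };

int main(void)
{
	ops->push("before init");	/* silently dropped by the nop default */
	ops = &ui_ops;			/* what perf_gtk__init_helpline() does */
	ops->push("after init");	/* now reaches the active backend */
	ops->pop();
	return 0;
}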
diff --git a/tools/perf/ui/gtk/setup.c b/tools/perf/ui/gtk/setup.c index 92879ce61e2f..3c4c6ef78283 100644 --- a/tools/perf/ui/gtk/setup.c +++ b/tools/perf/ui/gtk/setup.c | |||
@@ -7,11 +7,15 @@ extern struct perf_error_ops perf_gtk_eops; | |||
7 | int perf_gtk__init(void) | 7 | int perf_gtk__init(void) |
8 | { | 8 | { |
9 | perf_error__register(&perf_gtk_eops); | 9 | perf_error__register(&perf_gtk_eops); |
10 | perf_gtk__init_helpline(); | ||
11 | perf_gtk__init_hpp(); | ||
10 | return gtk_init_check(NULL, NULL) ? 0 : -1; | 12 | return gtk_init_check(NULL, NULL) ? 0 : -1; |
11 | } | 13 | } |
12 | 14 | ||
13 | void perf_gtk__exit(bool wait_for_ok __used) | 15 | void perf_gtk__exit(bool wait_for_ok __maybe_unused) |
14 | { | 16 | { |
17 | if (!perf_gtk__is_active_context(pgctx)) | ||
18 | return; | ||
15 | perf_error__unregister(&perf_gtk_eops); | 19 | perf_error__unregister(&perf_gtk_eops); |
16 | gtk_main_quit(); | 20 | gtk_main_quit(); |
17 | } | 21 | } |
diff --git a/tools/perf/ui/gtk/util.c b/tools/perf/ui/gtk/util.c index 0ead373c0dfb..8aada5b3c04c 100644 --- a/tools/perf/ui/gtk/util.c +++ b/tools/perf/ui/gtk/util.c | |||
@@ -117,13 +117,8 @@ struct perf_error_ops perf_gtk_eops = { | |||
117 | * For now, just add stubs for NO_NEWT=1 build. | 117 | * For now, just add stubs for NO_NEWT=1 build. |
118 | */ | 118 | */ |
119 | #ifdef NO_NEWT_SUPPORT | 119 | #ifdef NO_NEWT_SUPPORT |
120 | int ui_helpline__show_help(const char *format __used, va_list ap __used) | 120 | void ui_progress__update(u64 curr __maybe_unused, u64 total __maybe_unused, |
121 | { | 121 | const char *title __maybe_unused) |
122 | return 0; | ||
123 | } | ||
124 | |||
125 | void ui_progress__update(u64 curr __used, u64 total __used, | ||
126 | const char *title __used) | ||
127 | { | 122 | { |
128 | } | 123 | } |
129 | #endif | 124 | #endif |
diff --git a/tools/perf/ui/helpline.c b/tools/perf/ui/helpline.c index 2f950c2641c8..a49bcf3c190b 100644 --- a/tools/perf/ui/helpline.c +++ b/tools/perf/ui/helpline.c | |||
@@ -5,23 +5,32 @@ | |||
5 | #include "../debug.h" | 5 | #include "../debug.h" |
6 | #include "helpline.h" | 6 | #include "helpline.h" |
7 | #include "ui.h" | 7 | #include "ui.h" |
8 | #include "libslang.h" | ||
9 | 8 | ||
10 | void ui_helpline__pop(void) | 9 | char ui_helpline__current[512]; |
10 | |||
11 | static void nop_helpline__pop(void) | ||
11 | { | 12 | { |
12 | } | 13 | } |
13 | 14 | ||
14 | char ui_helpline__current[512]; | 15 | static void nop_helpline__push(const char *msg __maybe_unused) |
16 | { | ||
17 | } | ||
15 | 18 | ||
16 | void ui_helpline__push(const char *msg) | 19 | static struct ui_helpline default_helpline_fns = { |
20 | .pop = nop_helpline__pop, | ||
21 | .push = nop_helpline__push, | ||
22 | }; | ||
23 | |||
24 | struct ui_helpline *helpline_fns = &default_helpline_fns; | ||
25 | |||
26 | void ui_helpline__pop(void) | ||
17 | { | 27 | { |
18 | const size_t sz = sizeof(ui_helpline__current); | 28 | helpline_fns->pop(); |
29 | } | ||
19 | 30 | ||
20 | SLsmg_gotorc(SLtt_Screen_Rows - 1, 0); | 31 | void ui_helpline__push(const char *msg) |
21 | SLsmg_set_color(0); | 32 | { |
22 | SLsmg_write_nstring((char *)msg, SLtt_Screen_Cols); | 33 | helpline_fns->push(msg); |
23 | SLsmg_refresh(); | ||
24 | strncpy(ui_helpline__current, msg, sz)[sz - 1] = '\0'; | ||
25 | } | 34 | } |
26 | 35 | ||
27 | void ui_helpline__vpush(const char *fmt, va_list ap) | 36 | void ui_helpline__vpush(const char *fmt, va_list ap) |
@@ -50,30 +59,3 @@ void ui_helpline__puts(const char *msg) | |||
50 | ui_helpline__pop(); | 59 | ui_helpline__pop(); |
51 | ui_helpline__push(msg); | 60 | ui_helpline__push(msg); |
52 | } | 61 | } |
53 | |||
54 | void ui_helpline__init(void) | ||
55 | { | ||
56 | ui_helpline__puts(" "); | ||
57 | } | ||
58 | |||
59 | char ui_helpline__last_msg[1024]; | ||
60 | |||
61 | int ui_helpline__show_help(const char *format, va_list ap) | ||
62 | { | ||
63 | int ret; | ||
64 | static int backlog; | ||
65 | |||
66 | pthread_mutex_lock(&ui__lock); | ||
67 | ret = vscnprintf(ui_helpline__last_msg + backlog, | ||
68 | sizeof(ui_helpline__last_msg) - backlog, format, ap); | ||
69 | backlog += ret; | ||
70 | |||
71 | if (ui_helpline__last_msg[backlog - 1] == '\n') { | ||
72 | ui_helpline__puts(ui_helpline__last_msg); | ||
73 | SLsmg_refresh(); | ||
74 | backlog = 0; | ||
75 | } | ||
76 | pthread_mutex_unlock(&ui__lock); | ||
77 | |||
78 | return ret; | ||
79 | } | ||
diff --git a/tools/perf/ui/helpline.h b/tools/perf/ui/helpline.h index 7bab6b34e35e..2b667ee454c3 100644 --- a/tools/perf/ui/helpline.h +++ b/tools/perf/ui/helpline.h | |||
@@ -4,13 +4,44 @@ | |||
4 | #include <stdio.h> | 4 | #include <stdio.h> |
5 | #include <stdarg.h> | 5 | #include <stdarg.h> |
6 | 6 | ||
7 | #include "../util/cache.h" | ||
8 | |||
9 | struct ui_helpline { | ||
10 | void (*pop)(void); | ||
11 | void (*push)(const char *msg); | ||
12 | }; | ||
13 | |||
14 | extern struct ui_helpline *helpline_fns; | ||
15 | |||
7 | void ui_helpline__init(void); | 16 | void ui_helpline__init(void); |
17 | |||
8 | void ui_helpline__pop(void); | 18 | void ui_helpline__pop(void); |
9 | void ui_helpline__push(const char *msg); | 19 | void ui_helpline__push(const char *msg); |
10 | void ui_helpline__vpush(const char *fmt, va_list ap); | 20 | void ui_helpline__vpush(const char *fmt, va_list ap); |
11 | void ui_helpline__fpush(const char *fmt, ...); | 21 | void ui_helpline__fpush(const char *fmt, ...); |
12 | void ui_helpline__puts(const char *msg); | 22 | void ui_helpline__puts(const char *msg); |
13 | 23 | ||
14 | extern char ui_helpline__current[]; | 24 | extern char ui_helpline__current[512]; |
25 | |||
26 | #ifdef NO_NEWT_SUPPORT | ||
27 | static inline int ui_helpline__show_help(const char *format __maybe_unused, | ||
28 | va_list ap __maybe_unused) | ||
29 | { | ||
30 | return 0; | ||
31 | } | ||
32 | #else | ||
33 | extern char ui_helpline__last_msg[]; | ||
34 | int ui_helpline__show_help(const char *format, va_list ap); | ||
35 | #endif /* NO_NEWT_SUPPORT */ | ||
36 | |||
37 | #ifdef NO_GTK2_SUPPORT | ||
38 | static inline int perf_gtk__show_helpline(const char *format __maybe_unused, | ||
39 | va_list ap __maybe_unused) | ||
40 | { | ||
41 | return 0; | ||
42 | } | ||
43 | #else | ||
44 | int perf_gtk__show_helpline(const char *format, va_list ap); | ||
45 | #endif /* NO_GTK2_SUPPORT */ | ||
15 | 46 | ||
16 | #endif /* _PERF_UI_HELPLINE_H_ */ | 47 | #endif /* _PERF_UI_HELPLINE_H_ */ |
diff --git a/tools/perf/ui/hist.c b/tools/perf/ui/hist.c new file mode 100644 index 000000000000..e3f8cd46e7d7 --- /dev/null +++ b/tools/perf/ui/hist.c | |||
@@ -0,0 +1,390 @@ | |||
1 | #include <math.h> | ||
2 | |||
3 | #include "../util/hist.h" | ||
4 | #include "../util/util.h" | ||
5 | #include "../util/sort.h" | ||
6 | |||
7 | |||
8 | /* hist period print (hpp) functions */ | ||
9 | static int hpp__header_overhead(struct perf_hpp *hpp) | ||
10 | { | ||
11 | const char *fmt = hpp->ptr ? "Baseline" : "Overhead"; | ||
12 | |||
13 | return scnprintf(hpp->buf, hpp->size, fmt); | ||
14 | } | ||
15 | |||
16 | static int hpp__width_overhead(struct perf_hpp *hpp __maybe_unused) | ||
17 | { | ||
18 | return 8; | ||
19 | } | ||
20 | |||
21 | static int hpp__color_overhead(struct perf_hpp *hpp, struct hist_entry *he) | ||
22 | { | ||
23 | double percent = 100.0 * he->period / hpp->total_period; | ||
24 | |||
25 | if (hpp->ptr) { | ||
26 | struct hists *old_hists = hpp->ptr; | ||
27 | u64 total_period = old_hists->stats.total_period; | ||
28 | u64 base_period = he->pair ? he->pair->period : 0; | ||
29 | |||
30 | if (total_period) | ||
31 | percent = 100.0 * base_period / total_period; | ||
32 | else | ||
33 | percent = 0.0; | ||
34 | } | ||
35 | |||
36 | return percent_color_snprintf(hpp->buf, hpp->size, " %6.2f%%", percent); | ||
37 | } | ||
38 | |||
39 | static int hpp__entry_overhead(struct perf_hpp *hpp, struct hist_entry *he) | ||
40 | { | ||
41 | double percent = 100.0 * he->period / hpp->total_period; | ||
42 | const char *fmt = symbol_conf.field_sep ? "%.2f" : " %6.2f%%"; | ||
43 | |||
44 | if (hpp->ptr) { | ||
45 | struct hists *old_hists = hpp->ptr; | ||
46 | u64 total_period = old_hists->stats.total_period; | ||
47 | u64 base_period = he->pair ? he->pair->period : 0; | ||
48 | |||
49 | if (total_period) | ||
50 | percent = 100.0 * base_period / total_period; | ||
51 | else | ||
52 | percent = 0.0; | ||
53 | } | ||
54 | |||
55 | return scnprintf(hpp->buf, hpp->size, fmt, percent); | ||
56 | } | ||
57 | |||
58 | static int hpp__header_overhead_sys(struct perf_hpp *hpp) | ||
59 | { | ||
60 | const char *fmt = symbol_conf.field_sep ? "%s" : "%7s"; | ||
61 | |||
62 | return scnprintf(hpp->buf, hpp->size, fmt, "sys"); | ||
63 | } | ||
64 | |||
65 | static int hpp__width_overhead_sys(struct perf_hpp *hpp __maybe_unused) | ||
66 | { | ||
67 | return 7; | ||
68 | } | ||
69 | |||
70 | static int hpp__color_overhead_sys(struct perf_hpp *hpp, struct hist_entry *he) | ||
71 | { | ||
72 | double percent = 100.0 * he->period_sys / hpp->total_period; | ||
73 | return percent_color_snprintf(hpp->buf, hpp->size, "%6.2f%%", percent); | ||
74 | } | ||
75 | |||
76 | static int hpp__entry_overhead_sys(struct perf_hpp *hpp, struct hist_entry *he) | ||
77 | { | ||
78 | double percent = 100.0 * he->period_sys / hpp->total_period; | ||
79 | const char *fmt = symbol_conf.field_sep ? "%.2f" : "%6.2f%%"; | ||
80 | |||
81 | return scnprintf(hpp->buf, hpp->size, fmt, percent); | ||
82 | } | ||
83 | |||
84 | static int hpp__header_overhead_us(struct perf_hpp *hpp) | ||
85 | { | ||
86 | const char *fmt = symbol_conf.field_sep ? "%s" : "%7s"; | ||
87 | |||
88 | return scnprintf(hpp->buf, hpp->size, fmt, "user"); | ||
89 | } | ||
90 | |||
91 | static int hpp__width_overhead_us(struct perf_hpp *hpp __maybe_unused) | ||
92 | { | ||
93 | return 7; | ||
94 | } | ||
95 | |||
96 | static int hpp__color_overhead_us(struct perf_hpp *hpp, struct hist_entry *he) | ||
97 | { | ||
98 | double percent = 100.0 * he->period_us / hpp->total_period; | ||
99 | return percent_color_snprintf(hpp->buf, hpp->size, "%6.2f%%", percent); | ||
100 | } | ||
101 | |||
102 | static int hpp__entry_overhead_us(struct perf_hpp *hpp, struct hist_entry *he) | ||
103 | { | ||
104 | double percent = 100.0 * he->period_us / hpp->total_period; | ||
105 | const char *fmt = symbol_conf.field_sep ? "%.2f" : "%6.2f%%"; | ||
106 | |||
107 | return scnprintf(hpp->buf, hpp->size, fmt, percent); | ||
108 | } | ||
109 | |||
110 | static int hpp__header_overhead_guest_sys(struct perf_hpp *hpp) | ||
111 | { | ||
112 | return scnprintf(hpp->buf, hpp->size, "guest sys"); | ||
113 | } | ||
114 | |||
115 | static int hpp__width_overhead_guest_sys(struct perf_hpp *hpp __maybe_unused) | ||
116 | { | ||
117 | return 9; | ||
118 | } | ||
119 | |||
120 | static int hpp__color_overhead_guest_sys(struct perf_hpp *hpp, | ||
121 | struct hist_entry *he) | ||
122 | { | ||
123 | double percent = 100.0 * he->period_guest_sys / hpp->total_period; | ||
124 | return percent_color_snprintf(hpp->buf, hpp->size, " %6.2f%% ", percent); | ||
125 | } | ||
126 | |||
127 | static int hpp__entry_overhead_guest_sys(struct perf_hpp *hpp, | ||
128 | struct hist_entry *he) | ||
129 | { | ||
130 | double percent = 100.0 * he->period_guest_sys / hpp->total_period; | ||
131 | const char *fmt = symbol_conf.field_sep ? "%.2f" : " %6.2f%% "; | ||
132 | |||
133 | return scnprintf(hpp->buf, hpp->size, fmt, percent); | ||
134 | } | ||
135 | |||
136 | static int hpp__header_overhead_guest_us(struct perf_hpp *hpp) | ||
137 | { | ||
138 | return scnprintf(hpp->buf, hpp->size, "guest usr"); | ||
139 | } | ||
140 | |||
141 | static int hpp__width_overhead_guest_us(struct perf_hpp *hpp __maybe_unused) | ||
142 | { | ||
143 | return 9; | ||
144 | } | ||
145 | |||
146 | static int hpp__color_overhead_guest_us(struct perf_hpp *hpp, | ||
147 | struct hist_entry *he) | ||
148 | { | ||
149 | double percent = 100.0 * he->period_guest_us / hpp->total_period; | ||
150 | return percent_color_snprintf(hpp->buf, hpp->size, " %6.2f%% ", percent); | ||
151 | } | ||
152 | |||
153 | static int hpp__entry_overhead_guest_us(struct perf_hpp *hpp, | ||
154 | struct hist_entry *he) | ||
155 | { | ||
156 | double percent = 100.0 * he->period_guest_us / hpp->total_period; | ||
157 | const char *fmt = symbol_conf.field_sep ? "%.2f" : " %6.2f%% "; | ||
158 | |||
159 | return scnprintf(hpp->buf, hpp->size, fmt, percent); | ||
160 | } | ||
161 | |||
162 | static int hpp__header_samples(struct perf_hpp *hpp) | ||
163 | { | ||
164 | const char *fmt = symbol_conf.field_sep ? "%s" : "%11s"; | ||
165 | |||
166 | return scnprintf(hpp->buf, hpp->size, fmt, "Samples"); | ||
167 | } | ||
168 | |||
169 | static int hpp__width_samples(struct perf_hpp *hpp __maybe_unused) | ||
170 | { | ||
171 | return 11; | ||
172 | } | ||
173 | |||
174 | static int hpp__entry_samples(struct perf_hpp *hpp, struct hist_entry *he) | ||
175 | { | ||
176 | const char *fmt = symbol_conf.field_sep ? "%" PRIu64 : "%11" PRIu64; | ||
177 | |||
178 | return scnprintf(hpp->buf, hpp->size, fmt, he->nr_events); | ||
179 | } | ||
180 | |||
181 | static int hpp__header_period(struct perf_hpp *hpp) | ||
182 | { | ||
183 | const char *fmt = symbol_conf.field_sep ? "%s" : "%12s"; | ||
184 | |||
185 | return scnprintf(hpp->buf, hpp->size, fmt, "Period"); | ||
186 | } | ||
187 | |||
188 | static int hpp__width_period(struct perf_hpp *hpp __maybe_unused) | ||
189 | { | ||
190 | return 12; | ||
191 | } | ||
192 | |||
193 | static int hpp__entry_period(struct perf_hpp *hpp, struct hist_entry *he) | ||
194 | { | ||
195 | const char *fmt = symbol_conf.field_sep ? "%" PRIu64 : "%12" PRIu64; | ||
196 | |||
197 | return scnprintf(hpp->buf, hpp->size, fmt, he->period); | ||
198 | } | ||
199 | |||
200 | static int hpp__header_delta(struct perf_hpp *hpp) | ||
201 | { | ||
202 | const char *fmt = symbol_conf.field_sep ? "%s" : "%7s"; | ||
203 | |||
204 | return scnprintf(hpp->buf, hpp->size, fmt, "Delta"); | ||
205 | } | ||
206 | |||
207 | static int hpp__width_delta(struct perf_hpp *hpp __maybe_unused) | ||
208 | { | ||
209 | return 7; | ||
210 | } | ||
211 | |||
212 | static int hpp__entry_delta(struct perf_hpp *hpp, struct hist_entry *he) | ||
213 | { | ||
214 | struct hists *pair_hists = hpp->ptr; | ||
215 | u64 old_total, new_total; | ||
216 | double old_percent = 0, new_percent = 0; | ||
217 | double diff; | ||
218 | const char *fmt = symbol_conf.field_sep ? "%s" : "%7.7s"; | ||
219 | char buf[32] = " "; | ||
220 | |||
221 | old_total = pair_hists->stats.total_period; | ||
222 | if (old_total > 0 && he->pair) | ||
223 | old_percent = 100.0 * he->pair->period / old_total; | ||
224 | |||
225 | new_total = hpp->total_period; | ||
226 | if (new_total > 0) | ||
227 | new_percent = 100.0 * he->period / new_total; | ||
228 | |||
229 | diff = new_percent - old_percent; | ||
230 | if (fabs(diff) >= 0.01) | ||
231 | scnprintf(buf, sizeof(buf), "%+4.2F%%", diff); | ||
232 | |||
233 | return scnprintf(hpp->buf, hpp->size, fmt, buf); | ||
234 | } | ||
235 | |||
236 | static int hpp__header_displ(struct perf_hpp *hpp) | ||
237 | { | ||
238 | return scnprintf(hpp->buf, hpp->size, "Displ."); | ||
239 | } | ||
240 | |||
241 | static int hpp__width_displ(struct perf_hpp *hpp __maybe_unused) | ||
242 | { | ||
243 | return 6; | ||
244 | } | ||
245 | |||
246 | static int hpp__entry_displ(struct perf_hpp *hpp, | ||
247 | struct hist_entry *he __maybe_unused) | ||
248 | { | ||
249 | const char *fmt = symbol_conf.field_sep ? "%s" : "%6.6s"; | ||
250 | char buf[32] = " "; | ||
251 | |||
252 | if (hpp->displacement) | ||
253 | scnprintf(buf, sizeof(buf), "%+4ld", hpp->displacement); | ||
254 | |||
255 | return scnprintf(hpp->buf, hpp->size, fmt, buf); | ||
256 | } | ||
257 | |||
258 | #define HPP__COLOR_PRINT_FNS(_name) \ | ||
259 | .header = hpp__header_ ## _name, \ | ||
260 | .width = hpp__width_ ## _name, \ | ||
261 | .color = hpp__color_ ## _name, \ | ||
262 | .entry = hpp__entry_ ## _name | ||
263 | |||
264 | #define HPP__PRINT_FNS(_name) \ | ||
265 | .header = hpp__header_ ## _name, \ | ||
266 | .width = hpp__width_ ## _name, \ | ||
267 | .entry = hpp__entry_ ## _name | ||
268 | |||
269 | struct perf_hpp_fmt perf_hpp__format[] = { | ||
270 | { .cond = true, HPP__COLOR_PRINT_FNS(overhead) }, | ||
271 | { .cond = false, HPP__COLOR_PRINT_FNS(overhead_sys) }, | ||
272 | { .cond = false, HPP__COLOR_PRINT_FNS(overhead_us) }, | ||
273 | { .cond = false, HPP__COLOR_PRINT_FNS(overhead_guest_sys) }, | ||
274 | { .cond = false, HPP__COLOR_PRINT_FNS(overhead_guest_us) }, | ||
275 | { .cond = false, HPP__PRINT_FNS(samples) }, | ||
276 | { .cond = false, HPP__PRINT_FNS(period) }, | ||
277 | { .cond = false, HPP__PRINT_FNS(delta) }, | ||
278 | { .cond = false, HPP__PRINT_FNS(displ) } | ||
279 | }; | ||
280 | |||
281 | #undef HPP__COLOR_PRINT_FNS | ||
282 | #undef HPP__PRINT_FNS | ||
283 | |||
284 | void perf_hpp__init(bool need_pair, bool show_displacement) | ||
285 | { | ||
286 | if (symbol_conf.show_cpu_utilization) { | ||
287 | perf_hpp__format[PERF_HPP__OVERHEAD_SYS].cond = true; | ||
288 | perf_hpp__format[PERF_HPP__OVERHEAD_US].cond = true; | ||
289 | |||
290 | if (perf_guest) { | ||
291 | perf_hpp__format[PERF_HPP__OVERHEAD_GUEST_SYS].cond = true; | ||
292 | perf_hpp__format[PERF_HPP__OVERHEAD_GUEST_US].cond = true; | ||
293 | } | ||
294 | } | ||
295 | |||
296 | if (symbol_conf.show_nr_samples) | ||
297 | perf_hpp__format[PERF_HPP__SAMPLES].cond = true; | ||
298 | |||
299 | if (symbol_conf.show_total_period) | ||
300 | perf_hpp__format[PERF_HPP__PERIOD].cond = true; | ||
301 | |||
302 | if (need_pair) { | ||
303 | perf_hpp__format[PERF_HPP__DELTA].cond = true; | ||
304 | |||
305 | if (show_displacement) | ||
306 | perf_hpp__format[PERF_HPP__DISPL].cond = true; | ||
307 | } | ||
308 | } | ||
309 | |||
310 | static inline void advance_hpp(struct perf_hpp *hpp, int inc) | ||
311 | { | ||
312 | hpp->buf += inc; | ||
313 | hpp->size -= inc; | ||
314 | } | ||
315 | |||
316 | int hist_entry__period_snprintf(struct perf_hpp *hpp, struct hist_entry *he, | ||
317 | bool color) | ||
318 | { | ||
319 | const char *sep = symbol_conf.field_sep; | ||
320 | char *start = hpp->buf; | ||
321 | int i, ret; | ||
322 | |||
323 | if (symbol_conf.exclude_other && !he->parent) | ||
324 | return 0; | ||
325 | |||
326 | for (i = 0; i < PERF_HPP__MAX_INDEX; i++) { | ||
327 | if (!perf_hpp__format[i].cond) | ||
328 | continue; | ||
329 | |||
330 | if (!sep || i > 0) { | ||
331 | ret = scnprintf(hpp->buf, hpp->size, "%s", sep ?: " "); | ||
332 | advance_hpp(hpp, ret); | ||
333 | } | ||
334 | |||
335 | if (color && perf_hpp__format[i].color) | ||
336 | ret = perf_hpp__format[i].color(hpp, he); | ||
337 | else | ||
338 | ret = perf_hpp__format[i].entry(hpp, he); | ||
339 | |||
340 | advance_hpp(hpp, ret); | ||
341 | } | ||
342 | |||
343 | return hpp->buf - start; | ||
344 | } | ||
345 | |||
346 | int hist_entry__sort_snprintf(struct hist_entry *he, char *s, size_t size, | ||
347 | struct hists *hists) | ||
348 | { | ||
349 | const char *sep = symbol_conf.field_sep; | ||
350 | struct sort_entry *se; | ||
351 | int ret = 0; | ||
352 | |||
353 | list_for_each_entry(se, &hist_entry__sort_list, list) { | ||
354 | if (se->elide) | ||
355 | continue; | ||
356 | |||
357 | ret += scnprintf(s + ret, size - ret, "%s", sep ?: " "); | ||
358 | ret += se->se_snprintf(he, s + ret, size - ret, | ||
359 | hists__col_len(hists, se->se_width_idx)); | ||
360 | } | ||
361 | |||
362 | return ret; | ||
363 | } | ||
364 | |||
365 | /* | ||
366 | * See hists__fprintf to match the column widths | ||
367 | */ | ||
368 | unsigned int hists__sort_list_width(struct hists *hists) | ||
369 | { | ||
370 | struct sort_entry *se; | ||
371 | int i, ret = 0; | ||
372 | |||
373 | for (i = 0; i < PERF_HPP__MAX_INDEX; i++) { | ||
374 | if (!perf_hpp__format[i].cond) | ||
375 | continue; | ||
376 | if (i) | ||
377 | ret += 2; | ||
378 | |||
379 | ret += perf_hpp__format[i].width(NULL); | ||
380 | } | ||
381 | |||
382 | list_for_each_entry(se, &hist_entry__sort_list, list) | ||
383 | if (!se->elide) | ||
384 | ret += 2 + hists__col_len(hists, se->se_width_idx); | ||
385 | |||
386 | if (verbose) /* Addr + origin */ | ||
387 | ret += 3 + BITS_PER_LONG / 4; | ||
388 | |||
389 | return ret; | ||
390 | } | ||
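Pieced together from the calls visible in this file and in ui/stdio/hist.c below, a front end is expected to drive the table roughly as follows: enable columns once via perf_hpp__init(), then format each entry by letting the hpp callbacks fill the front of a buffer and appending the sort columns after them. This is a hedged sketch of that sequence, not a verbatim excerpt; `hists` and `he` are assumed to come from the caller:

char bf[512];
struct perf_hpp hpp = {
	.buf		= bf,
	.size		= sizeof(bf),
	.total_period	= hists->stats.total_period,
	.displacement	= 0,
	.ptr		= NULL,		/* pair hists, only set in diff mode */
};
int ret;

perf_hpp__init(false, false);	/* flip the .cond flags per symbol_conf */

/* period/percentage columns first, then the sort_entry columns */
ret = hist_entry__period_snprintf(&hpp, he, !symbol_conf.field_sep);
hist_entry__sort_snprintf(he, bf + ret, sizeof(bf) - ret, hists);
printf("%s\n", bf);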
diff --git a/tools/perf/ui/setup.c b/tools/perf/ui/setup.c index 791fb15ce350..bd7d460f844c 100644 --- a/tools/perf/ui/setup.c +++ b/tools/perf/ui/setup.c | |||
@@ -1,6 +1,10 @@ | |||
1 | #include "../cache.h" | 1 | #include <pthread.h> |
2 | #include "../debug.h" | ||
3 | 2 | ||
3 | #include "../util/cache.h" | ||
4 | #include "../util/debug.h" | ||
5 | #include "../util/hist.h" | ||
6 | |||
7 | pthread_mutex_t ui__lock = PTHREAD_MUTEX_INITIALIZER; | ||
4 | 8 | ||
5 | void setup_browser(bool fallback_to_pager) | 9 | void setup_browser(bool fallback_to_pager) |
6 | { | 10 | { |
@@ -25,6 +29,8 @@ void setup_browser(bool fallback_to_pager) | |||
25 | use_browser = 0; | 29 | use_browser = 0; |
26 | if (fallback_to_pager) | 30 | if (fallback_to_pager) |
27 | setup_pager(); | 31 | setup_pager(); |
32 | |||
33 | perf_hpp__init(false, false); | ||
28 | break; | 34 | break; |
29 | } | 35 | } |
30 | } | 36 | } |
diff --git a/tools/perf/ui/stdio/hist.c b/tools/perf/ui/stdio/hist.c new file mode 100644 index 000000000000..882461a42830 --- /dev/null +++ b/tools/perf/ui/stdio/hist.c | |||
@@ -0,0 +1,498 @@ | |||
1 | #include <stdio.h> | ||
2 | |||
3 | #include "../../util/util.h" | ||
4 | #include "../../util/hist.h" | ||
5 | #include "../../util/sort.h" | ||
6 | |||
7 | |||
8 | static size_t callchain__fprintf_left_margin(FILE *fp, int left_margin) | ||
9 | { | ||
10 | int i; | ||
11 | int ret = fprintf(fp, " "); | ||
12 | |||
13 | for (i = 0; i < left_margin; i++) | ||
14 | ret += fprintf(fp, " "); | ||
15 | |||
16 | return ret; | ||
17 | } | ||
18 | |||
19 | static size_t ipchain__fprintf_graph_line(FILE *fp, int depth, int depth_mask, | ||
20 | int left_margin) | ||
21 | { | ||
22 | int i; | ||
23 | size_t ret = callchain__fprintf_left_margin(fp, left_margin); | ||
24 | |||
25 | for (i = 0; i < depth; i++) | ||
26 | if (depth_mask & (1 << i)) | ||
27 | ret += fprintf(fp, "| "); | ||
28 | else | ||
29 | ret += fprintf(fp, " "); | ||
30 | |||
31 | ret += fprintf(fp, "\n"); | ||
32 | |||
33 | return ret; | ||
34 | } | ||
35 | |||
36 | static size_t ipchain__fprintf_graph(FILE *fp, struct callchain_list *chain, | ||
37 | int depth, int depth_mask, int period, | ||
38 | u64 total_samples, u64 hits, | ||
39 | int left_margin) | ||
40 | { | ||
41 | int i; | ||
42 | size_t ret = 0; | ||
43 | |||
44 | ret += callchain__fprintf_left_margin(fp, left_margin); | ||
45 | for (i = 0; i < depth; i++) { | ||
46 | if (depth_mask & (1 << i)) | ||
47 | ret += fprintf(fp, "|"); | ||
48 | else | ||
49 | ret += fprintf(fp, " "); | ||
50 | if (!period && i == depth - 1) { | ||
51 | double percent; | ||
52 | |||
53 | percent = hits * 100.0 / total_samples; | ||
54 | ret += percent_color_fprintf(fp, "--%2.2f%%-- ", percent); | ||
55 | } else | ||
56 | ret += fprintf(fp, "%s", " "); | ||
57 | } | ||
58 | if (chain->ms.sym) | ||
59 | ret += fprintf(fp, "%s\n", chain->ms.sym->name); | ||
60 | else | ||
61 | ret += fprintf(fp, "0x%0" PRIx64 "\n", chain->ip); | ||
62 | |||
63 | return ret; | ||
64 | } | ||
65 | |||
66 | static struct symbol *rem_sq_bracket; | ||
67 | static struct callchain_list rem_hits; | ||
68 | |||
69 | static void init_rem_hits(void) | ||
70 | { | ||
71 | rem_sq_bracket = malloc(sizeof(*rem_sq_bracket) + 6); | ||
72 | if (!rem_sq_bracket) { | ||
73 | fprintf(stderr, "Not enough memory to display remaining hits\n"); | ||
74 | return; | ||
75 | } | ||
76 | |||
77 | strcpy(rem_sq_bracket->name, "[...]"); | ||
78 | rem_hits.ms.sym = rem_sq_bracket; | ||
79 | } | ||
80 | |||
81 | static size_t __callchain__fprintf_graph(FILE *fp, struct rb_root *root, | ||
82 | u64 total_samples, int depth, | ||
83 | int depth_mask, int left_margin) | ||
84 | { | ||
85 | struct rb_node *node, *next; | ||
86 | struct callchain_node *child; | ||
87 | struct callchain_list *chain; | ||
88 | int new_depth_mask = depth_mask; | ||
89 | u64 remaining; | ||
90 | size_t ret = 0; | ||
91 | int i; | ||
92 | uint entries_printed = 0; | ||
93 | |||
94 | remaining = total_samples; | ||
95 | |||
96 | node = rb_first(root); | ||
97 | while (node) { | ||
98 | u64 new_total; | ||
99 | u64 cumul; | ||
100 | |||
101 | child = rb_entry(node, struct callchain_node, rb_node); | ||
102 | cumul = callchain_cumul_hits(child); | ||
103 | remaining -= cumul; | ||
104 | |||
105 | /* | ||
106 | * The depth mask manages the output of pipes that show | ||
107 | * the depth. We don't want to keep the pipes of the current | ||
108 | * level for the last child of this depth. | ||
109 | * Except if we have remaining filtered hits. They will | ||
110 | * supersede the last child | ||
111 | */ | ||
112 | next = rb_next(node); | ||
113 | if (!next && (callchain_param.mode != CHAIN_GRAPH_REL || !remaining)) | ||
114 | new_depth_mask &= ~(1 << (depth - 1)); | ||
115 | |||
116 | /* | ||
117 | * But we keep the older depth mask for the line separator | ||
118 | * to keep the level link until we reach the last child | ||
119 | */ | ||
120 | ret += ipchain__fprintf_graph_line(fp, depth, depth_mask, | ||
121 | left_margin); | ||
122 | i = 0; | ||
123 | list_for_each_entry(chain, &child->val, list) { | ||
124 | ret += ipchain__fprintf_graph(fp, chain, depth, | ||
125 | new_depth_mask, i++, | ||
126 | total_samples, | ||
127 | cumul, | ||
128 | left_margin); | ||
129 | } | ||
130 | |||
131 | if (callchain_param.mode == CHAIN_GRAPH_REL) | ||
132 | new_total = child->children_hit; | ||
133 | else | ||
134 | new_total = total_samples; | ||
135 | |||
136 | ret += __callchain__fprintf_graph(fp, &child->rb_root, new_total, | ||
137 | depth + 1, | ||
138 | new_depth_mask | (1 << depth), | ||
139 | left_margin); | ||
140 | node = next; | ||
141 | if (++entries_printed == callchain_param.print_limit) | ||
142 | break; | ||
143 | } | ||
144 | |||
145 | if (callchain_param.mode == CHAIN_GRAPH_REL && | ||
146 | remaining && remaining != total_samples) { | ||
147 | |||
148 | if (!rem_sq_bracket) | ||
149 | return ret; | ||
150 | |||
151 | new_depth_mask &= ~(1 << (depth - 1)); | ||
152 | ret += ipchain__fprintf_graph(fp, &rem_hits, depth, | ||
153 | new_depth_mask, 0, total_samples, | ||
154 | remaining, left_margin); | ||
155 | } | ||
156 | |||
157 | return ret; | ||
158 | } | ||
159 | |||
160 | static size_t callchain__fprintf_graph(FILE *fp, struct rb_root *root, | ||
161 | u64 total_samples, int left_margin) | ||
162 | { | ||
163 | struct callchain_node *cnode; | ||
164 | struct callchain_list *chain; | ||
165 | u32 entries_printed = 0; | ||
166 | bool printed = false; | ||
167 | struct rb_node *node; | ||
168 | int i = 0; | ||
169 | int ret = 0; | ||
170 | |||
171 | /* | ||
172 | * If we have a single callchain root, don't bother printing | ||
173 | * its percentage (100 % in fractal mode and the same percentage | ||
174 | * as the hist in graph mode). This also avoids one level of column. | ||
175 | */ | ||
176 | node = rb_first(root); | ||
177 | if (node && !rb_next(node)) { | ||
178 | cnode = rb_entry(node, struct callchain_node, rb_node); | ||
179 | list_for_each_entry(chain, &cnode->val, list) { | ||
180 | /* | ||
181 | * If we sort by symbol, the first entry is the same as | ||
182 | * the symbol. No need to print it; otherwise it appears | ||
183 | * displayed twice. | ||
184 | */ | ||
185 | if (!i++ && sort__first_dimension == SORT_SYM) | ||
186 | continue; | ||
187 | if (!printed) { | ||
188 | ret += callchain__fprintf_left_margin(fp, left_margin); | ||
189 | ret += fprintf(fp, "|\n"); | ||
190 | ret += callchain__fprintf_left_margin(fp, left_margin); | ||
191 | ret += fprintf(fp, "---"); | ||
192 | left_margin += 3; | ||
193 | printed = true; | ||
194 | } else | ||
195 | ret += callchain__fprintf_left_margin(fp, left_margin); | ||
196 | |||
197 | if (chain->ms.sym) | ||
198 | ret += fprintf(fp, " %s\n", chain->ms.sym->name); | ||
199 | else | ||
200 | ret += fprintf(fp, " %p\n", (void *)(long)chain->ip); | ||
201 | |||
202 | if (++entries_printed == callchain_param.print_limit) | ||
203 | break; | ||
204 | } | ||
205 | root = &cnode->rb_root; | ||
206 | } | ||
207 | |||
208 | ret += __callchain__fprintf_graph(fp, root, total_samples, | ||
209 | 1, 1, left_margin); | ||
210 | ret += fprintf(fp, "\n"); | ||
211 | |||
212 | return ret; | ||
213 | } | ||
214 | |||
215 | static size_t __callchain__fprintf_flat(FILE *fp, | ||
216 | struct callchain_node *self, | ||
217 | u64 total_samples) | ||
218 | { | ||
219 | struct callchain_list *chain; | ||
220 | size_t ret = 0; | ||
221 | |||
222 | if (!self) | ||
223 | return 0; | ||
224 | |||
225 | ret += __callchain__fprintf_flat(fp, self->parent, total_samples); | ||
226 | |||
227 | |||
228 | list_for_each_entry(chain, &self->val, list) { | ||
229 | if (chain->ip >= PERF_CONTEXT_MAX) | ||
230 | continue; | ||
231 | if (chain->ms.sym) | ||
232 | ret += fprintf(fp, " %s\n", chain->ms.sym->name); | ||
233 | else | ||
234 | ret += fprintf(fp, " %p\n", | ||
235 | (void *)(long)chain->ip); | ||
236 | } | ||
237 | |||
238 | return ret; | ||
239 | } | ||
240 | |||
241 | static size_t callchain__fprintf_flat(FILE *fp, struct rb_root *self, | ||
242 | u64 total_samples) | ||
243 | { | ||
244 | size_t ret = 0; | ||
245 | u32 entries_printed = 0; | ||
246 | struct rb_node *rb_node; | ||
247 | struct callchain_node *chain; | ||
248 | |||
249 | rb_node = rb_first(self); | ||
250 | while (rb_node) { | ||
251 | double percent; | ||
252 | |||
253 | chain = rb_entry(rb_node, struct callchain_node, rb_node); | ||
254 | percent = chain->hit * 100.0 / total_samples; | ||
255 | |||
256 | ret = percent_color_fprintf(fp, " %6.2f%%\n", percent); | ||
257 | ret += __callchain__fprintf_flat(fp, chain, total_samples); | ||
258 | ret += fprintf(fp, "\n"); | ||
259 | if (++entries_printed == callchain_param.print_limit) | ||
260 | break; | ||
261 | |||
262 | rb_node = rb_next(rb_node); | ||
263 | } | ||
264 | |||
265 | return ret; | ||
266 | } | ||
267 | |||
268 | static size_t hist_entry_callchain__fprintf(struct hist_entry *he, | ||
269 | u64 total_samples, int left_margin, | ||
270 | FILE *fp) | ||
271 | { | ||
272 | switch (callchain_param.mode) { | ||
273 | case CHAIN_GRAPH_REL: | ||
274 | return callchain__fprintf_graph(fp, &he->sorted_chain, he->period, | ||
275 | left_margin); | ||
276 | break; | ||
277 | case CHAIN_GRAPH_ABS: | ||
278 | return callchain__fprintf_graph(fp, &he->sorted_chain, total_samples, | ||
279 | left_margin); | ||
280 | break; | ||
281 | case CHAIN_FLAT: | ||
282 | return callchain__fprintf_flat(fp, &he->sorted_chain, total_samples); | ||
283 | break; | ||
284 | case CHAIN_NONE: | ||
285 | break; | ||
286 | default: | ||
287 | pr_err("Bad callchain mode\n"); | ||
288 | } | ||
289 | |||
290 | return 0; | ||
291 | } | ||
292 | |||
293 | static size_t hist_entry__callchain_fprintf(struct hist_entry *he, | ||
294 | struct hists *hists, | ||
295 | u64 total_period, FILE *fp) | ||
296 | { | ||
297 | int left_margin = 0; | ||
298 | |||
299 | if (sort__first_dimension == SORT_COMM) { | ||
300 | struct sort_entry *se = list_first_entry(&hist_entry__sort_list, | ||
301 | typeof(*se), list); | ||
302 | left_margin = hists__col_len(hists, se->se_width_idx); | ||
303 | left_margin -= thread__comm_len(he->thread); | ||
304 | } | ||
305 | |||
306 | return hist_entry_callchain__fprintf(he, total_period, left_margin, fp); | ||
307 | } | ||
308 | |||
309 | static int hist_entry__fprintf(struct hist_entry *he, size_t size, | ||
310 | struct hists *hists, struct hists *pair_hists, | ||
311 | long displacement, u64 total_period, FILE *fp) | ||
312 | { | ||
313 | char bf[512]; | ||
314 | int ret; | ||
315 | struct perf_hpp hpp = { | ||
316 | .buf = bf, | ||
317 | .size = size, | ||
318 | .total_period = total_period, | ||
319 | .displacement = displacement, | ||
320 | .ptr = pair_hists, | ||
321 | }; | ||
322 | bool color = !symbol_conf.field_sep; | ||
323 | |||
324 | if (size == 0 || size > sizeof(bf)) | ||
325 | size = hpp.size = sizeof(bf); | ||
326 | |||
327 | ret = hist_entry__period_snprintf(&hpp, he, color); | ||
328 | hist_entry__sort_snprintf(he, bf + ret, size - ret, hists); | ||
329 | |||
330 | ret = fprintf(fp, "%s\n", bf); | ||
331 | |||
332 | if (symbol_conf.use_callchain) | ||
333 | ret += hist_entry__callchain_fprintf(he, hists, | ||
334 | total_period, fp); | ||
335 | |||
336 | return ret; | ||
337 | } | ||
338 | |||
339 | size_t hists__fprintf(struct hists *hists, struct hists *pair, | ||
340 | bool show_displacement, bool show_header, int max_rows, | ||
341 | int max_cols, FILE *fp) | ||
342 | { | ||
343 | struct sort_entry *se; | ||
344 | struct rb_node *nd; | ||
345 | size_t ret = 0; | ||
346 | u64 total_period; | ||
347 | unsigned long position = 1; | ||
348 | long displacement = 0; | ||
349 | unsigned int width; | ||
350 | const char *sep = symbol_conf.field_sep; | ||
351 | const char *col_width = symbol_conf.col_width_list_str; | ||
352 | int idx, nr_rows = 0; | ||
353 | char bf[64]; | ||
354 | struct perf_hpp dummy_hpp = { | ||
355 | .buf = bf, | ||
356 | .size = sizeof(bf), | ||
357 | .ptr = pair, | ||
358 | }; | ||
359 | |||
360 | init_rem_hits(); | ||
361 | |||
362 | if (!show_header) | ||
363 | goto print_entries; | ||
364 | |||
365 | fprintf(fp, "# "); | ||
366 | for (idx = 0; idx < PERF_HPP__MAX_INDEX; idx++) { | ||
367 | if (!perf_hpp__format[idx].cond) | ||
368 | continue; | ||
369 | |||
370 | if (idx) | ||
371 | fprintf(fp, "%s", sep ?: " "); | ||
372 | |||
373 | perf_hpp__format[idx].header(&dummy_hpp); | ||
374 | fprintf(fp, "%s", bf); | ||
375 | } | ||
376 | |||
377 | list_for_each_entry(se, &hist_entry__sort_list, list) { | ||
378 | if (se->elide) | ||
379 | continue; | ||
380 | if (sep) { | ||
381 | fprintf(fp, "%c%s", *sep, se->se_header); | ||
382 | continue; | ||
383 | } | ||
384 | width = strlen(se->se_header); | ||
385 | if (symbol_conf.col_width_list_str) { | ||
386 | if (col_width) { | ||
387 | hists__set_col_len(hists, se->se_width_idx, | ||
388 | atoi(col_width)); | ||
389 | col_width = strchr(col_width, ','); | ||
390 | if (col_width) | ||
391 | ++col_width; | ||
392 | } | ||
393 | } | ||
394 | if (!hists__new_col_len(hists, se->se_width_idx, width)) | ||
395 | width = hists__col_len(hists, se->se_width_idx); | ||
396 | fprintf(fp, " %*s", width, se->se_header); | ||
397 | } | ||
398 | |||
399 | fprintf(fp, "\n"); | ||
400 | if (max_rows && ++nr_rows >= max_rows) | ||
401 | goto out; | ||
402 | |||
403 | if (sep) | ||
404 | goto print_entries; | ||
405 | |||
406 | fprintf(fp, "# "); | ||
407 | for (idx = 0; idx < PERF_HPP__MAX_INDEX; idx++) { | ||
408 | unsigned int i; | ||
409 | |||
410 | if (!perf_hpp__format[idx].cond) | ||
411 | continue; | ||
412 | |||
413 | if (idx) | ||
414 | fprintf(fp, "%s", sep ?: " "); | ||
415 | |||
416 | width = perf_hpp__format[idx].width(&dummy_hpp); | ||
417 | for (i = 0; i < width; i++) | ||
418 | fprintf(fp, "."); | ||
419 | } | ||
420 | |||
421 | list_for_each_entry(se, &hist_entry__sort_list, list) { | ||
422 | unsigned int i; | ||
423 | |||
424 | if (se->elide) | ||
425 | continue; | ||
426 | |||
427 | fprintf(fp, " "); | ||
428 | width = hists__col_len(hists, se->se_width_idx); | ||
429 | if (width == 0) | ||
430 | width = strlen(se->se_header); | ||
431 | for (i = 0; i < width; i++) | ||
432 | fprintf(fp, "."); | ||
433 | } | ||
434 | |||
435 | fprintf(fp, "\n"); | ||
436 | if (max_rows && ++nr_rows >= max_rows) | ||
437 | goto out; | ||
438 | |||
439 | fprintf(fp, "#\n"); | ||
440 | if (max_rows && ++nr_rows >= max_rows) | ||
441 | goto out; | ||
442 | |||
443 | print_entries: | ||
444 | total_period = hists->stats.total_period; | ||
445 | |||
446 | for (nd = rb_first(&hists->entries); nd; nd = rb_next(nd)) { | ||
447 | struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node); | ||
448 | |||
449 | if (h->filtered) | ||
450 | continue; | ||
451 | |||
452 | if (show_displacement) { | ||
453 | if (h->pair != NULL) | ||
454 | displacement = ((long)h->pair->position - | ||
455 | (long)position); | ||
456 | else | ||
457 | displacement = 0; | ||
458 | ++position; | ||
459 | } | ||
460 | ret += hist_entry__fprintf(h, max_cols, hists, pair, displacement, | ||
461 | total_period, fp); | ||
462 | |||
463 | if (max_rows && ++nr_rows >= max_rows) | ||
464 | goto out; | ||
465 | |||
466 | if (h->ms.map == NULL && verbose > 1) { | ||
467 | __map_groups__fprintf_maps(&h->thread->mg, | ||
468 | MAP__FUNCTION, verbose, fp); | ||
469 | fprintf(fp, "%.10s end\n", graph_dotted_line); | ||
470 | } | ||
471 | } | ||
472 | out: | ||
473 | free(rem_sq_bracket); | ||
474 | |||
475 | return ret; | ||
476 | } | ||
477 | |||
478 | size_t hists__fprintf_nr_events(struct hists *hists, FILE *fp) | ||
479 | { | ||
480 | int i; | ||
481 | size_t ret = 0; | ||
482 | |||
483 | for (i = 0; i < PERF_RECORD_HEADER_MAX; ++i) { | ||
484 | const char *name; | ||
485 | |||
486 | if (hists->stats.nr_events[i] == 0) | ||
487 | continue; | ||
488 | |||
489 | name = perf_event__name(i); | ||
490 | if (!strcmp(name, "UNKNOWN")) | ||
491 | continue; | ||
492 | |||
493 | ret += fprintf(fp, "%16s events: %10d\n", name, | ||
494 | hists->stats.nr_events[i]); | ||
495 | } | ||
496 | |||
497 | return ret; | ||
498 | } | ||
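The graph printer above threads a depth_mask bitmap through the recursion: bit i stays set while depth i still has siblings to print, so the '|' guide keeps being drawn at that column, and the bit is cleared for the last child (unless filtered "[...]" hits remain). A self-contained toy of just that bit test, separate from any perf code:

#include <stdio.h>

/* bit i of depth_mask set => keep drawing the guide for depth i */
static void print_guides(int depth, int depth_mask)
{
	int i;

	for (i = 0; i < depth; i++)
		fputs(depth_mask & (1 << i) ? "|  " : "   ", stdout);
	putchar('\n');
}

int main(void)
{
	print_guides(3, 0x7);	/* guides at depths 0, 1 and 2 */
	print_guides(3, 0x5);	/* depth 1 printed its last child: its guide is gone */
	return 0;
}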
diff --git a/tools/perf/ui/tui/helpline.c b/tools/perf/ui/tui/helpline.c new file mode 100644 index 000000000000..2884d2f41e33 --- /dev/null +++ b/tools/perf/ui/tui/helpline.c | |||
@@ -0,0 +1,57 @@ | |||
1 | #include <stdio.h> | ||
2 | #include <stdlib.h> | ||
3 | #include <string.h> | ||
4 | #include <pthread.h> | ||
5 | |||
6 | #include "../../util/debug.h" | ||
7 | #include "../helpline.h" | ||
8 | #include "../ui.h" | ||
9 | #include "../libslang.h" | ||
10 | |||
11 | static void tui_helpline__pop(void) | ||
12 | { | ||
13 | } | ||
14 | |||
15 | static void tui_helpline__push(const char *msg) | ||
16 | { | ||
17 | const size_t sz = sizeof(ui_helpline__current); | ||
18 | |||
19 | SLsmg_gotorc(SLtt_Screen_Rows - 1, 0); | ||
20 | SLsmg_set_color(0); | ||
21 | SLsmg_write_nstring((char *)msg, SLtt_Screen_Cols); | ||
22 | SLsmg_refresh(); | ||
23 | strncpy(ui_helpline__current, msg, sz)[sz - 1] = '\0'; | ||
24 | } | ||
25 | |||
26 | struct ui_helpline tui_helpline_fns = { | ||
27 | .pop = tui_helpline__pop, | ||
28 | .push = tui_helpline__push, | ||
29 | }; | ||
30 | |||
31 | void ui_helpline__init(void) | ||
32 | { | ||
33 | helpline_fns = &tui_helpline_fns; | ||
34 | ui_helpline__puts(" "); | ||
35 | } | ||
36 | |||
37 | char ui_helpline__last_msg[1024]; | ||
38 | |||
39 | int ui_helpline__show_help(const char *format, va_list ap) | ||
40 | { | ||
41 | int ret; | ||
42 | static int backlog; | ||
43 | |||
44 | pthread_mutex_lock(&ui__lock); | ||
45 | ret = vscnprintf(ui_helpline__last_msg + backlog, | ||
46 | sizeof(ui_helpline__last_msg) - backlog, format, ap); | ||
47 | backlog += ret; | ||
48 | |||
49 | if (ui_helpline__last_msg[backlog - 1] == '\n') { | ||
50 | ui_helpline__puts(ui_helpline__last_msg); | ||
51 | SLsmg_refresh(); | ||
52 | backlog = 0; | ||
53 | } | ||
54 | pthread_mutex_unlock(&ui__lock); | ||
55 | |||
56 | return ret; | ||
57 | } | ||
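Both this TUI ui_helpline__show_help() and the GTK perf_gtk__show_helpline() added earlier share the same idea: printf-style fragments accumulate in a static backlog and are only flushed to the helpline once a newline shows up (the GTK variant additionally truncates at the first newline, since its statusbar shows a single line). A standalone toy of that accumulation logic, using plain vsnprintf instead of perf's vscnprintf, so it is a sketch rather than a drop-in:

#include <stdarg.h>
#include <stdio.h>

static char last_msg[1024];

static int helpline_vappend(const char *fmt, va_list ap)
{
	static int backlog;
	int room = sizeof(last_msg) - backlog;
	int ret;

	ret = vsnprintf(last_msg + backlog, room, fmt, ap);
	if (ret >= room)		/* vscnprintf clamps its return; do it by hand here */
		ret = room - 1;
	backlog += ret;

	if (backlog && last_msg[backlog - 1] == '\n') {
		fputs(last_msg, stdout);	/* stands in for ui_helpline__puts() */
		backlog = 0;
	}
	return ret;
}

static int helpline_append(const char *fmt, ...)
{
	va_list ap;
	int ret;

	va_start(ap, fmt);
	ret = helpline_vappend(fmt, ap);
	va_end(ap);
	return ret;
}

int main(void)
{
	helpline_append("loading ");
	helpline_append("symbols...\n");	/* newline arrives: the whole line is flushed */
	return 0;
}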
diff --git a/tools/perf/ui/tui/setup.c b/tools/perf/ui/tui/setup.c index e813c1d17346..60debb81537a 100644 --- a/tools/perf/ui/tui/setup.c +++ b/tools/perf/ui/tui/setup.c | |||
@@ -11,12 +11,12 @@ | |||
11 | #include "../libslang.h" | 11 | #include "../libslang.h" |
12 | #include "../keysyms.h" | 12 | #include "../keysyms.h" |
13 | 13 | ||
14 | pthread_mutex_t ui__lock = PTHREAD_MUTEX_INITIALIZER; | ||
15 | |||
16 | static volatile int ui__need_resize; | 14 | static volatile int ui__need_resize; |
17 | 15 | ||
18 | extern struct perf_error_ops perf_tui_eops; | 16 | extern struct perf_error_ops perf_tui_eops; |
19 | 17 | ||
18 | extern void hist_browser__init_hpp(void); | ||
19 | |||
20 | void ui__refresh_dimensions(bool force) | 20 | void ui__refresh_dimensions(bool force) |
21 | { | 21 | { |
22 | if (force || ui__need_resize) { | 22 | if (force || ui__need_resize) { |
@@ -28,7 +28,7 @@ void ui__refresh_dimensions(bool force) | |||
28 | } | 28 | } |
29 | } | 29 | } |
30 | 30 | ||
31 | static void ui__sigwinch(int sig __used) | 31 | static void ui__sigwinch(int sig __maybe_unused) |
32 | { | 32 | { |
33 | ui__need_resize = 1; | 33 | ui__need_resize = 1; |
34 | } | 34 | } |
@@ -88,7 +88,7 @@ int ui__getch(int delay_secs) | |||
88 | return SLkp_getkey(); | 88 | return SLkp_getkey(); |
89 | } | 89 | } |
90 | 90 | ||
91 | static void newt_suspend(void *d __used) | 91 | static void newt_suspend(void *d __maybe_unused) |
92 | { | 92 | { |
93 | newtSuspend(); | 93 | newtSuspend(); |
94 | raise(SIGTSTP); | 94 | raise(SIGTSTP); |
@@ -126,6 +126,8 @@ int ui__init(void) | |||
126 | signal(SIGTERM, ui__signal); | 126 | signal(SIGTERM, ui__signal); |
127 | 127 | ||
128 | perf_error__register(&perf_tui_eops); | 128 | perf_error__register(&perf_tui_eops); |
129 | |||
130 | hist_browser__init_hpp(); | ||
129 | out: | 131 | out: |
130 | return err; | 132 | return err; |
131 | } | 133 | } |
diff --git a/tools/perf/util/alias.c b/tools/perf/util/alias.c index b8144e80bb1e..e6d134773d0a 100644 --- a/tools/perf/util/alias.c +++ b/tools/perf/util/alias.c | |||
@@ -3,7 +3,8 @@ | |||
3 | static const char *alias_key; | 3 | static const char *alias_key; |
4 | static char *alias_val; | 4 | static char *alias_val; |
5 | 5 | ||
6 | static int alias_lookup_cb(const char *k, const char *v, void *cb __used) | 6 | static int alias_lookup_cb(const char *k, const char *v, |
7 | void *cb __maybe_unused) | ||
7 | { | 8 | { |
8 | if (!prefixcmp(k, "alias.") && !strcmp(k+6, alias_key)) { | 9 | if (!prefixcmp(k, "alias.") && !strcmp(k+6, alias_key)) { |
9 | if (!v) | 10 | if (!v) |
diff --git a/tools/perf/util/annotate.c b/tools/perf/util/annotate.c index 3a282c0057d2..f0a910371377 100644 --- a/tools/perf/util/annotate.c +++ b/tools/perf/util/annotate.c | |||
@@ -17,6 +17,7 @@ | |||
17 | #include <pthread.h> | 17 | #include <pthread.h> |
18 | 18 | ||
19 | const char *disassembler_style; | 19 | const char *disassembler_style; |
20 | const char *objdump_path; | ||
20 | 21 | ||
21 | static struct ins *ins__find(const char *name); | 22 | static struct ins *ins__find(const char *name); |
22 | static int disasm_line__parse(char *line, char **namep, char **rawp); | 23 | static int disasm_line__parse(char *line, char **namep, char **rawp); |
@@ -312,8 +313,8 @@ static struct ins_ops dec_ops = { | |||
312 | .scnprintf = dec__scnprintf, | 313 | .scnprintf = dec__scnprintf, |
313 | }; | 314 | }; |
314 | 315 | ||
315 | static int nop__scnprintf(struct ins *ins __used, char *bf, size_t size, | 316 | static int nop__scnprintf(struct ins *ins __maybe_unused, char *bf, size_t size, |
316 | struct ins_operands *ops __used) | 317 | struct ins_operands *ops __maybe_unused) |
317 | { | 318 | { |
318 | return scnprintf(bf, size, "%-6.6s", "nop"); | 319 | return scnprintf(bf, size, "%-6.6s", "nop"); |
319 | } | 320 | } |
@@ -415,7 +416,7 @@ static struct ins *ins__find(const char *name) | |||
415 | return bsearch(name, instructions, nmemb, sizeof(struct ins), ins__cmp); | 416 | return bsearch(name, instructions, nmemb, sizeof(struct ins), ins__cmp); |
416 | } | 417 | } |
417 | 418 | ||
418 | int symbol__annotate_init(struct map *map __used, struct symbol *sym) | 419 | int symbol__annotate_init(struct map *map __maybe_unused, struct symbol *sym) |
419 | { | 420 | { |
420 | struct annotation *notes = symbol__annotation(sym); | 421 | struct annotation *notes = symbol__annotation(sym); |
421 | pthread_mutex_init(&notes->lock, NULL); | 422 | pthread_mutex_init(&notes->lock, NULL); |
@@ -820,9 +821,10 @@ fallback: | |||
820 | dso, dso->long_name, sym, sym->name); | 821 | dso, dso->long_name, sym, sym->name); |
821 | 822 | ||
822 | snprintf(command, sizeof(command), | 823 | snprintf(command, sizeof(command), |
823 | "objdump %s%s --start-address=0x%016" PRIx64 | 824 | "%s %s%s --start-address=0x%016" PRIx64 |
824 | " --stop-address=0x%016" PRIx64 | 825 | " --stop-address=0x%016" PRIx64 |
825 | " -d %s %s -C %s|grep -v %s|expand", | 826 | " -d %s %s -C %s|grep -v %s|expand", |
827 | objdump_path ? objdump_path : "objdump", | ||
826 | disassembler_style ? "-M " : "", | 828 | disassembler_style ? "-M " : "", |
827 | disassembler_style ? disassembler_style : "", | 829 | disassembler_style ? disassembler_style : "", |
828 | map__rip_2objdump(map, sym->start), | 830 | map__rip_2objdump(map, sym->start), |
@@ -982,7 +984,8 @@ int symbol__annotate_printf(struct symbol *sym, struct map *map, int evidx, | |||
982 | int context) | 984 | int context) |
983 | { | 985 | { |
984 | struct dso *dso = map->dso; | 986 | struct dso *dso = map->dso; |
985 | const char *filename = dso->long_name, *d_filename; | 987 | char *filename; |
988 | const char *d_filename; | ||
986 | struct annotation *notes = symbol__annotation(sym); | 989 | struct annotation *notes = symbol__annotation(sym); |
987 | struct disasm_line *pos, *queue = NULL; | 990 | struct disasm_line *pos, *queue = NULL; |
988 | u64 start = map__rip_2objdump(map, sym->start); | 991 | u64 start = map__rip_2objdump(map, sym->start); |
@@ -990,6 +993,10 @@ int symbol__annotate_printf(struct symbol *sym, struct map *map, int evidx, | |||
990 | int more = 0; | 993 | int more = 0; |
991 | u64 len; | 994 | u64 len; |
992 | 995 | ||
996 | filename = strdup(dso->long_name); | ||
997 | if (!filename) | ||
998 | return -ENOMEM; | ||
999 | |||
993 | if (full_paths) | 1000 | if (full_paths) |
994 | d_filename = filename; | 1001 | d_filename = filename; |
995 | else | 1002 | else |
@@ -1040,6 +1047,8 @@ int symbol__annotate_printf(struct symbol *sym, struct map *map, int evidx, | |||
1040 | } | 1047 | } |
1041 | } | 1048 | } |
1042 | 1049 | ||
1050 | free(filename); | ||
1051 | |||
1043 | return more; | 1052 | return more; |
1044 | } | 1053 | } |
1045 | 1054 | ||
diff --git a/tools/perf/util/annotate.h b/tools/perf/util/annotate.h index 78a5692dd718..9b5b21e7b032 100644 --- a/tools/perf/util/annotate.h +++ b/tools/perf/util/annotate.h | |||
@@ -7,6 +7,7 @@ | |||
7 | #include "symbol.h" | 7 | #include "symbol.h" |
8 | #include <linux/list.h> | 8 | #include <linux/list.h> |
9 | #include <linux/rbtree.h> | 9 | #include <linux/rbtree.h> |
10 | #include <pthread.h> | ||
10 | 11 | ||
11 | struct ins; | 12 | struct ins; |
12 | 13 | ||
@@ -125,7 +126,7 @@ int symbol__alloc_hist(struct symbol *sym); | |||
125 | void symbol__annotate_zero_histograms(struct symbol *sym); | 126 | void symbol__annotate_zero_histograms(struct symbol *sym); |
126 | 127 | ||
127 | int symbol__annotate(struct symbol *sym, struct map *map, size_t privsize); | 128 | int symbol__annotate(struct symbol *sym, struct map *map, size_t privsize); |
128 | int symbol__annotate_init(struct map *map __used, struct symbol *sym); | 129 | int symbol__annotate_init(struct map *map __maybe_unused, struct symbol *sym); |
129 | int symbol__annotate_printf(struct symbol *sym, struct map *map, int evidx, | 130 | int symbol__annotate_printf(struct symbol *sym, struct map *map, int evidx, |
130 | bool full_paths, int min_pcnt, int max_lines, | 131 | bool full_paths, int min_pcnt, int max_lines, |
131 | int context); | 132 | int context); |
@@ -138,11 +139,12 @@ int symbol__tty_annotate(struct symbol *sym, struct map *map, int evidx, | |||
138 | int max_lines); | 139 | int max_lines); |
139 | 140 | ||
140 | #ifdef NO_NEWT_SUPPORT | 141 | #ifdef NO_NEWT_SUPPORT |
141 | static inline int symbol__tui_annotate(struct symbol *sym __used, | 142 | static inline int symbol__tui_annotate(struct symbol *sym __maybe_unused, |
142 | struct map *map __used, | 143 | struct map *map __maybe_unused, |
143 | int evidx __used, | 144 | int evidx __maybe_unused, |
144 | void(*timer)(void *arg) __used, | 145 | void(*timer)(void *arg) __maybe_unused, |
145 | void *arg __used, int delay_secs __used) | 146 | void *arg __maybe_unused, |
147 | int delay_secs __maybe_unused) | ||
146 | { | 148 | { |
147 | return 0; | 149 | return 0; |
148 | } | 150 | } |
@@ -152,5 +154,6 @@ int symbol__tui_annotate(struct symbol *sym, struct map *map, int evidx, | |||
152 | #endif | 154 | #endif |
153 | 155 | ||
154 | extern const char *disassembler_style; | 156 | extern const char *disassembler_style; |
157 | extern const char *objdump_path; | ||
155 | 158 | ||
156 | #endif /* __PERF_ANNOTATE_H */ | 159 | #endif /* __PERF_ANNOTATE_H */ |
diff --git a/tools/perf/util/build-id.c b/tools/perf/util/build-id.c index fd9a5944b627..8e3a740ddbd4 100644 --- a/tools/perf/util/build-id.c +++ b/tools/perf/util/build-id.c | |||
@@ -16,10 +16,10 @@ | |||
16 | #include "session.h" | 16 | #include "session.h" |
17 | #include "tool.h" | 17 | #include "tool.h" |
18 | 18 | ||
19 | static int build_id__mark_dso_hit(struct perf_tool *tool __used, | 19 | static int build_id__mark_dso_hit(struct perf_tool *tool __maybe_unused, |
20 | union perf_event *event, | 20 | union perf_event *event, |
21 | struct perf_sample *sample __used, | 21 | struct perf_sample *sample __maybe_unused, |
22 | struct perf_evsel *evsel __used, | 22 | struct perf_evsel *evsel __maybe_unused, |
23 | struct machine *machine) | 23 | struct machine *machine) |
24 | { | 24 | { |
25 | struct addr_location al; | 25 | struct addr_location al; |
@@ -41,9 +41,10 @@ static int build_id__mark_dso_hit(struct perf_tool *tool __used, | |||
41 | return 0; | 41 | return 0; |
42 | } | 42 | } |
43 | 43 | ||
44 | static int perf_event__exit_del_thread(struct perf_tool *tool __used, | 44 | static int perf_event__exit_del_thread(struct perf_tool *tool __maybe_unused, |
45 | union perf_event *event, | 45 | union perf_event *event, |
46 | struct perf_sample *sample __used, | 46 | struct perf_sample *sample |
47 | __maybe_unused, | ||
47 | struct machine *machine) | 48 | struct machine *machine) |
48 | { | 49 | { |
49 | struct thread *thread = machine__findnew_thread(machine, event->fork.tid); | 50 | struct thread *thread = machine__findnew_thread(machine, event->fork.tid); |
diff --git a/tools/perf/util/cache.h b/tools/perf/util/cache.h index cff18c617d13..ab1769426541 100644 --- a/tools/perf/util/cache.h +++ b/tools/perf/util/cache.h | |||
@@ -39,7 +39,7 @@ static inline void setup_browser(bool fallback_to_pager) | |||
39 | if (fallback_to_pager) | 39 | if (fallback_to_pager) |
40 | setup_pager(); | 40 | setup_pager(); |
41 | } | 41 | } |
42 | static inline void exit_browser(bool wait_for_ok __used) {} | 42 | static inline void exit_browser(bool wait_for_ok __maybe_unused) {} |
43 | #else | 43 | #else |
44 | void setup_browser(bool fallback_to_pager); | 44 | void setup_browser(bool fallback_to_pager); |
45 | void exit_browser(bool wait_for_ok); | 45 | void exit_browser(bool wait_for_ok); |
@@ -49,7 +49,7 @@ static inline int ui__init(void) | |||
49 | { | 49 | { |
50 | return -1; | 50 | return -1; |
51 | } | 51 | } |
52 | static inline void ui__exit(bool wait_for_ok __used) {} | 52 | static inline void ui__exit(bool wait_for_ok __maybe_unused) {} |
53 | #else | 53 | #else |
54 | int ui__init(void); | 54 | int ui__init(void); |
55 | void ui__exit(bool wait_for_ok); | 55 | void ui__exit(bool wait_for_ok); |
@@ -60,7 +60,7 @@ static inline int perf_gtk__init(void) | |||
60 | { | 60 | { |
61 | return -1; | 61 | return -1; |
62 | } | 62 | } |
63 | static inline void perf_gtk__exit(bool wait_for_ok __used) {} | 63 | static inline void perf_gtk__exit(bool wait_for_ok __maybe_unused) {} |
64 | #else | 64 | #else |
65 | int perf_gtk__init(void); | 65 | int perf_gtk__init(void); |
66 | void perf_gtk__exit(bool wait_for_ok); | 66 | void perf_gtk__exit(bool wait_for_ok); |
diff --git a/tools/perf/util/callchain.c b/tools/perf/util/callchain.c index 3a6bff47614f..d3b3f5d82137 100644 --- a/tools/perf/util/callchain.c +++ b/tools/perf/util/callchain.c | |||
@@ -93,7 +93,7 @@ __sort_chain_flat(struct rb_root *rb_root, struct callchain_node *node, | |||
93 | */ | 93 | */ |
94 | static void | 94 | static void |
95 | sort_chain_flat(struct rb_root *rb_root, struct callchain_root *root, | 95 | sort_chain_flat(struct rb_root *rb_root, struct callchain_root *root, |
96 | u64 min_hit, struct callchain_param *param __used) | 96 | u64 min_hit, struct callchain_param *param __maybe_unused) |
97 | { | 97 | { |
98 | __sort_chain_flat(rb_root, &root->node, min_hit); | 98 | __sort_chain_flat(rb_root, &root->node, min_hit); |
99 | } | 99 | } |
@@ -115,7 +115,7 @@ static void __sort_chain_graph_abs(struct callchain_node *node, | |||
115 | 115 | ||
116 | static void | 116 | static void |
117 | sort_chain_graph_abs(struct rb_root *rb_root, struct callchain_root *chain_root, | 117 | sort_chain_graph_abs(struct rb_root *rb_root, struct callchain_root *chain_root, |
118 | u64 min_hit, struct callchain_param *param __used) | 118 | u64 min_hit, struct callchain_param *param __maybe_unused) |
119 | { | 119 | { |
120 | __sort_chain_graph_abs(&chain_root->node, min_hit); | 120 | __sort_chain_graph_abs(&chain_root->node, min_hit); |
121 | rb_root->rb_node = chain_root->node.rb_root.rb_node; | 121 | rb_root->rb_node = chain_root->node.rb_root.rb_node; |
@@ -140,7 +140,7 @@ static void __sort_chain_graph_rel(struct callchain_node *node, | |||
140 | 140 | ||
141 | static void | 141 | static void |
142 | sort_chain_graph_rel(struct rb_root *rb_root, struct callchain_root *chain_root, | 142 | sort_chain_graph_rel(struct rb_root *rb_root, struct callchain_root *chain_root, |
143 | u64 min_hit __used, struct callchain_param *param) | 143 | u64 min_hit __maybe_unused, struct callchain_param *param) |
144 | { | 144 | { |
145 | __sort_chain_graph_rel(&chain_root->node, param->min_percent / 100.0); | 145 | __sort_chain_graph_rel(&chain_root->node, param->min_percent / 100.0); |
146 | rb_root->rb_node = chain_root->node.rb_root.rb_node; | 146 | rb_root->rb_node = chain_root->node.rb_root.rb_node; |
diff --git a/tools/perf/util/callchain.h b/tools/perf/util/callchain.h index 3bdb407f9cd9..eb340571e7d6 100644 --- a/tools/perf/util/callchain.h +++ b/tools/perf/util/callchain.h | |||
@@ -58,7 +58,7 @@ struct callchain_list { | |||
58 | /* | 58 | /* |
59 | * A callchain cursor is a singly linked list that | 59 | * A callchain cursor is a singly linked list that |
60 | * lets one feed a callchain progressively. | 60 | * lets one feed a callchain progressively. |
61 | * It keeps persitent allocated entries to minimize | 61 | * It keeps persistent allocated entries to minimize |
62 | * allocations. | 62 | * allocations. |
63 | */ | 63 | */ |
64 | struct callchain_cursor_node { | 64 | struct callchain_cursor_node { |
diff --git a/tools/perf/util/cgroup.c b/tools/perf/util/cgroup.c index dbe2f16b1a1a..96bbda1ddb83 100644 --- a/tools/perf/util/cgroup.c +++ b/tools/perf/util/cgroup.c | |||
@@ -138,8 +138,8 @@ void close_cgroup(struct cgroup_sel *cgrp) | |||
138 | } | 138 | } |
139 | } | 139 | } |
140 | 140 | ||
141 | int parse_cgroups(const struct option *opt __used, const char *str, | 141 | int parse_cgroups(const struct option *opt __maybe_unused, const char *str, |
142 | int unset __used) | 142 | int unset __maybe_unused) |
143 | { | 143 | { |
144 | struct perf_evlist *evlist = *(struct perf_evlist **)opt->value; | 144 | struct perf_evlist *evlist = *(struct perf_evlist **)opt->value; |
145 | const char *p, *e, *eos = str + strlen(str); | 145 | const char *p, *e, *eos = str + strlen(str); |
diff --git a/tools/perf/util/config.c b/tools/perf/util/config.c index 6faa3a18bfbd..3e0fdd369ccb 100644 --- a/tools/perf/util/config.c +++ b/tools/perf/util/config.c | |||
@@ -342,13 +342,15 @@ const char *perf_config_dirname(const char *name, const char *value) | |||
342 | return value; | 342 | return value; |
343 | } | 343 | } |
344 | 344 | ||
345 | static int perf_default_core_config(const char *var __used, const char *value __used) | 345 | static int perf_default_core_config(const char *var __maybe_unused, |
346 | const char *value __maybe_unused) | ||
346 | { | 347 | { |
347 | /* Add other config variables here. */ | 348 | /* Add other config variables here. */ |
348 | return 0; | 349 | return 0; |
349 | } | 350 | } |
350 | 351 | ||
351 | int perf_default_config(const char *var, const char *value, void *dummy __used) | 352 | int perf_default_config(const char *var, const char *value, |
353 | void *dummy __maybe_unused) | ||
352 | { | 354 | { |
353 | if (!prefixcmp(var, "core.")) | 355 | if (!prefixcmp(var, "core.")) |
354 | return perf_default_core_config(var, value); | 356 | return perf_default_core_config(var, value); |
diff --git a/tools/perf/util/cpumap.c b/tools/perf/util/cpumap.c index adc72f09914d..2b32ffa9ebdb 100644 --- a/tools/perf/util/cpumap.c +++ b/tools/perf/util/cpumap.c | |||
@@ -38,24 +38,19 @@ static struct cpu_map *cpu_map__trim_new(int nr_cpus, int *tmp_cpus) | |||
38 | return cpus; | 38 | return cpus; |
39 | } | 39 | } |
40 | 40 | ||
41 | static struct cpu_map *cpu_map__read_all_cpu_map(void) | 41 | struct cpu_map *cpu_map__read(FILE *file) |
42 | { | 42 | { |
43 | struct cpu_map *cpus = NULL; | 43 | struct cpu_map *cpus = NULL; |
44 | FILE *onlnf; | ||
45 | int nr_cpus = 0; | 44 | int nr_cpus = 0; |
46 | int *tmp_cpus = NULL, *tmp; | 45 | int *tmp_cpus = NULL, *tmp; |
47 | int max_entries = 0; | 46 | int max_entries = 0; |
48 | int n, cpu, prev; | 47 | int n, cpu, prev; |
49 | char sep; | 48 | char sep; |
50 | 49 | ||
51 | onlnf = fopen("/sys/devices/system/cpu/online", "r"); | ||
52 | if (!onlnf) | ||
53 | return cpu_map__default_new(); | ||
54 | |||
55 | sep = 0; | 50 | sep = 0; |
56 | prev = -1; | 51 | prev = -1; |
57 | for (;;) { | 52 | for (;;) { |
58 | n = fscanf(onlnf, "%u%c", &cpu, &sep); | 53 | n = fscanf(file, "%u%c", &cpu, &sep); |
59 | if (n <= 0) | 54 | if (n <= 0) |
60 | break; | 55 | break; |
61 | if (prev >= 0) { | 56 | if (prev >= 0) { |
@@ -95,6 +90,19 @@ static struct cpu_map *cpu_map__read_all_cpu_map(void) | |||
95 | cpus = cpu_map__default_new(); | 90 | cpus = cpu_map__default_new(); |
96 | out_free_tmp: | 91 | out_free_tmp: |
97 | free(tmp_cpus); | 92 | free(tmp_cpus); |
93 | return cpus; | ||
94 | } | ||
95 | |||
96 | static struct cpu_map *cpu_map__read_all_cpu_map(void) | ||
97 | { | ||
98 | struct cpu_map *cpus = NULL; | ||
99 | FILE *onlnf; | ||
100 | |||
101 | onlnf = fopen("/sys/devices/system/cpu/online", "r"); | ||
102 | if (!onlnf) | ||
103 | return cpu_map__default_new(); | ||
104 | |||
105 | cpus = cpu_map__read(onlnf); | ||
98 | fclose(onlnf); | 106 | fclose(onlnf); |
99 | return cpus; | 107 | return cpus; |
100 | } | 108 | } |
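Factoring the parser out as cpu_map__read(FILE *) means any stream in the sysfs "0-3,7" list format can be turned into a cpu_map, not just /sys/devices/system/cpu/online. A small usage sketch, assuming the declarations from cpumap.h and using fmemopen() to stand in for the sysfs file:

        #include <stdio.h>
        #include <string.h>
        #include "cpumap.h"

        static struct cpu_map *cpus_from_string(const char *list)
        {
                FILE *f = fmemopen((void *)list, strlen(list), "r");
                struct cpu_map *cpus;

                if (f == NULL)
                        return NULL;
                cpus = cpu_map__read(f);        /* e.g. "0-3,7\n" -> {0,1,2,3,7} */
                fclose(f);
                return cpus;
        }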
diff --git a/tools/perf/util/cpumap.h b/tools/perf/util/cpumap.h index c41518573c6a..2f68a3b8c285 100644 --- a/tools/perf/util/cpumap.h +++ b/tools/perf/util/cpumap.h | |||
@@ -2,6 +2,7 @@ | |||
2 | #define __PERF_CPUMAP_H | 2 | #define __PERF_CPUMAP_H |
3 | 3 | ||
4 | #include <stdio.h> | 4 | #include <stdio.h> |
5 | #include <stdbool.h> | ||
5 | 6 | ||
6 | struct cpu_map { | 7 | struct cpu_map { |
7 | int nr; | 8 | int nr; |
@@ -11,7 +12,17 @@ struct cpu_map { | |||
11 | struct cpu_map *cpu_map__new(const char *cpu_list); | 12 | struct cpu_map *cpu_map__new(const char *cpu_list); |
12 | struct cpu_map *cpu_map__dummy_new(void); | 13 | struct cpu_map *cpu_map__dummy_new(void); |
13 | void cpu_map__delete(struct cpu_map *map); | 14 | void cpu_map__delete(struct cpu_map *map); |
14 | 15 | struct cpu_map *cpu_map__read(FILE *file); | |
15 | size_t cpu_map__fprintf(struct cpu_map *map, FILE *fp); | 16 | size_t cpu_map__fprintf(struct cpu_map *map, FILE *fp); |
16 | 17 | ||
18 | static inline int cpu_map__nr(const struct cpu_map *map) | ||
19 | { | ||
20 | return map ? map->nr : 1; | ||
21 | } | ||
22 | |||
23 | static inline bool cpu_map__all(const struct cpu_map *map) | ||
24 | { | ||
25 | return map ? map->map[0] == -1 : true; | ||
26 | } | ||
27 | |||
17 | #endif /* __PERF_CPUMAP_H */ | 28 | #endif /* __PERF_CPUMAP_H */ |
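The two new accessors encode the evlist conventions in one place: a NULL map counts as a single "any CPU" slot, and a dummy map (map[0] == -1) means per-thread rather than per-cpu mmaping. A sketch of the intended call pattern; mmap_per_thread()/mmap_per_cpu() are hypothetical stand-ins for the static helpers in evlist.c:

        int nr_cpus = cpu_map__nr(evlist->cpus);        /* 1 when cpus == NULL */

        if (cpu_map__all(evlist->cpus))                 /* dummy map: map[0] == -1 */
                ret = mmap_per_thread(evlist, prot, mask);
        else
                ret = mmap_per_cpu(evlist, prot, mask);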
diff --git a/tools/perf/util/debug.c b/tools/perf/util/debug.c index 4dfe0bb3c322..66eb3828ceb5 100644 --- a/tools/perf/util/debug.c +++ b/tools/perf/util/debug.c | |||
@@ -23,8 +23,10 @@ int eprintf(int level, const char *fmt, ...) | |||
23 | 23 | ||
24 | if (verbose >= level) { | 24 | if (verbose >= level) { |
25 | va_start(args, fmt); | 25 | va_start(args, fmt); |
26 | if (use_browser > 0) | 26 | if (use_browser == 1) |
27 | ret = ui_helpline__show_help(fmt, args); | 27 | ret = ui_helpline__show_help(fmt, args); |
28 | else if (use_browser == 2) | ||
29 | ret = perf_gtk__show_helpline(fmt, args); | ||
28 | else | 30 | else |
29 | ret = vfprintf(stderr, fmt, args); | 31 | ret = vfprintf(stderr, fmt, args); |
30 | va_end(args); | 32 | va_end(args); |
diff --git a/tools/perf/util/debug.h b/tools/perf/util/debug.h index 015c91dbc096..bb2e7d1007ab 100644 --- a/tools/perf/util/debug.h +++ b/tools/perf/util/debug.h | |||
@@ -4,6 +4,7 @@ | |||
4 | 4 | ||
5 | #include <stdbool.h> | 5 | #include <stdbool.h> |
6 | #include "event.h" | 6 | #include "event.h" |
7 | #include "../ui/helpline.h" | ||
7 | 8 | ||
8 | extern int verbose; | 9 | extern int verbose; |
9 | extern bool quiet, dump_trace; | 10 | extern bool quiet, dump_trace; |
@@ -15,32 +16,26 @@ struct ui_progress; | |||
15 | struct perf_error_ops; | 16 | struct perf_error_ops; |
16 | 17 | ||
17 | #if defined(NO_NEWT_SUPPORT) && defined(NO_GTK2_SUPPORT) | 18 | #if defined(NO_NEWT_SUPPORT) && defined(NO_GTK2_SUPPORT) |
18 | static inline int ui_helpline__show_help(const char *format __used, va_list ap __used) | 19 | static inline void ui_progress__update(u64 curr __maybe_unused, |
19 | { | 20 | u64 total __maybe_unused, |
20 | return 0; | 21 | const char *title __maybe_unused) {} |
21 | } | ||
22 | |||
23 | static inline void ui_progress__update(u64 curr __used, u64 total __used, | ||
24 | const char *title __used) {} | ||
25 | 22 | ||
26 | #define ui__error(format, arg...) ui__warning(format, ##arg) | 23 | #define ui__error(format, arg...) ui__warning(format, ##arg) |
27 | 24 | ||
28 | static inline int | 25 | static inline int |
29 | perf_error__register(struct perf_error_ops *eops __used) | 26 | perf_error__register(struct perf_error_ops *eops __maybe_unused) |
30 | { | 27 | { |
31 | return 0; | 28 | return 0; |
32 | } | 29 | } |
33 | 30 | ||
34 | static inline int | 31 | static inline int |
35 | perf_error__unregister(struct perf_error_ops *eops __used) | 32 | perf_error__unregister(struct perf_error_ops *eops __maybe_unused) |
36 | { | 33 | { |
37 | return 0; | 34 | return 0; |
38 | } | 35 | } |
39 | 36 | ||
40 | #else /* NO_NEWT_SUPPORT && NO_GTK2_SUPPORT */ | 37 | #else /* NO_NEWT_SUPPORT && NO_GTK2_SUPPORT */ |
41 | 38 | ||
42 | extern char ui_helpline__last_msg[]; | ||
43 | int ui_helpline__show_help(const char *format, va_list ap); | ||
44 | #include "../ui/progress.h" | 39 | #include "../ui/progress.h" |
45 | int ui__error(const char *format, ...) __attribute__((format(printf, 1, 2))); | 40 | int ui__error(const char *format, ...) __attribute__((format(printf, 1, 2))); |
46 | #include "../ui/util.h" | 41 | #include "../ui/util.h" |
diff --git a/tools/perf/util/dso-test-data.c b/tools/perf/util/dso-test-data.c index 541cdc72c7df..c6caedeb1d6b 100644 --- a/tools/perf/util/dso-test-data.c +++ b/tools/perf/util/dso-test-data.c | |||
@@ -23,7 +23,7 @@ static char *test_file(int size) | |||
23 | int fd, i; | 23 | int fd, i; |
24 | unsigned char *buf; | 24 | unsigned char *buf; |
25 | 25 | ||
26 | fd = mkostemp(templ, O_CREAT|O_WRONLY|O_TRUNC); | 26 | fd = mkstemp(templ); |
27 | 27 | ||
28 | buf = malloc(size); | 28 | buf = malloc(size); |
29 | if (!buf) { | 29 | if (!buf) { |
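The mkostemp() call was passing access-mode flags (O_CREAT|O_WRONLY|O_TRUNC) where that API expects only additional flags such as O_CLOEXEC, and the function is a glibc extension anyway; plain mkstemp() already creates and opens the file O_RDWR|O_CREAT|O_EXCL. A minimal sketch of the intended pattern:

        #include <stdlib.h>
        #include <unistd.h>

        static int make_test_file(void)
        {
                char templ[] = "/tmp/perf-test-XXXXXX";
                int fd = mkstemp(templ);        /* creates and opens O_RDWR|O_CREAT|O_EXCL */

                if (fd >= 0)
                        unlink(templ);          /* file lives only as long as the descriptor */
                return fd;
        }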
diff --git a/tools/perf/util/dwarf-aux.c b/tools/perf/util/dwarf-aux.c index ee51e9b4dc09..3e5f5430a28a 100644 --- a/tools/perf/util/dwarf-aux.c +++ b/tools/perf/util/dwarf-aux.c | |||
@@ -804,6 +804,8 @@ int die_get_typename(Dwarf_Die *vr_die, char *buf, int len) | |||
804 | tmp = "union "; | 804 | tmp = "union "; |
805 | else if (tag == DW_TAG_structure_type) | 805 | else if (tag == DW_TAG_structure_type) |
806 | tmp = "struct "; | 806 | tmp = "struct "; |
807 | else if (tag == DW_TAG_enumeration_type) | ||
808 | tmp = "enum "; | ||
807 | /* Write a base name */ | 809 | /* Write a base name */ |
808 | ret = snprintf(buf, len, "%s%s", tmp, dwarf_diename(&type)); | 810 | ret = snprintf(buf, len, "%s%s", tmp, dwarf_diename(&type)); |
809 | return (ret >= len) ? -E2BIG : ret; | 811 | return (ret >= len) ? -E2BIG : ret; |
diff --git a/tools/perf/util/event.c b/tools/perf/util/event.c index 2a6f33cd888c..6715b1938725 100644 --- a/tools/perf/util/event.c +++ b/tools/perf/util/event.c | |||
@@ -112,7 +112,7 @@ static pid_t perf_event__synthesize_comm(struct perf_tool *tool, | |||
112 | event->comm.header.type = PERF_RECORD_COMM; | 112 | event->comm.header.type = PERF_RECORD_COMM; |
113 | 113 | ||
114 | size = strlen(event->comm.comm) + 1; | 114 | size = strlen(event->comm.comm) + 1; |
115 | size = ALIGN(size, sizeof(u64)); | 115 | size = PERF_ALIGN(size, sizeof(u64)); |
116 | memset(event->comm.comm + size, 0, machine->id_hdr_size); | 116 | memset(event->comm.comm + size, 0, machine->id_hdr_size); |
117 | event->comm.header.size = (sizeof(event->comm) - | 117 | event->comm.header.size = (sizeof(event->comm) - |
118 | (sizeof(event->comm.comm) - size) + | 118 | (sizeof(event->comm.comm) - size) + |
@@ -120,7 +120,9 @@ static pid_t perf_event__synthesize_comm(struct perf_tool *tool, | |||
120 | if (!full) { | 120 | if (!full) { |
121 | event->comm.tid = pid; | 121 | event->comm.tid = pid; |
122 | 122 | ||
123 | process(tool, event, &synth_sample, machine); | 123 | if (process(tool, event, &synth_sample, machine) != 0) |
124 | return -1; | ||
125 | |||
124 | goto out; | 126 | goto out; |
125 | } | 127 | } |
126 | 128 | ||
@@ -143,7 +145,7 @@ static pid_t perf_event__synthesize_comm(struct perf_tool *tool, | |||
143 | sizeof(event->comm.comm)); | 145 | sizeof(event->comm.comm)); |
144 | 146 | ||
145 | size = strlen(event->comm.comm) + 1; | 147 | size = strlen(event->comm.comm) + 1; |
146 | size = ALIGN(size, sizeof(u64)); | 148 | size = PERF_ALIGN(size, sizeof(u64)); |
147 | memset(event->comm.comm + size, 0, machine->id_hdr_size); | 149 | memset(event->comm.comm + size, 0, machine->id_hdr_size); |
148 | event->comm.header.size = (sizeof(event->comm) - | 150 | event->comm.header.size = (sizeof(event->comm) - |
149 | (sizeof(event->comm.comm) - size) + | 151 | (sizeof(event->comm.comm) - size) + |
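PERF_ALIGN replaces the tools' private ALIGN macro but keeps the same job: round the variable-length comm/filename payload up to an 8-byte boundary so the fields that follow it stay u64-aligned. A sketch of the semantics, assuming the usual mask-based definition (the real one lives in perf's util headers):

        /* Round x up to the next multiple of a (a must be a power of two). */
        #define PERF_ALIGN(x, a)        (((x) + (a) - 1) & ~((__typeof__(x))(a) - 1))

        size_t size = strlen(event->comm.comm) + 1;     /* include the NUL */
        size = PERF_ALIGN(size, sizeof(u64));           /* e.g. 13 -> 16 */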
@@ -151,7 +153,10 @@ static pid_t perf_event__synthesize_comm(struct perf_tool *tool, | |||
151 | 153 | ||
152 | event->comm.tid = pid; | 154 | event->comm.tid = pid; |
153 | 155 | ||
154 | process(tool, event, &synth_sample, machine); | 156 | if (process(tool, event, &synth_sample, machine) != 0) { |
157 | tgid = -1; | ||
158 | break; | ||
159 | } | ||
155 | } | 160 | } |
156 | 161 | ||
157 | closedir(tasks); | 162 | closedir(tasks); |
@@ -167,6 +172,7 @@ static int perf_event__synthesize_mmap_events(struct perf_tool *tool, | |||
167 | { | 172 | { |
168 | char filename[PATH_MAX]; | 173 | char filename[PATH_MAX]; |
169 | FILE *fp; | 174 | FILE *fp; |
175 | int rc = 0; | ||
170 | 176 | ||
171 | snprintf(filename, sizeof(filename), "/proc/%d/maps", pid); | 177 | snprintf(filename, sizeof(filename), "/proc/%d/maps", pid); |
172 | 178 | ||
@@ -222,7 +228,7 @@ static int perf_event__synthesize_mmap_events(struct perf_tool *tool, | |||
222 | size = strlen(execname); | 228 | size = strlen(execname); |
223 | execname[size - 1] = '\0'; /* Remove \n */ | 229 | execname[size - 1] = '\0'; /* Remove \n */ |
224 | memcpy(event->mmap.filename, execname, size); | 230 | memcpy(event->mmap.filename, execname, size); |
225 | size = ALIGN(size, sizeof(u64)); | 231 | size = PERF_ALIGN(size, sizeof(u64)); |
226 | event->mmap.len -= event->mmap.start; | 232 | event->mmap.len -= event->mmap.start; |
227 | event->mmap.header.size = (sizeof(event->mmap) - | 233 | event->mmap.header.size = (sizeof(event->mmap) - |
228 | (sizeof(event->mmap.filename) - size)); | 234 | (sizeof(event->mmap.filename) - size)); |
@@ -231,18 +237,22 @@ static int perf_event__synthesize_mmap_events(struct perf_tool *tool, | |||
231 | event->mmap.pid = tgid; | 237 | event->mmap.pid = tgid; |
232 | event->mmap.tid = pid; | 238 | event->mmap.tid = pid; |
233 | 239 | ||
234 | process(tool, event, &synth_sample, machine); | 240 | if (process(tool, event, &synth_sample, machine) != 0) { |
241 | rc = -1; | ||
242 | break; | ||
243 | } | ||
235 | } | 244 | } |
236 | } | 245 | } |
237 | 246 | ||
238 | fclose(fp); | 247 | fclose(fp); |
239 | return 0; | 248 | return rc; |
240 | } | 249 | } |
241 | 250 | ||
242 | int perf_event__synthesize_modules(struct perf_tool *tool, | 251 | int perf_event__synthesize_modules(struct perf_tool *tool, |
243 | perf_event__handler_t process, | 252 | perf_event__handler_t process, |
244 | struct machine *machine) | 253 | struct machine *machine) |
245 | { | 254 | { |
255 | int rc = 0; | ||
246 | struct rb_node *nd; | 256 | struct rb_node *nd; |
247 | struct map_groups *kmaps = &machine->kmaps; | 257 | struct map_groups *kmaps = &machine->kmaps; |
248 | union perf_event *event = zalloc((sizeof(event->mmap) + | 258 | union perf_event *event = zalloc((sizeof(event->mmap) + |
@@ -272,7 +282,7 @@ int perf_event__synthesize_modules(struct perf_tool *tool, | |||
272 | if (pos->dso->kernel) | 282 | if (pos->dso->kernel) |
273 | continue; | 283 | continue; |
274 | 284 | ||
275 | size = ALIGN(pos->dso->long_name_len + 1, sizeof(u64)); | 285 | size = PERF_ALIGN(pos->dso->long_name_len + 1, sizeof(u64)); |
276 | event->mmap.header.type = PERF_RECORD_MMAP; | 286 | event->mmap.header.type = PERF_RECORD_MMAP; |
277 | event->mmap.header.size = (sizeof(event->mmap) - | 287 | event->mmap.header.size = (sizeof(event->mmap) - |
278 | (sizeof(event->mmap.filename) - size)); | 288 | (sizeof(event->mmap.filename) - size)); |
@@ -284,11 +294,14 @@ int perf_event__synthesize_modules(struct perf_tool *tool, | |||
284 | 294 | ||
285 | memcpy(event->mmap.filename, pos->dso->long_name, | 295 | memcpy(event->mmap.filename, pos->dso->long_name, |
286 | pos->dso->long_name_len + 1); | 296 | pos->dso->long_name_len + 1); |
287 | process(tool, event, &synth_sample, machine); | 297 | if (process(tool, event, &synth_sample, machine) != 0) { |
298 | rc = -1; | ||
299 | break; | ||
300 | } | ||
288 | } | 301 | } |
289 | 302 | ||
290 | free(event); | 303 | free(event); |
291 | return 0; | 304 | return rc; |
292 | } | 305 | } |
293 | 306 | ||
294 | static int __event__synthesize_thread(union perf_event *comm_event, | 307 | static int __event__synthesize_thread(union perf_event *comm_event, |
@@ -392,12 +405,16 @@ int perf_event__synthesize_threads(struct perf_tool *tool, | |||
392 | if (*end) /* only interested in proper numerical dirents */ | 405 | if (*end) /* only interested in proper numerical dirents */ |
393 | continue; | 406 | continue; |
394 | 407 | ||
395 | __event__synthesize_thread(comm_event, mmap_event, pid, 1, | 408 | if (__event__synthesize_thread(comm_event, mmap_event, pid, 1, |
396 | process, tool, machine); | 409 | process, tool, machine) != 0) { |
410 | err = -1; | ||
411 | goto out_closedir; | ||
412 | } | ||
397 | } | 413 | } |
398 | 414 | ||
399 | closedir(proc); | ||
400 | err = 0; | 415 | err = 0; |
416 | out_closedir: | ||
417 | closedir(proc); | ||
401 | out_free_mmap: | 418 | out_free_mmap: |
402 | free(mmap_event); | 419 | free(mmap_event); |
403 | out_free_comm: | 420 | out_free_comm: |
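The recurring change in this file is that the synthesizers stop ignoring the return value of the process() callback: a failed delivery now aborts the loop and is reported to the caller instead of being silently dropped. The resulting pattern, as a sketch fragment (the loop condition and event fill are hypothetical):

        int rc = 0;

        while (have_more_events()) {            /* hypothetical */
                fill_event(event);              /* hypothetical */
                if (process(tool, event, &synth_sample, machine) != 0) {
                        rc = -1;                /* remember the failure ...   */
                        break;                  /* ... and stop synthesizing  */
                }
        }
        return rc;                              /* 0 only if every event was delivered */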
@@ -412,7 +429,7 @@ struct process_symbol_args { | |||
412 | }; | 429 | }; |
413 | 430 | ||
414 | static int find_symbol_cb(void *arg, const char *name, char type, | 431 | static int find_symbol_cb(void *arg, const char *name, char type, |
415 | u64 start, u64 end __used) | 432 | u64 start) |
416 | { | 433 | { |
417 | struct process_symbol_args *args = arg; | 434 | struct process_symbol_args *args = arg; |
418 | 435 | ||
@@ -477,7 +494,7 @@ int perf_event__synthesize_kernel_mmap(struct perf_tool *tool, | |||
477 | map = machine->vmlinux_maps[MAP__FUNCTION]; | 494 | map = machine->vmlinux_maps[MAP__FUNCTION]; |
478 | size = snprintf(event->mmap.filename, sizeof(event->mmap.filename), | 495 | size = snprintf(event->mmap.filename, sizeof(event->mmap.filename), |
479 | "%s%s", mmap_name, symbol_name) + 1; | 496 | "%s%s", mmap_name, symbol_name) + 1; |
480 | size = ALIGN(size, sizeof(u64)); | 497 | size = PERF_ALIGN(size, sizeof(u64)); |
481 | event->mmap.header.type = PERF_RECORD_MMAP; | 498 | event->mmap.header.type = PERF_RECORD_MMAP; |
482 | event->mmap.header.size = (sizeof(event->mmap) - | 499 | event->mmap.header.size = (sizeof(event->mmap) - |
483 | (sizeof(event->mmap.filename) - size) + machine->id_hdr_size); | 500 | (sizeof(event->mmap.filename) - size) + machine->id_hdr_size); |
@@ -497,9 +514,9 @@ size_t perf_event__fprintf_comm(union perf_event *event, FILE *fp) | |||
497 | return fprintf(fp, ": %s:%d\n", event->comm.comm, event->comm.tid); | 514 | return fprintf(fp, ": %s:%d\n", event->comm.comm, event->comm.tid); |
498 | } | 515 | } |
499 | 516 | ||
500 | int perf_event__process_comm(struct perf_tool *tool __used, | 517 | int perf_event__process_comm(struct perf_tool *tool __maybe_unused, |
501 | union perf_event *event, | 518 | union perf_event *event, |
502 | struct perf_sample *sample __used, | 519 | struct perf_sample *sample __maybe_unused, |
503 | struct machine *machine) | 520 | struct machine *machine) |
504 | { | 521 | { |
505 | struct thread *thread = machine__findnew_thread(machine, event->comm.tid); | 522 | struct thread *thread = machine__findnew_thread(machine, event->comm.tid); |
@@ -515,10 +532,10 @@ int perf_event__process_comm(struct perf_tool *tool __used, | |||
515 | return 0; | 532 | return 0; |
516 | } | 533 | } |
517 | 534 | ||
518 | int perf_event__process_lost(struct perf_tool *tool __used, | 535 | int perf_event__process_lost(struct perf_tool *tool __maybe_unused, |
519 | union perf_event *event, | 536 | union perf_event *event, |
520 | struct perf_sample *sample __used, | 537 | struct perf_sample *sample __maybe_unused, |
521 | struct machine *machine __used) | 538 | struct machine *machine __maybe_unused) |
522 | { | 539 | { |
523 | dump_printf(": id:%" PRIu64 ": lost:%" PRIu64 "\n", | 540 | dump_printf(": id:%" PRIu64 ": lost:%" PRIu64 "\n", |
524 | event->lost.id, event->lost.lost); | 541 | event->lost.id, event->lost.lost); |
@@ -538,7 +555,8 @@ static void perf_event__set_kernel_mmap_len(union perf_event *event, | |||
538 | maps[MAP__FUNCTION]->end = ~0ULL; | 555 | maps[MAP__FUNCTION]->end = ~0ULL; |
539 | } | 556 | } |
540 | 557 | ||
541 | static int perf_event__process_kernel_mmap(struct perf_tool *tool __used, | 558 | static int perf_event__process_kernel_mmap(struct perf_tool *tool |
559 | __maybe_unused, | ||
542 | union perf_event *event, | 560 | union perf_event *event, |
543 | struct machine *machine) | 561 | struct machine *machine) |
544 | { | 562 | { |
@@ -640,7 +658,7 @@ size_t perf_event__fprintf_mmap(union perf_event *event, FILE *fp) | |||
640 | 658 | ||
641 | int perf_event__process_mmap(struct perf_tool *tool, | 659 | int perf_event__process_mmap(struct perf_tool *tool, |
642 | union perf_event *event, | 660 | union perf_event *event, |
643 | struct perf_sample *sample __used, | 661 | struct perf_sample *sample __maybe_unused, |
644 | struct machine *machine) | 662 | struct machine *machine) |
645 | { | 663 | { |
646 | struct thread *thread; | 664 | struct thread *thread; |
@@ -684,9 +702,9 @@ size_t perf_event__fprintf_task(union perf_event *event, FILE *fp) | |||
684 | event->fork.ppid, event->fork.ptid); | 702 | event->fork.ppid, event->fork.ptid); |
685 | } | 703 | } |
686 | 704 | ||
687 | int perf_event__process_task(struct perf_tool *tool __used, | 705 | int perf_event__process_task(struct perf_tool *tool __maybe_unused, |
688 | union perf_event *event, | 706 | union perf_event *event, |
689 | struct perf_sample *sample __used, | 707 | struct perf_sample *sample __maybe_unused, |
690 | struct machine *machine) | 708 | struct machine *machine) |
691 | { | 709 | { |
692 | struct thread *thread = machine__findnew_thread(machine, event->fork.tid); | 710 | struct thread *thread = machine__findnew_thread(machine, event->fork.tid); |
@@ -886,8 +904,9 @@ int perf_event__preprocess_sample(const union perf_event *event, | |||
886 | al->sym = map__find_symbol(al->map, al->addr, filter); | 904 | al->sym = map__find_symbol(al->map, al->addr, filter); |
887 | } | 905 | } |
888 | 906 | ||
889 | if (symbol_conf.sym_list && al->sym && | 907 | if (symbol_conf.sym_list && |
890 | !strlist__has_entry(symbol_conf.sym_list, al->sym->name)) | 908 | (!al->sym || !strlist__has_entry(symbol_conf.sym_list, |
909 | al->sym->name))) | ||
891 | goto out_filtered; | 910 | goto out_filtered; |
892 | 911 | ||
893 | return 0; | 912 | return 0; |
diff --git a/tools/perf/util/event.h b/tools/perf/util/event.h index d84870b06426..21b99e741a87 100644 --- a/tools/perf/util/event.h +++ b/tools/perf/util/event.h | |||
@@ -69,6 +69,16 @@ struct sample_event { | |||
69 | u64 array[]; | 69 | u64 array[]; |
70 | }; | 70 | }; |
71 | 71 | ||
72 | struct regs_dump { | ||
73 | u64 *regs; | ||
74 | }; | ||
75 | |||
76 | struct stack_dump { | ||
77 | u16 offset; | ||
78 | u64 size; | ||
79 | char *data; | ||
80 | }; | ||
81 | |||
72 | struct perf_sample { | 82 | struct perf_sample { |
73 | u64 ip; | 83 | u64 ip; |
74 | u32 pid, tid; | 84 | u32 pid, tid; |
@@ -82,6 +92,8 @@ struct perf_sample { | |||
82 | void *raw_data; | 92 | void *raw_data; |
83 | struct ip_callchain *callchain; | 93 | struct ip_callchain *callchain; |
84 | struct branch_stack *branch_stack; | 94 | struct branch_stack *branch_stack; |
95 | struct regs_dump user_regs; | ||
96 | struct stack_dump user_stack; | ||
85 | }; | 97 | }; |
86 | 98 | ||
87 | #define BUILD_ID_SIZE 20 | 99 | #define BUILD_ID_SIZE 20 |
@@ -89,7 +101,7 @@ struct perf_sample { | |||
89 | struct build_id_event { | 101 | struct build_id_event { |
90 | struct perf_event_header header; | 102 | struct perf_event_header header; |
91 | pid_t pid; | 103 | pid_t pid; |
92 | u8 build_id[ALIGN(BUILD_ID_SIZE, sizeof(u64))]; | 104 | u8 build_id[PERF_ALIGN(BUILD_ID_SIZE, sizeof(u64))]; |
93 | char filename[]; | 105 | char filename[]; |
94 | }; | 106 | }; |
95 | 107 | ||
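struct perf_sample grows two views into the raw record: user_regs points at the sampled user-space register block and user_stack at the copied user stack, both filled in by perf_evsel__parse_sample() when PERF_SAMPLE_REGS_USER / PERF_SAMPLE_STACK_USER are set. A consumer-side sketch; dump_regs() and unwind_stack() are hypothetical:

        static void handle_user_state(struct perf_sample *sample, int nr_regs)
        {
                if (sample->user_regs.regs)     /* one u64 per requested register */
                        dump_regs(sample->user_regs.regs, nr_regs);

                if (sample->user_stack.size)    /* raw copy of the user stack */
                        unwind_stack(sample->user_stack.data, sample->user_stack.size);
        }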
diff --git a/tools/perf/util/evlist.c b/tools/perf/util/evlist.c index 9b38681add9e..ae89686102f4 100644 --- a/tools/perf/util/evlist.c +++ b/tools/perf/util/evlist.c | |||
@@ -57,7 +57,7 @@ void perf_evlist__config_attrs(struct perf_evlist *evlist, | |||
57 | if (evlist->cpus->map[0] < 0) | 57 | if (evlist->cpus->map[0] < 0) |
58 | opts->no_inherit = true; | 58 | opts->no_inherit = true; |
59 | 59 | ||
60 | first = list_entry(evlist->entries.next, struct perf_evsel, node); | 60 | first = perf_evlist__first(evlist); |
61 | 61 | ||
62 | list_for_each_entry(evsel, &evlist->entries, node) { | 62 | list_for_each_entry(evsel, &evlist->entries, node) { |
63 | perf_evsel__config(evsel, opts, first); | 63 | perf_evsel__config(evsel, opts, first); |
@@ -108,6 +108,25 @@ void perf_evlist__splice_list_tail(struct perf_evlist *evlist, | |||
108 | evlist->nr_entries += nr_entries; | 108 | evlist->nr_entries += nr_entries; |
109 | } | 109 | } |
110 | 110 | ||
111 | void __perf_evlist__set_leader(struct list_head *list) | ||
112 | { | ||
113 | struct perf_evsel *evsel, *leader; | ||
114 | |||
115 | leader = list_entry(list->next, struct perf_evsel, node); | ||
116 | leader->leader = NULL; | ||
117 | |||
118 | list_for_each_entry(evsel, list, node) { | ||
119 | if (evsel != leader) | ||
120 | evsel->leader = leader; | ||
121 | } | ||
122 | } | ||
123 | |||
124 | void perf_evlist__set_leader(struct perf_evlist *evlist) | ||
125 | { | ||
126 | if (evlist->nr_entries) | ||
127 | __perf_evlist__set_leader(&evlist->entries); | ||
128 | } | ||
129 | |||
111 | int perf_evlist__add_default(struct perf_evlist *evlist) | 130 | int perf_evlist__add_default(struct perf_evlist *evlist) |
112 | { | 131 | { |
113 | struct perf_event_attr attr = { | 132 | struct perf_event_attr attr = { |
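__perf_evlist__set_leader() wires the group relationship up front: the first evsel on the list becomes the leader (its own leader pointer stays NULL) and every other member points back at it. Together with the leader-aware open in evsel.c this replaces the old "bool group" / group_fd plumbing; a sketch of the call sequence a tool would use (error text and cleanup label are illustrative):

        perf_evlist__set_leader(evlist);        /* first entry leads, the rest point to it */

        if (perf_evlist__open(evlist) < 0) {    /* each open derives group_fd from the leader */
                pr_err("failed to open events\n");
                goto out_delete_evlist;         /* hypothetical cleanup label */
        }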
@@ -285,7 +304,7 @@ void perf_evlist__enable(struct perf_evlist *evlist) | |||
285 | int cpu, thread; | 304 | int cpu, thread; |
286 | struct perf_evsel *pos; | 305 | struct perf_evsel *pos; |
287 | 306 | ||
288 | for (cpu = 0; cpu < evlist->cpus->nr; cpu++) { | 307 | for (cpu = 0; cpu < cpu_map__nr(evlist->cpus); cpu++) { |
289 | list_for_each_entry(pos, &evlist->entries, node) { | 308 | list_for_each_entry(pos, &evlist->entries, node) { |
290 | for (thread = 0; thread < evlist->threads->nr; thread++) | 309 | for (thread = 0; thread < evlist->threads->nr; thread++) |
291 | ioctl(FD(pos, cpu, thread), | 310 | ioctl(FD(pos, cpu, thread), |
@@ -296,7 +315,7 @@ void perf_evlist__enable(struct perf_evlist *evlist) | |||
296 | 315 | ||
297 | static int perf_evlist__alloc_pollfd(struct perf_evlist *evlist) | 316 | static int perf_evlist__alloc_pollfd(struct perf_evlist *evlist) |
298 | { | 317 | { |
299 | int nfds = evlist->cpus->nr * evlist->threads->nr * evlist->nr_entries; | 318 | int nfds = cpu_map__nr(evlist->cpus) * evlist->threads->nr * evlist->nr_entries; |
300 | evlist->pollfd = malloc(sizeof(struct pollfd) * nfds); | 319 | evlist->pollfd = malloc(sizeof(struct pollfd) * nfds); |
301 | return evlist->pollfd != NULL ? 0 : -ENOMEM; | 320 | return evlist->pollfd != NULL ? 0 : -ENOMEM; |
302 | } | 321 | } |
@@ -357,7 +376,7 @@ struct perf_evsel *perf_evlist__id2evsel(struct perf_evlist *evlist, u64 id) | |||
357 | int hash; | 376 | int hash; |
358 | 377 | ||
359 | if (evlist->nr_entries == 1) | 378 | if (evlist->nr_entries == 1) |
360 | return list_entry(evlist->entries.next, struct perf_evsel, node); | 379 | return perf_evlist__first(evlist); |
361 | 380 | ||
362 | hash = hash_64(id, PERF_EVLIST__HLIST_BITS); | 381 | hash = hash_64(id, PERF_EVLIST__HLIST_BITS); |
363 | head = &evlist->heads[hash]; | 382 | head = &evlist->heads[hash]; |
@@ -367,7 +386,7 @@ struct perf_evsel *perf_evlist__id2evsel(struct perf_evlist *evlist, u64 id) | |||
367 | return sid->evsel; | 386 | return sid->evsel; |
368 | 387 | ||
369 | if (!perf_evlist__sample_id_all(evlist)) | 388 | if (!perf_evlist__sample_id_all(evlist)) |
370 | return list_entry(evlist->entries.next, struct perf_evsel, node); | 389 | return perf_evlist__first(evlist); |
371 | 390 | ||
372 | return NULL; | 391 | return NULL; |
373 | } | 392 | } |
@@ -456,8 +475,8 @@ void perf_evlist__munmap(struct perf_evlist *evlist) | |||
456 | 475 | ||
457 | static int perf_evlist__alloc_mmap(struct perf_evlist *evlist) | 476 | static int perf_evlist__alloc_mmap(struct perf_evlist *evlist) |
458 | { | 477 | { |
459 | evlist->nr_mmaps = evlist->cpus->nr; | 478 | evlist->nr_mmaps = cpu_map__nr(evlist->cpus); |
460 | if (evlist->cpus->map[0] == -1) | 479 | if (cpu_map__all(evlist->cpus)) |
461 | evlist->nr_mmaps = evlist->threads->nr; | 480 | evlist->nr_mmaps = evlist->threads->nr; |
462 | evlist->mmap = zalloc(evlist->nr_mmaps * sizeof(struct perf_mmap)); | 481 | evlist->mmap = zalloc(evlist->nr_mmaps * sizeof(struct perf_mmap)); |
463 | return evlist->mmap != NULL ? 0 : -ENOMEM; | 482 | return evlist->mmap != NULL ? 0 : -ENOMEM; |
@@ -603,11 +622,11 @@ int perf_evlist__mmap(struct perf_evlist *evlist, unsigned int pages, | |||
603 | list_for_each_entry(evsel, &evlist->entries, node) { | 622 | list_for_each_entry(evsel, &evlist->entries, node) { |
604 | if ((evsel->attr.read_format & PERF_FORMAT_ID) && | 623 | if ((evsel->attr.read_format & PERF_FORMAT_ID) && |
605 | evsel->sample_id == NULL && | 624 | evsel->sample_id == NULL && |
606 | perf_evsel__alloc_id(evsel, cpus->nr, threads->nr) < 0) | 625 | perf_evsel__alloc_id(evsel, cpu_map__nr(cpus), threads->nr) < 0) |
607 | return -ENOMEM; | 626 | return -ENOMEM; |
608 | } | 627 | } |
609 | 628 | ||
610 | if (evlist->cpus->map[0] == -1) | 629 | if (cpu_map__all(cpus)) |
611 | return perf_evlist__mmap_per_thread(evlist, prot, mask); | 630 | return perf_evlist__mmap_per_thread(evlist, prot, mask); |
612 | 631 | ||
613 | return perf_evlist__mmap_per_cpu(evlist, prot, mask); | 632 | return perf_evlist__mmap_per_cpu(evlist, prot, mask); |
@@ -647,39 +666,44 @@ void perf_evlist__delete_maps(struct perf_evlist *evlist) | |||
647 | evlist->threads = NULL; | 666 | evlist->threads = NULL; |
648 | } | 667 | } |
649 | 668 | ||
650 | int perf_evlist__set_filters(struct perf_evlist *evlist) | 669 | int perf_evlist__apply_filters(struct perf_evlist *evlist) |
651 | { | 670 | { |
652 | const struct thread_map *threads = evlist->threads; | ||
653 | const struct cpu_map *cpus = evlist->cpus; | ||
654 | struct perf_evsel *evsel; | 671 | struct perf_evsel *evsel; |
655 | char *filter; | 672 | int err = 0; |
656 | int thread; | 673 | const int ncpus = cpu_map__nr(evlist->cpus), |
657 | int cpu; | 674 | nthreads = evlist->threads->nr; |
658 | int err; | ||
659 | int fd; | ||
660 | 675 | ||
661 | list_for_each_entry(evsel, &evlist->entries, node) { | 676 | list_for_each_entry(evsel, &evlist->entries, node) { |
662 | filter = evsel->filter; | 677 | if (evsel->filter == NULL) |
663 | if (!filter) | ||
664 | continue; | 678 | continue; |
665 | for (cpu = 0; cpu < cpus->nr; cpu++) { | 679 | |
666 | for (thread = 0; thread < threads->nr; thread++) { | 680 | err = perf_evsel__set_filter(evsel, ncpus, nthreads, evsel->filter); |
667 | fd = FD(evsel, cpu, thread); | 681 | if (err) |
668 | err = ioctl(fd, PERF_EVENT_IOC_SET_FILTER, filter); | 682 | break; |
669 | if (err) | ||
670 | return err; | ||
671 | } | ||
672 | } | ||
673 | } | 683 | } |
674 | 684 | ||
675 | return 0; | 685 | return err; |
676 | } | 686 | } |
677 | 687 | ||
678 | bool perf_evlist__valid_sample_type(const struct perf_evlist *evlist) | 688 | int perf_evlist__set_filter(struct perf_evlist *evlist, const char *filter) |
679 | { | 689 | { |
680 | struct perf_evsel *pos, *first; | 690 | struct perf_evsel *evsel; |
691 | int err = 0; | ||
692 | const int ncpus = cpu_map__nr(evlist->cpus), | ||
693 | nthreads = evlist->threads->nr; | ||
694 | |||
695 | list_for_each_entry(evsel, &evlist->entries, node) { | ||
696 | err = perf_evsel__set_filter(evsel, ncpus, nthreads, filter); | ||
697 | if (err) | ||
698 | break; | ||
699 | } | ||
700 | |||
701 | return err; | ||
702 | } | ||
681 | 703 | ||
682 | pos = first = list_entry(evlist->entries.next, struct perf_evsel, node); | 704 | bool perf_evlist__valid_sample_type(struct perf_evlist *evlist) |
705 | { | ||
706 | struct perf_evsel *first = perf_evlist__first(evlist), *pos = first; | ||
683 | 707 | ||
684 | list_for_each_entry_continue(pos, &evlist->entries, node) { | 708 | list_for_each_entry_continue(pos, &evlist->entries, node) { |
685 | if (first->attr.sample_type != pos->attr.sample_type) | 709 | if (first->attr.sample_type != pos->attr.sample_type) |
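Filter handling is split into two evlist entry points: perf_evlist__set_filter() applies one filter string to every event, while perf_evlist__apply_filters() pushes each evsel's own ->filter as collected during option parsing. Both bottom out in the new perf_evsel__set_filter(), which issues PERF_EVENT_IOC_SET_FILTER on every (cpu, thread) descriptor. A usage sketch (filter string and error messages illustrative):

        /* Same filter for every event on the list ... */
        if (perf_evlist__set_filter(evlist, "common_pid != 0"))
                pr_err("failed to set filter\n");

        /* ... or per-event filters stored earlier in evsel->filter: */
        if (perf_evlist__apply_filters(evlist))
                pr_err("failed to apply event filters\n");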
@@ -689,23 +713,19 @@ bool perf_evlist__valid_sample_type(const struct perf_evlist *evlist) | |||
689 | return true; | 713 | return true; |
690 | } | 714 | } |
691 | 715 | ||
692 | u64 perf_evlist__sample_type(const struct perf_evlist *evlist) | 716 | u64 perf_evlist__sample_type(struct perf_evlist *evlist) |
693 | { | 717 | { |
694 | struct perf_evsel *first; | 718 | struct perf_evsel *first = perf_evlist__first(evlist); |
695 | |||
696 | first = list_entry(evlist->entries.next, struct perf_evsel, node); | ||
697 | return first->attr.sample_type; | 719 | return first->attr.sample_type; |
698 | } | 720 | } |
699 | 721 | ||
700 | u16 perf_evlist__id_hdr_size(const struct perf_evlist *evlist) | 722 | u16 perf_evlist__id_hdr_size(struct perf_evlist *evlist) |
701 | { | 723 | { |
702 | struct perf_evsel *first; | 724 | struct perf_evsel *first = perf_evlist__first(evlist); |
703 | struct perf_sample *data; | 725 | struct perf_sample *data; |
704 | u64 sample_type; | 726 | u64 sample_type; |
705 | u16 size = 0; | 727 | u16 size = 0; |
706 | 728 | ||
707 | first = list_entry(evlist->entries.next, struct perf_evsel, node); | ||
708 | |||
709 | if (!first->attr.sample_id_all) | 729 | if (!first->attr.sample_id_all) |
710 | goto out; | 730 | goto out; |
711 | 731 | ||
@@ -729,11 +749,9 @@ out: | |||
729 | return size; | 749 | return size; |
730 | } | 750 | } |
731 | 751 | ||
732 | bool perf_evlist__valid_sample_id_all(const struct perf_evlist *evlist) | 752 | bool perf_evlist__valid_sample_id_all(struct perf_evlist *evlist) |
733 | { | 753 | { |
734 | struct perf_evsel *pos, *first; | 754 | struct perf_evsel *first = perf_evlist__first(evlist), *pos = first; |
735 | |||
736 | pos = first = list_entry(evlist->entries.next, struct perf_evsel, node); | ||
737 | 755 | ||
738 | list_for_each_entry_continue(pos, &evlist->entries, node) { | 756 | list_for_each_entry_continue(pos, &evlist->entries, node) { |
739 | if (first->attr.sample_id_all != pos->attr.sample_id_all) | 757 | if (first->attr.sample_id_all != pos->attr.sample_id_all) |
@@ -743,11 +761,9 @@ bool perf_evlist__valid_sample_id_all(const struct perf_evlist *evlist) | |||
743 | return true; | 761 | return true; |
744 | } | 762 | } |
745 | 763 | ||
746 | bool perf_evlist__sample_id_all(const struct perf_evlist *evlist) | 764 | bool perf_evlist__sample_id_all(struct perf_evlist *evlist) |
747 | { | 765 | { |
748 | struct perf_evsel *first; | 766 | struct perf_evsel *first = perf_evlist__first(evlist); |
749 | |||
750 | first = list_entry(evlist->entries.next, struct perf_evsel, node); | ||
751 | return first->attr.sample_id_all; | 767 | return first->attr.sample_id_all; |
752 | } | 768 | } |
753 | 769 | ||
@@ -757,21 +773,13 @@ void perf_evlist__set_selected(struct perf_evlist *evlist, | |||
757 | evlist->selected = evsel; | 773 | evlist->selected = evsel; |
758 | } | 774 | } |
759 | 775 | ||
760 | int perf_evlist__open(struct perf_evlist *evlist, bool group) | 776 | int perf_evlist__open(struct perf_evlist *evlist) |
761 | { | 777 | { |
762 | struct perf_evsel *evsel, *first; | 778 | struct perf_evsel *evsel; |
763 | int err, ncpus, nthreads; | 779 | int err, ncpus, nthreads; |
764 | 780 | ||
765 | first = list_entry(evlist->entries.next, struct perf_evsel, node); | ||
766 | |||
767 | list_for_each_entry(evsel, &evlist->entries, node) { | 781 | list_for_each_entry(evsel, &evlist->entries, node) { |
768 | struct xyarray *group_fd = NULL; | 782 | err = perf_evsel__open(evsel, evlist->cpus, evlist->threads); |
769 | |||
770 | if (group && evsel != first) | ||
771 | group_fd = first->fd; | ||
772 | |||
773 | err = perf_evsel__open(evsel, evlist->cpus, evlist->threads, | ||
774 | group, group_fd); | ||
775 | if (err < 0) | 783 | if (err < 0) |
776 | goto out_err; | 784 | goto out_err; |
777 | } | 785 | } |
@@ -883,8 +891,21 @@ int perf_evlist__start_workload(struct perf_evlist *evlist) | |||
883 | } | 891 | } |
884 | 892 | ||
885 | int perf_evlist__parse_sample(struct perf_evlist *evlist, union perf_event *event, | 893 | int perf_evlist__parse_sample(struct perf_evlist *evlist, union perf_event *event, |
886 | struct perf_sample *sample, bool swapped) | 894 | struct perf_sample *sample) |
895 | { | ||
896 | struct perf_evsel *evsel = perf_evlist__first(evlist); | ||
897 | return perf_evsel__parse_sample(evsel, event, sample); | ||
898 | } | ||
899 | |||
900 | size_t perf_evlist__fprintf(struct perf_evlist *evlist, FILE *fp) | ||
887 | { | 901 | { |
888 | struct perf_evsel *e = list_entry(evlist->entries.next, struct perf_evsel, node); | 902 | struct perf_evsel *evsel; |
889 | return perf_evsel__parse_sample(e, event, sample, swapped); | 903 | size_t printed = 0; |
904 | |||
905 | list_for_each_entry(evsel, &evlist->entries, node) { | ||
906 | printed += fprintf(fp, "%s%s", evsel->idx ? ", " : "", | ||
907 | perf_evsel__name(evsel)); | ||
908 | } | ||
909 | |||
910 | return printed + fprintf(fp, "\n"); | ||
890 | } | 911 | } |
diff --git a/tools/perf/util/evlist.h b/tools/perf/util/evlist.h index 528c1acd9298..3f1fb66be022 100644 --- a/tools/perf/util/evlist.h +++ b/tools/perf/util/evlist.h | |||
@@ -5,6 +5,7 @@ | |||
5 | #include <stdio.h> | 5 | #include <stdio.h> |
6 | #include "../perf.h" | 6 | #include "../perf.h" |
7 | #include "event.h" | 7 | #include "event.h" |
8 | #include "evsel.h" | ||
8 | #include "util.h" | 9 | #include "util.h" |
9 | #include <unistd.h> | 10 | #include <unistd.h> |
10 | 11 | ||
@@ -41,8 +42,6 @@ struct perf_evsel_str_handler { | |||
41 | void *handler; | 42 | void *handler; |
42 | }; | 43 | }; |
43 | 44 | ||
44 | struct perf_evsel; | ||
45 | |||
46 | struct perf_evlist *perf_evlist__new(struct cpu_map *cpus, | 45 | struct perf_evlist *perf_evlist__new(struct cpu_map *cpus, |
47 | struct thread_map *threads); | 46 | struct thread_map *threads); |
48 | void perf_evlist__init(struct perf_evlist *evlist, struct cpu_map *cpus, | 47 | void perf_evlist__init(struct perf_evlist *evlist, struct cpu_map *cpus, |
@@ -73,6 +72,8 @@ int perf_evlist__set_tracepoints_handlers(struct perf_evlist *evlist, | |||
73 | #define perf_evlist__set_tracepoints_handlers_array(evlist, array) \ | 72 | #define perf_evlist__set_tracepoints_handlers_array(evlist, array) \ |
74 | perf_evlist__set_tracepoints_handlers(evlist, array, ARRAY_SIZE(array)) | 73 | perf_evlist__set_tracepoints_handlers(evlist, array, ARRAY_SIZE(array)) |
75 | 74 | ||
75 | int perf_evlist__set_filter(struct perf_evlist *evlist, const char *filter); | ||
76 | |||
76 | struct perf_evsel * | 77 | struct perf_evsel * |
77 | perf_evlist__find_tracepoint_by_id(struct perf_evlist *evlist, int id); | 78 | perf_evlist__find_tracepoint_by_id(struct perf_evlist *evlist, int id); |
78 | 79 | ||
@@ -85,7 +86,7 @@ struct perf_evsel *perf_evlist__id2evsel(struct perf_evlist *evlist, u64 id); | |||
85 | 86 | ||
86 | union perf_event *perf_evlist__mmap_read(struct perf_evlist *self, int idx); | 87 | union perf_event *perf_evlist__mmap_read(struct perf_evlist *self, int idx); |
87 | 88 | ||
88 | int perf_evlist__open(struct perf_evlist *evlist, bool group); | 89 | int perf_evlist__open(struct perf_evlist *evlist); |
89 | 90 | ||
90 | void perf_evlist__config_attrs(struct perf_evlist *evlist, | 91 | void perf_evlist__config_attrs(struct perf_evlist *evlist, |
91 | struct perf_record_opts *opts); | 92 | struct perf_record_opts *opts); |
@@ -116,20 +117,34 @@ static inline void perf_evlist__set_maps(struct perf_evlist *evlist, | |||
116 | int perf_evlist__create_maps(struct perf_evlist *evlist, | 117 | int perf_evlist__create_maps(struct perf_evlist *evlist, |
117 | struct perf_target *target); | 118 | struct perf_target *target); |
118 | void perf_evlist__delete_maps(struct perf_evlist *evlist); | 119 | void perf_evlist__delete_maps(struct perf_evlist *evlist); |
119 | int perf_evlist__set_filters(struct perf_evlist *evlist); | 120 | int perf_evlist__apply_filters(struct perf_evlist *evlist); |
121 | |||
122 | void __perf_evlist__set_leader(struct list_head *list); | ||
123 | void perf_evlist__set_leader(struct perf_evlist *evlist); | ||
120 | 124 | ||
121 | u64 perf_evlist__sample_type(const struct perf_evlist *evlist); | 125 | u64 perf_evlist__sample_type(struct perf_evlist *evlist); |
122 | bool perf_evlist__sample_id_all(const const struct perf_evlist *evlist); | 126 | bool perf_evlist__sample_id_all(struct perf_evlist *evlist); |
123 | u16 perf_evlist__id_hdr_size(const struct perf_evlist *evlist); | 127 | u16 perf_evlist__id_hdr_size(struct perf_evlist *evlist); |
124 | 128 | ||
125 | int perf_evlist__parse_sample(struct perf_evlist *evlist, union perf_event *event, | 129 | int perf_evlist__parse_sample(struct perf_evlist *evlist, union perf_event *event, |
126 | struct perf_sample *sample, bool swapped); | 130 | struct perf_sample *sample); |
127 | 131 | ||
128 | bool perf_evlist__valid_sample_type(const struct perf_evlist *evlist); | 132 | bool perf_evlist__valid_sample_type(struct perf_evlist *evlist); |
129 | bool perf_evlist__valid_sample_id_all(const struct perf_evlist *evlist); | 133 | bool perf_evlist__valid_sample_id_all(struct perf_evlist *evlist); |
130 | 134 | ||
131 | void perf_evlist__splice_list_tail(struct perf_evlist *evlist, | 135 | void perf_evlist__splice_list_tail(struct perf_evlist *evlist, |
132 | struct list_head *list, | 136 | struct list_head *list, |
133 | int nr_entries); | 137 | int nr_entries); |
134 | 138 | ||
139 | static inline struct perf_evsel *perf_evlist__first(struct perf_evlist *evlist) | ||
140 | { | ||
141 | return list_entry(evlist->entries.next, struct perf_evsel, node); | ||
142 | } | ||
143 | |||
144 | static inline struct perf_evsel *perf_evlist__last(struct perf_evlist *evlist) | ||
145 | { | ||
146 | return list_entry(evlist->entries.prev, struct perf_evsel, node); | ||
147 | } | ||
148 | |||
149 | size_t perf_evlist__fprintf(struct perf_evlist *evlist, FILE *fp); | ||
135 | #endif /* __PERF_EVLIST_H */ | 150 | #endif /* __PERF_EVLIST_H */ |
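perf_evlist__first()/__last() replace the open-coded list_entry() walks used throughout evlist.c (which is also why the const qualifiers were dropped from the prototypes above). A before/after sketch:

        /* before */
        first = list_entry(evlist->entries.next, struct perf_evsel, node);

        /* after */
        struct perf_evsel *first = perf_evlist__first(evlist);  /* entries.next */
        struct perf_evsel *last  = perf_evlist__last(evlist);   /* entries.prev */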
diff --git a/tools/perf/util/evsel.c b/tools/perf/util/evsel.c index 2eaae140def2..ffdd94e9c9c3 100644 --- a/tools/perf/util/evsel.c +++ b/tools/perf/util/evsel.c | |||
@@ -8,7 +8,10 @@ | |||
8 | */ | 8 | */ |
9 | 9 | ||
10 | #include <byteswap.h> | 10 | #include <byteswap.h> |
11 | #include <linux/bitops.h> | ||
11 | #include "asm/bug.h" | 12 | #include "asm/bug.h" |
13 | #include "debugfs.h" | ||
14 | #include "event-parse.h" | ||
12 | #include "evsel.h" | 15 | #include "evsel.h" |
13 | #include "evlist.h" | 16 | #include "evlist.h" |
14 | #include "util.h" | 17 | #include "util.h" |
@@ -16,9 +19,10 @@ | |||
16 | #include "thread_map.h" | 19 | #include "thread_map.h" |
17 | #include "target.h" | 20 | #include "target.h" |
18 | #include "../../../include/linux/hw_breakpoint.h" | 21 | #include "../../../include/linux/hw_breakpoint.h" |
22 | #include "../../include/linux/perf_event.h" | ||
23 | #include "perf_regs.h" | ||
19 | 24 | ||
20 | #define FD(e, x, y) (*(int *)xyarray__entry(e->fd, x, y)) | 25 | #define FD(e, x, y) (*(int *)xyarray__entry(e->fd, x, y)) |
21 | #define GROUP_FD(group_fd, cpu) (*(int *)xyarray__entry(group_fd, cpu, 0)) | ||
22 | 26 | ||
23 | static int __perf_evsel__sample_size(u64 sample_type) | 27 | static int __perf_evsel__sample_size(u64 sample_type) |
24 | { | 28 | { |
@@ -66,7 +70,80 @@ struct perf_evsel *perf_evsel__new(struct perf_event_attr *attr, int idx) | |||
66 | return evsel; | 70 | return evsel; |
67 | } | 71 | } |
68 | 72 | ||
69 | static const char *perf_evsel__hw_names[PERF_COUNT_HW_MAX] = { | 73 | struct event_format *event_format__new(const char *sys, const char *name) |
74 | { | ||
75 | int fd, n; | ||
76 | char *filename; | ||
77 | void *bf = NULL, *nbf; | ||
78 | size_t size = 0, alloc_size = 0; | ||
79 | struct event_format *format = NULL; | ||
80 | |||
81 | if (asprintf(&filename, "%s/%s/%s/format", tracing_events_path, sys, name) < 0) | ||
82 | goto out; | ||
83 | |||
84 | fd = open(filename, O_RDONLY); | ||
85 | if (fd < 0) | ||
86 | goto out_free_filename; | ||
87 | |||
88 | do { | ||
89 | if (size == alloc_size) { | ||
90 | alloc_size += BUFSIZ; | ||
91 | nbf = realloc(bf, alloc_size); | ||
92 | if (nbf == NULL) | ||
93 | goto out_free_bf; | ||
94 | bf = nbf; | ||
95 | } | ||
96 | |||
97 | n = read(fd, bf + size, BUFSIZ); | ||
98 | if (n < 0) | ||
99 | goto out_free_bf; | ||
100 | size += n; | ||
101 | } while (n > 0); | ||
102 | |||
103 | pevent_parse_format(&format, bf, size, sys); | ||
104 | |||
105 | out_free_bf: | ||
106 | free(bf); | ||
107 | close(fd); | ||
108 | out_free_filename: | ||
109 | free(filename); | ||
110 | out: | ||
111 | return format; | ||
112 | } | ||
113 | |||
114 | struct perf_evsel *perf_evsel__newtp(const char *sys, const char *name, int idx) | ||
115 | { | ||
116 | struct perf_evsel *evsel = zalloc(sizeof(*evsel)); | ||
117 | |||
118 | if (evsel != NULL) { | ||
119 | struct perf_event_attr attr = { | ||
120 | .type = PERF_TYPE_TRACEPOINT, | ||
121 | .sample_type = (PERF_SAMPLE_RAW | PERF_SAMPLE_TIME | | ||
122 | PERF_SAMPLE_CPU | PERF_SAMPLE_PERIOD), | ||
123 | }; | ||
124 | |||
125 | if (asprintf(&evsel->name, "%s:%s", sys, name) < 0) | ||
126 | goto out_free; | ||
127 | |||
128 | evsel->tp_format = event_format__new(sys, name); | ||
129 | if (evsel->tp_format == NULL) | ||
130 | goto out_free; | ||
131 | |||
132 | event_attr_init(&attr); | ||
133 | attr.config = evsel->tp_format->id; | ||
134 | attr.sample_period = 1; | ||
135 | perf_evsel__init(evsel, &attr, idx); | ||
136 | } | ||
137 | |||
138 | return evsel; | ||
139 | |||
140 | out_free: | ||
141 | free(evsel->name); | ||
142 | free(evsel); | ||
143 | return NULL; | ||
144 | } | ||
145 | |||
146 | const char *perf_evsel__hw_names[PERF_COUNT_HW_MAX] = { | ||
70 | "cycles", | 147 | "cycles", |
71 | "instructions", | 148 | "instructions", |
72 | "cache-references", | 149 | "cache-references", |
@@ -129,12 +206,12 @@ static int perf_evsel__hw_name(struct perf_evsel *evsel, char *bf, size_t size) | |||
129 | return r + perf_evsel__add_modifiers(evsel, bf + r, size - r); | 206 | return r + perf_evsel__add_modifiers(evsel, bf + r, size - r); |
130 | } | 207 | } |
131 | 208 | ||
132 | static const char *perf_evsel__sw_names[PERF_COUNT_SW_MAX] = { | 209 | const char *perf_evsel__sw_names[PERF_COUNT_SW_MAX] = { |
133 | "cpu-clock", | 210 | "cpu-clock", |
134 | "task-clock", | 211 | "task-clock", |
135 | "page-faults", | 212 | "page-faults", |
136 | "context-switches", | 213 | "context-switches", |
137 | "CPU-migrations", | 214 | "cpu-migrations", |
138 | "minor-faults", | 215 | "minor-faults", |
139 | "major-faults", | 216 | "major-faults", |
140 | "alignment-faults", | 217 | "alignment-faults", |
@@ -317,7 +394,8 @@ const char *perf_evsel__name(struct perf_evsel *evsel) | |||
317 | break; | 394 | break; |
318 | 395 | ||
319 | default: | 396 | default: |
320 | scnprintf(bf, sizeof(bf), "%s", "unknown attr type"); | 397 | scnprintf(bf, sizeof(bf), "unknown attr type: %d", |
398 | evsel->attr.type); | ||
321 | break; | 399 | break; |
322 | } | 400 | } |
323 | 401 | ||
@@ -367,9 +445,18 @@ void perf_evsel__config(struct perf_evsel *evsel, struct perf_record_opts *opts, | |||
367 | attr->mmap_data = track; | 445 | attr->mmap_data = track; |
368 | } | 446 | } |
369 | 447 | ||
370 | if (opts->call_graph) | 448 | if (opts->call_graph) { |
371 | attr->sample_type |= PERF_SAMPLE_CALLCHAIN; | 449 | attr->sample_type |= PERF_SAMPLE_CALLCHAIN; |
372 | 450 | ||
451 | if (opts->call_graph == CALLCHAIN_DWARF) { | ||
452 | attr->sample_type |= PERF_SAMPLE_REGS_USER | | ||
453 | PERF_SAMPLE_STACK_USER; | ||
454 | attr->sample_regs_user = PERF_REGS_MASK; | ||
455 | attr->sample_stack_user = opts->stack_dump_size; | ||
456 | attr->exclude_callchain_user = 1; | ||
457 | } | ||
458 | } | ||
459 | |||
373 | if (perf_target__has_cpu(&opts->target)) | 460 | if (perf_target__has_cpu(&opts->target)) |
374 | attr->sample_type |= PERF_SAMPLE_CPU; | 461 | attr->sample_type |= PERF_SAMPLE_CPU; |
375 | 462 | ||
@@ -421,6 +508,24 @@ int perf_evsel__alloc_fd(struct perf_evsel *evsel, int ncpus, int nthreads) | |||
421 | return evsel->fd != NULL ? 0 : -ENOMEM; | 508 | return evsel->fd != NULL ? 0 : -ENOMEM; |
422 | } | 509 | } |
423 | 510 | ||
511 | int perf_evsel__set_filter(struct perf_evsel *evsel, int ncpus, int nthreads, | ||
512 | const char *filter) | ||
513 | { | ||
514 | int cpu, thread; | ||
515 | |||
516 | for (cpu = 0; cpu < ncpus; cpu++) { | ||
517 | for (thread = 0; thread < nthreads; thread++) { | ||
518 | int fd = FD(evsel, cpu, thread), | ||
519 | err = ioctl(fd, PERF_EVENT_IOC_SET_FILTER, filter); | ||
520 | |||
521 | if (err) | ||
522 | return err; | ||
523 | } | ||
524 | } | ||
525 | |||
526 | return 0; | ||
527 | } | ||
528 | |||
424 | int perf_evsel__alloc_id(struct perf_evsel *evsel, int ncpus, int nthreads) | 529 | int perf_evsel__alloc_id(struct perf_evsel *evsel, int ncpus, int nthreads) |
425 | { | 530 | { |
426 | evsel->sample_id = xyarray__new(ncpus, nthreads, sizeof(struct perf_sample_id)); | 531 | evsel->sample_id = xyarray__new(ncpus, nthreads, sizeof(struct perf_sample_id)); |
@@ -481,6 +586,9 @@ void perf_evsel__delete(struct perf_evsel *evsel) | |||
481 | { | 586 | { |
482 | perf_evsel__exit(evsel); | 587 | perf_evsel__exit(evsel); |
483 | close_cgroup(evsel->cgrp); | 588 | close_cgroup(evsel->cgrp); |
589 | free(evsel->group_name); | ||
590 | if (evsel->tp_format) | ||
591 | pevent_free_format(evsel->tp_format); | ||
484 | free(evsel->name); | 592 | free(evsel->name); |
485 | free(evsel); | 593 | free(evsel); |
486 | } | 594 | } |
@@ -556,9 +664,28 @@ int __perf_evsel__read(struct perf_evsel *evsel, | |||
556 | return 0; | 664 | return 0; |
557 | } | 665 | } |
558 | 666 | ||
667 | static int get_group_fd(struct perf_evsel *evsel, int cpu, int thread) | ||
668 | { | ||
669 | struct perf_evsel *leader = evsel->leader; | ||
670 | int fd; | ||
671 | |||
672 | if (!leader) | ||
673 | return -1; | ||
674 | |||
675 | /* | ||
676 | * Leader must be already processed/open, | ||
677 | * if not it's a bug. | ||
678 | */ | ||
679 | BUG_ON(!leader->fd); | ||
680 | |||
681 | fd = FD(leader, cpu, thread); | ||
682 | BUG_ON(fd == -1); | ||
683 | |||
684 | return fd; | ||
685 | } | ||
686 | |||
559 | static int __perf_evsel__open(struct perf_evsel *evsel, struct cpu_map *cpus, | 687 | static int __perf_evsel__open(struct perf_evsel *evsel, struct cpu_map *cpus, |
560 | struct thread_map *threads, bool group, | 688 | struct thread_map *threads) |
561 | struct xyarray *group_fds) | ||
562 | { | 689 | { |
563 | int cpu, thread; | 690 | int cpu, thread; |
564 | unsigned long flags = 0; | 691 | unsigned long flags = 0; |
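get_group_fd() recovers what the kernel's group_fd argument expects: -1 when opening a group leader, and the leader's already-open descriptor for the same (cpu, thread) slot when opening a member. A sketch of what the open loop passes down:

        int group_fd = get_group_fd(evsel, cpu, thread);        /* -1 for a leader */

        FD(evsel, cpu, thread) = sys_perf_event_open(&evsel->attr, pid,
                                                     cpus->map[cpu], group_fd, flags);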
@@ -574,13 +701,15 @@ static int __perf_evsel__open(struct perf_evsel *evsel, struct cpu_map *cpus, | |||
574 | } | 701 | } |
575 | 702 | ||
576 | for (cpu = 0; cpu < cpus->nr; cpu++) { | 703 | for (cpu = 0; cpu < cpus->nr; cpu++) { |
577 | int group_fd = group_fds ? GROUP_FD(group_fds, cpu) : -1; | ||
578 | 704 | ||
579 | for (thread = 0; thread < threads->nr; thread++) { | 705 | for (thread = 0; thread < threads->nr; thread++) { |
706 | int group_fd; | ||
580 | 707 | ||
581 | if (!evsel->cgrp) | 708 | if (!evsel->cgrp) |
582 | pid = threads->map[thread]; | 709 | pid = threads->map[thread]; |
583 | 710 | ||
711 | group_fd = get_group_fd(evsel, cpu, thread); | ||
712 | |||
584 | FD(evsel, cpu, thread) = sys_perf_event_open(&evsel->attr, | 713 | FD(evsel, cpu, thread) = sys_perf_event_open(&evsel->attr, |
585 | pid, | 714 | pid, |
586 | cpus->map[cpu], | 715 | cpus->map[cpu], |
@@ -589,9 +718,6 @@ static int __perf_evsel__open(struct perf_evsel *evsel, struct cpu_map *cpus, | |||
589 | err = -errno; | 718 | err = -errno; |
590 | goto out_close; | 719 | goto out_close; |
591 | } | 720 | } |
592 | |||
593 | if (group && group_fd == -1) | ||
594 | group_fd = FD(evsel, cpu, thread); | ||
595 | } | 721 | } |
596 | } | 722 | } |
597 | 723 | ||
@@ -635,8 +761,7 @@ static struct { | |||
635 | }; | 761 | }; |
636 | 762 | ||
637 | int perf_evsel__open(struct perf_evsel *evsel, struct cpu_map *cpus, | 763 | int perf_evsel__open(struct perf_evsel *evsel, struct cpu_map *cpus, |
638 | struct thread_map *threads, bool group, | 764 | struct thread_map *threads) |
639 | struct xyarray *group_fd) | ||
640 | { | 765 | { |
641 | if (cpus == NULL) { | 766 | if (cpus == NULL) { |
642 | /* Work around old compiler warnings about strict aliasing */ | 767 | /* Work around old compiler warnings about strict aliasing */ |
@@ -646,30 +771,28 @@ int perf_evsel__open(struct perf_evsel *evsel, struct cpu_map *cpus, | |||
646 | if (threads == NULL) | 771 | if (threads == NULL) |
647 | threads = &empty_thread_map.map; | 772 | threads = &empty_thread_map.map; |
648 | 773 | ||
649 | return __perf_evsel__open(evsel, cpus, threads, group, group_fd); | 774 | return __perf_evsel__open(evsel, cpus, threads); |
650 | } | 775 | } |
651 | 776 | ||
652 | int perf_evsel__open_per_cpu(struct perf_evsel *evsel, | 777 | int perf_evsel__open_per_cpu(struct perf_evsel *evsel, |
653 | struct cpu_map *cpus, bool group, | 778 | struct cpu_map *cpus) |
654 | struct xyarray *group_fd) | ||
655 | { | 779 | { |
656 | return __perf_evsel__open(evsel, cpus, &empty_thread_map.map, group, | 780 | return __perf_evsel__open(evsel, cpus, &empty_thread_map.map); |
657 | group_fd); | ||
658 | } | 781 | } |
659 | 782 | ||
660 | int perf_evsel__open_per_thread(struct perf_evsel *evsel, | 783 | int perf_evsel__open_per_thread(struct perf_evsel *evsel, |
661 | struct thread_map *threads, bool group, | 784 | struct thread_map *threads) |
662 | struct xyarray *group_fd) | ||
663 | { | 785 | { |
664 | return __perf_evsel__open(evsel, &empty_cpu_map.map, threads, group, | 786 | return __perf_evsel__open(evsel, &empty_cpu_map.map, threads); |
665 | group_fd); | ||
666 | } | 787 | } |
667 | 788 | ||
668 | static int perf_event__parse_id_sample(const union perf_event *event, u64 type, | 789 | static int perf_evsel__parse_id_sample(const struct perf_evsel *evsel, |
669 | struct perf_sample *sample, | 790 | const union perf_event *event, |
670 | bool swapped) | 791 | struct perf_sample *sample) |
671 | { | 792 | { |
793 | u64 type = evsel->attr.sample_type; | ||
672 | const u64 *array = event->sample.array; | 794 | const u64 *array = event->sample.array; |
795 | bool swapped = evsel->needs_swap; | ||
673 | union u64_swap u; | 796 | union u64_swap u; |
674 | 797 | ||
675 | array += ((event->header.size - | 798 | array += ((event->header.size - |
@@ -730,9 +853,11 @@ static bool sample_overlap(const union perf_event *event, | |||
730 | } | 853 | } |
731 | 854 | ||
732 | int perf_evsel__parse_sample(struct perf_evsel *evsel, union perf_event *event, | 855 | int perf_evsel__parse_sample(struct perf_evsel *evsel, union perf_event *event, |
733 | struct perf_sample *data, bool swapped) | 856 | struct perf_sample *data) |
734 | { | 857 | { |
735 | u64 type = evsel->attr.sample_type; | 858 | u64 type = evsel->attr.sample_type; |
859 | u64 regs_user = evsel->attr.sample_regs_user; | ||
860 | bool swapped = evsel->needs_swap; | ||
736 | const u64 *array; | 861 | const u64 *array; |
737 | 862 | ||
738 | /* | 863 | /* |
@@ -749,7 +874,7 @@ int perf_evsel__parse_sample(struct perf_evsel *evsel, union perf_event *event, | |||
749 | if (event->header.type != PERF_RECORD_SAMPLE) { | 874 | if (event->header.type != PERF_RECORD_SAMPLE) { |
750 | if (!evsel->attr.sample_id_all) | 875 | if (!evsel->attr.sample_id_all) |
751 | return 0; | 876 | return 0; |
752 | return perf_event__parse_id_sample(event, type, data, swapped); | 877 | return perf_evsel__parse_id_sample(evsel, event, data); |
753 | } | 878 | } |
754 | 879 | ||
755 | array = event->sample.array; | 880 | array = event->sample.array; |
@@ -869,6 +994,32 @@ int perf_evsel__parse_sample(struct perf_evsel *evsel, union perf_event *event, | |||
869 | sz /= sizeof(u64); | 994 | sz /= sizeof(u64); |
870 | array += sz; | 995 | array += sz; |
871 | } | 996 | } |
997 | |||
998 | if (type & PERF_SAMPLE_REGS_USER) { | ||
999 | /* First u64 tells us if we have any regs in sample. */ | ||
1000 | u64 avail = *array++; | ||
1001 | |||
1002 | if (avail) { | ||
1003 | data->user_regs.regs = (u64 *)array; | ||
1004 | array += hweight_long(regs_user); | ||
1005 | } | ||
1006 | } | ||
1007 | |||
1008 | if (type & PERF_SAMPLE_STACK_USER) { | ||
1009 | u64 size = *array++; | ||
1010 | |||
1011 | data->user_stack.offset = ((char *)(array - 1) | ||
1012 | - (char *) event); | ||
1013 | |||
1014 | if (!size) { | ||
1015 | data->user_stack.size = 0; | ||
1016 | } else { | ||
1017 | data->user_stack.data = (char *)array; | ||
1018 | array += size / sizeof(*array); | ||
1019 | data->user_stack.size = *array; | ||
1020 | } | ||
1021 | } | ||
1022 | |||
872 | return 0; | 1023 | return 0; |
873 | } | 1024 | } |
874 | 1025 | ||
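The two new blocks parse the sample payload added for PERF_SAMPLE_REGS_USER and PERF_SAMPLE_STACK_USER: a u64 availability word followed by one u64 per bit set in attr.sample_regs_user, then a u64 stack-dump size, the dumped bytes, and a trailing u64 holding the size actually captured. A standalone sketch of that layout, with illustrative names not taken from the perf sources:

#include <stdint.h>

struct user_regs_view  { const uint64_t *regs; };
struct user_stack_view { const char *data; uint64_t size; };

static int popcount64(uint64_t v)
{
	int n = 0;

	for (; v; v >>= 1)
		n += v & 1;
	return n;
}

static const uint64_t *parse_user_regs_and_stack(const uint64_t *array,
						 uint64_t sample_regs_user,
						 struct user_regs_view *r,
						 struct user_stack_view *s)
{
	uint64_t avail = *array++;	/* non-zero if any regs were captured */

	if (avail) {
		r->regs = array;
		array += popcount64(sample_regs_user);
	}

	uint64_t size = *array++;	/* requested stack-dump size */

	if (size) {
		s->data = (const char *)array;
		array += size / sizeof(*array);
		s->size = *array;	/* bytes actually captured (dyn size) */
	}
	return array;
}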
@@ -947,3 +1098,72 @@ int perf_event__synthesize_sample(union perf_event *event, u64 type, | |||
947 | 1098 | ||
948 | return 0; | 1099 | return 0; |
949 | } | 1100 | } |
1101 | |||
1102 | struct format_field *perf_evsel__field(struct perf_evsel *evsel, const char *name) | ||
1103 | { | ||
1104 | return pevent_find_field(evsel->tp_format, name); | ||
1105 | } | ||
1106 | |||
1107 | void *perf_evsel__rawptr(struct perf_evsel *evsel, struct perf_sample *sample, | ||
1108 | const char *name) | ||
1109 | { | ||
1110 | struct format_field *field = perf_evsel__field(evsel, name); | ||
1111 | int offset; | ||
1112 | |||
1113 | if (!field) | ||
1114 | return NULL; | ||
1115 | |||
1116 | offset = field->offset; | ||
1117 | |||
1118 | if (field->flags & FIELD_IS_DYNAMIC) { | ||
1119 | offset = *(int *)(sample->raw_data + field->offset); | ||
1120 | offset &= 0xffff; | ||
1121 | } | ||
1122 | |||
1123 | return sample->raw_data + offset; | ||
1124 | } | ||
1125 | |||
1126 | u64 perf_evsel__intval(struct perf_evsel *evsel, struct perf_sample *sample, | ||
1127 | const char *name) | ||
1128 | { | ||
1129 | struct format_field *field = perf_evsel__field(evsel, name); | ||
1130 | void *ptr; | ||
1131 | u64 value; | ||
1132 | |||
1133 | if (!field) | ||
1134 | return 0; | ||
1135 | |||
1136 | ptr = sample->raw_data + field->offset; | ||
1137 | |||
1138 | switch (field->size) { | ||
1139 | case 1: | ||
1140 | return *(u8 *)ptr; | ||
1141 | case 2: | ||
1142 | value = *(u16 *)ptr; | ||
1143 | break; | ||
1144 | case 4: | ||
1145 | value = *(u32 *)ptr; | ||
1146 | break; | ||
1147 | case 8: | ||
1148 | value = *(u64 *)ptr; | ||
1149 | break; | ||
1150 | default: | ||
1151 | return 0; | ||
1152 | } | ||
1153 | |||
1154 | if (!evsel->needs_swap) | ||
1155 | return value; | ||
1156 | |||
1157 | switch (field->size) { | ||
1158 | case 2: | ||
1159 | return bswap_16(value); | ||
1160 | case 4: | ||
1161 | return bswap_32(value); | ||
1162 | case 8: | ||
1163 | return bswap_64(value); | ||
1164 | default: | ||
1165 | return 0; | ||
1166 | } | ||
1167 | |||
1168 | return 0; | ||
1169 | } | ||
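perf_evsel__field()/rawptr()/intval() resolve a tracepoint field by name through the event's libtraceevent format (evsel->tp_format) and read it out of sample->raw_data, byte-swapping when needs_swap is set. A hypothetical handler sketch, assuming perf's util/evsel.h; the sched_switch field names are standard tracepoint fields:

#include <inttypes.h>
#include <stdio.h>
#include "util/evsel.h"

static void handle_sched_switch(struct perf_evsel *evsel,
				struct perf_sample *sample)
{
	const char *prev_comm = perf_evsel__strval(evsel, sample, "prev_comm");
	uint64_t prev_pid = perf_evsel__intval(evsel, sample, "prev_pid");
	uint64_t next_pid = perf_evsel__intval(evsel, sample, "next_pid");

	printf("%s(%" PRIu64 ") -> %" PRIu64 "\n", prev_comm, prev_pid, next_pid);
}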
diff --git a/tools/perf/util/evsel.h b/tools/perf/util/evsel.h index b559929983bb..3ead0d59c03d 100644 --- a/tools/perf/util/evsel.h +++ b/tools/perf/util/evsel.h | |||
@@ -53,9 +53,10 @@ struct perf_evsel { | |||
53 | u64 *id; | 53 | u64 *id; |
54 | struct perf_counts *counts; | 54 | struct perf_counts *counts; |
55 | int idx; | 55 | int idx; |
56 | int ids; | 56 | u32 ids; |
57 | struct hists hists; | 57 | struct hists hists; |
58 | char *name; | 58 | char *name; |
59 | struct event_format *tp_format; | ||
59 | union { | 60 | union { |
60 | void *priv; | 61 | void *priv; |
61 | off_t id_offset; | 62 | off_t id_offset; |
@@ -65,8 +66,14 @@ struct perf_evsel { | |||
65 | void *func; | 66 | void *func; |
66 | void *data; | 67 | void *data; |
67 | } handler; | 68 | } handler; |
69 | struct cpu_map *cpus; | ||
68 | unsigned int sample_size; | 70 | unsigned int sample_size; |
69 | bool supported; | 71 | bool supported; |
72 | bool needs_swap; | ||
73 | /* parse modifier helper */ | ||
74 | int exclude_GH; | ||
75 | struct perf_evsel *leader; | ||
76 | char *group_name; | ||
70 | }; | 77 | }; |
71 | 78 | ||
72 | struct cpu_map; | 79 | struct cpu_map; |
@@ -75,6 +82,10 @@ struct perf_evlist; | |||
75 | struct perf_record_opts; | 82 | struct perf_record_opts; |
76 | 83 | ||
77 | struct perf_evsel *perf_evsel__new(struct perf_event_attr *attr, int idx); | 84 | struct perf_evsel *perf_evsel__new(struct perf_event_attr *attr, int idx); |
85 | struct perf_evsel *perf_evsel__newtp(const char *sys, const char *name, int idx); | ||
86 | |||
87 | struct event_format *event_format__new(const char *sys, const char *name); | ||
88 | |||
78 | void perf_evsel__init(struct perf_evsel *evsel, | 89 | void perf_evsel__init(struct perf_evsel *evsel, |
79 | struct perf_event_attr *attr, int idx); | 90 | struct perf_event_attr *attr, int idx); |
80 | void perf_evsel__exit(struct perf_evsel *evsel); | 91 | void perf_evsel__exit(struct perf_evsel *evsel); |
@@ -92,8 +103,10 @@ extern const char *perf_evsel__hw_cache[PERF_COUNT_HW_CACHE_MAX] | |||
92 | [PERF_EVSEL__MAX_ALIASES]; | 103 | [PERF_EVSEL__MAX_ALIASES]; |
93 | extern const char *perf_evsel__hw_cache_op[PERF_COUNT_HW_CACHE_OP_MAX] | 104 | extern const char *perf_evsel__hw_cache_op[PERF_COUNT_HW_CACHE_OP_MAX] |
94 | [PERF_EVSEL__MAX_ALIASES]; | 105 | [PERF_EVSEL__MAX_ALIASES]; |
95 | const char *perf_evsel__hw_cache_result[PERF_COUNT_HW_CACHE_RESULT_MAX] | 106 | extern const char *perf_evsel__hw_cache_result[PERF_COUNT_HW_CACHE_RESULT_MAX] |
96 | [PERF_EVSEL__MAX_ALIASES]; | 107 | [PERF_EVSEL__MAX_ALIASES]; |
108 | extern const char *perf_evsel__hw_names[PERF_COUNT_HW_MAX]; | ||
109 | extern const char *perf_evsel__sw_names[PERF_COUNT_SW_MAX]; | ||
97 | int __perf_evsel__hw_cache_type_op_res_name(u8 type, u8 op, u8 result, | 110 | int __perf_evsel__hw_cache_type_op_res_name(u8 type, u8 op, u8 result, |
98 | char *bf, size_t size); | 111 | char *bf, size_t size); |
99 | const char *perf_evsel__name(struct perf_evsel *evsel); | 112 | const char *perf_evsel__name(struct perf_evsel *evsel); |
@@ -105,21 +118,46 @@ void perf_evsel__free_fd(struct perf_evsel *evsel); | |||
105 | void perf_evsel__free_id(struct perf_evsel *evsel); | 118 | void perf_evsel__free_id(struct perf_evsel *evsel); |
106 | void perf_evsel__close_fd(struct perf_evsel *evsel, int ncpus, int nthreads); | 119 | void perf_evsel__close_fd(struct perf_evsel *evsel, int ncpus, int nthreads); |
107 | 120 | ||
121 | int perf_evsel__set_filter(struct perf_evsel *evsel, int ncpus, int nthreads, | ||
122 | const char *filter); | ||
123 | |||
108 | int perf_evsel__open_per_cpu(struct perf_evsel *evsel, | 124 | int perf_evsel__open_per_cpu(struct perf_evsel *evsel, |
109 | struct cpu_map *cpus, bool group, | 125 | struct cpu_map *cpus); |
110 | struct xyarray *group_fds); | ||
111 | int perf_evsel__open_per_thread(struct perf_evsel *evsel, | 126 | int perf_evsel__open_per_thread(struct perf_evsel *evsel, |
112 | struct thread_map *threads, bool group, | 127 | struct thread_map *threads); |
113 | struct xyarray *group_fds); | ||
114 | int perf_evsel__open(struct perf_evsel *evsel, struct cpu_map *cpus, | 128 | int perf_evsel__open(struct perf_evsel *evsel, struct cpu_map *cpus, |
115 | struct thread_map *threads, bool group, | 129 | struct thread_map *threads); |
116 | struct xyarray *group_fds); | ||
117 | void perf_evsel__close(struct perf_evsel *evsel, int ncpus, int nthreads); | 130 | void perf_evsel__close(struct perf_evsel *evsel, int ncpus, int nthreads); |
118 | 131 | ||
132 | struct perf_sample; | ||
133 | |||
134 | void *perf_evsel__rawptr(struct perf_evsel *evsel, struct perf_sample *sample, | ||
135 | const char *name); | ||
136 | u64 perf_evsel__intval(struct perf_evsel *evsel, struct perf_sample *sample, | ||
137 | const char *name); | ||
138 | |||
139 | static inline char *perf_evsel__strval(struct perf_evsel *evsel, | ||
140 | struct perf_sample *sample, | ||
141 | const char *name) | ||
142 | { | ||
143 | return perf_evsel__rawptr(evsel, sample, name); | ||
144 | } | ||
145 | |||
146 | struct format_field; | ||
147 | |||
148 | struct format_field *perf_evsel__field(struct perf_evsel *evsel, const char *name); | ||
149 | |||
119 | #define perf_evsel__match(evsel, t, c) \ | 150 | #define perf_evsel__match(evsel, t, c) \ |
120 | (evsel->attr.type == PERF_TYPE_##t && \ | 151 | (evsel->attr.type == PERF_TYPE_##t && \ |
121 | evsel->attr.config == PERF_COUNT_##c) | 152 | evsel->attr.config == PERF_COUNT_##c) |
122 | 153 | ||
154 | static inline bool perf_evsel__match2(struct perf_evsel *e1, | ||
155 | struct perf_evsel *e2) | ||
156 | { | ||
157 | return (e1->attr.type == e2->attr.type) && | ||
158 | (e1->attr.config == e2->attr.config); | ||
159 | } | ||
160 | |||
123 | int __perf_evsel__read_on_cpu(struct perf_evsel *evsel, | 161 | int __perf_evsel__read_on_cpu(struct perf_evsel *evsel, |
124 | int cpu, int thread, bool scale); | 162 | int cpu, int thread, bool scale); |
125 | 163 | ||
@@ -181,5 +219,10 @@ static inline int perf_evsel__read_scaled(struct perf_evsel *evsel, | |||
181 | void hists__init(struct hists *hists); | 219 | void hists__init(struct hists *hists); |
182 | 220 | ||
183 | int perf_evsel__parse_sample(struct perf_evsel *evsel, union perf_event *event, | 221 | int perf_evsel__parse_sample(struct perf_evsel *evsel, union perf_event *event, |
184 | struct perf_sample *sample, bool swapped); | 222 | struct perf_sample *sample); |
223 | |||
224 | static inline struct perf_evsel *perf_evsel__next(struct perf_evsel *evsel) | ||
225 | { | ||
226 | return list_entry(evsel->node.next, struct perf_evsel, node); | ||
227 | } | ||
185 | #endif /* __PERF_EVSEL_H */ | 228 | #endif /* __PERF_EVSEL_H */ |
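The header also gains two small inline helpers: perf_evsel__match2() compares two evsels by attr type and config, and perf_evsel__next() steps to the following entry on the evlist. A usage sketch (hypothetical call site, assuming util/evsel.h and that 'leader' is not the last entry on the list):

#include <stdio.h>
#include "util/evsel.h"

static void check_pair(struct perf_evsel *leader)
{
	struct perf_evsel *pos = perf_evsel__next(leader);

	if (perf_evsel__match2(leader, pos))
		printf("duplicate event: same type and config\n");
}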
diff --git a/tools/perf/util/generate-cmdlist.sh b/tools/perf/util/generate-cmdlist.sh index f06f6fd148f8..389590c1ad21 100755 --- a/tools/perf/util/generate-cmdlist.sh +++ b/tools/perf/util/generate-cmdlist.sh | |||
@@ -21,4 +21,19 @@ do | |||
21 | p | 21 | p |
22 | }' "Documentation/perf-$cmd.txt" | 22 | }' "Documentation/perf-$cmd.txt" |
23 | done | 23 | done |
24 | |||
25 | echo "#ifndef NO_LIBELF_SUPPORT" | ||
26 | sed -n -e 's/^perf-\([^ ]*\)[ ].* full.*/\1/p' command-list.txt | | ||
27 | sort | | ||
28 | while read cmd | ||
29 | do | ||
30 | sed -n ' | ||
31 | /^NAME/,/perf-'"$cmd"'/H | ||
32 | ${ | ||
33 | x | ||
34 | s/.*perf-'"$cmd"' - \(.*\)/ {"'"$cmd"'", "\1"},/ | ||
35 | p | ||
36 | }' "Documentation/perf-$cmd.txt" | ||
37 | done | ||
38 | echo "#endif /* NO_LIBELF_SUPPORT */" | ||
24 | echo "};" | 39 | echo "};" |
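The extra loop emits a second set of command/description pairs wrapped in #ifndef NO_LIBELF_SUPPORT, so commands tagged "full" in command-list.txt only enter the generated array when the libelf build is enabled. An illustrative shape of the generated C output; the struct name and entries here are placeholders, not real generated content:

static struct cmdname_help common_cmds[] = {
	{"record", "Run a command and record its profile into perf.data"},
#ifndef NO_LIBELF_SUPPORT
	{"probe", "Define new dynamic tracepoints"},
#endif /* NO_LIBELF_SUPPORT */
};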
diff --git a/tools/perf/util/header.c b/tools/perf/util/header.c index 74ea3c2f8138..7daad237dea5 100644 --- a/tools/perf/util/header.c +++ b/tools/perf/util/header.c | |||
@@ -20,11 +20,14 @@ | |||
20 | #include "symbol.h" | 20 | #include "symbol.h" |
21 | #include "debug.h" | 21 | #include "debug.h" |
22 | #include "cpumap.h" | 22 | #include "cpumap.h" |
23 | #include "pmu.h" | ||
24 | #include "vdso.h" | ||
25 | #include "strbuf.h" | ||
23 | 26 | ||
24 | static bool no_buildid_cache = false; | 27 | static bool no_buildid_cache = false; |
25 | 28 | ||
26 | static int event_count; | 29 | static int trace_event_count; |
27 | static struct perf_trace_event_type *events; | 30 | static struct perf_trace_event_type *trace_events; |
28 | 31 | ||
29 | static u32 header_argc; | 32 | static u32 header_argc; |
30 | static const char **header_argv; | 33 | static const char **header_argv; |
@@ -36,24 +39,24 @@ int perf_header__push_event(u64 id, const char *name) | |||
36 | if (strlen(name) > MAX_EVENT_NAME) | 39 | if (strlen(name) > MAX_EVENT_NAME) |
37 | pr_warning("Event %s will be truncated\n", name); | 40 | pr_warning("Event %s will be truncated\n", name); |
38 | 41 | ||
39 | nevents = realloc(events, (event_count + 1) * sizeof(*events)); | 42 | nevents = realloc(trace_events, (trace_event_count + 1) * sizeof(*trace_events)); |
40 | if (nevents == NULL) | 43 | if (nevents == NULL) |
41 | return -ENOMEM; | 44 | return -ENOMEM; |
42 | events = nevents; | 45 | trace_events = nevents; |
43 | 46 | ||
44 | memset(&events[event_count], 0, sizeof(struct perf_trace_event_type)); | 47 | memset(&trace_events[trace_event_count], 0, sizeof(struct perf_trace_event_type)); |
45 | events[event_count].event_id = id; | 48 | trace_events[trace_event_count].event_id = id; |
46 | strncpy(events[event_count].name, name, MAX_EVENT_NAME - 1); | 49 | strncpy(trace_events[trace_event_count].name, name, MAX_EVENT_NAME - 1); |
47 | event_count++; | 50 | trace_event_count++; |
48 | return 0; | 51 | return 0; |
49 | } | 52 | } |
50 | 53 | ||
51 | char *perf_header__find_event(u64 id) | 54 | char *perf_header__find_event(u64 id) |
52 | { | 55 | { |
53 | int i; | 56 | int i; |
54 | for (i = 0 ; i < event_count; i++) { | 57 | for (i = 0 ; i < trace_event_count; i++) { |
55 | if (events[i].event_id == id) | 58 | if (trace_events[i].event_id == id) |
56 | return events[i].name; | 59 | return trace_events[i].name; |
57 | } | 60 | } |
58 | return NULL; | 61 | return NULL; |
59 | } | 62 | } |
@@ -128,7 +131,7 @@ static int do_write_string(int fd, const char *str) | |||
128 | int ret; | 131 | int ret; |
129 | 132 | ||
130 | olen = strlen(str) + 1; | 133 | olen = strlen(str) + 1; |
131 | len = ALIGN(olen, NAME_ALIGN); | 134 | len = PERF_ALIGN(olen, NAME_ALIGN); |
132 | 135 | ||
133 | /* write len, incl. \0 */ | 136 | /* write len, incl. \0 */ |
134 | ret = do_write(fd, &len, sizeof(len)); | 137 | ret = do_write(fd, &len, sizeof(len)); |
@@ -206,6 +209,29 @@ perf_header__set_cmdline(int argc, const char **argv) | |||
206 | continue; \ | 209 | continue; \ |
207 | else | 210 | else |
208 | 211 | ||
212 | static int write_buildid(char *name, size_t name_len, u8 *build_id, | ||
213 | pid_t pid, u16 misc, int fd) | ||
214 | { | ||
215 | int err; | ||
216 | struct build_id_event b; | ||
217 | size_t len; | ||
218 | |||
219 | len = name_len + 1; | ||
220 | len = PERF_ALIGN(len, NAME_ALIGN); | ||
221 | |||
222 | memset(&b, 0, sizeof(b)); | ||
223 | memcpy(&b.build_id, build_id, BUILD_ID_SIZE); | ||
224 | b.pid = pid; | ||
225 | b.header.misc = misc; | ||
226 | b.header.size = sizeof(b) + len; | ||
227 | |||
228 | err = do_write(fd, &b, sizeof(b)); | ||
229 | if (err < 0) | ||
230 | return err; | ||
231 | |||
232 | return write_padded(fd, name, name_len + 1, len); | ||
233 | } | ||
234 | |||
209 | static int __dsos__write_buildid_table(struct list_head *head, pid_t pid, | 235 | static int __dsos__write_buildid_table(struct list_head *head, pid_t pid, |
210 | u16 misc, int fd) | 236 | u16 misc, int fd) |
211 | { | 237 | { |
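write_buildid() factors the record emission out of the table walker: each entry is a build_id_event header (type/misc/size, pid, the build id padded to a u64 boundary) followed by the DSO name, NUL-terminated and padded up to NAME_ALIGN. A standalone sketch of that record; the field order mirrors struct build_id_event in util/header.h, but the constants and layout are reproduced here as assumptions:

#include <stdint.h>

#define BUILD_ID_SIZE	20			/* SHA-1 build id */
#define NAME_ALIGN	64			/* assumed, as in util/header.c */
#define PERF_ALIGN(x, a)	(((x) + (a) - 1) & ~((a) - 1))

struct buildid_record {
	struct {
		uint32_t type;
		uint16_t misc;			/* kernel vs. user DSO */
		uint16_t size;			/* sizeof(fixed part) + padded name */
	} header;
	int32_t	 pid;
	uint8_t	 build_id[PERF_ALIGN(BUILD_ID_SIZE, sizeof(uint64_t))];
	char	 filename[];			/* NUL-padded to NAME_ALIGN */
};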
@@ -213,24 +239,23 @@ static int __dsos__write_buildid_table(struct list_head *head, pid_t pid, | |||
213 | 239 | ||
214 | dsos__for_each_with_build_id(pos, head) { | 240 | dsos__for_each_with_build_id(pos, head) { |
215 | int err; | 241 | int err; |
216 | struct build_id_event b; | 242 | char *name; |
217 | size_t len; | 243 | size_t name_len; |
218 | 244 | ||
219 | if (!pos->hit) | 245 | if (!pos->hit) |
220 | continue; | 246 | continue; |
221 | len = pos->long_name_len + 1; | 247 | |
222 | len = ALIGN(len, NAME_ALIGN); | 248 | if (is_vdso_map(pos->short_name)) { |
223 | memset(&b, 0, sizeof(b)); | 249 | name = (char *) VDSO__MAP_NAME; |
224 | memcpy(&b.build_id, pos->build_id, sizeof(pos->build_id)); | 250 | name_len = sizeof(VDSO__MAP_NAME) + 1; |
225 | b.pid = pid; | 251 | } else { |
226 | b.header.misc = misc; | 252 | name = pos->long_name; |
227 | b.header.size = sizeof(b) + len; | 253 | name_len = pos->long_name_len + 1; |
228 | err = do_write(fd, &b, sizeof(b)); | 254 | } |
229 | if (err < 0) | 255 | |
230 | return err; | 256 | err = write_buildid(name, name_len, pos->build_id, |
231 | err = write_padded(fd, pos->long_name, | 257 | pid, misc, fd); |
232 | pos->long_name_len + 1, len); | 258 | if (err) |
233 | if (err < 0) | ||
234 | return err; | 259 | return err; |
235 | } | 260 | } |
236 | 261 | ||
@@ -276,19 +301,20 @@ static int dsos__write_buildid_table(struct perf_header *header, int fd) | |||
276 | } | 301 | } |
277 | 302 | ||
278 | int build_id_cache__add_s(const char *sbuild_id, const char *debugdir, | 303 | int build_id_cache__add_s(const char *sbuild_id, const char *debugdir, |
279 | const char *name, bool is_kallsyms) | 304 | const char *name, bool is_kallsyms, bool is_vdso) |
280 | { | 305 | { |
281 | const size_t size = PATH_MAX; | 306 | const size_t size = PATH_MAX; |
282 | char *realname, *filename = zalloc(size), | 307 | char *realname, *filename = zalloc(size), |
283 | *linkname = zalloc(size), *targetname; | 308 | *linkname = zalloc(size), *targetname; |
284 | int len, err = -1; | 309 | int len, err = -1; |
310 | bool slash = is_kallsyms || is_vdso; | ||
285 | 311 | ||
286 | if (is_kallsyms) { | 312 | if (is_kallsyms) { |
287 | if (symbol_conf.kptr_restrict) { | 313 | if (symbol_conf.kptr_restrict) { |
288 | pr_debug("Not caching a kptr_restrict'ed /proc/kallsyms\n"); | 314 | pr_debug("Not caching a kptr_restrict'ed /proc/kallsyms\n"); |
289 | return 0; | 315 | return 0; |
290 | } | 316 | } |
291 | realname = (char *)name; | 317 | realname = (char *) name; |
292 | } else | 318 | } else |
293 | realname = realpath(name, NULL); | 319 | realname = realpath(name, NULL); |
294 | 320 | ||
@@ -296,7 +322,8 @@ int build_id_cache__add_s(const char *sbuild_id, const char *debugdir, | |||
296 | goto out_free; | 322 | goto out_free; |
297 | 323 | ||
298 | len = scnprintf(filename, size, "%s%s%s", | 324 | len = scnprintf(filename, size, "%s%s%s", |
299 | debugdir, is_kallsyms ? "/" : "", realname); | 325 | debugdir, slash ? "/" : "", |
326 | is_vdso ? VDSO__MAP_NAME : realname); | ||
300 | if (mkdir_p(filename, 0755)) | 327 | if (mkdir_p(filename, 0755)) |
301 | goto out_free; | 328 | goto out_free; |
302 | 329 | ||
@@ -332,13 +359,14 @@ out_free: | |||
332 | 359 | ||
333 | static int build_id_cache__add_b(const u8 *build_id, size_t build_id_size, | 360 | static int build_id_cache__add_b(const u8 *build_id, size_t build_id_size, |
334 | const char *name, const char *debugdir, | 361 | const char *name, const char *debugdir, |
335 | bool is_kallsyms) | 362 | bool is_kallsyms, bool is_vdso) |
336 | { | 363 | { |
337 | char sbuild_id[BUILD_ID_SIZE * 2 + 1]; | 364 | char sbuild_id[BUILD_ID_SIZE * 2 + 1]; |
338 | 365 | ||
339 | build_id__sprintf(build_id, build_id_size, sbuild_id); | 366 | build_id__sprintf(build_id, build_id_size, sbuild_id); |
340 | 367 | ||
341 | return build_id_cache__add_s(sbuild_id, debugdir, name, is_kallsyms); | 368 | return build_id_cache__add_s(sbuild_id, debugdir, name, |
369 | is_kallsyms, is_vdso); | ||
342 | } | 370 | } |
343 | 371 | ||
344 | int build_id_cache__remove_s(const char *sbuild_id, const char *debugdir) | 372 | int build_id_cache__remove_s(const char *sbuild_id, const char *debugdir) |
@@ -382,9 +410,11 @@ out_free: | |||
382 | static int dso__cache_build_id(struct dso *dso, const char *debugdir) | 410 | static int dso__cache_build_id(struct dso *dso, const char *debugdir) |
383 | { | 411 | { |
384 | bool is_kallsyms = dso->kernel && dso->long_name[0] != '/'; | 412 | bool is_kallsyms = dso->kernel && dso->long_name[0] != '/'; |
413 | bool is_vdso = is_vdso_map(dso->short_name); | ||
385 | 414 | ||
386 | return build_id_cache__add_b(dso->build_id, sizeof(dso->build_id), | 415 | return build_id_cache__add_b(dso->build_id, sizeof(dso->build_id), |
387 | dso->long_name, debugdir, is_kallsyms); | 416 | dso->long_name, debugdir, |
417 | is_kallsyms, is_vdso); | ||
388 | } | 418 | } |
389 | 419 | ||
390 | static int __dsos__cache_build_ids(struct list_head *head, const char *debugdir) | 420 | static int __dsos__cache_build_ids(struct list_head *head, const char *debugdir) |
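dso__cache_build_id() now detects the VDSO map and passes is_vdso down, so the VDSO is cached under the synthetic VDSO__MAP_NAME instead of a filesystem path that does not exist. A sketch of the directory name composed by build_id_cache__add_s() after this change (illustrative; "[vdso]" stands in for VDSO__MAP_NAME from util/vdso.h):

#include <stdio.h>

static void cache_dir(char *buf, size_t sz, const char *debugdir,
		      const char *realname, int is_kallsyms, int is_vdso)
{
	int slash = is_kallsyms || is_vdso;	/* synthetic names need a '/' */

	snprintf(buf, sz, "%s%s%s", debugdir, slash ? "/" : "",
		 is_vdso ? "[vdso]" : realname);
}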
@@ -446,7 +476,7 @@ static bool perf_session__read_build_ids(struct perf_session *session, bool with | |||
446 | return ret; | 476 | return ret; |
447 | } | 477 | } |
448 | 478 | ||
449 | static int write_tracing_data(int fd, struct perf_header *h __used, | 479 | static int write_tracing_data(int fd, struct perf_header *h __maybe_unused, |
450 | struct perf_evlist *evlist) | 480 | struct perf_evlist *evlist) |
451 | { | 481 | { |
452 | return read_tracing_data(fd, &evlist->entries); | 482 | return read_tracing_data(fd, &evlist->entries); |
@@ -454,7 +484,7 @@ static int write_tracing_data(int fd, struct perf_header *h __used, | |||
454 | 484 | ||
455 | 485 | ||
456 | static int write_build_id(int fd, struct perf_header *h, | 486 | static int write_build_id(int fd, struct perf_header *h, |
457 | struct perf_evlist *evlist __used) | 487 | struct perf_evlist *evlist __maybe_unused) |
458 | { | 488 | { |
459 | struct perf_session *session; | 489 | struct perf_session *session; |
460 | int err; | 490 | int err; |
@@ -475,8 +505,8 @@ static int write_build_id(int fd, struct perf_header *h, | |||
475 | return 0; | 505 | return 0; |
476 | } | 506 | } |
477 | 507 | ||
478 | static int write_hostname(int fd, struct perf_header *h __used, | 508 | static int write_hostname(int fd, struct perf_header *h __maybe_unused, |
479 | struct perf_evlist *evlist __used) | 509 | struct perf_evlist *evlist __maybe_unused) |
480 | { | 510 | { |
481 | struct utsname uts; | 511 | struct utsname uts; |
482 | int ret; | 512 | int ret; |
@@ -488,8 +518,8 @@ static int write_hostname(int fd, struct perf_header *h __used, | |||
488 | return do_write_string(fd, uts.nodename); | 518 | return do_write_string(fd, uts.nodename); |
489 | } | 519 | } |
490 | 520 | ||
491 | static int write_osrelease(int fd, struct perf_header *h __used, | 521 | static int write_osrelease(int fd, struct perf_header *h __maybe_unused, |
492 | struct perf_evlist *evlist __used) | 522 | struct perf_evlist *evlist __maybe_unused) |
493 | { | 523 | { |
494 | struct utsname uts; | 524 | struct utsname uts; |
495 | int ret; | 525 | int ret; |
@@ -501,8 +531,8 @@ static int write_osrelease(int fd, struct perf_header *h __used, | |||
501 | return do_write_string(fd, uts.release); | 531 | return do_write_string(fd, uts.release); |
502 | } | 532 | } |
503 | 533 | ||
504 | static int write_arch(int fd, struct perf_header *h __used, | 534 | static int write_arch(int fd, struct perf_header *h __maybe_unused, |
505 | struct perf_evlist *evlist __used) | 535 | struct perf_evlist *evlist __maybe_unused) |
506 | { | 536 | { |
507 | struct utsname uts; | 537 | struct utsname uts; |
508 | int ret; | 538 | int ret; |
@@ -514,14 +544,14 @@ static int write_arch(int fd, struct perf_header *h __used, | |||
514 | return do_write_string(fd, uts.machine); | 544 | return do_write_string(fd, uts.machine); |
515 | } | 545 | } |
516 | 546 | ||
517 | static int write_version(int fd, struct perf_header *h __used, | 547 | static int write_version(int fd, struct perf_header *h __maybe_unused, |
518 | struct perf_evlist *evlist __used) | 548 | struct perf_evlist *evlist __maybe_unused) |
519 | { | 549 | { |
520 | return do_write_string(fd, perf_version_string); | 550 | return do_write_string(fd, perf_version_string); |
521 | } | 551 | } |
522 | 552 | ||
523 | static int write_cpudesc(int fd, struct perf_header *h __used, | 553 | static int write_cpudesc(int fd, struct perf_header *h __maybe_unused, |
524 | struct perf_evlist *evlist __used) | 554 | struct perf_evlist *evlist __maybe_unused) |
525 | { | 555 | { |
526 | #ifndef CPUINFO_PROC | 556 | #ifndef CPUINFO_PROC |
527 | #define CPUINFO_PROC NULL | 557 | #define CPUINFO_PROC NULL |
@@ -579,8 +609,8 @@ done: | |||
579 | return ret; | 609 | return ret; |
580 | } | 610 | } |
581 | 611 | ||
582 | static int write_nrcpus(int fd, struct perf_header *h __used, | 612 | static int write_nrcpus(int fd, struct perf_header *h __maybe_unused, |
583 | struct perf_evlist *evlist __used) | 613 | struct perf_evlist *evlist __maybe_unused) |
584 | { | 614 | { |
585 | long nr; | 615 | long nr; |
586 | u32 nrc, nra; | 616 | u32 nrc, nra; |
@@ -605,15 +635,14 @@ static int write_nrcpus(int fd, struct perf_header *h __used, | |||
605 | return do_write(fd, &nra, sizeof(nra)); | 635 | return do_write(fd, &nra, sizeof(nra)); |
606 | } | 636 | } |
607 | 637 | ||
608 | static int write_event_desc(int fd, struct perf_header *h __used, | 638 | static int write_event_desc(int fd, struct perf_header *h __maybe_unused, |
609 | struct perf_evlist *evlist) | 639 | struct perf_evlist *evlist) |
610 | { | 640 | { |
611 | struct perf_evsel *attr; | 641 | struct perf_evsel *evsel; |
612 | u32 nre = 0, nri, sz; | 642 | u32 nre, nri, sz; |
613 | int ret; | 643 | int ret; |
614 | 644 | ||
615 | list_for_each_entry(attr, &evlist->entries, node) | 645 | nre = evlist->nr_entries; |
616 | nre++; | ||
617 | 646 | ||
618 | /* | 647 | /* |
619 | * write number of events | 648 | * write number of events |
@@ -625,14 +654,14 @@ static int write_event_desc(int fd, struct perf_header *h __used, | |||
625 | /* | 654 | /* |
626 | * size of perf_event_attr struct | 655 | * size of perf_event_attr struct |
627 | */ | 656 | */ |
628 | sz = (u32)sizeof(attr->attr); | 657 | sz = (u32)sizeof(evsel->attr); |
629 | ret = do_write(fd, &sz, sizeof(sz)); | 658 | ret = do_write(fd, &sz, sizeof(sz)); |
630 | if (ret < 0) | 659 | if (ret < 0) |
631 | return ret; | 660 | return ret; |
632 | 661 | ||
633 | list_for_each_entry(attr, &evlist->entries, node) { | 662 | list_for_each_entry(evsel, &evlist->entries, node) { |
634 | 663 | ||
635 | ret = do_write(fd, &attr->attr, sz); | 664 | ret = do_write(fd, &evsel->attr, sz); |
636 | if (ret < 0) | 665 | if (ret < 0) |
637 | return ret; | 666 | return ret; |
638 | /* | 667 | /* |
@@ -642,7 +671,7 @@ static int write_event_desc(int fd, struct perf_header *h __used, | |||
642 | * copy into an nri to be independent of the | 671 | * copy into an nri to be independent of the |
643 | * type of ids, | 672 | * type of ids, |
644 | */ | 673 | */ |
645 | nri = attr->ids; | 674 | nri = evsel->ids; |
646 | ret = do_write(fd, &nri, sizeof(nri)); | 675 | ret = do_write(fd, &nri, sizeof(nri)); |
647 | if (ret < 0) | 676 | if (ret < 0) |
648 | return ret; | 677 | return ret; |
@@ -650,21 +679,21 @@ static int write_event_desc(int fd, struct perf_header *h __used, | |||
650 | /* | 679 | /* |
651 | * write event string as passed on cmdline | 680 | * write event string as passed on cmdline |
652 | */ | 681 | */ |
653 | ret = do_write_string(fd, perf_evsel__name(attr)); | 682 | ret = do_write_string(fd, perf_evsel__name(evsel)); |
654 | if (ret < 0) | 683 | if (ret < 0) |
655 | return ret; | 684 | return ret; |
656 | /* | 685 | /* |
657 | * write unique ids for this event | 686 | * write unique ids for this event |
658 | */ | 687 | */ |
659 | ret = do_write(fd, attr->id, attr->ids * sizeof(u64)); | 688 | ret = do_write(fd, evsel->id, evsel->ids * sizeof(u64)); |
660 | if (ret < 0) | 689 | if (ret < 0) |
661 | return ret; | 690 | return ret; |
662 | } | 691 | } |
663 | return 0; | 692 | return 0; |
664 | } | 693 | } |
665 | 694 | ||
666 | static int write_cmdline(int fd, struct perf_header *h __used, | 695 | static int write_cmdline(int fd, struct perf_header *h __maybe_unused, |
667 | struct perf_evlist *evlist __used) | 696 | struct perf_evlist *evlist __maybe_unused) |
668 | { | 697 | { |
669 | char buf[MAXPATHLEN]; | 698 | char buf[MAXPATHLEN]; |
670 | char proc[32]; | 699 | char proc[32]; |
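write_event_desc() now takes the event count straight from evlist->nr_entries and iterates over evsels rather than bare attrs. The section it emits (and that read_event_desc() consumes further down) is: a u32 event count, a u32 on-file attr size, then per event the attr bytes, a u32 id count, the event name in do_write_string() form, and the u64 ids. A small sketch of the resulting per-entry size, with NAME_ALIGN assumed as in util/header.c:

#include <stddef.h>
#include <stdint.h>
#include <string.h>

#define NAME_ALIGN	64
#define PERF_ALIGN(x, a)	(((x) + (a) - 1) & ~((size_t)(a) - 1))

static size_t event_desc_entry_size(size_t attr_size, const char *name,
				    uint32_t nr_ids)
{
	size_t name_bytes = sizeof(uint32_t) +		/* padded length word */
			    PERF_ALIGN(strlen(name) + 1, NAME_ALIGN);

	return attr_size + sizeof(uint32_t) +		/* nr_ids */
	       name_bytes + nr_ids * sizeof(uint64_t);	/* id values */
}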
@@ -832,8 +861,8 @@ static struct cpu_topo *build_cpu_topology(void) | |||
832 | return tp; | 861 | return tp; |
833 | } | 862 | } |
834 | 863 | ||
835 | static int write_cpu_topology(int fd, struct perf_header *h __used, | 864 | static int write_cpu_topology(int fd, struct perf_header *h __maybe_unused, |
836 | struct perf_evlist *evlist __used) | 865 | struct perf_evlist *evlist __maybe_unused) |
837 | { | 866 | { |
838 | struct cpu_topo *tp; | 867 | struct cpu_topo *tp; |
839 | u32 i; | 868 | u32 i; |
@@ -868,8 +897,8 @@ done: | |||
868 | 897 | ||
869 | 898 | ||
870 | 899 | ||
871 | static int write_total_mem(int fd, struct perf_header *h __used, | 900 | static int write_total_mem(int fd, struct perf_header *h __maybe_unused, |
872 | struct perf_evlist *evlist __used) | 901 | struct perf_evlist *evlist __maybe_unused) |
873 | { | 902 | { |
874 | char *buf = NULL; | 903 | char *buf = NULL; |
875 | FILE *fp; | 904 | FILE *fp; |
@@ -954,8 +983,8 @@ done: | |||
954 | return ret; | 983 | return ret; |
955 | } | 984 | } |
956 | 985 | ||
957 | static int write_numa_topology(int fd, struct perf_header *h __used, | 986 | static int write_numa_topology(int fd, struct perf_header *h __maybe_unused, |
958 | struct perf_evlist *evlist __used) | 987 | struct perf_evlist *evlist __maybe_unused) |
959 | { | 988 | { |
960 | char *buf = NULL; | 989 | char *buf = NULL; |
961 | size_t len = 0; | 990 | size_t len = 0; |
@@ -1004,16 +1033,56 @@ done: | |||
1004 | } | 1033 | } |
1005 | 1034 | ||
1006 | /* | 1035 | /* |
1036 | * File format: | ||
1037 | * | ||
1038 | * struct pmu_mappings { | ||
1039 | * u32 pmu_num; | ||
1040 | * struct pmu_map { | ||
1041 | * u32 type; | ||
1042 | * char name[]; | ||
1043 | * }[pmu_num]; | ||
1044 | * }; | ||
1045 | */ | ||
1046 | |||
1047 | static int write_pmu_mappings(int fd, struct perf_header *h __maybe_unused, | ||
1048 | struct perf_evlist *evlist __maybe_unused) | ||
1049 | { | ||
1050 | struct perf_pmu *pmu = NULL; | ||
1051 | off_t offset = lseek(fd, 0, SEEK_CUR); | ||
1052 | __u32 pmu_num = 0; | ||
1053 | |||
1054 | /* write real pmu_num later */ | ||
1055 | do_write(fd, &pmu_num, sizeof(pmu_num)); | ||
1056 | |||
1057 | while ((pmu = perf_pmu__scan(pmu))) { | ||
1058 | if (!pmu->name) | ||
1059 | continue; | ||
1060 | pmu_num++; | ||
1061 | do_write(fd, &pmu->type, sizeof(pmu->type)); | ||
1062 | do_write_string(fd, pmu->name); | ||
1063 | } | ||
1064 | |||
1065 | if (pwrite(fd, &pmu_num, sizeof(pmu_num), offset) != sizeof(pmu_num)) { | ||
1066 | /* discard all */ | ||
1067 | lseek(fd, offset, SEEK_SET); | ||
1068 | return -1; | ||
1069 | } | ||
1070 | |||
1071 | return 0; | ||
1072 | } | ||
1073 | |||
1074 | /* | ||
1007 | * default get_cpuid(): nothing gets recorded | 1075 | * default get_cpuid(): nothing gets recorded |
1008 | * actual implementation must be in arch/$(ARCH)/util/header.c | 1076 | * actual implementation must be in arch/$(ARCH)/util/header.c |
1009 | */ | 1077 | */ |
1010 | int __attribute__((weak)) get_cpuid(char *buffer __used, size_t sz __used) | 1078 | int __attribute__ ((weak)) get_cpuid(char *buffer __maybe_unused, |
1079 | size_t sz __maybe_unused) | ||
1011 | { | 1080 | { |
1012 | return -1; | 1081 | return -1; |
1013 | } | 1082 | } |
1014 | 1083 | ||
1015 | static int write_cpuid(int fd, struct perf_header *h __used, | 1084 | static int write_cpuid(int fd, struct perf_header *h __maybe_unused, |
1016 | struct perf_evlist *evlist __used) | 1085 | struct perf_evlist *evlist __maybe_unused) |
1017 | { | 1086 | { |
1018 | char buffer[64]; | 1087 | char buffer[64]; |
1019 | int ret; | 1088 | int ret; |
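write_pmu_mappings() writes a placeholder u32 count, appends one (u32 type, name) pair per named PMU, then patches the real count back in with pwrite(); the names use the do_write_string() encoding (a u32 padded length, then the NUL-padded string). A standalone reader sketch for that layout; byte-swapping for cross-endian files is omitted:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

static char *read_padded_string(FILE *f)
{
	uint32_t len;
	char *buf;

	if (fread(&len, sizeof(len), 1, f) != 1 || !len)
		return NULL;
	buf = malloc(len);
	if (!buf || fread(buf, 1, len, f) != len) {
		free(buf);
		return NULL;
	}
	return buf;			/* NUL terminator lies inside the padding */
}

static void dump_pmu_mappings(FILE *f)
{
	uint32_t nr, type;
	char *name;

	if (fread(&nr, sizeof(nr), 1, f) != 1)
		return;
	while (nr--) {
		if (fread(&type, sizeof(type), 1, f) != 1)
			return;
		name = read_padded_string(f);
		if (!name)
			return;
		printf("pmu %s = type %u\n", name, type);
		free(name);
	}
}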
@@ -1027,133 +1096,113 @@ write_it: | |||
1027 | return do_write_string(fd, buffer); | 1096 | return do_write_string(fd, buffer); |
1028 | } | 1097 | } |
1029 | 1098 | ||
1030 | static int write_branch_stack(int fd __used, struct perf_header *h __used, | 1099 | static int write_branch_stack(int fd __maybe_unused, |
1031 | struct perf_evlist *evlist __used) | 1100 | struct perf_header *h __maybe_unused, |
1101 | struct perf_evlist *evlist __maybe_unused) | ||
1032 | { | 1102 | { |
1033 | return 0; | 1103 | return 0; |
1034 | } | 1104 | } |
1035 | 1105 | ||
1036 | static void print_hostname(struct perf_header *ph, int fd, FILE *fp) | 1106 | static void print_hostname(struct perf_header *ph, int fd __maybe_unused, |
1107 | FILE *fp) | ||
1037 | { | 1108 | { |
1038 | char *str = do_read_string(fd, ph); | 1109 | fprintf(fp, "# hostname : %s\n", ph->env.hostname); |
1039 | fprintf(fp, "# hostname : %s\n", str); | ||
1040 | free(str); | ||
1041 | } | 1110 | } |
1042 | 1111 | ||
1043 | static void print_osrelease(struct perf_header *ph, int fd, FILE *fp) | 1112 | static void print_osrelease(struct perf_header *ph, int fd __maybe_unused, |
1113 | FILE *fp) | ||
1044 | { | 1114 | { |
1045 | char *str = do_read_string(fd, ph); | 1115 | fprintf(fp, "# os release : %s\n", ph->env.os_release); |
1046 | fprintf(fp, "# os release : %s\n", str); | ||
1047 | free(str); | ||
1048 | } | 1116 | } |
1049 | 1117 | ||
1050 | static void print_arch(struct perf_header *ph, int fd, FILE *fp) | 1118 | static void print_arch(struct perf_header *ph, int fd __maybe_unused, FILE *fp) |
1051 | { | 1119 | { |
1052 | char *str = do_read_string(fd, ph); | 1120 | fprintf(fp, "# arch : %s\n", ph->env.arch); |
1053 | fprintf(fp, "# arch : %s\n", str); | ||
1054 | free(str); | ||
1055 | } | 1121 | } |
1056 | 1122 | ||
1057 | static void print_cpudesc(struct perf_header *ph, int fd, FILE *fp) | 1123 | static void print_cpudesc(struct perf_header *ph, int fd __maybe_unused, |
1124 | FILE *fp) | ||
1058 | { | 1125 | { |
1059 | char *str = do_read_string(fd, ph); | 1126 | fprintf(fp, "# cpudesc : %s\n", ph->env.cpu_desc); |
1060 | fprintf(fp, "# cpudesc : %s\n", str); | ||
1061 | free(str); | ||
1062 | } | 1127 | } |
1063 | 1128 | ||
1064 | static void print_nrcpus(struct perf_header *ph, int fd, FILE *fp) | 1129 | static void print_nrcpus(struct perf_header *ph, int fd __maybe_unused, |
1130 | FILE *fp) | ||
1065 | { | 1131 | { |
1066 | ssize_t ret; | 1132 | fprintf(fp, "# nrcpus online : %u\n", ph->env.nr_cpus_online); |
1067 | u32 nr; | 1133 | fprintf(fp, "# nrcpus avail : %u\n", ph->env.nr_cpus_avail); |
1068 | |||
1069 | ret = read(fd, &nr, sizeof(nr)); | ||
1070 | if (ret != (ssize_t)sizeof(nr)) | ||
1071 | nr = -1; /* interpreted as error */ | ||
1072 | |||
1073 | if (ph->needs_swap) | ||
1074 | nr = bswap_32(nr); | ||
1075 | |||
1076 | fprintf(fp, "# nrcpus online : %u\n", nr); | ||
1077 | |||
1078 | ret = read(fd, &nr, sizeof(nr)); | ||
1079 | if (ret != (ssize_t)sizeof(nr)) | ||
1080 | nr = -1; /* interpreted as error */ | ||
1081 | |||
1082 | if (ph->needs_swap) | ||
1083 | nr = bswap_32(nr); | ||
1084 | |||
1085 | fprintf(fp, "# nrcpus avail : %u\n", nr); | ||
1086 | } | 1134 | } |
1087 | 1135 | ||
1088 | static void print_version(struct perf_header *ph, int fd, FILE *fp) | 1136 | static void print_version(struct perf_header *ph, int fd __maybe_unused, |
1137 | FILE *fp) | ||
1089 | { | 1138 | { |
1090 | char *str = do_read_string(fd, ph); | 1139 | fprintf(fp, "# perf version : %s\n", ph->env.version); |
1091 | fprintf(fp, "# perf version : %s\n", str); | ||
1092 | free(str); | ||
1093 | } | 1140 | } |
1094 | 1141 | ||
1095 | static void print_cmdline(struct perf_header *ph, int fd, FILE *fp) | 1142 | static void print_cmdline(struct perf_header *ph, int fd __maybe_unused, |
1143 | FILE *fp) | ||
1096 | { | 1144 | { |
1097 | ssize_t ret; | 1145 | int nr, i; |
1098 | char *str; | 1146 | char *str; |
1099 | u32 nr, i; | ||
1100 | |||
1101 | ret = read(fd, &nr, sizeof(nr)); | ||
1102 | if (ret != (ssize_t)sizeof(nr)) | ||
1103 | return; | ||
1104 | 1147 | ||
1105 | if (ph->needs_swap) | 1148 | nr = ph->env.nr_cmdline; |
1106 | nr = bswap_32(nr); | 1149 | str = ph->env.cmdline; |
1107 | 1150 | ||
1108 | fprintf(fp, "# cmdline : "); | 1151 | fprintf(fp, "# cmdline : "); |
1109 | 1152 | ||
1110 | for (i = 0; i < nr; i++) { | 1153 | for (i = 0; i < nr; i++) { |
1111 | str = do_read_string(fd, ph); | ||
1112 | fprintf(fp, "%s ", str); | 1154 | fprintf(fp, "%s ", str); |
1113 | free(str); | 1155 | str += strlen(str) + 1; |
1114 | } | 1156 | } |
1115 | fputc('\n', fp); | 1157 | fputc('\n', fp); |
1116 | } | 1158 | } |
1117 | 1159 | ||
1118 | static void print_cpu_topology(struct perf_header *ph, int fd, FILE *fp) | 1160 | static void print_cpu_topology(struct perf_header *ph, int fd __maybe_unused, |
1161 | FILE *fp) | ||
1119 | { | 1162 | { |
1120 | ssize_t ret; | 1163 | int nr, i; |
1121 | u32 nr, i; | ||
1122 | char *str; | 1164 | char *str; |
1123 | 1165 | ||
1124 | ret = read(fd, &nr, sizeof(nr)); | 1166 | nr = ph->env.nr_sibling_cores; |
1125 | if (ret != (ssize_t)sizeof(nr)) | 1167 | str = ph->env.sibling_cores; |
1126 | return; | ||
1127 | |||
1128 | if (ph->needs_swap) | ||
1129 | nr = bswap_32(nr); | ||
1130 | 1168 | ||
1131 | for (i = 0; i < nr; i++) { | 1169 | for (i = 0; i < nr; i++) { |
1132 | str = do_read_string(fd, ph); | ||
1133 | fprintf(fp, "# sibling cores : %s\n", str); | 1170 | fprintf(fp, "# sibling cores : %s\n", str); |
1134 | free(str); | 1171 | str += strlen(str) + 1; |
1135 | } | 1172 | } |
1136 | 1173 | ||
1137 | ret = read(fd, &nr, sizeof(nr)); | 1174 | nr = ph->env.nr_sibling_threads; |
1138 | if (ret != (ssize_t)sizeof(nr)) | 1175 | str = ph->env.sibling_threads; |
1139 | return; | ||
1140 | |||
1141 | if (ph->needs_swap) | ||
1142 | nr = bswap_32(nr); | ||
1143 | 1176 | ||
1144 | for (i = 0; i < nr; i++) { | 1177 | for (i = 0; i < nr; i++) { |
1145 | str = do_read_string(fd, ph); | ||
1146 | fprintf(fp, "# sibling threads : %s\n", str); | 1178 | fprintf(fp, "# sibling threads : %s\n", str); |
1147 | free(str); | 1179 | str += strlen(str) + 1; |
1148 | } | 1180 | } |
1149 | } | 1181 | } |
1150 | 1182 | ||
1151 | static void print_event_desc(struct perf_header *ph, int fd, FILE *fp) | 1183 | static void free_event_desc(struct perf_evsel *events) |
1184 | { | ||
1185 | struct perf_evsel *evsel; | ||
1186 | |||
1187 | if (!events) | ||
1188 | return; | ||
1189 | |||
1190 | for (evsel = events; evsel->attr.size; evsel++) { | ||
1191 | if (evsel->name) | ||
1192 | free(evsel->name); | ||
1193 | if (evsel->id) | ||
1194 | free(evsel->id); | ||
1195 | } | ||
1196 | |||
1197 | free(events); | ||
1198 | } | ||
1199 | |||
1200 | static struct perf_evsel * | ||
1201 | read_event_desc(struct perf_header *ph, int fd) | ||
1152 | { | 1202 | { |
1153 | struct perf_event_attr attr; | 1203 | struct perf_evsel *evsel, *events = NULL; |
1154 | uint64_t id; | 1204 | u64 *id; |
1155 | void *buf = NULL; | 1205 | void *buf = NULL; |
1156 | char *str; | ||
1157 | u32 nre, sz, nr, i, j; | 1206 | u32 nre, sz, nr, i, j; |
1158 | ssize_t ret; | 1207 | ssize_t ret; |
1159 | size_t msz; | 1208 | size_t msz; |
@@ -1173,18 +1222,22 @@ static void print_event_desc(struct perf_header *ph, int fd, FILE *fp) | |||
1173 | if (ph->needs_swap) | 1222 | if (ph->needs_swap) |
1174 | sz = bswap_32(sz); | 1223 | sz = bswap_32(sz); |
1175 | 1224 | ||
1176 | memset(&attr, 0, sizeof(attr)); | ||
1177 | |||
1178 | /* buffer to hold on file attr struct */ | 1225 | /* buffer to hold on file attr struct */ |
1179 | buf = malloc(sz); | 1226 | buf = malloc(sz); |
1180 | if (!buf) | 1227 | if (!buf) |
1181 | goto error; | 1228 | goto error; |
1182 | 1229 | ||
1183 | msz = sizeof(attr); | 1230 | /* the last event terminates with evsel->attr.size == 0: */ |
1231 | events = calloc(nre + 1, sizeof(*events)); | ||
1232 | if (!events) | ||
1233 | goto error; | ||
1234 | |||
1235 | msz = sizeof(evsel->attr); | ||
1184 | if (sz < msz) | 1236 | if (sz < msz) |
1185 | msz = sz; | 1237 | msz = sz; |
1186 | 1238 | ||
1187 | for (i = 0 ; i < nre; i++) { | 1239 | for (i = 0, evsel = events; i < nre; evsel++, i++) { |
1240 | evsel->idx = i; | ||
1188 | 1241 | ||
1189 | /* | 1242 | /* |
1190 | * must read entire on-file attr struct to | 1243 | * must read entire on-file attr struct to |
@@ -1197,146 +1250,188 @@ static void print_event_desc(struct perf_header *ph, int fd, FILE *fp) | |||
1197 | if (ph->needs_swap) | 1250 | if (ph->needs_swap) |
1198 | perf_event__attr_swap(buf); | 1251 | perf_event__attr_swap(buf); |
1199 | 1252 | ||
1200 | memcpy(&attr, buf, msz); | 1253 | memcpy(&evsel->attr, buf, msz); |
1201 | 1254 | ||
1202 | ret = read(fd, &nr, sizeof(nr)); | 1255 | ret = read(fd, &nr, sizeof(nr)); |
1203 | if (ret != (ssize_t)sizeof(nr)) | 1256 | if (ret != (ssize_t)sizeof(nr)) |
1204 | goto error; | 1257 | goto error; |
1205 | 1258 | ||
1206 | if (ph->needs_swap) | 1259 | if (ph->needs_swap) { |
1207 | nr = bswap_32(nr); | 1260 | nr = bswap_32(nr); |
1261 | evsel->needs_swap = true; | ||
1262 | } | ||
1208 | 1263 | ||
1209 | str = do_read_string(fd, ph); | 1264 | evsel->name = do_read_string(fd, ph); |
1210 | fprintf(fp, "# event : name = %s, ", str); | 1265 | |
1211 | free(str); | 1266 | if (!nr) |
1267 | continue; | ||
1268 | |||
1269 | id = calloc(nr, sizeof(*id)); | ||
1270 | if (!id) | ||
1271 | goto error; | ||
1272 | evsel->ids = nr; | ||
1273 | evsel->id = id; | ||
1274 | |||
1275 | for (j = 0 ; j < nr; j++) { | ||
1276 | ret = read(fd, id, sizeof(*id)); | ||
1277 | if (ret != (ssize_t)sizeof(*id)) | ||
1278 | goto error; | ||
1279 | if (ph->needs_swap) | ||
1280 | *id = bswap_64(*id); | ||
1281 | id++; | ||
1282 | } | ||
1283 | } | ||
1284 | out: | ||
1285 | if (buf) | ||
1286 | free(buf); | ||
1287 | return events; | ||
1288 | error: | ||
1289 | if (events) | ||
1290 | free_event_desc(events); | ||
1291 | events = NULL; | ||
1292 | goto out; | ||
1293 | } | ||
1294 | |||
1295 | static void print_event_desc(struct perf_header *ph, int fd, FILE *fp) | ||
1296 | { | ||
1297 | struct perf_evsel *evsel, *events = read_event_desc(ph, fd); | ||
1298 | u32 j; | ||
1299 | u64 *id; | ||
1300 | |||
1301 | if (!events) { | ||
1302 | fprintf(fp, "# event desc: not available or unable to read\n"); | ||
1303 | return; | ||
1304 | } | ||
1305 | |||
1306 | for (evsel = events; evsel->attr.size; evsel++) { | ||
1307 | fprintf(fp, "# event : name = %s, ", evsel->name); | ||
1212 | 1308 | ||
1213 | fprintf(fp, "type = %d, config = 0x%"PRIx64 | 1309 | fprintf(fp, "type = %d, config = 0x%"PRIx64 |
1214 | ", config1 = 0x%"PRIx64", config2 = 0x%"PRIx64, | 1310 | ", config1 = 0x%"PRIx64", config2 = 0x%"PRIx64, |
1215 | attr.type, | 1311 | evsel->attr.type, |
1216 | (u64)attr.config, | 1312 | (u64)evsel->attr.config, |
1217 | (u64)attr.config1, | 1313 | (u64)evsel->attr.config1, |
1218 | (u64)attr.config2); | 1314 | (u64)evsel->attr.config2); |
1219 | 1315 | ||
1220 | fprintf(fp, ", excl_usr = %d, excl_kern = %d", | 1316 | fprintf(fp, ", excl_usr = %d, excl_kern = %d", |
1221 | attr.exclude_user, | 1317 | evsel->attr.exclude_user, |
1222 | attr.exclude_kernel); | 1318 | evsel->attr.exclude_kernel); |
1223 | 1319 | ||
1224 | fprintf(fp, ", excl_host = %d, excl_guest = %d", | 1320 | fprintf(fp, ", excl_host = %d, excl_guest = %d", |
1225 | attr.exclude_host, | 1321 | evsel->attr.exclude_host, |
1226 | attr.exclude_guest); | 1322 | evsel->attr.exclude_guest); |
1227 | 1323 | ||
1228 | fprintf(fp, ", precise_ip = %d", attr.precise_ip); | 1324 | fprintf(fp, ", precise_ip = %d", evsel->attr.precise_ip); |
1229 | 1325 | ||
1230 | if (nr) | 1326 | if (evsel->ids) { |
1231 | fprintf(fp, ", id = {"); | 1327 | fprintf(fp, ", id = {"); |
1232 | 1328 | for (j = 0, id = evsel->id; j < evsel->ids; j++, id++) { | |
1233 | for (j = 0 ; j < nr; j++) { | 1329 | if (j) |
1234 | ret = read(fd, &id, sizeof(id)); | 1330 | fputc(',', fp); |
1235 | if (ret != (ssize_t)sizeof(id)) | 1331 | fprintf(fp, " %"PRIu64, *id); |
1236 | goto error; | 1332 | } |
1237 | |||
1238 | if (ph->needs_swap) | ||
1239 | id = bswap_64(id); | ||
1240 | |||
1241 | if (j) | ||
1242 | fputc(',', fp); | ||
1243 | |||
1244 | fprintf(fp, " %"PRIu64, id); | ||
1245 | } | ||
1246 | if (nr && j == nr) | ||
1247 | fprintf(fp, " }"); | 1333 | fprintf(fp, " }"); |
1334 | } | ||
1335 | |||
1248 | fputc('\n', fp); | 1336 | fputc('\n', fp); |
1249 | } | 1337 | } |
1250 | free(buf); | 1338 | |
1251 | return; | 1339 | free_event_desc(events); |
1252 | error: | ||
1253 | fprintf(fp, "# event desc: not available or unable to read\n"); | ||
1254 | } | 1340 | } |
1255 | 1341 | ||
1256 | static void print_total_mem(struct perf_header *h __used, int fd, FILE *fp) | 1342 | static void print_total_mem(struct perf_header *ph, int fd __maybe_unused, |
1343 | FILE *fp) | ||
1257 | { | 1344 | { |
1258 | uint64_t mem; | 1345 | fprintf(fp, "# total memory : %Lu kB\n", ph->env.total_mem); |
1259 | ssize_t ret; | ||
1260 | |||
1261 | ret = read(fd, &mem, sizeof(mem)); | ||
1262 | if (ret != sizeof(mem)) | ||
1263 | goto error; | ||
1264 | |||
1265 | if (h->needs_swap) | ||
1266 | mem = bswap_64(mem); | ||
1267 | |||
1268 | fprintf(fp, "# total memory : %"PRIu64" kB\n", mem); | ||
1269 | return; | ||
1270 | error: | ||
1271 | fprintf(fp, "# total memory : unknown\n"); | ||
1272 | } | 1346 | } |
1273 | 1347 | ||
1274 | static void print_numa_topology(struct perf_header *h __used, int fd, FILE *fp) | 1348 | static void print_numa_topology(struct perf_header *ph, int fd __maybe_unused, |
1349 | FILE *fp) | ||
1275 | { | 1350 | { |
1276 | ssize_t ret; | ||
1277 | u32 nr, c, i; | 1351 | u32 nr, c, i; |
1278 | char *str; | 1352 | char *str, *tmp; |
1279 | uint64_t mem_total, mem_free; | 1353 | uint64_t mem_total, mem_free; |
1280 | 1354 | ||
1281 | /* nr nodes */ | 1355 | /* nr nodes */ |
1282 | ret = read(fd, &nr, sizeof(nr)); | 1356 | nr = ph->env.nr_numa_nodes; |
1283 | if (ret != (ssize_t)sizeof(nr)) | 1357 | str = ph->env.numa_nodes; |
1284 | goto error; | ||
1285 | |||
1286 | if (h->needs_swap) | ||
1287 | nr = bswap_32(nr); | ||
1288 | 1358 | ||
1289 | for (i = 0; i < nr; i++) { | 1359 | for (i = 0; i < nr; i++) { |
1290 | |||
1291 | /* node number */ | 1360 | /* node number */ |
1292 | ret = read(fd, &c, sizeof(c)); | 1361 | c = strtoul(str, &tmp, 0); |
1293 | if (ret != (ssize_t)sizeof(c)) | 1362 | if (*tmp != ':') |
1294 | goto error; | 1363 | goto error; |
1295 | 1364 | ||
1296 | if (h->needs_swap) | 1365 | str = tmp + 1; |
1297 | c = bswap_32(c); | 1366 | mem_total = strtoull(str, &tmp, 0); |
1298 | 1367 | if (*tmp != ':') | |
1299 | ret = read(fd, &mem_total, sizeof(u64)); | ||
1300 | if (ret != sizeof(u64)) | ||
1301 | goto error; | 1368 | goto error; |
1302 | 1369 | ||
1303 | ret = read(fd, &mem_free, sizeof(u64)); | 1370 | str = tmp + 1; |
1304 | if (ret != sizeof(u64)) | 1371 | mem_free = strtoull(str, &tmp, 0); |
1372 | if (*tmp != ':') | ||
1305 | goto error; | 1373 | goto error; |
1306 | 1374 | ||
1307 | if (h->needs_swap) { | ||
1308 | mem_total = bswap_64(mem_total); | ||
1309 | mem_free = bswap_64(mem_free); | ||
1310 | } | ||
1311 | |||
1312 | fprintf(fp, "# node%u meminfo : total = %"PRIu64" kB," | 1375 | fprintf(fp, "# node%u meminfo : total = %"PRIu64" kB," |
1313 | " free = %"PRIu64" kB\n", | 1376 | " free = %"PRIu64" kB\n", |
1314 | c, | 1377 | c, mem_total, mem_free); |
1315 | mem_total, | ||
1316 | mem_free); | ||
1317 | 1378 | ||
1318 | str = do_read_string(fd, h); | 1379 | str = tmp + 1; |
1319 | fprintf(fp, "# node%u cpu list : %s\n", c, str); | 1380 | fprintf(fp, "# node%u cpu list : %s\n", c, str); |
1320 | free(str); | ||
1321 | } | 1381 | } |
1322 | return; | 1382 | return; |
1323 | error: | 1383 | error: |
1324 | fprintf(fp, "# numa topology : not available\n"); | 1384 | fprintf(fp, "# numa topology : not available\n"); |
1325 | } | 1385 | } |
1326 | 1386 | ||
1327 | static void print_cpuid(struct perf_header *ph, int fd, FILE *fp) | 1387 | static void print_cpuid(struct perf_header *ph, int fd __maybe_unused, FILE *fp) |
1328 | { | 1388 | { |
1329 | char *str = do_read_string(fd, ph); | 1389 | fprintf(fp, "# cpuid : %s\n", ph->env.cpuid); |
1330 | fprintf(fp, "# cpuid : %s\n", str); | ||
1331 | free(str); | ||
1332 | } | 1390 | } |
1333 | 1391 | ||
1334 | static void print_branch_stack(struct perf_header *ph __used, int fd __used, | 1392 | static void print_branch_stack(struct perf_header *ph __maybe_unused, |
1335 | FILE *fp) | 1393 | int fd __maybe_unused, FILE *fp) |
1336 | { | 1394 | { |
1337 | fprintf(fp, "# contains samples with branch stack\n"); | 1395 | fprintf(fp, "# contains samples with branch stack\n"); |
1338 | } | 1396 | } |
1339 | 1397 | ||
1398 | static void print_pmu_mappings(struct perf_header *ph, int fd __maybe_unused, | ||
1399 | FILE *fp) | ||
1400 | { | ||
1401 | const char *delimiter = "# pmu mappings: "; | ||
1402 | char *str, *tmp; | ||
1403 | u32 pmu_num; | ||
1404 | u32 type; | ||
1405 | |||
1406 | pmu_num = ph->env.nr_pmu_mappings; | ||
1407 | if (!pmu_num) { | ||
1408 | fprintf(fp, "# pmu mappings: not available\n"); | ||
1409 | return; | ||
1410 | } | ||
1411 | |||
1412 | str = ph->env.pmu_mappings; | ||
1413 | |||
1414 | while (pmu_num) { | ||
1415 | type = strtoul(str, &tmp, 0); | ||
1416 | if (*tmp != ':') | ||
1417 | goto error; | ||
1418 | |||
1419 | str = tmp + 1; | ||
1420 | fprintf(fp, "%s%s = %" PRIu32, delimiter, str, type); | ||
1421 | |||
1422 | delimiter = ", "; | ||
1423 | str += strlen(str) + 1; | ||
1424 | pmu_num--; | ||
1425 | } | ||
1426 | |||
1427 | fprintf(fp, "\n"); | ||
1428 | |||
1429 | if (!pmu_num) | ||
1430 | return; | ||
1431 | error: | ||
1432 | fprintf(fp, "# pmu mappings: unable to read\n"); | ||
1433 | } | ||
1434 | |||
1340 | static int __event_process_build_id(struct build_id_event *bev, | 1435 | static int __event_process_build_id(struct build_id_event *bev, |
1341 | char *filename, | 1436 | char *filename, |
1342 | struct perf_session *session) | 1437 | struct perf_session *session) |
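From here on the print_* helpers stop re-reading the perf.data file; they only format values that the new process_* feature handlers below have already stashed in perf_header.env. Multi-valued features are packed into a single buffer: cmdline and the sibling lists become back-to-back NUL-terminated strings, and numa_nodes becomes "node:mem_total:mem_free:cpulist" records built with strbuf_addf(). A tiny standalone sketch of walking such a packed string list, as print_cmdline() and print_cpu_topology() do above:

#include <stdio.h>
#include <string.h>

static void print_packed(const char *str, int nr, const char *tag)
{
	int i;

	for (i = 0; i < nr; i++) {
		printf("# %s : %s\n", tag, str);
		str += strlen(str) + 1;		/* step over the NUL */
	}
}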
@@ -1398,7 +1493,7 @@ static int perf_header__read_build_ids_abi_quirk(struct perf_header *header, | |||
1398 | struct perf_session *session = container_of(header, struct perf_session, header); | 1493 | struct perf_session *session = container_of(header, struct perf_session, header); |
1399 | struct { | 1494 | struct { |
1400 | struct perf_event_header header; | 1495 | struct perf_event_header header; |
1401 | u8 build_id[ALIGN(BUILD_ID_SIZE, sizeof(u64))]; | 1496 | u8 build_id[PERF_ALIGN(BUILD_ID_SIZE, sizeof(u64))]; |
1402 | char filename[0]; | 1497 | char filename[0]; |
1403 | } old_bev; | 1498 | } old_bev; |
1404 | struct build_id_event bev; | 1499 | struct build_id_event bev; |
@@ -1487,28 +1582,375 @@ out: | |||
1487 | return err; | 1582 | return err; |
1488 | } | 1583 | } |
1489 | 1584 | ||
1490 | static int process_tracing_data(struct perf_file_section *section __unused, | 1585 | static int process_tracing_data(struct perf_file_section *section __maybe_unused, |
1491 | struct perf_header *ph __unused, | 1586 | struct perf_header *ph __maybe_unused, |
1492 | int feat __unused, int fd, void *data) | 1587 | int fd, void *data) |
1493 | { | 1588 | { |
1494 | trace_report(fd, data, false); | 1589 | trace_report(fd, data, false); |
1495 | return 0; | 1590 | return 0; |
1496 | } | 1591 | } |
1497 | 1592 | ||
1498 | static int process_build_id(struct perf_file_section *section, | 1593 | static int process_build_id(struct perf_file_section *section, |
1499 | struct perf_header *ph, | 1594 | struct perf_header *ph, int fd, |
1500 | int feat __unused, int fd, void *data __used) | 1595 | void *data __maybe_unused) |
1501 | { | 1596 | { |
1502 | if (perf_header__read_build_ids(ph, fd, section->offset, section->size)) | 1597 | if (perf_header__read_build_ids(ph, fd, section->offset, section->size)) |
1503 | pr_debug("Failed to read buildids, continuing...\n"); | 1598 | pr_debug("Failed to read buildids, continuing...\n"); |
1504 | return 0; | 1599 | return 0; |
1505 | } | 1600 | } |
1506 | 1601 | ||
1602 | static int process_hostname(struct perf_file_section *section __maybe_unused, | ||
1603 | struct perf_header *ph, int fd, | ||
1604 | void *data __maybe_unused) | ||
1605 | { | ||
1606 | ph->env.hostname = do_read_string(fd, ph); | ||
1607 | return ph->env.hostname ? 0 : -ENOMEM; | ||
1608 | } | ||
1609 | |||
1610 | static int process_osrelease(struct perf_file_section *section __maybe_unused, | ||
1611 | struct perf_header *ph, int fd, | ||
1612 | void *data __maybe_unused) | ||
1613 | { | ||
1614 | ph->env.os_release = do_read_string(fd, ph); | ||
1615 | return ph->env.os_release ? 0 : -ENOMEM; | ||
1616 | } | ||
1617 | |||
1618 | static int process_version(struct perf_file_section *section __maybe_unused, | ||
1619 | struct perf_header *ph, int fd, | ||
1620 | void *data __maybe_unused) | ||
1621 | { | ||
1622 | ph->env.version = do_read_string(fd, ph); | ||
1623 | return ph->env.version ? 0 : -ENOMEM; | ||
1624 | } | ||
1625 | |||
1626 | static int process_arch(struct perf_file_section *section __maybe_unused, | ||
1627 | struct perf_header *ph, int fd, | ||
1628 | void *data __maybe_unused) | ||
1629 | { | ||
1630 | ph->env.arch = do_read_string(fd, ph); | ||
1631 | return ph->env.arch ? 0 : -ENOMEM; | ||
1632 | } | ||
1633 | |||
1634 | static int process_nrcpus(struct perf_file_section *section __maybe_unused, | ||
1635 | struct perf_header *ph, int fd, | ||
1636 | void *data __maybe_unused) | ||
1637 | { | ||
1638 | size_t ret; | ||
1639 | u32 nr; | ||
1640 | |||
1641 | ret = read(fd, &nr, sizeof(nr)); | ||
1642 | if (ret != sizeof(nr)) | ||
1643 | return -1; | ||
1644 | |||
1645 | if (ph->needs_swap) | ||
1646 | nr = bswap_32(nr); | ||
1647 | |||
1648 | ph->env.nr_cpus_online = nr; | ||
1649 | |||
1650 | ret = read(fd, &nr, sizeof(nr)); | ||
1651 | if (ret != sizeof(nr)) | ||
1652 | return -1; | ||
1653 | |||
1654 | if (ph->needs_swap) | ||
1655 | nr = bswap_32(nr); | ||
1656 | |||
1657 | ph->env.nr_cpus_avail = nr; | ||
1658 | return 0; | ||
1659 | } | ||
1660 | |||
1661 | static int process_cpudesc(struct perf_file_section *section __maybe_unused, | ||
1662 | struct perf_header *ph, int fd, | ||
1663 | void *data __maybe_unused) | ||
1664 | { | ||
1665 | ph->env.cpu_desc = do_read_string(fd, ph); | ||
1666 | return ph->env.cpu_desc ? 0 : -ENOMEM; | ||
1667 | } | ||
1668 | |||
1669 | static int process_cpuid(struct perf_file_section *section __maybe_unused, | ||
1670 | struct perf_header *ph, int fd, | ||
1671 | void *data __maybe_unused) | ||
1672 | { | ||
1673 | ph->env.cpuid = do_read_string(fd, ph); | ||
1674 | return ph->env.cpuid ? 0 : -ENOMEM; | ||
1675 | } | ||
1676 | |||
1677 | static int process_total_mem(struct perf_file_section *section __maybe_unused, | ||
1678 | struct perf_header *ph, int fd, | ||
1679 | void *data __maybe_unused) | ||
1680 | { | ||
1681 | uint64_t mem; | ||
1682 | size_t ret; | ||
1683 | |||
1684 | ret = read(fd, &mem, sizeof(mem)); | ||
1685 | if (ret != sizeof(mem)) | ||
1686 | return -1; | ||
1687 | |||
1688 | if (ph->needs_swap) | ||
1689 | mem = bswap_64(mem); | ||
1690 | |||
1691 | ph->env.total_mem = mem; | ||
1692 | return 0; | ||
1693 | } | ||
1694 | |||
1695 | static struct perf_evsel * | ||
1696 | perf_evlist__find_by_index(struct perf_evlist *evlist, int idx) | ||
1697 | { | ||
1698 | struct perf_evsel *evsel; | ||
1699 | |||
1700 | list_for_each_entry(evsel, &evlist->entries, node) { | ||
1701 | if (evsel->idx == idx) | ||
1702 | return evsel; | ||
1703 | } | ||
1704 | |||
1705 | return NULL; | ||
1706 | } | ||
1707 | |||
1708 | static void | ||
1709 | perf_evlist__set_event_name(struct perf_evlist *evlist, | ||
1710 | struct perf_evsel *event) | ||
1711 | { | ||
1712 | struct perf_evsel *evsel; | ||
1713 | |||
1714 | if (!event->name) | ||
1715 | return; | ||
1716 | |||
1717 | evsel = perf_evlist__find_by_index(evlist, event->idx); | ||
1718 | if (!evsel) | ||
1719 | return; | ||
1720 | |||
1721 | if (evsel->name) | ||
1722 | return; | ||
1723 | |||
1724 | evsel->name = strdup(event->name); | ||
1725 | } | ||
1726 | |||
1727 | static int | ||
1728 | process_event_desc(struct perf_file_section *section __maybe_unused, | ||
1729 | struct perf_header *header, int fd, | ||
1730 | void *data __maybe_unused) | ||
1731 | { | ||
1732 | struct perf_session *session; | ||
1733 | struct perf_evsel *evsel, *events = read_event_desc(header, fd); | ||
1734 | |||
1735 | if (!events) | ||
1736 | return 0; | ||
1737 | |||
1738 | session = container_of(header, struct perf_session, header); | ||
1739 | for (evsel = events; evsel->attr.size; evsel++) | ||
1740 | perf_evlist__set_event_name(session->evlist, evsel); | ||
1741 | |||
1742 | free_event_desc(events); | ||
1743 | |||
1744 | return 0; | ||
1745 | } | ||
1746 | |||
1747 | static int process_cmdline(struct perf_file_section *section __maybe_unused, | ||
1748 | struct perf_header *ph, int fd, | ||
1749 | void *data __maybe_unused) | ||
1750 | { | ||
1751 | size_t ret; | ||
1752 | char *str; | ||
1753 | u32 nr, i; | ||
1754 | struct strbuf sb; | ||
1755 | |||
1756 | ret = read(fd, &nr, sizeof(nr)); | ||
1757 | if (ret != sizeof(nr)) | ||
1758 | return -1; | ||
1759 | |||
1760 | if (ph->needs_swap) | ||
1761 | nr = bswap_32(nr); | ||
1762 | |||
1763 | ph->env.nr_cmdline = nr; | ||
1764 | strbuf_init(&sb, 128); | ||
1765 | |||
1766 | for (i = 0; i < nr; i++) { | ||
1767 | str = do_read_string(fd, ph); | ||
1768 | if (!str) | ||
1769 | goto error; | ||
1770 | |||
1771 | /* include a NULL character at the end */ | ||
1772 | strbuf_add(&sb, str, strlen(str) + 1); | ||
1773 | free(str); | ||
1774 | } | ||
1775 | ph->env.cmdline = strbuf_detach(&sb, NULL); | ||
1776 | return 0; | ||
1777 | |||
1778 | error: | ||
1779 | strbuf_release(&sb); | ||
1780 | return -1; | ||
1781 | } | ||
1782 | |||
1783 | static int process_cpu_topology(struct perf_file_section *section __maybe_unused, | ||
1784 | struct perf_header *ph, int fd, | ||
1785 | void *data __maybe_unused) | ||
1786 | { | ||
1787 | size_t ret; | ||
1788 | u32 nr, i; | ||
1789 | char *str; | ||
1790 | struct strbuf sb; | ||
1791 | |||
1792 | ret = read(fd, &nr, sizeof(nr)); | ||
1793 | if (ret != sizeof(nr)) | ||
1794 | return -1; | ||
1795 | |||
1796 | if (ph->needs_swap) | ||
1797 | nr = bswap_32(nr); | ||
1798 | |||
1799 | ph->env.nr_sibling_cores = nr; | ||
1800 | strbuf_init(&sb, 128); | ||
1801 | |||
1802 | for (i = 0; i < nr; i++) { | ||
1803 | str = do_read_string(fd, ph); | ||
1804 | if (!str) | ||
1805 | goto error; | ||
1806 | |||
1807 | /* include a NULL character at the end */ | ||
1808 | strbuf_add(&sb, str, strlen(str) + 1); | ||
1809 | free(str); | ||
1810 | } | ||
1811 | ph->env.sibling_cores = strbuf_detach(&sb, NULL); | ||
1812 | |||
1813 | ret = read(fd, &nr, sizeof(nr)); | ||
1814 | if (ret != sizeof(nr)) | ||
1815 | return -1; | ||
1816 | |||
1817 | if (ph->needs_swap) | ||
1818 | nr = bswap_32(nr); | ||
1819 | |||
1820 | ph->env.nr_sibling_threads = nr; | ||
1821 | |||
1822 | for (i = 0; i < nr; i++) { | ||
1823 | str = do_read_string(fd, ph); | ||
1824 | if (!str) | ||
1825 | goto error; | ||
1826 | |||
1827 | /* include a NULL character at the end */ | ||
1828 | strbuf_add(&sb, str, strlen(str) + 1); | ||
1829 | free(str); | ||
1830 | } | ||
1831 | ph->env.sibling_threads = strbuf_detach(&sb, NULL); | ||
1832 | return 0; | ||
1833 | |||
1834 | error: | ||
1835 | strbuf_release(&sb); | ||
1836 | return -1; | ||
1837 | } | ||
1838 | |||
1839 | static int process_numa_topology(struct perf_file_section *section __maybe_unused, | ||
1840 | struct perf_header *ph, int fd, | ||
1841 | void *data __maybe_unused) | ||
1842 | { | ||
1843 | size_t ret; | ||
1844 | u32 nr, node, i; | ||
1845 | char *str; | ||
1846 | uint64_t mem_total, mem_free; | ||
1847 | struct strbuf sb; | ||
1848 | |||
1849 | /* nr nodes */ | ||
1850 | ret = read(fd, &nr, sizeof(nr)); | ||
1851 | if (ret != sizeof(nr)) | ||
1852 | goto error; | ||
1853 | |||
1854 | if (ph->needs_swap) | ||
1855 | nr = bswap_32(nr); | ||
1856 | |||
1857 | ph->env.nr_numa_nodes = nr; | ||
1858 | strbuf_init(&sb, 256); | ||
1859 | |||
1860 | for (i = 0; i < nr; i++) { | ||
1861 | /* node number */ | ||
1862 | ret = read(fd, &node, sizeof(node)); | ||
1863 | if (ret != sizeof(node)) | ||
1864 | goto error; | ||
1865 | |||
1866 | ret = read(fd, &mem_total, sizeof(u64)); | ||
1867 | if (ret != sizeof(u64)) | ||
1868 | goto error; | ||
1869 | |||
1870 | ret = read(fd, &mem_free, sizeof(u64)); | ||
1871 | if (ret != sizeof(u64)) | ||
1872 | goto error; | ||
1873 | |||
1874 | if (ph->needs_swap) { | ||
1875 | node = bswap_32(node); | ||
1876 | mem_total = bswap_64(mem_total); | ||
1877 | mem_free = bswap_64(mem_free); | ||
1878 | } | ||
1879 | |||
1880 | strbuf_addf(&sb, "%u:%"PRIu64":%"PRIu64":", | ||
1881 | node, mem_total, mem_free); | ||
1882 | |||
1883 | str = do_read_string(fd, ph); | ||
1884 | if (!str) | ||
1885 | goto error; | ||
1886 | |||
1887 | /* include a NULL character at the end */ | ||
1888 | strbuf_add(&sb, str, strlen(str) + 1); | ||
1889 | free(str); | ||
1890 | } | ||
1891 | ph->env.numa_nodes = strbuf_detach(&sb, NULL); | ||
1892 | return 0; | ||
1893 | |||
1894 | error: | ||
1895 | strbuf_release(&sb); | ||
1896 | return -1; | ||
1897 | } | ||
1898 | |||
1899 | static int process_pmu_mappings(struct perf_file_section *section __maybe_unused, | ||
1900 | struct perf_header *ph, int fd, | ||
1901 | void *data __maybe_unused) | ||
1902 | { | ||
1903 | size_t ret; | ||
1904 | char *name; | ||
1905 | u32 pmu_num; | ||
1906 | u32 type; | ||
1907 | struct strbuf sb; | ||
1908 | |||
1909 | ret = read(fd, &pmu_num, sizeof(pmu_num)); | ||
1910 | if (ret != sizeof(pmu_num)) | ||
1911 | return -1; | ||
1912 | |||
1913 | if (ph->needs_swap) | ||
1914 | pmu_num = bswap_32(pmu_num); | ||
1915 | |||
1916 | if (!pmu_num) { | ||
1917 | pr_debug("pmu mappings not available\n"); | ||
1918 | return 0; | ||
1919 | } | ||
1920 | |||
1921 | ph->env.nr_pmu_mappings = pmu_num; | ||
1922 | strbuf_init(&sb, 128); | ||
1923 | |||
1924 | while (pmu_num) { | ||
1925 | if (read(fd, &type, sizeof(type)) != sizeof(type)) | ||
1926 | goto error; | ||
1927 | if (ph->needs_swap) | ||
1928 | type = bswap_32(type); | ||
1929 | |||
1930 | name = do_read_string(fd, ph); | ||
1931 | if (!name) | ||
1932 | goto error; | ||
1933 | |||
1934 | strbuf_addf(&sb, "%u:%s", type, name); | ||
1935 | /* include a NULL character at the end */ | ||
1936 | strbuf_add(&sb, "", 1); | ||
1937 | |||
1938 | free(name); | ||
1939 | pmu_num--; | ||
1940 | } | ||
1941 | ph->env.pmu_mappings = strbuf_detach(&sb, NULL); | ||
1942 | return 0; | ||
1943 | |||
1944 | error: | ||
1945 | strbuf_release(&sb); | ||
1946 | return -1; | ||
1947 | } | ||
1948 | |||
1507 | struct feature_ops { | 1949 | struct feature_ops { |
1508 | int (*write)(int fd, struct perf_header *h, struct perf_evlist *evlist); | 1950 | int (*write)(int fd, struct perf_header *h, struct perf_evlist *evlist); |
1509 | void (*print)(struct perf_header *h, int fd, FILE *fp); | 1951 | void (*print)(struct perf_header *h, int fd, FILE *fp); |
1510 | int (*process)(struct perf_file_section *section, | 1952 | int (*process)(struct perf_file_section *section, |
1511 | struct perf_header *h, int feat, int fd, void *data); | 1953 | struct perf_header *h, int fd, void *data); |
1512 | const char *name; | 1954 | const char *name; |
1513 | bool full_only; | 1955 | bool full_only; |
1514 | }; | 1956 | }; |
@@ -1520,7 +1962,7 @@ struct feature_ops { | |||
1520 | .process = process_##func } | 1962 | .process = process_##func } |
1521 | #define FEAT_OPF(n, func) \ | 1963 | #define FEAT_OPF(n, func) \ |
1522 | [n] = { .name = #n, .write = write_##func, .print = print_##func, \ | 1964 | [n] = { .name = #n, .write = write_##func, .print = print_##func, \ |
1523 | .full_only = true } | 1965 | .process = process_##func, .full_only = true } |
1524 | 1966 | ||
1525 | /* feature_ops not implemented: */ | 1967 | /* feature_ops not implemented: */ |
1526 | #define print_tracing_data NULL | 1968 | #define print_tracing_data NULL |
@@ -1529,19 +1971,20 @@ struct feature_ops { | |||
1529 | static const struct feature_ops feat_ops[HEADER_LAST_FEATURE] = { | 1971 | static const struct feature_ops feat_ops[HEADER_LAST_FEATURE] = { |
1530 | FEAT_OPP(HEADER_TRACING_DATA, tracing_data), | 1972 | FEAT_OPP(HEADER_TRACING_DATA, tracing_data), |
1531 | FEAT_OPP(HEADER_BUILD_ID, build_id), | 1973 | FEAT_OPP(HEADER_BUILD_ID, build_id), |
1532 | FEAT_OPA(HEADER_HOSTNAME, hostname), | 1974 | FEAT_OPP(HEADER_HOSTNAME, hostname), |
1533 | FEAT_OPA(HEADER_OSRELEASE, osrelease), | 1975 | FEAT_OPP(HEADER_OSRELEASE, osrelease), |
1534 | FEAT_OPA(HEADER_VERSION, version), | 1976 | FEAT_OPP(HEADER_VERSION, version), |
1535 | FEAT_OPA(HEADER_ARCH, arch), | 1977 | FEAT_OPP(HEADER_ARCH, arch), |
1536 | FEAT_OPA(HEADER_NRCPUS, nrcpus), | 1978 | FEAT_OPP(HEADER_NRCPUS, nrcpus), |
1537 | FEAT_OPA(HEADER_CPUDESC, cpudesc), | 1979 | FEAT_OPP(HEADER_CPUDESC, cpudesc), |
1538 | FEAT_OPA(HEADER_CPUID, cpuid), | 1980 | FEAT_OPP(HEADER_CPUID, cpuid), |
1539 | FEAT_OPA(HEADER_TOTAL_MEM, total_mem), | 1981 | FEAT_OPP(HEADER_TOTAL_MEM, total_mem), |
1540 | FEAT_OPA(HEADER_EVENT_DESC, event_desc), | 1982 | FEAT_OPP(HEADER_EVENT_DESC, event_desc), |
1541 | FEAT_OPA(HEADER_CMDLINE, cmdline), | 1983 | FEAT_OPP(HEADER_CMDLINE, cmdline), |
1542 | FEAT_OPF(HEADER_CPU_TOPOLOGY, cpu_topology), | 1984 | FEAT_OPF(HEADER_CPU_TOPOLOGY, cpu_topology), |
1543 | FEAT_OPF(HEADER_NUMA_TOPOLOGY, numa_topology), | 1985 | FEAT_OPF(HEADER_NUMA_TOPOLOGY, numa_topology), |
1544 | FEAT_OPA(HEADER_BRANCH_STACK, branch_stack), | 1986 | FEAT_OPA(HEADER_BRANCH_STACK, branch_stack), |
1987 | FEAT_OPP(HEADER_PMU_MAPPINGS, pmu_mappings), | ||
1545 | }; | 1988 | }; |
1546 | 1989 | ||
1547 | struct header_print_data { | 1990 | struct header_print_data { |
@@ -1683,17 +2126,17 @@ int perf_session__write_header(struct perf_session *session, | |||
1683 | struct perf_file_header f_header; | 2126 | struct perf_file_header f_header; |
1684 | struct perf_file_attr f_attr; | 2127 | struct perf_file_attr f_attr; |
1685 | struct perf_header *header = &session->header; | 2128 | struct perf_header *header = &session->header; |
1686 | struct perf_evsel *attr, *pair = NULL; | 2129 | struct perf_evsel *evsel, *pair = NULL; |
1687 | int err; | 2130 | int err; |
1688 | 2131 | ||
1689 | lseek(fd, sizeof(f_header), SEEK_SET); | 2132 | lseek(fd, sizeof(f_header), SEEK_SET); |
1690 | 2133 | ||
1691 | if (session->evlist != evlist) | 2134 | if (session->evlist != evlist) |
1692 | pair = list_entry(session->evlist->entries.next, struct perf_evsel, node); | 2135 | pair = perf_evlist__first(session->evlist); |
1693 | 2136 | ||
1694 | list_for_each_entry(attr, &evlist->entries, node) { | 2137 | list_for_each_entry(evsel, &evlist->entries, node) { |
1695 | attr->id_offset = lseek(fd, 0, SEEK_CUR); | 2138 | evsel->id_offset = lseek(fd, 0, SEEK_CUR); |
1696 | err = do_write(fd, attr->id, attr->ids * sizeof(u64)); | 2139 | err = do_write(fd, evsel->id, evsel->ids * sizeof(u64)); |
1697 | if (err < 0) { | 2140 | if (err < 0) { |
1698 | out_err_write: | 2141 | out_err_write: |
1699 | pr_debug("failed to write perf header\n"); | 2142 | pr_debug("failed to write perf header\n"); |
@@ -1703,19 +2146,19 @@ out_err_write: | |||
1703 | err = do_write(fd, pair->id, pair->ids * sizeof(u64)); | 2146 | err = do_write(fd, pair->id, pair->ids * sizeof(u64)); |
1704 | if (err < 0) | 2147 | if (err < 0) |
1705 | goto out_err_write; | 2148 | goto out_err_write; |
1706 | attr->ids += pair->ids; | 2149 | evsel->ids += pair->ids; |
1707 | pair = list_entry(pair->node.next, struct perf_evsel, node); | 2150 | pair = perf_evsel__next(pair); |
1708 | } | 2151 | } |
1709 | } | 2152 | } |
1710 | 2153 | ||
1711 | header->attr_offset = lseek(fd, 0, SEEK_CUR); | 2154 | header->attr_offset = lseek(fd, 0, SEEK_CUR); |
1712 | 2155 | ||
1713 | list_for_each_entry(attr, &evlist->entries, node) { | 2156 | list_for_each_entry(evsel, &evlist->entries, node) { |
1714 | f_attr = (struct perf_file_attr){ | 2157 | f_attr = (struct perf_file_attr){ |
1715 | .attr = attr->attr, | 2158 | .attr = evsel->attr, |
1716 | .ids = { | 2159 | .ids = { |
1717 | .offset = attr->id_offset, | 2160 | .offset = evsel->id_offset, |
1718 | .size = attr->ids * sizeof(u64), | 2161 | .size = evsel->ids * sizeof(u64), |
1719 | } | 2162 | } |
1720 | }; | 2163 | }; |
1721 | err = do_write(fd, &f_attr, sizeof(f_attr)); | 2164 | err = do_write(fd, &f_attr, sizeof(f_attr)); |
@@ -1726,9 +2169,9 @@ out_err_write: | |||
1726 | } | 2169 | } |
1727 | 2170 | ||
1728 | header->event_offset = lseek(fd, 0, SEEK_CUR); | 2171 | header->event_offset = lseek(fd, 0, SEEK_CUR); |
1729 | header->event_size = event_count * sizeof(struct perf_trace_event_type); | 2172 | header->event_size = trace_event_count * sizeof(struct perf_trace_event_type); |
1730 | if (events) { | 2173 | if (trace_events) { |
1731 | err = do_write(fd, events, header->event_size); | 2174 | err = do_write(fd, trace_events, header->event_size); |
1732 | if (err < 0) { | 2175 | if (err < 0) { |
1733 | pr_debug("failed to write perf header events\n"); | 2176 | pr_debug("failed to write perf header events\n"); |
1734 | return err; | 2177 | return err; |
@@ -1829,6 +2272,8 @@ out_free: | |||
1829 | static const int attr_file_abi_sizes[] = { | 2272 | static const int attr_file_abi_sizes[] = { |
1830 | [0] = PERF_ATTR_SIZE_VER0, | 2273 | [0] = PERF_ATTR_SIZE_VER0, |
1831 | [1] = PERF_ATTR_SIZE_VER1, | 2274 | [1] = PERF_ATTR_SIZE_VER1, |
2275 | [2] = PERF_ATTR_SIZE_VER2, | ||
2276 | [3] = PERF_ATTR_SIZE_VER3, | ||
1832 | 0, | 2277 | 0, |
1833 | }; | 2278 | }; |
1834 | 2279 | ||
@@ -2019,7 +2464,7 @@ static int perf_file_section__process(struct perf_file_section *section, | |||
2019 | if (!feat_ops[feat].process) | 2464 | if (!feat_ops[feat].process) |
2020 | return 0; | 2465 | return 0; |
2021 | 2466 | ||
2022 | return feat_ops[feat].process(section, ph, feat, fd, data); | 2467 | return feat_ops[feat].process(section, ph, fd, data); |
2023 | } | 2468 | } |
2024 | 2469 | ||
2025 | static int perf_file_header__read_pipe(struct perf_pipe_file_header *header, | 2470 | static int perf_file_header__read_pipe(struct perf_pipe_file_header *header, |
@@ -2108,32 +2553,39 @@ static int read_attr(int fd, struct perf_header *ph, | |||
2108 | return ret <= 0 ? -1 : 0; | 2553 | return ret <= 0 ? -1 : 0; |
2109 | } | 2554 | } |
2110 | 2555 | ||
2111 | static int perf_evsel__set_tracepoint_name(struct perf_evsel *evsel, | 2556 | static int perf_evsel__prepare_tracepoint_event(struct perf_evsel *evsel, |
2112 | struct pevent *pevent) | 2557 | struct pevent *pevent) |
2113 | { | 2558 | { |
2114 | struct event_format *event = pevent_find_event(pevent, | 2559 | struct event_format *event; |
2115 | evsel->attr.config); | ||
2116 | char bf[128]; | 2560 | char bf[128]; |
2117 | 2561 | ||
2562 | /* already prepared */ | ||
2563 | if (evsel->tp_format) | ||
2564 | return 0; | ||
2565 | |||
2566 | event = pevent_find_event(pevent, evsel->attr.config); | ||
2118 | if (event == NULL) | 2567 | if (event == NULL) |
2119 | return -1; | 2568 | return -1; |
2120 | 2569 | ||
2121 | snprintf(bf, sizeof(bf), "%s:%s", event->system, event->name); | 2570 | if (!evsel->name) { |
2122 | evsel->name = strdup(bf); | 2571 | snprintf(bf, sizeof(bf), "%s:%s", event->system, event->name); |
2123 | if (event->name == NULL) | 2572 | evsel->name = strdup(bf); |
2124 | return -1; | 2573 | if (evsel->name == NULL) |
2574 | return -1; | ||
2575 | } | ||
2125 | 2576 | ||
2577 | evsel->tp_format = event; | ||
2126 | return 0; | 2578 | return 0; |
2127 | } | 2579 | } |
2128 | 2580 | ||
2129 | static int perf_evlist__set_tracepoint_names(struct perf_evlist *evlist, | 2581 | static int perf_evlist__prepare_tracepoint_events(struct perf_evlist *evlist, |
2130 | struct pevent *pevent) | 2582 | struct pevent *pevent) |
2131 | { | 2583 | { |
2132 | struct perf_evsel *pos; | 2584 | struct perf_evsel *pos; |
2133 | 2585 | ||
2134 | list_for_each_entry(pos, &evlist->entries, node) { | 2586 | list_for_each_entry(pos, &evlist->entries, node) { |
2135 | if (pos->attr.type == PERF_TYPE_TRACEPOINT && | 2587 | if (pos->attr.type == PERF_TYPE_TRACEPOINT && |
2136 | perf_evsel__set_tracepoint_name(pos, pevent)) | 2588 | perf_evsel__prepare_tracepoint_event(pos, pevent)) |
2137 | return -1; | 2589 | return -1; |
2138 | } | 2590 | } |
2139 | 2591 | ||
@@ -2176,6 +2628,8 @@ int perf_session__read_header(struct perf_session *session, int fd) | |||
2176 | 2628 | ||
2177 | if (evsel == NULL) | 2629 | if (evsel == NULL) |
2178 | goto out_delete_evlist; | 2630 | goto out_delete_evlist; |
2631 | |||
2632 | evsel->needs_swap = header->needs_swap; | ||
2179 | /* | 2633 | /* |
2180 | * Do it before so that if perf_evsel__alloc_id fails, this | 2634 | * Do it before so that if perf_evsel__alloc_id fails, this |
2181 | * entry gets purged too at perf_evlist__delete(). | 2635 | * entry gets purged too at perf_evlist__delete(). |
@@ -2207,13 +2661,13 @@ int perf_session__read_header(struct perf_session *session, int fd) | |||
2207 | 2661 | ||
2208 | if (f_header.event_types.size) { | 2662 | if (f_header.event_types.size) { |
2209 | lseek(fd, f_header.event_types.offset, SEEK_SET); | 2663 | lseek(fd, f_header.event_types.offset, SEEK_SET); |
2210 | events = malloc(f_header.event_types.size); | 2664 | trace_events = malloc(f_header.event_types.size); |
2211 | if (events == NULL) | 2665 | if (trace_events == NULL) |
2212 | return -ENOMEM; | 2666 | return -ENOMEM; |
2213 | if (perf_header__getbuffer64(header, fd, events, | 2667 | if (perf_header__getbuffer64(header, fd, trace_events, |
2214 | f_header.event_types.size)) | 2668 | f_header.event_types.size)) |
2215 | goto out_errno; | 2669 | goto out_errno; |
2216 | event_count = f_header.event_types.size / sizeof(struct perf_trace_event_type); | 2670 | trace_event_count = f_header.event_types.size / sizeof(struct perf_trace_event_type); |
2217 | } | 2671 | } |
2218 | 2672 | ||
2219 | perf_header__process_sections(header, fd, &session->pevent, | 2673 | perf_header__process_sections(header, fd, &session->pevent, |
@@ -2221,7 +2675,8 @@ int perf_session__read_header(struct perf_session *session, int fd) | |||
2221 | 2675 | ||
2222 | lseek(fd, header->data_offset, SEEK_SET); | 2676 | lseek(fd, header->data_offset, SEEK_SET); |
2223 | 2677 | ||
2224 | if (perf_evlist__set_tracepoint_names(session->evlist, session->pevent)) | 2678 | if (perf_evlist__prepare_tracepoint_events(session->evlist, |
2679 | session->pevent)) | ||
2225 | goto out_delete_evlist; | 2680 | goto out_delete_evlist; |
2226 | 2681 | ||
2227 | header->frozen = 1; | 2682 | header->frozen = 1; |
@@ -2236,7 +2691,7 @@ out_delete_evlist: | |||
2236 | } | 2691 | } |
2237 | 2692 | ||
2238 | int perf_event__synthesize_attr(struct perf_tool *tool, | 2693 | int perf_event__synthesize_attr(struct perf_tool *tool, |
2239 | struct perf_event_attr *attr, u16 ids, u64 *id, | 2694 | struct perf_event_attr *attr, u32 ids, u64 *id, |
2240 | perf_event__handler_t process) | 2695 | perf_event__handler_t process) |
2241 | { | 2696 | { |
2242 | union perf_event *ev; | 2697 | union perf_event *ev; |
@@ -2244,7 +2699,7 @@ int perf_event__synthesize_attr(struct perf_tool *tool, | |||
2244 | int err; | 2699 | int err; |
2245 | 2700 | ||
2246 | size = sizeof(struct perf_event_attr); | 2701 | size = sizeof(struct perf_event_attr); |
2247 | size = ALIGN(size, sizeof(u64)); | 2702 | size = PERF_ALIGN(size, sizeof(u64)); |
2248 | size += sizeof(struct perf_event_header); | 2703 | size += sizeof(struct perf_event_header); |
2249 | size += ids * sizeof(u64); | 2704 | size += ids * sizeof(u64); |
2250 | 2705 | ||
@@ -2257,9 +2712,12 @@ int perf_event__synthesize_attr(struct perf_tool *tool, | |||
2257 | memcpy(ev->attr.id, id, ids * sizeof(u64)); | 2712 | memcpy(ev->attr.id, id, ids * sizeof(u64)); |
2258 | 2713 | ||
2259 | ev->attr.header.type = PERF_RECORD_HEADER_ATTR; | 2714 | ev->attr.header.type = PERF_RECORD_HEADER_ATTR; |
2260 | ev->attr.header.size = size; | 2715 | ev->attr.header.size = (u16)size; |
2261 | 2716 | ||
2262 | err = process(tool, ev, NULL, NULL); | 2717 | if (ev->attr.header.size == size) |
2718 | err = process(tool, ev, NULL, NULL); | ||
2719 | else | ||
2720 | err = -E2BIG; | ||
2263 | 2721 | ||
2264 | free(ev); | 2722 | free(ev); |
2265 | 2723 | ||
@@ -2270,12 +2728,12 @@ int perf_event__synthesize_attrs(struct perf_tool *tool, | |||
2270 | struct perf_session *session, | 2728 | struct perf_session *session, |
2271 | perf_event__handler_t process) | 2729 | perf_event__handler_t process) |
2272 | { | 2730 | { |
2273 | struct perf_evsel *attr; | 2731 | struct perf_evsel *evsel; |
2274 | int err = 0; | 2732 | int err = 0; |
2275 | 2733 | ||
2276 | list_for_each_entry(attr, &session->evlist->entries, node) { | 2734 | list_for_each_entry(evsel, &session->evlist->entries, node) { |
2277 | err = perf_event__synthesize_attr(tool, &attr->attr, attr->ids, | 2735 | err = perf_event__synthesize_attr(tool, &evsel->attr, evsel->ids, |
2278 | attr->id, process); | 2736 | evsel->id, process); |
2279 | if (err) { | 2737 | if (err) { |
2280 | pr_debug("failed to create perf header attribute\n"); | 2738 | pr_debug("failed to create perf header attribute\n"); |
2281 | return err; | 2739 | return err; |
@@ -2288,7 +2746,7 @@ int perf_event__synthesize_attrs(struct perf_tool *tool, | |||
2288 | int perf_event__process_attr(union perf_event *event, | 2746 | int perf_event__process_attr(union perf_event *event, |
2289 | struct perf_evlist **pevlist) | 2747 | struct perf_evlist **pevlist) |
2290 | { | 2748 | { |
2291 | unsigned int i, ids, n_ids; | 2749 | u32 i, ids, n_ids; |
2292 | struct perf_evsel *evsel; | 2750 | struct perf_evsel *evsel; |
2293 | struct perf_evlist *evlist = *pevlist; | 2751 | struct perf_evlist *evlist = *pevlist; |
2294 | 2752 | ||
@@ -2339,7 +2797,7 @@ int perf_event__synthesize_event_type(struct perf_tool *tool, | |||
2339 | 2797 | ||
2340 | ev.event_type.header.type = PERF_RECORD_HEADER_EVENT_TYPE; | 2798 | ev.event_type.header.type = PERF_RECORD_HEADER_EVENT_TYPE; |
2341 | size = strlen(ev.event_type.event_type.name); | 2799 | size = strlen(ev.event_type.event_type.name); |
2342 | size = ALIGN(size, sizeof(u64)); | 2800 | size = PERF_ALIGN(size, sizeof(u64)); |
2343 | ev.event_type.header.size = sizeof(ev.event_type) - | 2801 | ev.event_type.header.size = sizeof(ev.event_type) - |
2344 | (sizeof(ev.event_type.event_type.name) - size); | 2802 | (sizeof(ev.event_type.event_type.name) - size); |
2345 | 2803 | ||
@@ -2355,8 +2813,8 @@ int perf_event__synthesize_event_types(struct perf_tool *tool, | |||
2355 | struct perf_trace_event_type *type; | 2813 | struct perf_trace_event_type *type; |
2356 | int i, err = 0; | 2814 | int i, err = 0; |
2357 | 2815 | ||
2358 | for (i = 0; i < event_count; i++) { | 2816 | for (i = 0; i < trace_event_count; i++) { |
2359 | type = &events[i]; | 2817 | type = &trace_events[i]; |
2360 | 2818 | ||
2361 | err = perf_event__synthesize_event_type(tool, type->event_id, | 2819 | err = perf_event__synthesize_event_type(tool, type->event_id, |
2362 | type->name, process, | 2820 | type->name, process, |
@@ -2370,7 +2828,7 @@ int perf_event__synthesize_event_types(struct perf_tool *tool, | |||
2370 | return err; | 2828 | return err; |
2371 | } | 2829 | } |
2372 | 2830 | ||
2373 | int perf_event__process_event_type(struct perf_tool *tool __unused, | 2831 | int perf_event__process_event_type(struct perf_tool *tool __maybe_unused, |
2374 | union perf_event *event) | 2832 | union perf_event *event) |
2375 | { | 2833 | { |
2376 | if (perf_header__push_event(event->event_type.event_type.event_id, | 2834 | if (perf_header__push_event(event->event_type.event_type.event_id, |
@@ -2387,7 +2845,7 @@ int perf_event__synthesize_tracing_data(struct perf_tool *tool, int fd, | |||
2387 | union perf_event ev; | 2845 | union perf_event ev; |
2388 | struct tracing_data *tdata; | 2846 | struct tracing_data *tdata; |
2389 | ssize_t size = 0, aligned_size = 0, padding; | 2847 | ssize_t size = 0, aligned_size = 0, padding; |
2390 | int err __used = 0; | 2848 | int err __maybe_unused = 0; |
2391 | 2849 | ||
2392 | /* | 2850 | /* |
2393 | * We are going to store the size of the data followed | 2851 | * We are going to store the size of the data followed |
@@ -2408,7 +2866,7 @@ int perf_event__synthesize_tracing_data(struct perf_tool *tool, int fd, | |||
2408 | 2866 | ||
2409 | ev.tracing_data.header.type = PERF_RECORD_HEADER_TRACING_DATA; | 2867 | ev.tracing_data.header.type = PERF_RECORD_HEADER_TRACING_DATA; |
2410 | size = tdata->size; | 2868 | size = tdata->size; |
2411 | aligned_size = ALIGN(size, sizeof(u64)); | 2869 | aligned_size = PERF_ALIGN(size, sizeof(u64)); |
2412 | padding = aligned_size - size; | 2870 | padding = aligned_size - size; |
2413 | ev.tracing_data.header.size = sizeof(ev.tracing_data); | 2871 | ev.tracing_data.header.size = sizeof(ev.tracing_data); |
2414 | ev.tracing_data.size = aligned_size; | 2872 | ev.tracing_data.size = aligned_size; |
@@ -2439,7 +2897,7 @@ int perf_event__process_tracing_data(union perf_event *event, | |||
2439 | 2897 | ||
2440 | size_read = trace_report(session->fd, &session->pevent, | 2898 | size_read = trace_report(session->fd, &session->pevent, |
2441 | session->repipe); | 2899 | session->repipe); |
2442 | padding = ALIGN(size_read, sizeof(u64)) - size_read; | 2900 | padding = PERF_ALIGN(size_read, sizeof(u64)) - size_read; |
2443 | 2901 | ||
2444 | if (read(session->fd, buf, padding) < 0) | 2902 | if (read(session->fd, buf, padding) < 0) |
2445 | die("reading input file"); | 2903 | die("reading input file"); |
@@ -2452,6 +2910,9 @@ int perf_event__process_tracing_data(union perf_event *event, | |||
2452 | if (size_read + padding != size) | 2910 | if (size_read + padding != size) |
2453 | die("tracing data size mismatch"); | 2911 | die("tracing data size mismatch"); |
2454 | 2912 | ||
2913 | perf_evlist__prepare_tracepoint_events(session->evlist, | ||
2914 | session->pevent); | ||
2915 | |||
2455 | return size_read + padding; | 2916 | return size_read + padding; |
2456 | } | 2917 | } |
2457 | 2918 | ||
@@ -2470,7 +2931,7 @@ int perf_event__synthesize_build_id(struct perf_tool *tool, | |||
2470 | memset(&ev, 0, sizeof(ev)); | 2931 | memset(&ev, 0, sizeof(ev)); |
2471 | 2932 | ||
2472 | len = pos->long_name_len + 1; | 2933 | len = pos->long_name_len + 1; |
2473 | len = ALIGN(len, NAME_ALIGN); | 2934 | len = PERF_ALIGN(len, NAME_ALIGN); |
2474 | memcpy(&ev.build_id.build_id, pos->build_id, sizeof(pos->build_id)); | 2935 | memcpy(&ev.build_id.build_id, pos->build_id, sizeof(pos->build_id)); |
2475 | ev.build_id.header.type = PERF_RECORD_HEADER_BUILD_ID; | 2936 | ev.build_id.header.type = PERF_RECORD_HEADER_BUILD_ID; |
2476 | ev.build_id.header.misc = misc; | 2937 | ev.build_id.header.misc = misc; |
@@ -2483,7 +2944,7 @@ int perf_event__synthesize_build_id(struct perf_tool *tool, | |||
2483 | return err; | 2944 | return err; |
2484 | } | 2945 | } |
2485 | 2946 | ||
2486 | int perf_event__process_build_id(struct perf_tool *tool __used, | 2947 | int perf_event__process_build_id(struct perf_tool *tool __maybe_unused, |
2487 | union perf_event *event, | 2948 | union perf_event *event, |
2488 | struct perf_session *session) | 2949 | struct perf_session *session) |
2489 | { | 2950 | { |
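
The header.c hunk above converts most HEADER_* entries from FEAT_OPA to FEAT_OPP so each feature carries a .process callback, and those callbacks share one read/byte-swap/store shape: read a fixed-size field from the feature section, bswap it when ph->needs_swap is set, then record it in ph->env. The snippet below is an illustration of that shape only, not the perf code itself; read_u32_swapped() is a hypothetical helper name.

#include <stdint.h>
#include <unistd.h>
#include <byteswap.h>

/*
 * Illustration only -- not part of the patch.  Read a fixed-size
 * integer from the feature section, byte-swap it when the file was
 * written on a host of different endianness, then hand it back.
 * read_u32_swapped() is a made-up helper, not a perf API.
 */
static int read_u32_swapped(int fd, int needs_swap, uint32_t *val)
{
	uint32_t v;

	if (read(fd, &v, sizeof(v)) != sizeof(v))
		return -1;		/* short read: corrupt section */

	*val = needs_swap ? bswap_32(v) : v;
	return 0;
}
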
diff --git a/tools/perf/util/header.h b/tools/perf/util/header.h index 2d42b3e1826f..99bdd3abce59 100644 --- a/tools/perf/util/header.h +++ b/tools/perf/util/header.h | |||
@@ -28,6 +28,7 @@ enum { | |||
28 | HEADER_CPU_TOPOLOGY, | 28 | HEADER_CPU_TOPOLOGY, |
29 | HEADER_NUMA_TOPOLOGY, | 29 | HEADER_NUMA_TOPOLOGY, |
30 | HEADER_BRANCH_STACK, | 30 | HEADER_BRANCH_STACK, |
31 | HEADER_PMU_MAPPINGS, | ||
31 | HEADER_LAST_FEATURE, | 32 | HEADER_LAST_FEATURE, |
32 | HEADER_FEAT_BITS = 256, | 33 | HEADER_FEAT_BITS = 256, |
33 | }; | 34 | }; |
@@ -57,6 +58,29 @@ struct perf_header; | |||
57 | int perf_file_header__read(struct perf_file_header *header, | 58 | int perf_file_header__read(struct perf_file_header *header, |
58 | struct perf_header *ph, int fd); | 59 | struct perf_header *ph, int fd); |
59 | 60 | ||
61 | struct perf_session_env { | ||
62 | char *hostname; | ||
63 | char *os_release; | ||
64 | char *version; | ||
65 | char *arch; | ||
66 | int nr_cpus_online; | ||
67 | int nr_cpus_avail; | ||
68 | char *cpu_desc; | ||
69 | char *cpuid; | ||
70 | unsigned long long total_mem; | ||
71 | |||
72 | int nr_cmdline; | ||
73 | char *cmdline; | ||
74 | int nr_sibling_cores; | ||
75 | char *sibling_cores; | ||
76 | int nr_sibling_threads; | ||
77 | char *sibling_threads; | ||
78 | int nr_numa_nodes; | ||
79 | char *numa_nodes; | ||
80 | int nr_pmu_mappings; | ||
81 | char *pmu_mappings; | ||
82 | }; | ||
83 | |||
60 | struct perf_header { | 84 | struct perf_header { |
61 | int frozen; | 85 | int frozen; |
62 | bool needs_swap; | 86 | bool needs_swap; |
@@ -66,6 +90,7 @@ struct perf_header { | |||
66 | u64 event_offset; | 90 | u64 event_offset; |
67 | u64 event_size; | 91 | u64 event_size; |
68 | DECLARE_BITMAP(adds_features, HEADER_FEAT_BITS); | 92 | DECLARE_BITMAP(adds_features, HEADER_FEAT_BITS); |
93 | struct perf_session_env env; | ||
69 | }; | 94 | }; |
70 | 95 | ||
71 | struct perf_evlist; | 96 | struct perf_evlist; |
@@ -95,11 +120,11 @@ int perf_header__process_sections(struct perf_header *header, int fd, | |||
95 | int perf_header__fprintf_info(struct perf_session *s, FILE *fp, bool full); | 120 | int perf_header__fprintf_info(struct perf_session *s, FILE *fp, bool full); |
96 | 121 | ||
97 | int build_id_cache__add_s(const char *sbuild_id, const char *debugdir, | 122 | int build_id_cache__add_s(const char *sbuild_id, const char *debugdir, |
98 | const char *name, bool is_kallsyms); | 123 | const char *name, bool is_kallsyms, bool is_vdso); |
99 | int build_id_cache__remove_s(const char *sbuild_id, const char *debugdir); | 124 | int build_id_cache__remove_s(const char *sbuild_id, const char *debugdir); |
100 | 125 | ||
101 | int perf_event__synthesize_attr(struct perf_tool *tool, | 126 | int perf_event__synthesize_attr(struct perf_tool *tool, |
102 | struct perf_event_attr *attr, u16 ids, u64 *id, | 127 | struct perf_event_attr *attr, u32 ids, u64 *id, |
103 | perf_event__handler_t process); | 128 | perf_event__handler_t process); |
104 | int perf_event__synthesize_attrs(struct perf_tool *tool, | 129 | int perf_event__synthesize_attrs(struct perf_tool *tool, |
105 | struct perf_session *session, | 130 | struct perf_session *session, |
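
The new perf_session_env fields above pair a count (nr_cmdline, nr_sibling_cores, ...) with a single buffer holding that many NUL-terminated strings back to back -- the layout the header.c callbacks build with strbuf_add(&sb, str, strlen(str) + 1). A small sketch of walking such a field, assuming the caller passes a well-formed buffer and matching count:

#include <stdio.h>
#include <string.h>

/*
 * Illustration only: walk a "count + concatenated NUL-terminated
 * strings" field such as env.cmdline or env.sibling_cores.
 */
static void print_string_list(const char *buf, int nr)
{
	const char *p = buf;
	int i;

	for (i = 0; i < nr; i++) {
		printf("%s\n", p);
		p += strlen(p) + 1;	/* step over the embedded NUL */
	}
}
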
diff --git a/tools/perf/util/help.c b/tools/perf/util/help.c index 6f2975a00358..8b1f6e891b8a 100644 --- a/tools/perf/util/help.c +++ b/tools/perf/util/help.c | |||
@@ -3,6 +3,7 @@ | |||
3 | #include "exec_cmd.h" | 3 | #include "exec_cmd.h" |
4 | #include "levenshtein.h" | 4 | #include "levenshtein.h" |
5 | #include "help.h" | 5 | #include "help.h" |
6 | #include <termios.h> | ||
6 | 7 | ||
7 | void add_cmdname(struct cmdnames *cmds, const char *name, size_t len) | 8 | void add_cmdname(struct cmdnames *cmds, const char *name, size_t len) |
8 | { | 9 | { |
@@ -331,7 +332,8 @@ const char *help_unknown_cmd(const char *cmd) | |||
331 | exit(1); | 332 | exit(1); |
332 | } | 333 | } |
333 | 334 | ||
334 | int cmd_version(int argc __used, const char **argv __used, const char *prefix __used) | 335 | int cmd_version(int argc __maybe_unused, const char **argv __maybe_unused, |
336 | const char *prefix __maybe_unused) | ||
335 | { | 337 | { |
336 | printf("perf version %s\n", perf_version_string); | 338 | printf("perf version %s\n", perf_version_string); |
337 | return 0; | 339 | return 0; |
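
Several header.c hunks above replace ALIGN() with PERF_ALIGN() when sizing synthesized events; both round a byte count up to the next multiple of sizeof(u64). A minimal sketch of that rounding under the usual power-of-two assumption (ALIGN_UP is a stand-in macro for illustration, not the perf definition):

#include <stdio.h>

/* Illustration only: round x up to a power-of-two boundary a. */
#define ALIGN_UP(x, a)	(((x) + (a) - 1) & ~((unsigned long)((a) - 1)))

int main(void)
{
	/* e.g. a 13-byte name padded to an 8-byte boundary */
	printf("%lu\n", ALIGN_UP(13UL, 8));	/* prints 16 */
	return 0;
}
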
diff --git a/tools/perf/util/hist.c b/tools/perf/util/hist.c index f247ef2789a4..236bc9d98ff2 100644 --- a/tools/perf/util/hist.c +++ b/tools/perf/util/hist.c | |||
@@ -45,7 +45,7 @@ bool hists__new_col_len(struct hists *hists, enum hist_column col, u16 len) | |||
45 | return false; | 45 | return false; |
46 | } | 46 | } |
47 | 47 | ||
48 | static void hists__reset_col_len(struct hists *hists) | 48 | void hists__reset_col_len(struct hists *hists) |
49 | { | 49 | { |
50 | enum hist_column col; | 50 | enum hist_column col; |
51 | 51 | ||
@@ -63,7 +63,7 @@ static void hists__set_unres_dso_col_len(struct hists *hists, int dso) | |||
63 | hists__set_col_len(hists, dso, unresolved_col_width); | 63 | hists__set_col_len(hists, dso, unresolved_col_width); |
64 | } | 64 | } |
65 | 65 | ||
66 | static void hists__calc_col_len(struct hists *hists, struct hist_entry *h) | 66 | void hists__calc_col_len(struct hists *hists, struct hist_entry *h) |
67 | { | 67 | { |
68 | const unsigned int unresolved_col_width = BITS_PER_LONG / 4; | 68 | const unsigned int unresolved_col_width = BITS_PER_LONG / 4; |
69 | u16 len; | 69 | u16 len; |
@@ -114,6 +114,22 @@ static void hists__calc_col_len(struct hists *hists, struct hist_entry *h) | |||
114 | } | 114 | } |
115 | } | 115 | } |
116 | 116 | ||
117 | void hists__output_recalc_col_len(struct hists *hists, int max_rows) | ||
118 | { | ||
119 | struct rb_node *next = rb_first(&hists->entries); | ||
120 | struct hist_entry *n; | ||
121 | int row = 0; | ||
122 | |||
123 | hists__reset_col_len(hists); | ||
124 | |||
125 | while (next && row++ < max_rows) { | ||
126 | n = rb_entry(next, struct hist_entry, rb_node); | ||
127 | if (!n->filtered) | ||
128 | hists__calc_col_len(hists, n); | ||
129 | next = rb_next(&n->rb_node); | ||
130 | } | ||
131 | } | ||
132 | |||
117 | static void hist_entry__add_cpumode_period(struct hist_entry *he, | 133 | static void hist_entry__add_cpumode_period(struct hist_entry *he, |
118 | unsigned int cpumode, u64 period) | 134 | unsigned int cpumode, u64 period) |
119 | { | 135 | { |
@@ -378,7 +394,7 @@ void hist_entry__free(struct hist_entry *he) | |||
378 | * collapse the histogram | 394 | * collapse the histogram |
379 | */ | 395 | */ |
380 | 396 | ||
381 | static bool hists__collapse_insert_entry(struct hists *hists __used, | 397 | static bool hists__collapse_insert_entry(struct hists *hists __maybe_unused, |
382 | struct rb_root *root, | 398 | struct rb_root *root, |
383 | struct hist_entry *he) | 399 | struct hist_entry *he) |
384 | { | 400 | { |
@@ -394,8 +410,13 @@ static bool hists__collapse_insert_entry(struct hists *hists __used, | |||
394 | cmp = hist_entry__collapse(iter, he); | 410 | cmp = hist_entry__collapse(iter, he); |
395 | 411 | ||
396 | if (!cmp) { | 412 | if (!cmp) { |
397 | iter->period += he->period; | 413 | iter->period += he->period; |
398 | iter->nr_events += he->nr_events; | 414 | iter->period_sys += he->period_sys; |
415 | iter->period_us += he->period_us; | ||
416 | iter->period_guest_sys += he->period_guest_sys; | ||
417 | iter->period_guest_us += he->period_guest_us; | ||
418 | iter->nr_events += he->nr_events; | ||
419 | |||
399 | if (symbol_conf.use_callchain) { | 420 | if (symbol_conf.use_callchain) { |
400 | callchain_cursor_reset(&callchain_cursor); | 421 | callchain_cursor_reset(&callchain_cursor); |
401 | callchain_merge(&callchain_cursor, | 422 | callchain_merge(&callchain_cursor, |
@@ -547,674 +568,6 @@ void hists__output_resort_threaded(struct hists *hists) | |||
547 | return __hists__output_resort(hists, true); | 568 | return __hists__output_resort(hists, true); |
548 | } | 569 | } |
549 | 570 | ||
550 | static size_t callchain__fprintf_left_margin(FILE *fp, int left_margin) | ||
551 | { | ||
552 | int i; | ||
553 | int ret = fprintf(fp, " "); | ||
554 | |||
555 | for (i = 0; i < left_margin; i++) | ||
556 | ret += fprintf(fp, " "); | ||
557 | |||
558 | return ret; | ||
559 | } | ||
560 | |||
561 | static size_t ipchain__fprintf_graph_line(FILE *fp, int depth, int depth_mask, | ||
562 | int left_margin) | ||
563 | { | ||
564 | int i; | ||
565 | size_t ret = callchain__fprintf_left_margin(fp, left_margin); | ||
566 | |||
567 | for (i = 0; i < depth; i++) | ||
568 | if (depth_mask & (1 << i)) | ||
569 | ret += fprintf(fp, "| "); | ||
570 | else | ||
571 | ret += fprintf(fp, " "); | ||
572 | |||
573 | ret += fprintf(fp, "\n"); | ||
574 | |||
575 | return ret; | ||
576 | } | ||
577 | |||
578 | static size_t ipchain__fprintf_graph(FILE *fp, struct callchain_list *chain, | ||
579 | int depth, int depth_mask, int period, | ||
580 | u64 total_samples, u64 hits, | ||
581 | int left_margin) | ||
582 | { | ||
583 | int i; | ||
584 | size_t ret = 0; | ||
585 | |||
586 | ret += callchain__fprintf_left_margin(fp, left_margin); | ||
587 | for (i = 0; i < depth; i++) { | ||
588 | if (depth_mask & (1 << i)) | ||
589 | ret += fprintf(fp, "|"); | ||
590 | else | ||
591 | ret += fprintf(fp, " "); | ||
592 | if (!period && i == depth - 1) { | ||
593 | double percent; | ||
594 | |||
595 | percent = hits * 100.0 / total_samples; | ||
596 | ret += percent_color_fprintf(fp, "--%2.2f%%-- ", percent); | ||
597 | } else | ||
598 | ret += fprintf(fp, "%s", " "); | ||
599 | } | ||
600 | if (chain->ms.sym) | ||
601 | ret += fprintf(fp, "%s\n", chain->ms.sym->name); | ||
602 | else | ||
603 | ret += fprintf(fp, "0x%0" PRIx64 "\n", chain->ip); | ||
604 | |||
605 | return ret; | ||
606 | } | ||
607 | |||
608 | static struct symbol *rem_sq_bracket; | ||
609 | static struct callchain_list rem_hits; | ||
610 | |||
611 | static void init_rem_hits(void) | ||
612 | { | ||
613 | rem_sq_bracket = malloc(sizeof(*rem_sq_bracket) + 6); | ||
614 | if (!rem_sq_bracket) { | ||
615 | fprintf(stderr, "Not enough memory to display remaining hits\n"); | ||
616 | return; | ||
617 | } | ||
618 | |||
619 | strcpy(rem_sq_bracket->name, "[...]"); | ||
620 | rem_hits.ms.sym = rem_sq_bracket; | ||
621 | } | ||
622 | |||
623 | static size_t __callchain__fprintf_graph(FILE *fp, struct rb_root *root, | ||
624 | u64 total_samples, int depth, | ||
625 | int depth_mask, int left_margin) | ||
626 | { | ||
627 | struct rb_node *node, *next; | ||
628 | struct callchain_node *child; | ||
629 | struct callchain_list *chain; | ||
630 | int new_depth_mask = depth_mask; | ||
631 | u64 remaining; | ||
632 | size_t ret = 0; | ||
633 | int i; | ||
634 | uint entries_printed = 0; | ||
635 | |||
636 | remaining = total_samples; | ||
637 | |||
638 | node = rb_first(root); | ||
639 | while (node) { | ||
640 | u64 new_total; | ||
641 | u64 cumul; | ||
642 | |||
643 | child = rb_entry(node, struct callchain_node, rb_node); | ||
644 | cumul = callchain_cumul_hits(child); | ||
645 | remaining -= cumul; | ||
646 | |||
647 | /* | ||
648 | * The depth mask manages the output of pipes that show | ||
649 | * the depth. We don't want to keep the pipes of the current | ||
650 | * level for the last child of this depth. | ||
651 | * Except if we have remaining filtered hits; they will | ||
652 | * supersede the last child. | ||
653 | */ | ||
654 | next = rb_next(node); | ||
655 | if (!next && (callchain_param.mode != CHAIN_GRAPH_REL || !remaining)) | ||
656 | new_depth_mask &= ~(1 << (depth - 1)); | ||
657 | |||
658 | /* | ||
659 | * But we keep the older depth mask for the line separator | ||
660 | * to keep the level link until we reach the last child | ||
661 | */ | ||
662 | ret += ipchain__fprintf_graph_line(fp, depth, depth_mask, | ||
663 | left_margin); | ||
664 | i = 0; | ||
665 | list_for_each_entry(chain, &child->val, list) { | ||
666 | ret += ipchain__fprintf_graph(fp, chain, depth, | ||
667 | new_depth_mask, i++, | ||
668 | total_samples, | ||
669 | cumul, | ||
670 | left_margin); | ||
671 | } | ||
672 | |||
673 | if (callchain_param.mode == CHAIN_GRAPH_REL) | ||
674 | new_total = child->children_hit; | ||
675 | else | ||
676 | new_total = total_samples; | ||
677 | |||
678 | ret += __callchain__fprintf_graph(fp, &child->rb_root, new_total, | ||
679 | depth + 1, | ||
680 | new_depth_mask | (1 << depth), | ||
681 | left_margin); | ||
682 | node = next; | ||
683 | if (++entries_printed == callchain_param.print_limit) | ||
684 | break; | ||
685 | } | ||
686 | |||
687 | if (callchain_param.mode == CHAIN_GRAPH_REL && | ||
688 | remaining && remaining != total_samples) { | ||
689 | |||
690 | if (!rem_sq_bracket) | ||
691 | return ret; | ||
692 | |||
693 | new_depth_mask &= ~(1 << (depth - 1)); | ||
694 | ret += ipchain__fprintf_graph(fp, &rem_hits, depth, | ||
695 | new_depth_mask, 0, total_samples, | ||
696 | remaining, left_margin); | ||
697 | } | ||
698 | |||
699 | return ret; | ||
700 | } | ||
701 | |||
702 | static size_t callchain__fprintf_graph(FILE *fp, struct rb_root *root, | ||
703 | u64 total_samples, int left_margin) | ||
704 | { | ||
705 | struct callchain_node *cnode; | ||
706 | struct callchain_list *chain; | ||
707 | u32 entries_printed = 0; | ||
708 | bool printed = false; | ||
709 | struct rb_node *node; | ||
710 | int i = 0; | ||
711 | int ret = 0; | ||
712 | |||
713 | /* | ||
714 | * If we have a single callchain root, don't bother printing | ||
715 | * its percentage (100% in fractal mode and the same percentage | ||
716 | * as the hist in graph mode). This also avoids one level of column. | ||
717 | */ | ||
718 | node = rb_first(root); | ||
719 | if (node && !rb_next(node)) { | ||
720 | cnode = rb_entry(node, struct callchain_node, rb_node); | ||
721 | list_for_each_entry(chain, &cnode->val, list) { | ||
722 | /* | ||
723 | * If we sort by symbol, the first entry is the same as | ||
724 | * the symbol. No need to print it, otherwise it would be | ||
725 | * displayed twice. | ||
726 | */ | ||
727 | if (!i++ && sort__first_dimension == SORT_SYM) | ||
728 | continue; | ||
729 | if (!printed) { | ||
730 | ret += callchain__fprintf_left_margin(fp, left_margin); | ||
731 | ret += fprintf(fp, "|\n"); | ||
732 | ret += callchain__fprintf_left_margin(fp, left_margin); | ||
733 | ret += fprintf(fp, "---"); | ||
734 | left_margin += 3; | ||
735 | printed = true; | ||
736 | } else | ||
737 | ret += callchain__fprintf_left_margin(fp, left_margin); | ||
738 | |||
739 | if (chain->ms.sym) | ||
740 | ret += fprintf(fp, " %s\n", chain->ms.sym->name); | ||
741 | else | ||
742 | ret += fprintf(fp, " %p\n", (void *)(long)chain->ip); | ||
743 | |||
744 | if (++entries_printed == callchain_param.print_limit) | ||
745 | break; | ||
746 | } | ||
747 | root = &cnode->rb_root; | ||
748 | } | ||
749 | |||
750 | ret += __callchain__fprintf_graph(fp, root, total_samples, | ||
751 | 1, 1, left_margin); | ||
752 | ret += fprintf(fp, "\n"); | ||
753 | |||
754 | return ret; | ||
755 | } | ||
756 | |||
757 | static size_t __callchain__fprintf_flat(FILE *fp, | ||
758 | struct callchain_node *self, | ||
759 | u64 total_samples) | ||
760 | { | ||
761 | struct callchain_list *chain; | ||
762 | size_t ret = 0; | ||
763 | |||
764 | if (!self) | ||
765 | return 0; | ||
766 | |||
767 | ret += __callchain__fprintf_flat(fp, self->parent, total_samples); | ||
768 | |||
769 | |||
770 | list_for_each_entry(chain, &self->val, list) { | ||
771 | if (chain->ip >= PERF_CONTEXT_MAX) | ||
772 | continue; | ||
773 | if (chain->ms.sym) | ||
774 | ret += fprintf(fp, " %s\n", chain->ms.sym->name); | ||
775 | else | ||
776 | ret += fprintf(fp, " %p\n", | ||
777 | (void *)(long)chain->ip); | ||
778 | } | ||
779 | |||
780 | return ret; | ||
781 | } | ||
782 | |||
783 | static size_t callchain__fprintf_flat(FILE *fp, struct rb_root *self, | ||
784 | u64 total_samples) | ||
785 | { | ||
786 | size_t ret = 0; | ||
787 | u32 entries_printed = 0; | ||
788 | struct rb_node *rb_node; | ||
789 | struct callchain_node *chain; | ||
790 | |||
791 | rb_node = rb_first(self); | ||
792 | while (rb_node) { | ||
793 | double percent; | ||
794 | |||
795 | chain = rb_entry(rb_node, struct callchain_node, rb_node); | ||
796 | percent = chain->hit * 100.0 / total_samples; | ||
797 | |||
798 | ret = percent_color_fprintf(fp, " %6.2f%%\n", percent); | ||
799 | ret += __callchain__fprintf_flat(fp, chain, total_samples); | ||
800 | ret += fprintf(fp, "\n"); | ||
801 | if (++entries_printed == callchain_param.print_limit) | ||
802 | break; | ||
803 | |||
804 | rb_node = rb_next(rb_node); | ||
805 | } | ||
806 | |||
807 | return ret; | ||
808 | } | ||
809 | |||
810 | static size_t hist_entry_callchain__fprintf(struct hist_entry *he, | ||
811 | u64 total_samples, int left_margin, | ||
812 | FILE *fp) | ||
813 | { | ||
814 | switch (callchain_param.mode) { | ||
815 | case CHAIN_GRAPH_REL: | ||
816 | return callchain__fprintf_graph(fp, &he->sorted_chain, he->period, | ||
817 | left_margin); | ||
818 | break; | ||
819 | case CHAIN_GRAPH_ABS: | ||
820 | return callchain__fprintf_graph(fp, &he->sorted_chain, total_samples, | ||
821 | left_margin); | ||
822 | break; | ||
823 | case CHAIN_FLAT: | ||
824 | return callchain__fprintf_flat(fp, &he->sorted_chain, total_samples); | ||
825 | break; | ||
826 | case CHAIN_NONE: | ||
827 | break; | ||
828 | default: | ||
829 | pr_err("Bad callchain mode\n"); | ||
830 | } | ||
831 | |||
832 | return 0; | ||
833 | } | ||
834 | |||
835 | void hists__output_recalc_col_len(struct hists *hists, int max_rows) | ||
836 | { | ||
837 | struct rb_node *next = rb_first(&hists->entries); | ||
838 | struct hist_entry *n; | ||
839 | int row = 0; | ||
840 | |||
841 | hists__reset_col_len(hists); | ||
842 | |||
843 | while (next && row++ < max_rows) { | ||
844 | n = rb_entry(next, struct hist_entry, rb_node); | ||
845 | if (!n->filtered) | ||
846 | hists__calc_col_len(hists, n); | ||
847 | next = rb_next(&n->rb_node); | ||
848 | } | ||
849 | } | ||
850 | |||
851 | static int hist_entry__pcnt_snprintf(struct hist_entry *he, char *s, | ||
852 | size_t size, struct hists *pair_hists, | ||
853 | bool show_displacement, long displacement, | ||
854 | bool color, u64 total_period) | ||
855 | { | ||
856 | u64 period, total, period_sys, period_us, period_guest_sys, period_guest_us; | ||
857 | u64 nr_events; | ||
858 | const char *sep = symbol_conf.field_sep; | ||
859 | int ret; | ||
860 | |||
861 | if (symbol_conf.exclude_other && !he->parent) | ||
862 | return 0; | ||
863 | |||
864 | if (pair_hists) { | ||
865 | period = he->pair ? he->pair->period : 0; | ||
866 | nr_events = he->pair ? he->pair->nr_events : 0; | ||
867 | total = pair_hists->stats.total_period; | ||
868 | period_sys = he->pair ? he->pair->period_sys : 0; | ||
869 | period_us = he->pair ? he->pair->period_us : 0; | ||
870 | period_guest_sys = he->pair ? he->pair->period_guest_sys : 0; | ||
871 | period_guest_us = he->pair ? he->pair->period_guest_us : 0; | ||
872 | } else { | ||
873 | period = he->period; | ||
874 | nr_events = he->nr_events; | ||
875 | total = total_period; | ||
876 | period_sys = he->period_sys; | ||
877 | period_us = he->period_us; | ||
878 | period_guest_sys = he->period_guest_sys; | ||
879 | period_guest_us = he->period_guest_us; | ||
880 | } | ||
881 | |||
882 | if (total) { | ||
883 | if (color) | ||
884 | ret = percent_color_snprintf(s, size, | ||
885 | sep ? "%.2f" : " %6.2f%%", | ||
886 | (period * 100.0) / total); | ||
887 | else | ||
888 | ret = scnprintf(s, size, sep ? "%.2f" : " %6.2f%%", | ||
889 | (period * 100.0) / total); | ||
890 | if (symbol_conf.show_cpu_utilization) { | ||
891 | ret += percent_color_snprintf(s + ret, size - ret, | ||
892 | sep ? "%.2f" : " %6.2f%%", | ||
893 | (period_sys * 100.0) / total); | ||
894 | ret += percent_color_snprintf(s + ret, size - ret, | ||
895 | sep ? "%.2f" : " %6.2f%%", | ||
896 | (period_us * 100.0) / total); | ||
897 | if (perf_guest) { | ||
898 | ret += percent_color_snprintf(s + ret, | ||
899 | size - ret, | ||
900 | sep ? "%.2f" : " %6.2f%%", | ||
901 | (period_guest_sys * 100.0) / | ||
902 | total); | ||
903 | ret += percent_color_snprintf(s + ret, | ||
904 | size - ret, | ||
905 | sep ? "%.2f" : " %6.2f%%", | ||
906 | (period_guest_us * 100.0) / | ||
907 | total); | ||
908 | } | ||
909 | } | ||
910 | } else | ||
911 | ret = scnprintf(s, size, sep ? "%" PRIu64 : "%12" PRIu64 " ", period); | ||
912 | |||
913 | if (symbol_conf.show_nr_samples) { | ||
914 | if (sep) | ||
915 | ret += scnprintf(s + ret, size - ret, "%c%" PRIu64, *sep, nr_events); | ||
916 | else | ||
917 | ret += scnprintf(s + ret, size - ret, "%11" PRIu64, nr_events); | ||
918 | } | ||
919 | |||
920 | if (symbol_conf.show_total_period) { | ||
921 | if (sep) | ||
922 | ret += scnprintf(s + ret, size - ret, "%c%" PRIu64, *sep, period); | ||
923 | else | ||
924 | ret += scnprintf(s + ret, size - ret, " %12" PRIu64, period); | ||
925 | } | ||
926 | |||
927 | if (pair_hists) { | ||
928 | char bf[32]; | ||
929 | double old_percent = 0, new_percent = 0, diff; | ||
930 | |||
931 | if (total > 0) | ||
932 | old_percent = (period * 100.0) / total; | ||
933 | if (total_period > 0) | ||
934 | new_percent = (he->period * 100.0) / total_period; | ||
935 | |||
936 | diff = new_percent - old_percent; | ||
937 | |||
938 | if (fabs(diff) >= 0.01) | ||
939 | scnprintf(bf, sizeof(bf), "%+4.2F%%", diff); | ||
940 | else | ||
941 | scnprintf(bf, sizeof(bf), " "); | ||
942 | |||
943 | if (sep) | ||
944 | ret += scnprintf(s + ret, size - ret, "%c%s", *sep, bf); | ||
945 | else | ||
946 | ret += scnprintf(s + ret, size - ret, "%11.11s", bf); | ||
947 | |||
948 | if (show_displacement) { | ||
949 | if (displacement) | ||
950 | scnprintf(bf, sizeof(bf), "%+4ld", displacement); | ||
951 | else | ||
952 | scnprintf(bf, sizeof(bf), " "); | ||
953 | |||
954 | if (sep) | ||
955 | ret += scnprintf(s + ret, size - ret, "%c%s", *sep, bf); | ||
956 | else | ||
957 | ret += scnprintf(s + ret, size - ret, "%6.6s", bf); | ||
958 | } | ||
959 | } | ||
960 | |||
961 | return ret; | ||
962 | } | ||
963 | |||
964 | int hist_entry__snprintf(struct hist_entry *he, char *s, size_t size, | ||
965 | struct hists *hists) | ||
966 | { | ||
967 | const char *sep = symbol_conf.field_sep; | ||
968 | struct sort_entry *se; | ||
969 | int ret = 0; | ||
970 | |||
971 | list_for_each_entry(se, &hist_entry__sort_list, list) { | ||
972 | if (se->elide) | ||
973 | continue; | ||
974 | |||
975 | ret += scnprintf(s + ret, size - ret, "%s", sep ?: " "); | ||
976 | ret += se->se_snprintf(he, s + ret, size - ret, | ||
977 | hists__col_len(hists, se->se_width_idx)); | ||
978 | } | ||
979 | |||
980 | return ret; | ||
981 | } | ||
982 | |||
983 | static int hist_entry__fprintf(struct hist_entry *he, size_t size, | ||
984 | struct hists *hists, struct hists *pair_hists, | ||
985 | bool show_displacement, long displacement, | ||
986 | u64 total_period, FILE *fp) | ||
987 | { | ||
988 | char bf[512]; | ||
989 | int ret; | ||
990 | |||
991 | if (size == 0 || size > sizeof(bf)) | ||
992 | size = sizeof(bf); | ||
993 | |||
994 | ret = hist_entry__pcnt_snprintf(he, bf, size, pair_hists, | ||
995 | show_displacement, displacement, | ||
996 | true, total_period); | ||
997 | hist_entry__snprintf(he, bf + ret, size - ret, hists); | ||
998 | return fprintf(fp, "%s\n", bf); | ||
999 | } | ||
1000 | |||
1001 | static size_t hist_entry__fprintf_callchain(struct hist_entry *he, | ||
1002 | struct hists *hists, | ||
1003 | u64 total_period, FILE *fp) | ||
1004 | { | ||
1005 | int left_margin = 0; | ||
1006 | |||
1007 | if (sort__first_dimension == SORT_COMM) { | ||
1008 | struct sort_entry *se = list_first_entry(&hist_entry__sort_list, | ||
1009 | typeof(*se), list); | ||
1010 | left_margin = hists__col_len(hists, se->se_width_idx); | ||
1011 | left_margin -= thread__comm_len(he->thread); | ||
1012 | } | ||
1013 | |||
1014 | return hist_entry_callchain__fprintf(he, total_period, left_margin, fp); | ||
1015 | } | ||
1016 | |||
1017 | size_t hists__fprintf(struct hists *hists, struct hists *pair, | ||
1018 | bool show_displacement, bool show_header, int max_rows, | ||
1019 | int max_cols, FILE *fp) | ||
1020 | { | ||
1021 | struct sort_entry *se; | ||
1022 | struct rb_node *nd; | ||
1023 | size_t ret = 0; | ||
1024 | u64 total_period; | ||
1025 | unsigned long position = 1; | ||
1026 | long displacement = 0; | ||
1027 | unsigned int width; | ||
1028 | const char *sep = symbol_conf.field_sep; | ||
1029 | const char *col_width = symbol_conf.col_width_list_str; | ||
1030 | int nr_rows = 0; | ||
1031 | |||
1032 | init_rem_hits(); | ||
1033 | |||
1034 | if (!show_header) | ||
1035 | goto print_entries; | ||
1036 | |||
1037 | fprintf(fp, "# %s", pair ? "Baseline" : "Overhead"); | ||
1038 | |||
1039 | if (symbol_conf.show_cpu_utilization) { | ||
1040 | if (sep) { | ||
1041 | ret += fprintf(fp, "%csys", *sep); | ||
1042 | ret += fprintf(fp, "%cus", *sep); | ||
1043 | if (perf_guest) { | ||
1044 | ret += fprintf(fp, "%cguest sys", *sep); | ||
1045 | ret += fprintf(fp, "%cguest us", *sep); | ||
1046 | } | ||
1047 | } else { | ||
1048 | ret += fprintf(fp, " sys "); | ||
1049 | ret += fprintf(fp, " us "); | ||
1050 | if (perf_guest) { | ||
1051 | ret += fprintf(fp, " guest sys "); | ||
1052 | ret += fprintf(fp, " guest us "); | ||
1053 | } | ||
1054 | } | ||
1055 | } | ||
1056 | |||
1057 | if (symbol_conf.show_nr_samples) { | ||
1058 | if (sep) | ||
1059 | fprintf(fp, "%cSamples", *sep); | ||
1060 | else | ||
1061 | fputs(" Samples ", fp); | ||
1062 | } | ||
1063 | |||
1064 | if (symbol_conf.show_total_period) { | ||
1065 | if (sep) | ||
1066 | ret += fprintf(fp, "%cPeriod", *sep); | ||
1067 | else | ||
1068 | ret += fprintf(fp, " Period "); | ||
1069 | } | ||
1070 | |||
1071 | if (pair) { | ||
1072 | if (sep) | ||
1073 | ret += fprintf(fp, "%cDelta", *sep); | ||
1074 | else | ||
1075 | ret += fprintf(fp, " Delta "); | ||
1076 | |||
1077 | if (show_displacement) { | ||
1078 | if (sep) | ||
1079 | ret += fprintf(fp, "%cDisplacement", *sep); | ||
1080 | else | ||
1081 | ret += fprintf(fp, " Displ"); | ||
1082 | } | ||
1083 | } | ||
1084 | |||
1085 | list_for_each_entry(se, &hist_entry__sort_list, list) { | ||
1086 | if (se->elide) | ||
1087 | continue; | ||
1088 | if (sep) { | ||
1089 | fprintf(fp, "%c%s", *sep, se->se_header); | ||
1090 | continue; | ||
1091 | } | ||
1092 | width = strlen(se->se_header); | ||
1093 | if (symbol_conf.col_width_list_str) { | ||
1094 | if (col_width) { | ||
1095 | hists__set_col_len(hists, se->se_width_idx, | ||
1096 | atoi(col_width)); | ||
1097 | col_width = strchr(col_width, ','); | ||
1098 | if (col_width) | ||
1099 | ++col_width; | ||
1100 | } | ||
1101 | } | ||
1102 | if (!hists__new_col_len(hists, se->se_width_idx, width)) | ||
1103 | width = hists__col_len(hists, se->se_width_idx); | ||
1104 | fprintf(fp, " %*s", width, se->se_header); | ||
1105 | } | ||
1106 | |||
1107 | fprintf(fp, "\n"); | ||
1108 | if (max_rows && ++nr_rows >= max_rows) | ||
1109 | goto out; | ||
1110 | |||
1111 | if (sep) | ||
1112 | goto print_entries; | ||
1113 | |||
1114 | fprintf(fp, "# ........"); | ||
1115 | if (symbol_conf.show_cpu_utilization) | ||
1116 | fprintf(fp, " ....... ......."); | ||
1117 | if (symbol_conf.show_nr_samples) | ||
1118 | fprintf(fp, " .........."); | ||
1119 | if (symbol_conf.show_total_period) | ||
1120 | fprintf(fp, " ............"); | ||
1121 | if (pair) { | ||
1122 | fprintf(fp, " .........."); | ||
1123 | if (show_displacement) | ||
1124 | fprintf(fp, " ....."); | ||
1125 | } | ||
1126 | list_for_each_entry(se, &hist_entry__sort_list, list) { | ||
1127 | unsigned int i; | ||
1128 | |||
1129 | if (se->elide) | ||
1130 | continue; | ||
1131 | |||
1132 | fprintf(fp, " "); | ||
1133 | width = hists__col_len(hists, se->se_width_idx); | ||
1134 | if (width == 0) | ||
1135 | width = strlen(se->se_header); | ||
1136 | for (i = 0; i < width; i++) | ||
1137 | fprintf(fp, "."); | ||
1138 | } | ||
1139 | |||
1140 | fprintf(fp, "\n"); | ||
1141 | if (max_rows && ++nr_rows >= max_rows) | ||
1142 | goto out; | ||
1143 | |||
1144 | fprintf(fp, "#\n"); | ||
1145 | if (max_rows && ++nr_rows >= max_rows) | ||
1146 | goto out; | ||
1147 | |||
1148 | print_entries: | ||
1149 | total_period = hists->stats.total_period; | ||
1150 | |||
1151 | for (nd = rb_first(&hists->entries); nd; nd = rb_next(nd)) { | ||
1152 | struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node); | ||
1153 | |||
1154 | if (h->filtered) | ||
1155 | continue; | ||
1156 | |||
1157 | if (show_displacement) { | ||
1158 | if (h->pair != NULL) | ||
1159 | displacement = ((long)h->pair->position - | ||
1160 | (long)position); | ||
1161 | else | ||
1162 | displacement = 0; | ||
1163 | ++position; | ||
1164 | } | ||
1165 | ret += hist_entry__fprintf(h, max_cols, hists, pair, show_displacement, | ||
1166 | displacement, total_period, fp); | ||
1167 | |||
1168 | if (symbol_conf.use_callchain) | ||
1169 | ret += hist_entry__fprintf_callchain(h, hists, total_period, fp); | ||
1170 | if (max_rows && ++nr_rows >= max_rows) | ||
1171 | goto out; | ||
1172 | |||
1173 | if (h->ms.map == NULL && verbose > 1) { | ||
1174 | __map_groups__fprintf_maps(&h->thread->mg, | ||
1175 | MAP__FUNCTION, verbose, fp); | ||
1176 | fprintf(fp, "%.10s end\n", graph_dotted_line); | ||
1177 | } | ||
1178 | } | ||
1179 | out: | ||
1180 | free(rem_sq_bracket); | ||
1181 | |||
1182 | return ret; | ||
1183 | } | ||
1184 | |||
1185 | /* | ||
1186 | * See hists__fprintf to match the column widths | ||
1187 | */ | ||
1188 | unsigned int hists__sort_list_width(struct hists *hists) | ||
1189 | { | ||
1190 | struct sort_entry *se; | ||
1191 | int ret = 9; /* total % */ | ||
1192 | |||
1193 | if (symbol_conf.show_cpu_utilization) { | ||
1194 | ret += 7; /* count_sys % */ | ||
1195 | ret += 6; /* count_us % */ | ||
1196 | if (perf_guest) { | ||
1197 | ret += 13; /* count_guest_sys % */ | ||
1198 | ret += 12; /* count_guest_us % */ | ||
1199 | } | ||
1200 | } | ||
1201 | |||
1202 | if (symbol_conf.show_nr_samples) | ||
1203 | ret += 11; | ||
1204 | |||
1205 | if (symbol_conf.show_total_period) | ||
1206 | ret += 13; | ||
1207 | |||
1208 | list_for_each_entry(se, &hist_entry__sort_list, list) | ||
1209 | if (!se->elide) | ||
1210 | ret += 2 + hists__col_len(hists, se->se_width_idx); | ||
1211 | |||
1212 | if (verbose) /* Addr + origin */ | ||
1213 | ret += 3 + BITS_PER_LONG / 4; | ||
1214 | |||
1215 | return ret; | ||
1216 | } | ||
1217 | |||
1218 | static void hists__remove_entry_filter(struct hists *hists, struct hist_entry *h, | 571 | static void hists__remove_entry_filter(struct hists *hists, struct hist_entry *h, |
1219 | enum hist_filter filter) | 572 | enum hist_filter filter) |
1220 | { | 573 | { |
@@ -1342,25 +695,3 @@ void hists__inc_nr_events(struct hists *hists, u32 type) | |||
1342 | ++hists->stats.nr_events[0]; | 695 | ++hists->stats.nr_events[0]; |
1343 | ++hists->stats.nr_events[type]; | 696 | ++hists->stats.nr_events[type]; |
1344 | } | 697 | } |
1345 | |||
1346 | size_t hists__fprintf_nr_events(struct hists *hists, FILE *fp) | ||
1347 | { | ||
1348 | int i; | ||
1349 | size_t ret = 0; | ||
1350 | |||
1351 | for (i = 0; i < PERF_RECORD_HEADER_MAX; ++i) { | ||
1352 | const char *name; | ||
1353 | |||
1354 | if (hists->stats.nr_events[i] == 0) | ||
1355 | continue; | ||
1356 | |||
1357 | name = perf_event__name(i); | ||
1358 | if (!strcmp(name, "UNKNOWN")) | ||
1359 | continue; | ||
1360 | |||
1361 | ret += fprintf(fp, "%16s events: %10d\n", name, | ||
1362 | hists->stats.nr_events[i]); | ||
1363 | } | ||
1364 | |||
1365 | return ret; | ||
1366 | } | ||
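As a quick arithmetic check of hists__sort_list_width() above: with hypothetical column widths of 16 for a comm sort key and 30 for a symbol sort key (made-up values, not taken from this diff), no optional columns and verbose off, the reserved width comes out as in this sketch:

static unsigned int example_sort_list_width(void)
{
	return 9          /* "total %" column                     */
	     + (2 + 16)   /* two spaces of padding + comm width   */
	     + (2 + 30);  /* two spaces of padding + symbol width */
}                         /* == 59 terminal columns */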
diff --git a/tools/perf/util/hist.h b/tools/perf/util/hist.h index 0b096c27a419..f011ad4756e8 100644 --- a/tools/perf/util/hist.h +++ b/tools/perf/util/hist.h | |||
@@ -75,8 +75,8 @@ struct hist_entry *__hists__add_entry(struct hists *self, | |||
75 | struct symbol *parent, u64 period); | 75 | struct symbol *parent, u64 period); |
76 | int64_t hist_entry__cmp(struct hist_entry *left, struct hist_entry *right); | 76 | int64_t hist_entry__cmp(struct hist_entry *left, struct hist_entry *right); |
77 | int64_t hist_entry__collapse(struct hist_entry *left, struct hist_entry *right); | 77 | int64_t hist_entry__collapse(struct hist_entry *left, struct hist_entry *right); |
78 | int hist_entry__snprintf(struct hist_entry *self, char *bf, size_t size, | 78 | int hist_entry__sort_snprintf(struct hist_entry *self, char *bf, size_t size, |
79 | struct hists *hists); | 79 | struct hists *hists); |
80 | void hist_entry__free(struct hist_entry *); | 80 | void hist_entry__free(struct hist_entry *); |
81 | 81 | ||
82 | struct hist_entry *__hists__add_branch_entry(struct hists *self, | 82 | struct hist_entry *__hists__add_branch_entry(struct hists *self, |
@@ -112,25 +112,66 @@ void hists__filter_by_symbol(struct hists *hists); | |||
112 | u16 hists__col_len(struct hists *self, enum hist_column col); | 112 | u16 hists__col_len(struct hists *self, enum hist_column col); |
113 | void hists__set_col_len(struct hists *self, enum hist_column col, u16 len); | 113 | void hists__set_col_len(struct hists *self, enum hist_column col, u16 len); |
114 | bool hists__new_col_len(struct hists *self, enum hist_column col, u16 len); | 114 | bool hists__new_col_len(struct hists *self, enum hist_column col, u16 len); |
115 | void hists__reset_col_len(struct hists *hists); | ||
116 | void hists__calc_col_len(struct hists *hists, struct hist_entry *he); | ||
117 | |||
118 | struct perf_hpp { | ||
119 | char *buf; | ||
120 | size_t size; | ||
121 | u64 total_period; | ||
122 | const char *sep; | ||
123 | long displacement; | ||
124 | void *ptr; | ||
125 | }; | ||
126 | |||
127 | struct perf_hpp_fmt { | ||
128 | bool cond; | ||
129 | int (*header)(struct perf_hpp *hpp); | ||
130 | int (*width)(struct perf_hpp *hpp); | ||
131 | int (*color)(struct perf_hpp *hpp, struct hist_entry *he); | ||
132 | int (*entry)(struct perf_hpp *hpp, struct hist_entry *he); | ||
133 | }; | ||
134 | |||
135 | extern struct perf_hpp_fmt perf_hpp__format[]; | ||
136 | |||
137 | enum { | ||
138 | PERF_HPP__OVERHEAD, | ||
139 | PERF_HPP__OVERHEAD_SYS, | ||
140 | PERF_HPP__OVERHEAD_US, | ||
141 | PERF_HPP__OVERHEAD_GUEST_SYS, | ||
142 | PERF_HPP__OVERHEAD_GUEST_US, | ||
143 | PERF_HPP__SAMPLES, | ||
144 | PERF_HPP__PERIOD, | ||
145 | PERF_HPP__DELTA, | ||
146 | PERF_HPP__DISPL, | ||
147 | |||
148 | PERF_HPP__MAX_INDEX | ||
149 | }; | ||
150 | |||
151 | void perf_hpp__init(bool need_pair, bool show_displacement); | ||
152 | int hist_entry__period_snprintf(struct perf_hpp *hpp, struct hist_entry *he, | ||
153 | bool color); | ||
115 | 154 | ||
116 | struct perf_evlist; | 155 | struct perf_evlist; |
117 | 156 | ||
118 | #ifdef NO_NEWT_SUPPORT | 157 | #ifdef NO_NEWT_SUPPORT |
119 | static inline | 158 | static inline |
120 | int perf_evlist__tui_browse_hists(struct perf_evlist *evlist __used, | 159 | int perf_evlist__tui_browse_hists(struct perf_evlist *evlist __maybe_unused, |
121 | const char *help __used, | 160 | const char *help __maybe_unused, |
122 | void(*timer)(void *arg) __used, | 161 | void(*timer)(void *arg) __maybe_unused, |
123 | void *arg __used, | 162 | void *arg __maybe_unused, |
124 | int refresh __used) | 163 | int refresh __maybe_unused) |
125 | { | 164 | { |
126 | return 0; | 165 | return 0; |
127 | } | 166 | } |
128 | 167 | ||
129 | static inline int hist_entry__tui_annotate(struct hist_entry *self __used, | 168 | static inline int hist_entry__tui_annotate(struct hist_entry *self |
130 | int evidx __used, | 169 | __maybe_unused, |
131 | void(*timer)(void *arg) __used, | 170 | int evidx __maybe_unused, |
132 | void *arg __used, | 171 | void(*timer)(void *arg) |
133 | int delay_secs __used) | 172 | __maybe_unused, |
173 | void *arg __maybe_unused, | ||
174 | int delay_secs __maybe_unused) | ||
134 | { | 175 | { |
135 | return 0; | 176 | return 0; |
136 | } | 177 | } |
@@ -148,11 +189,11 @@ int perf_evlist__tui_browse_hists(struct perf_evlist *evlist, const char *help, | |||
148 | 189 | ||
149 | #ifdef NO_GTK2_SUPPORT | 190 | #ifdef NO_GTK2_SUPPORT |
150 | static inline | 191 | static inline |
151 | int perf_evlist__gtk_browse_hists(struct perf_evlist *evlist __used, | 192 | int perf_evlist__gtk_browse_hists(struct perf_evlist *evlist __maybe_unused, |
152 | const char *help __used, | 193 | const char *help __maybe_unused, |
153 | void(*timer)(void *arg) __used, | 194 | void(*timer)(void *arg) __maybe_unused, |
154 | void *arg __used, | 195 | void *arg __maybe_unused, |
155 | int refresh __used) | 196 | int refresh __maybe_unused) |
156 | { | 197 | { |
157 | return 0; | 198 | return 0; |
158 | } | 199 | } |
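The new struct perf_hpp / struct perf_hpp_fmt pair turns every output column into a small vtable of header/width/color/entry callbacks selected through the PERF_HPP__* indexes. The standalone sketch below only illustrates the callback shape: struct hist_entry is reduced to a single period field and the function names are invented for the example, not taken from the perf sources.

#include <stdio.h>

struct hist_entry { unsigned long long period; };  /* drastically simplified stand-in */

struct perf_hpp {
	char *buf;
	size_t size;
	unsigned long long total_period;
	const char *sep;
	long displacement;
	void *ptr;
};

/* 'entry' callback: format one cell of an overhead-style column into hpp->buf */
static int hpp_entry_overhead(struct perf_hpp *hpp, struct hist_entry *he)
{
	double percent = hpp->total_period ?
			 100.0 * he->period / hpp->total_period : 0.0;
	return snprintf(hpp->buf, hpp->size, "%6.2f%%", percent);
}

int main(void)
{
	char cell[32];
	struct perf_hpp hpp = { .buf = cell, .size = sizeof(cell), .total_period = 2000 };
	struct hist_entry he = { .period = 500 };

	hpp_entry_overhead(&hpp, &he);
	printf("%s\n", cell);  /* " 25.00%" */
	return 0;
}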
diff --git a/tools/perf/util/include/linux/bitops.h b/tools/perf/util/include/linux/bitops.h index 587a230d2075..a55d8cf083c9 100644 --- a/tools/perf/util/include/linux/bitops.h +++ b/tools/perf/util/include/linux/bitops.h | |||
@@ -5,6 +5,10 @@ | |||
5 | #include <linux/compiler.h> | 5 | #include <linux/compiler.h> |
6 | #include <asm/hweight.h> | 6 | #include <asm/hweight.h> |
7 | 7 | ||
8 | #ifndef __WORDSIZE | ||
9 | #define __WORDSIZE (__SIZEOF_LONG__ * 8) | ||
10 | #endif | ||
11 | |||
8 | #define BITS_PER_LONG __WORDSIZE | 12 | #define BITS_PER_LONG __WORDSIZE |
9 | #define BITS_PER_BYTE 8 | 13 | #define BITS_PER_BYTE 8 |
10 | #define BITS_TO_LONGS(nr) DIV_ROUND_UP(nr, BITS_PER_BYTE * sizeof(long)) | 14 | #define BITS_TO_LONGS(nr) DIV_ROUND_UP(nr, BITS_PER_BYTE * sizeof(long)) |
diff --git a/tools/perf/util/include/linux/compiler.h b/tools/perf/util/include/linux/compiler.h index 547628e97f3d..96b919dae11c 100644 --- a/tools/perf/util/include/linux/compiler.h +++ b/tools/perf/util/include/linux/compiler.h | |||
@@ -9,6 +9,13 @@ | |||
9 | #define __attribute_const__ | 9 | #define __attribute_const__ |
10 | #endif | 10 | #endif |
11 | 11 | ||
12 | #define __used __attribute__((__unused__)) | 12 | #ifndef __maybe_unused |
13 | #define __maybe_unused __attribute__((unused)) | ||
14 | #endif | ||
15 | #define __packed __attribute__((__packed__)) | ||
16 | |||
17 | #ifndef __force | ||
18 | #define __force | ||
19 | #endif | ||
13 | 20 | ||
14 | #endif | 21 | #endif |
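With __used dropped in favour of __maybe_unused, the annotation now says what it means: the parameter may go unused in some build configurations without triggering -Wunused-parameter. A small illustration (the stub name and signature are made up for the example):

#include <stdio.h>

#ifndef __maybe_unused
#define __maybe_unused __attribute__((unused))
#endif

/* Compiles cleanly with -Wall -Wextra even though 'timer' is never touched
 * in this configuration. */
static int browse_hists_stub(const char *help, void (*timer)(void *) __maybe_unused)
{
	printf("%s\n", help);
	return 0;
}

int main(void)
{
	return browse_hists_stub("no TUI support compiled in", NULL);
}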
diff --git a/tools/perf/util/include/linux/kernel.h b/tools/perf/util/include/linux/kernel.h index b6842c1d02a8..d8c927c868ee 100644 --- a/tools/perf/util/include/linux/kernel.h +++ b/tools/perf/util/include/linux/kernel.h | |||
@@ -8,8 +8,8 @@ | |||
8 | 8 | ||
9 | #define DIV_ROUND_UP(n,d) (((n) + (d) - 1) / (d)) | 9 | #define DIV_ROUND_UP(n,d) (((n) + (d) - 1) / (d)) |
10 | 10 | ||
11 | #define ALIGN(x,a) __ALIGN_MASK(x,(typeof(x))(a)-1) | 11 | #define PERF_ALIGN(x, a) __PERF_ALIGN_MASK(x, (typeof(x))(a)-1) |
12 | #define __ALIGN_MASK(x,mask) (((x)+(mask))&~(mask)) | 12 | #define __PERF_ALIGN_MASK(x, mask) (((x)+(mask))&~(mask)) |
13 | 13 | ||
14 | #ifndef offsetof | 14 | #ifndef offsetof |
15 | #define offsetof(TYPE, MEMBER) ((size_t) &((TYPE *)0)->MEMBER) | 15 | #define offsetof(TYPE, MEMBER) ((size_t) &((TYPE *)0)->MEMBER) |
@@ -46,9 +46,22 @@ | |||
46 | _min1 < _min2 ? _min1 : _min2; }) | 46 | _min1 < _min2 ? _min1 : _min2; }) |
47 | #endif | 47 | #endif |
48 | 48 | ||
49 | #ifndef roundup | ||
50 | #define roundup(x, y) ( \ | ||
51 | { \ | ||
52 | const typeof(y) __y = y; \ | ||
53 | (((x) + (__y - 1)) / __y) * __y; \ | ||
54 | } \ | ||
55 | ) | ||
56 | #endif | ||
57 | |||
49 | #ifndef BUG_ON | 58 | #ifndef BUG_ON |
59 | #ifdef NDEBUG | ||
60 | #define BUG_ON(cond) do { if (cond) {} } while (0) | ||
61 | #else | ||
50 | #define BUG_ON(cond) assert(!(cond)) | 62 | #define BUG_ON(cond) assert(!(cond)) |
51 | #endif | 63 | #endif |
64 | #endif | ||
52 | 65 | ||
53 | /* | 66 | /* |
54 | * Both need more care to handle endianness | 67 | * Both need more care to handle endianness |
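roundup() rounds up via integer division while PERF_ALIGN() does it with a mask, so the two agree whenever the alignment is a power of two, and roundup() additionally copes with arbitrary step sizes. A quick check with made-up values:

#include <stdio.h>

#define PERF_ALIGN(x, a)           __PERF_ALIGN_MASK(x, (typeof(x))(a)-1)
#define __PERF_ALIGN_MASK(x, mask) (((x)+(mask))&~(mask))

#define roundup(x, y) ({ const typeof(y) __y = (y); (((x) + (__y - 1)) / __y) * __y; })

int main(void)
{
	/* Both round 10 up to the next multiple of 8. */
	printf("%lu %lu\n", roundup(10UL, 8UL), PERF_ALIGN(10UL, 8UL));  /* 16 16 */

	/* roundup() also handles a non-power-of-two step. */
	printf("%lu\n", roundup(50UL, 24UL));  /* 72 */
	return 0;
}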
diff --git a/tools/perf/util/include/linux/magic.h b/tools/perf/util/include/linux/magic.h new file mode 100644 index 000000000000..58b64ed4da12 --- /dev/null +++ b/tools/perf/util/include/linux/magic.h | |||
@@ -0,0 +1,12 @@ | |||
1 | #ifndef _PERF_LINUX_MAGIC_H_ | ||
2 | #define _PERF_LINUX_MAGIC_H_ | ||
3 | |||
4 | #ifndef DEBUGFS_MAGIC | ||
5 | #define DEBUGFS_MAGIC 0x64626720 | ||
6 | #endif | ||
7 | |||
8 | #ifndef SYSFS_MAGIC | ||
9 | #define SYSFS_MAGIC 0x62656572 | ||
10 | #endif | ||
11 | |||
12 | #endif | ||
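These magic numbers let perf confirm that a given path really is a debugfs or sysfs mount before relying on it. A hedged sketch of that kind of check via statfs(2); the mount point path is just the conventional location, not something this header dictates:

#include <stdio.h>
#include <sys/vfs.h>

#ifndef DEBUGFS_MAGIC
#define DEBUGFS_MAGIC 0x64626720
#endif

int main(void)
{
	struct statfs st;
	const char *path = "/sys/kernel/debug";  /* conventional debugfs mount point */

	if (statfs(path, &st) == 0 && st.f_type == DEBUGFS_MAGIC)
		printf("%s is a debugfs mount\n", path);
	else
		printf("%s is not (or not readable as) debugfs\n", path);
	return 0;
}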
diff --git a/tools/perf/util/include/linux/rbtree.h b/tools/perf/util/include/linux/rbtree.h index 7a243a143037..2a030c5af3aa 100644 --- a/tools/perf/util/include/linux/rbtree.h +++ b/tools/perf/util/include/linux/rbtree.h | |||
@@ -1 +1,2 @@ | |||
1 | #include <stdbool.h> | ||
1 | #include "../../../../include/linux/rbtree.h" | 2 | #include "../../../../include/linux/rbtree.h" |
diff --git a/tools/perf/util/include/linux/string.h b/tools/perf/util/include/linux/string.h index 3b2f5900276f..6f19c548ecc0 100644 --- a/tools/perf/util/include/linux/string.h +++ b/tools/perf/util/include/linux/string.h | |||
@@ -1 +1,3 @@ | |||
1 | #include <string.h> | 1 | #include <string.h> |
2 | |||
3 | void *memdup(const void *src, size_t len); | ||
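Only the prototype is added here; the definition lives elsewhere in the tree, but a typical memdup() is nothing more than malloc plus memcpy, roughly like this sketch (not necessarily the exact perf implementation):

#include <stdlib.h>
#include <string.h>

void *memdup(const void *src, size_t len)
{
	void *p = malloc(len);

	if (p)
		memcpy(p, src, len);
	return p;  /* caller frees; NULL on allocation failure */
}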
diff --git a/tools/perf/util/include/linux/types.h b/tools/perf/util/include/linux/types.h index 12de3b8112f9..eb464786c084 100644 --- a/tools/perf/util/include/linux/types.h +++ b/tools/perf/util/include/linux/types.h | |||
@@ -3,6 +3,14 @@ | |||
3 | 3 | ||
4 | #include <asm/types.h> | 4 | #include <asm/types.h> |
5 | 5 | ||
6 | #ifndef __bitwise | ||
7 | #define __bitwise | ||
8 | #endif | ||
9 | |||
10 | #ifndef __le32 | ||
11 | typedef __u32 __bitwise __le32; | ||
12 | #endif | ||
13 | |||
6 | #define DECLARE_BITMAP(name,bits) \ | 14 | #define DECLARE_BITMAP(name,bits) \ |
7 | unsigned long name[BITS_TO_LONGS(bits)] | 15 | unsigned long name[BITS_TO_LONGS(bits)] |
8 | 16 | ||
diff --git a/tools/perf/util/intlist.c b/tools/perf/util/intlist.c index fd530dced9cb..9d0740024ba8 100644 --- a/tools/perf/util/intlist.c +++ b/tools/perf/util/intlist.c | |||
@@ -11,7 +11,7 @@ | |||
11 | 11 | ||
12 | #include "intlist.h" | 12 | #include "intlist.h" |
13 | 13 | ||
14 | static struct rb_node *intlist__node_new(struct rblist *rblist __used, | 14 | static struct rb_node *intlist__node_new(struct rblist *rblist __maybe_unused, |
15 | const void *entry) | 15 | const void *entry) |
16 | { | 16 | { |
17 | int i = (int)((long)entry); | 17 | int i = (int)((long)entry); |
@@ -31,7 +31,7 @@ static void int_node__delete(struct int_node *ilist) | |||
31 | free(ilist); | 31 | free(ilist); |
32 | } | 32 | } |
33 | 33 | ||
34 | static void intlist__node_delete(struct rblist *rblist __used, | 34 | static void intlist__node_delete(struct rblist *rblist __maybe_unused, |
35 | struct rb_node *rb_node) | 35 | struct rb_node *rb_node) |
36 | { | 36 | { |
37 | struct int_node *node = container_of(rb_node, struct int_node, rb_node); | 37 | struct int_node *node = container_of(rb_node, struct int_node, rb_node); |
@@ -52,9 +52,9 @@ int intlist__add(struct intlist *ilist, int i) | |||
52 | return rblist__add_node(&ilist->rblist, (void *)((long)i)); | 52 | return rblist__add_node(&ilist->rblist, (void *)((long)i)); |
53 | } | 53 | } |
54 | 54 | ||
55 | void intlist__remove(struct intlist *ilist __used, struct int_node *node) | 55 | void intlist__remove(struct intlist *ilist, struct int_node *node) |
56 | { | 56 | { |
57 | int_node__delete(node); | 57 | rblist__remove_node(&ilist->rblist, &node->rb_node); |
58 | } | 58 | } |
59 | 59 | ||
60 | struct int_node *intlist__find(struct intlist *ilist, int i) | 60 | struct int_node *intlist__find(struct intlist *ilist, int i) |
diff --git a/tools/perf/util/map.c b/tools/perf/util/map.c index cc33486ad9e2..ead5316b3f89 100644 --- a/tools/perf/util/map.c +++ b/tools/perf/util/map.c | |||
@@ -9,6 +9,7 @@ | |||
9 | #include "map.h" | 9 | #include "map.h" |
10 | #include "thread.h" | 10 | #include "thread.h" |
11 | #include "strlist.h" | 11 | #include "strlist.h" |
12 | #include "vdso.h" | ||
12 | 13 | ||
13 | const char *map_type__name[MAP__NR_TYPES] = { | 14 | const char *map_type__name[MAP__NR_TYPES] = { |
14 | [MAP__FUNCTION] = "Functions", | 15 | [MAP__FUNCTION] = "Functions", |
@@ -23,7 +24,6 @@ static inline int is_anon_memory(const char *filename) | |||
23 | static inline int is_no_dso_memory(const char *filename) | 24 | static inline int is_no_dso_memory(const char *filename) |
24 | { | 25 | { |
25 | return !strcmp(filename, "[stack]") || | 26 | return !strcmp(filename, "[stack]") || |
26 | !strcmp(filename, "[vdso]") || | ||
27 | !strcmp(filename, "[heap]"); | 27 | !strcmp(filename, "[heap]"); |
28 | } | 28 | } |
29 | 29 | ||
@@ -52,9 +52,10 @@ struct map *map__new(struct list_head *dsos__list, u64 start, u64 len, | |||
52 | if (self != NULL) { | 52 | if (self != NULL) { |
53 | char newfilename[PATH_MAX]; | 53 | char newfilename[PATH_MAX]; |
54 | struct dso *dso; | 54 | struct dso *dso; |
55 | int anon, no_dso; | 55 | int anon, no_dso, vdso; |
56 | 56 | ||
57 | anon = is_anon_memory(filename); | 57 | anon = is_anon_memory(filename); |
58 | vdso = is_vdso_map(filename); | ||
58 | no_dso = is_no_dso_memory(filename); | 59 | no_dso = is_no_dso_memory(filename); |
59 | 60 | ||
60 | if (anon) { | 61 | if (anon) { |
@@ -62,7 +63,12 @@ struct map *map__new(struct list_head *dsos__list, u64 start, u64 len, | |||
62 | filename = newfilename; | 63 | filename = newfilename; |
63 | } | 64 | } |
64 | 65 | ||
65 | dso = __dsos__findnew(dsos__list, filename); | 66 | if (vdso) { |
67 | pgoff = 0; | ||
68 | dso = vdso__dso_findnew(dsos__list); | ||
69 | } else | ||
70 | dso = __dsos__findnew(dsos__list, filename); | ||
71 | |||
66 | if (dso == NULL) | 72 | if (dso == NULL) |
67 | goto out_delete; | 73 | goto out_delete; |
68 | 74 | ||
@@ -86,6 +92,25 @@ out_delete: | |||
86 | return NULL; | 92 | return NULL; |
87 | } | 93 | } |
88 | 94 | ||
95 | /* | ||
96 | * Constructor variant for modules (where we know from /proc/modules where | ||
97 | * they are loaded) and for vmlinux, where only after we load all the | ||
98 | * symbols we'll know where it starts and ends. | ||
99 | */ | ||
100 | struct map *map__new2(u64 start, struct dso *dso, enum map_type type) | ||
101 | { | ||
102 | struct map *map = calloc(1, (sizeof(*map) + | ||
103 | (dso->kernel ? sizeof(struct kmap) : 0))); | ||
104 | if (map != NULL) { | ||
105 | /* | ||
106 | * ->end will be filled after we load all the symbols | ||
107 | */ | ||
108 | map__init(map, type, start, 0, 0, dso); | ||
109 | } | ||
110 | |||
111 | return map; | ||
112 | } | ||
113 | |||
89 | void map__delete(struct map *self) | 114 | void map__delete(struct map *self) |
90 | { | 115 | { |
91 | free(self); | 116 | free(self); |
@@ -137,6 +162,7 @@ int map__load(struct map *self, symbol_filter_t filter) | |||
137 | pr_warning(", continuing without symbols\n"); | 162 | pr_warning(", continuing without symbols\n"); |
138 | return -1; | 163 | return -1; |
139 | } else if (nr == 0) { | 164 | } else if (nr == 0) { |
165 | #ifndef NO_LIBELF_SUPPORT | ||
140 | const size_t len = strlen(name); | 166 | const size_t len = strlen(name); |
141 | const size_t real_len = len - sizeof(DSO__DELETED); | 167 | const size_t real_len = len - sizeof(DSO__DELETED); |
142 | 168 | ||
@@ -149,7 +175,7 @@ int map__load(struct map *self, symbol_filter_t filter) | |||
149 | pr_warning("no symbols found in %s, maybe install " | 175 | pr_warning("no symbols found in %s, maybe install " |
150 | "a debug package?\n", name); | 176 | "a debug package?\n", name); |
151 | } | 177 | } |
152 | 178 | #endif | |
153 | return -1; | 179 | return -1; |
154 | } | 180 | } |
155 | /* | 181 | /* |
@@ -217,15 +243,14 @@ size_t map__fprintf(struct map *self, FILE *fp) | |||
217 | 243 | ||
218 | size_t map__fprintf_dsoname(struct map *map, FILE *fp) | 244 | size_t map__fprintf_dsoname(struct map *map, FILE *fp) |
219 | { | 245 | { |
220 | const char *dsoname; | 246 | const char *dsoname = "[unknown]"; |
221 | 247 | ||
222 | if (map && map->dso && (map->dso->name || map->dso->long_name)) { | 248 | if (map && map->dso && (map->dso->name || map->dso->long_name)) { |
223 | if (symbol_conf.show_kernel_path && map->dso->long_name) | 249 | if (symbol_conf.show_kernel_path && map->dso->long_name) |
224 | dsoname = map->dso->long_name; | 250 | dsoname = map->dso->long_name; |
225 | else if (map->dso->name) | 251 | else if (map->dso->name) |
226 | dsoname = map->dso->name; | 252 | dsoname = map->dso->name; |
227 | } else | 253 | } |
228 | dsoname = "[unknown]"; | ||
229 | 254 | ||
230 | return fprintf(fp, "%s", dsoname); | 255 | return fprintf(fp, "%s", dsoname); |
231 | } | 256 | } |
@@ -242,14 +267,6 @@ u64 map__rip_2objdump(struct map *map, u64 rip) | |||
242 | return addr; | 267 | return addr; |
243 | } | 268 | } |
244 | 269 | ||
245 | u64 map__objdump_2ip(struct map *map, u64 addr) | ||
246 | { | ||
247 | u64 ip = map->dso->adjust_symbols ? | ||
248 | addr : | ||
249 | map->unmap_ip(map, addr); /* RIP -> IP */ | ||
250 | return ip; | ||
251 | } | ||
252 | |||
253 | void map_groups__init(struct map_groups *mg) | 270 | void map_groups__init(struct map_groups *mg) |
254 | { | 271 | { |
255 | int i; | 272 | int i; |
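In the map__new() hunk above, "[vdso]" mappings are now detected up front, get pgoff forced to 0 and are resolved through vdso__dso_findnew() instead of the generic DSO lookup. is_vdso_map() itself is not shown in this diff; a plausible shape for such a predicate, assumed here purely for illustration, is a plain name comparison:

#include <string.h>

/* Assumed helper: a vdso map is recognised purely by its special name. */
static int is_vdso_map(const char *filename)
{
	return !strcmp(filename, "[vdso]");
}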
diff --git a/tools/perf/util/map.h b/tools/perf/util/map.h index 03a1e9b08b21..d2250fc97e25 100644 --- a/tools/perf/util/map.h +++ b/tools/perf/util/map.h | |||
@@ -96,7 +96,7 @@ static inline u64 map__unmap_ip(struct map *map, u64 ip) | |||
96 | return ip + map->start - map->pgoff; | 96 | return ip + map->start - map->pgoff; |
97 | } | 97 | } |
98 | 98 | ||
99 | static inline u64 identity__map_ip(struct map *map __used, u64 ip) | 99 | static inline u64 identity__map_ip(struct map *map __maybe_unused, u64 ip) |
100 | { | 100 | { |
101 | return ip; | 101 | return ip; |
102 | } | 102 | } |
@@ -104,7 +104,6 @@ static inline u64 identity__map_ip(struct map *map __used, u64 ip) | |||
104 | 104 | ||
105 | /* rip/ip <-> addr suitable for passing to `objdump --start-address=` */ | 105 | /* rip/ip <-> addr suitable for passing to `objdump --start-address=` */ |
106 | u64 map__rip_2objdump(struct map *map, u64 rip); | 106 | u64 map__rip_2objdump(struct map *map, u64 rip); |
107 | u64 map__objdump_2ip(struct map *map, u64 addr); | ||
108 | 107 | ||
109 | struct symbol; | 108 | struct symbol; |
110 | 109 | ||
@@ -115,6 +114,7 @@ void map__init(struct map *self, enum map_type type, | |||
115 | struct map *map__new(struct list_head *dsos__list, u64 start, u64 len, | 114 | struct map *map__new(struct list_head *dsos__list, u64 start, u64 len, |
116 | u64 pgoff, u32 pid, char *filename, | 115 | u64 pgoff, u32 pid, char *filename, |
117 | enum map_type type); | 116 | enum map_type type); |
117 | struct map *map__new2(u64 start, struct dso *dso, enum map_type type); | ||
118 | void map__delete(struct map *self); | 118 | void map__delete(struct map *self); |
119 | struct map *map__clone(struct map *self); | 119 | struct map *map__clone(struct map *self); |
120 | int map__overlap(struct map *l, struct map *r); | 120 | int map__overlap(struct map *l, struct map *r); |
@@ -157,9 +157,12 @@ int machine__init(struct machine *self, const char *root_dir, pid_t pid); | |||
157 | void machine__exit(struct machine *self); | 157 | void machine__exit(struct machine *self); |
158 | void machine__delete(struct machine *self); | 158 | void machine__delete(struct machine *self); |
159 | 159 | ||
160 | struct perf_evsel; | ||
161 | struct perf_sample; | ||
160 | int machine__resolve_callchain(struct machine *machine, | 162 | int machine__resolve_callchain(struct machine *machine, |
163 | struct perf_evsel *evsel, | ||
161 | struct thread *thread, | 164 | struct thread *thread, |
162 | struct ip_callchain *chain, | 165 | struct perf_sample *sample, |
163 | struct symbol **parent); | 166 | struct symbol **parent); |
164 | int maps__set_kallsyms_ref_reloc_sym(struct map **maps, const char *symbol_name, | 167 | int maps__set_kallsyms_ref_reloc_sym(struct map **maps, const char *symbol_name, |
165 | u64 addr); | 168 | u64 addr); |
diff --git a/tools/perf/util/parse-events-test.c b/tools/perf/util/parse-events-test.c index 127d648cc548..28c18d1d52c3 100644 --- a/tools/perf/util/parse-events-test.c +++ b/tools/perf/util/parse-events-test.c | |||
@@ -18,8 +18,7 @@ do { \ | |||
18 | 18 | ||
19 | static int test__checkevent_tracepoint(struct perf_evlist *evlist) | 19 | static int test__checkevent_tracepoint(struct perf_evlist *evlist) |
20 | { | 20 | { |
21 | struct perf_evsel *evsel = list_entry(evlist->entries.next, | 21 | struct perf_evsel *evsel = perf_evlist__first(evlist); |
22 | struct perf_evsel, node); | ||
23 | 22 | ||
24 | TEST_ASSERT_VAL("wrong number of entries", 1 == evlist->nr_entries); | 23 | TEST_ASSERT_VAL("wrong number of entries", 1 == evlist->nr_entries); |
25 | TEST_ASSERT_VAL("wrong type", PERF_TYPE_TRACEPOINT == evsel->attr.type); | 24 | TEST_ASSERT_VAL("wrong type", PERF_TYPE_TRACEPOINT == evsel->attr.type); |
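The conversions in this file replace open-coded list_entry() lookups with perf_evlist__first() and, further down, perf_evsel__next(). Their presumed shape is a thin wrapper over the evlist's intrusive list; the sketch below re-creates that pattern with minimal stand-in types so it is self-contained, and is not the real perf definition:

#include <stdio.h>
#include <stddef.h>

/* Minimal stand-ins, just enough of the intrusive-list pattern. */
struct list_head { struct list_head *next, *prev; };
struct perf_evsel { int type; struct list_head node; };
struct perf_evlist { struct list_head entries; };

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))
#define list_entry(ptr, type, member) container_of(ptr, type, member)

static struct perf_evsel *perf_evlist__first(struct perf_evlist *evlist)
{
	return list_entry(evlist->entries.next, struct perf_evsel, node);
}

static struct perf_evsel *perf_evsel__next(struct perf_evsel *evsel)
{
	return list_entry(evsel->node.next, struct perf_evsel, node);
}

int main(void)
{
	struct perf_evsel a = { .type = 1 }, b = { .type = 2 };
	struct perf_evlist evlist = { { &a.node, NULL } };

	a.node.next = &b.node;  /* hand-link: entries -> a -> b */

	printf("%d %d\n", perf_evlist__first(&evlist)->type,
	       perf_evsel__next(perf_evlist__first(&evlist))->type);  /* 1 2 */
	return 0;
}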
@@ -48,8 +47,7 @@ static int test__checkevent_tracepoint_multi(struct perf_evlist *evlist) | |||
48 | 47 | ||
49 | static int test__checkevent_raw(struct perf_evlist *evlist) | 48 | static int test__checkevent_raw(struct perf_evlist *evlist) |
50 | { | 49 | { |
51 | struct perf_evsel *evsel = list_entry(evlist->entries.next, | 50 | struct perf_evsel *evsel = perf_evlist__first(evlist); |
52 | struct perf_evsel, node); | ||
53 | 51 | ||
54 | TEST_ASSERT_VAL("wrong number of entries", 1 == evlist->nr_entries); | 52 | TEST_ASSERT_VAL("wrong number of entries", 1 == evlist->nr_entries); |
55 | TEST_ASSERT_VAL("wrong type", PERF_TYPE_RAW == evsel->attr.type); | 53 | TEST_ASSERT_VAL("wrong type", PERF_TYPE_RAW == evsel->attr.type); |
@@ -59,8 +57,7 @@ static int test__checkevent_raw(struct perf_evlist *evlist) | |||
59 | 57 | ||
60 | static int test__checkevent_numeric(struct perf_evlist *evlist) | 58 | static int test__checkevent_numeric(struct perf_evlist *evlist) |
61 | { | 59 | { |
62 | struct perf_evsel *evsel = list_entry(evlist->entries.next, | 60 | struct perf_evsel *evsel = perf_evlist__first(evlist); |
63 | struct perf_evsel, node); | ||
64 | 61 | ||
65 | TEST_ASSERT_VAL("wrong number of entries", 1 == evlist->nr_entries); | 62 | TEST_ASSERT_VAL("wrong number of entries", 1 == evlist->nr_entries); |
66 | TEST_ASSERT_VAL("wrong type", 1 == evsel->attr.type); | 63 | TEST_ASSERT_VAL("wrong type", 1 == evsel->attr.type); |
@@ -70,8 +67,7 @@ static int test__checkevent_numeric(struct perf_evlist *evlist) | |||
70 | 67 | ||
71 | static int test__checkevent_symbolic_name(struct perf_evlist *evlist) | 68 | static int test__checkevent_symbolic_name(struct perf_evlist *evlist) |
72 | { | 69 | { |
73 | struct perf_evsel *evsel = list_entry(evlist->entries.next, | 70 | struct perf_evsel *evsel = perf_evlist__first(evlist); |
74 | struct perf_evsel, node); | ||
75 | 71 | ||
76 | TEST_ASSERT_VAL("wrong number of entries", 1 == evlist->nr_entries); | 72 | TEST_ASSERT_VAL("wrong number of entries", 1 == evlist->nr_entries); |
77 | TEST_ASSERT_VAL("wrong type", PERF_TYPE_HARDWARE == evsel->attr.type); | 73 | TEST_ASSERT_VAL("wrong type", PERF_TYPE_HARDWARE == evsel->attr.type); |
@@ -82,8 +78,7 @@ static int test__checkevent_symbolic_name(struct perf_evlist *evlist) | |||
82 | 78 | ||
83 | static int test__checkevent_symbolic_name_config(struct perf_evlist *evlist) | 79 | static int test__checkevent_symbolic_name_config(struct perf_evlist *evlist) |
84 | { | 80 | { |
85 | struct perf_evsel *evsel = list_entry(evlist->entries.next, | 81 | struct perf_evsel *evsel = perf_evlist__first(evlist); |
86 | struct perf_evsel, node); | ||
87 | 82 | ||
88 | TEST_ASSERT_VAL("wrong number of entries", 1 == evlist->nr_entries); | 83 | TEST_ASSERT_VAL("wrong number of entries", 1 == evlist->nr_entries); |
89 | TEST_ASSERT_VAL("wrong type", PERF_TYPE_HARDWARE == evsel->attr.type); | 84 | TEST_ASSERT_VAL("wrong type", PERF_TYPE_HARDWARE == evsel->attr.type); |
@@ -100,8 +95,7 @@ static int test__checkevent_symbolic_name_config(struct perf_evlist *evlist) | |||
100 | 95 | ||
101 | static int test__checkevent_symbolic_alias(struct perf_evlist *evlist) | 96 | static int test__checkevent_symbolic_alias(struct perf_evlist *evlist) |
102 | { | 97 | { |
103 | struct perf_evsel *evsel = list_entry(evlist->entries.next, | 98 | struct perf_evsel *evsel = perf_evlist__first(evlist); |
104 | struct perf_evsel, node); | ||
105 | 99 | ||
106 | TEST_ASSERT_VAL("wrong number of entries", 1 == evlist->nr_entries); | 100 | TEST_ASSERT_VAL("wrong number of entries", 1 == evlist->nr_entries); |
107 | TEST_ASSERT_VAL("wrong type", PERF_TYPE_SOFTWARE == evsel->attr.type); | 101 | TEST_ASSERT_VAL("wrong type", PERF_TYPE_SOFTWARE == evsel->attr.type); |
@@ -112,8 +106,7 @@ static int test__checkevent_symbolic_alias(struct perf_evlist *evlist) | |||
112 | 106 | ||
113 | static int test__checkevent_genhw(struct perf_evlist *evlist) | 107 | static int test__checkevent_genhw(struct perf_evlist *evlist) |
114 | { | 108 | { |
115 | struct perf_evsel *evsel = list_entry(evlist->entries.next, | 109 | struct perf_evsel *evsel = perf_evlist__first(evlist); |
116 | struct perf_evsel, node); | ||
117 | 110 | ||
118 | TEST_ASSERT_VAL("wrong number of entries", 1 == evlist->nr_entries); | 111 | TEST_ASSERT_VAL("wrong number of entries", 1 == evlist->nr_entries); |
119 | TEST_ASSERT_VAL("wrong type", PERF_TYPE_HW_CACHE == evsel->attr.type); | 112 | TEST_ASSERT_VAL("wrong type", PERF_TYPE_HW_CACHE == evsel->attr.type); |
@@ -123,8 +116,7 @@ static int test__checkevent_genhw(struct perf_evlist *evlist) | |||
123 | 116 | ||
124 | static int test__checkevent_breakpoint(struct perf_evlist *evlist) | 117 | static int test__checkevent_breakpoint(struct perf_evlist *evlist) |
125 | { | 118 | { |
126 | struct perf_evsel *evsel = list_entry(evlist->entries.next, | 119 | struct perf_evsel *evsel = perf_evlist__first(evlist); |
127 | struct perf_evsel, node); | ||
128 | 120 | ||
129 | TEST_ASSERT_VAL("wrong number of entries", 1 == evlist->nr_entries); | 121 | TEST_ASSERT_VAL("wrong number of entries", 1 == evlist->nr_entries); |
130 | TEST_ASSERT_VAL("wrong type", PERF_TYPE_BREAKPOINT == evsel->attr.type); | 122 | TEST_ASSERT_VAL("wrong type", PERF_TYPE_BREAKPOINT == evsel->attr.type); |
@@ -138,8 +130,7 @@ static int test__checkevent_breakpoint(struct perf_evlist *evlist) | |||
138 | 130 | ||
139 | static int test__checkevent_breakpoint_x(struct perf_evlist *evlist) | 131 | static int test__checkevent_breakpoint_x(struct perf_evlist *evlist) |
140 | { | 132 | { |
141 | struct perf_evsel *evsel = list_entry(evlist->entries.next, | 133 | struct perf_evsel *evsel = perf_evlist__first(evlist); |
142 | struct perf_evsel, node); | ||
143 | 134 | ||
144 | TEST_ASSERT_VAL("wrong number of entries", 1 == evlist->nr_entries); | 135 | TEST_ASSERT_VAL("wrong number of entries", 1 == evlist->nr_entries); |
145 | TEST_ASSERT_VAL("wrong type", PERF_TYPE_BREAKPOINT == evsel->attr.type); | 136 | TEST_ASSERT_VAL("wrong type", PERF_TYPE_BREAKPOINT == evsel->attr.type); |
@@ -152,8 +143,7 @@ static int test__checkevent_breakpoint_x(struct perf_evlist *evlist) | |||
152 | 143 | ||
153 | static int test__checkevent_breakpoint_r(struct perf_evlist *evlist) | 144 | static int test__checkevent_breakpoint_r(struct perf_evlist *evlist) |
154 | { | 145 | { |
155 | struct perf_evsel *evsel = list_entry(evlist->entries.next, | 146 | struct perf_evsel *evsel = perf_evlist__first(evlist); |
156 | struct perf_evsel, node); | ||
157 | 147 | ||
158 | TEST_ASSERT_VAL("wrong number of entries", 1 == evlist->nr_entries); | 148 | TEST_ASSERT_VAL("wrong number of entries", 1 == evlist->nr_entries); |
159 | TEST_ASSERT_VAL("wrong type", | 149 | TEST_ASSERT_VAL("wrong type", |
@@ -168,8 +158,7 @@ static int test__checkevent_breakpoint_r(struct perf_evlist *evlist) | |||
168 | 158 | ||
169 | static int test__checkevent_breakpoint_w(struct perf_evlist *evlist) | 159 | static int test__checkevent_breakpoint_w(struct perf_evlist *evlist) |
170 | { | 160 | { |
171 | struct perf_evsel *evsel = list_entry(evlist->entries.next, | 161 | struct perf_evsel *evsel = perf_evlist__first(evlist); |
172 | struct perf_evsel, node); | ||
173 | 162 | ||
174 | TEST_ASSERT_VAL("wrong number of entries", 1 == evlist->nr_entries); | 163 | TEST_ASSERT_VAL("wrong number of entries", 1 == evlist->nr_entries); |
175 | TEST_ASSERT_VAL("wrong type", | 164 | TEST_ASSERT_VAL("wrong type", |
@@ -184,8 +173,7 @@ static int test__checkevent_breakpoint_w(struct perf_evlist *evlist) | |||
184 | 173 | ||
185 | static int test__checkevent_breakpoint_rw(struct perf_evlist *evlist) | 174 | static int test__checkevent_breakpoint_rw(struct perf_evlist *evlist) |
186 | { | 175 | { |
187 | struct perf_evsel *evsel = list_entry(evlist->entries.next, | 176 | struct perf_evsel *evsel = perf_evlist__first(evlist); |
188 | struct perf_evsel, node); | ||
189 | 177 | ||
190 | TEST_ASSERT_VAL("wrong number of entries", 1 == evlist->nr_entries); | 178 | TEST_ASSERT_VAL("wrong number of entries", 1 == evlist->nr_entries); |
191 | TEST_ASSERT_VAL("wrong type", | 179 | TEST_ASSERT_VAL("wrong type", |
@@ -200,8 +188,7 @@ static int test__checkevent_breakpoint_rw(struct perf_evlist *evlist) | |||
200 | 188 | ||
201 | static int test__checkevent_tracepoint_modifier(struct perf_evlist *evlist) | 189 | static int test__checkevent_tracepoint_modifier(struct perf_evlist *evlist) |
202 | { | 190 | { |
203 | struct perf_evsel *evsel = list_entry(evlist->entries.next, | 191 | struct perf_evsel *evsel = perf_evlist__first(evlist); |
204 | struct perf_evsel, node); | ||
205 | 192 | ||
206 | TEST_ASSERT_VAL("wrong exclude_user", evsel->attr.exclude_user); | 193 | TEST_ASSERT_VAL("wrong exclude_user", evsel->attr.exclude_user); |
207 | TEST_ASSERT_VAL("wrong exclude_kernel", !evsel->attr.exclude_kernel); | 194 | TEST_ASSERT_VAL("wrong exclude_kernel", !evsel->attr.exclude_kernel); |
@@ -232,8 +219,7 @@ test__checkevent_tracepoint_multi_modifier(struct perf_evlist *evlist) | |||
232 | 219 | ||
233 | static int test__checkevent_raw_modifier(struct perf_evlist *evlist) | 220 | static int test__checkevent_raw_modifier(struct perf_evlist *evlist) |
234 | { | 221 | { |
235 | struct perf_evsel *evsel = list_entry(evlist->entries.next, | 222 | struct perf_evsel *evsel = perf_evlist__first(evlist); |
236 | struct perf_evsel, node); | ||
237 | 223 | ||
238 | TEST_ASSERT_VAL("wrong exclude_user", evsel->attr.exclude_user); | 224 | TEST_ASSERT_VAL("wrong exclude_user", evsel->attr.exclude_user); |
239 | TEST_ASSERT_VAL("wrong exclude_kernel", !evsel->attr.exclude_kernel); | 225 | TEST_ASSERT_VAL("wrong exclude_kernel", !evsel->attr.exclude_kernel); |
@@ -245,8 +231,7 @@ static int test__checkevent_raw_modifier(struct perf_evlist *evlist) | |||
245 | 231 | ||
246 | static int test__checkevent_numeric_modifier(struct perf_evlist *evlist) | 232 | static int test__checkevent_numeric_modifier(struct perf_evlist *evlist) |
247 | { | 233 | { |
248 | struct perf_evsel *evsel = list_entry(evlist->entries.next, | 234 | struct perf_evsel *evsel = perf_evlist__first(evlist); |
249 | struct perf_evsel, node); | ||
250 | 235 | ||
251 | TEST_ASSERT_VAL("wrong exclude_user", evsel->attr.exclude_user); | 236 | TEST_ASSERT_VAL("wrong exclude_user", evsel->attr.exclude_user); |
252 | TEST_ASSERT_VAL("wrong exclude_kernel", evsel->attr.exclude_kernel); | 237 | TEST_ASSERT_VAL("wrong exclude_kernel", evsel->attr.exclude_kernel); |
@@ -258,8 +243,7 @@ static int test__checkevent_numeric_modifier(struct perf_evlist *evlist) | |||
258 | 243 | ||
259 | static int test__checkevent_symbolic_name_modifier(struct perf_evlist *evlist) | 244 | static int test__checkevent_symbolic_name_modifier(struct perf_evlist *evlist) |
260 | { | 245 | { |
261 | struct perf_evsel *evsel = list_entry(evlist->entries.next, | 246 | struct perf_evsel *evsel = perf_evlist__first(evlist); |
262 | struct perf_evsel, node); | ||
263 | 247 | ||
264 | TEST_ASSERT_VAL("wrong exclude_user", evsel->attr.exclude_user); | 248 | TEST_ASSERT_VAL("wrong exclude_user", evsel->attr.exclude_user); |
265 | TEST_ASSERT_VAL("wrong exclude_kernel", evsel->attr.exclude_kernel); | 249 | TEST_ASSERT_VAL("wrong exclude_kernel", evsel->attr.exclude_kernel); |
@@ -271,8 +255,7 @@ static int test__checkevent_symbolic_name_modifier(struct perf_evlist *evlist) | |||
271 | 255 | ||
272 | static int test__checkevent_exclude_host_modifier(struct perf_evlist *evlist) | 256 | static int test__checkevent_exclude_host_modifier(struct perf_evlist *evlist) |
273 | { | 257 | { |
274 | struct perf_evsel *evsel = list_entry(evlist->entries.next, | 258 | struct perf_evsel *evsel = perf_evlist__first(evlist); |
275 | struct perf_evsel, node); | ||
276 | 259 | ||
277 | TEST_ASSERT_VAL("wrong exclude guest", !evsel->attr.exclude_guest); | 260 | TEST_ASSERT_VAL("wrong exclude guest", !evsel->attr.exclude_guest); |
278 | TEST_ASSERT_VAL("wrong exclude host", evsel->attr.exclude_host); | 261 | TEST_ASSERT_VAL("wrong exclude host", evsel->attr.exclude_host); |
@@ -282,8 +265,7 @@ static int test__checkevent_exclude_host_modifier(struct perf_evlist *evlist) | |||
282 | 265 | ||
283 | static int test__checkevent_exclude_guest_modifier(struct perf_evlist *evlist) | 266 | static int test__checkevent_exclude_guest_modifier(struct perf_evlist *evlist) |
284 | { | 267 | { |
285 | struct perf_evsel *evsel = list_entry(evlist->entries.next, | 268 | struct perf_evsel *evsel = perf_evlist__first(evlist); |
286 | struct perf_evsel, node); | ||
287 | 269 | ||
288 | TEST_ASSERT_VAL("wrong exclude guest", evsel->attr.exclude_guest); | 270 | TEST_ASSERT_VAL("wrong exclude guest", evsel->attr.exclude_guest); |
289 | TEST_ASSERT_VAL("wrong exclude host", !evsel->attr.exclude_host); | 271 | TEST_ASSERT_VAL("wrong exclude host", !evsel->attr.exclude_host); |
@@ -293,8 +275,7 @@ static int test__checkevent_exclude_guest_modifier(struct perf_evlist *evlist) | |||
293 | 275 | ||
294 | static int test__checkevent_symbolic_alias_modifier(struct perf_evlist *evlist) | 276 | static int test__checkevent_symbolic_alias_modifier(struct perf_evlist *evlist) |
295 | { | 277 | { |
296 | struct perf_evsel *evsel = list_entry(evlist->entries.next, | 278 | struct perf_evsel *evsel = perf_evlist__first(evlist); |
297 | struct perf_evsel, node); | ||
298 | 279 | ||
299 | TEST_ASSERT_VAL("wrong exclude_user", !evsel->attr.exclude_user); | 280 | TEST_ASSERT_VAL("wrong exclude_user", !evsel->attr.exclude_user); |
300 | TEST_ASSERT_VAL("wrong exclude_kernel", evsel->attr.exclude_kernel); | 281 | TEST_ASSERT_VAL("wrong exclude_kernel", evsel->attr.exclude_kernel); |
@@ -306,8 +287,7 @@ static int test__checkevent_symbolic_alias_modifier(struct perf_evlist *evlist) | |||
306 | 287 | ||
307 | static int test__checkevent_genhw_modifier(struct perf_evlist *evlist) | 288 | static int test__checkevent_genhw_modifier(struct perf_evlist *evlist) |
308 | { | 289 | { |
309 | struct perf_evsel *evsel = list_entry(evlist->entries.next, | 290 | struct perf_evsel *evsel = perf_evlist__first(evlist); |
310 | struct perf_evsel, node); | ||
311 | 291 | ||
312 | TEST_ASSERT_VAL("wrong exclude_user", evsel->attr.exclude_user); | 292 | TEST_ASSERT_VAL("wrong exclude_user", evsel->attr.exclude_user); |
313 | TEST_ASSERT_VAL("wrong exclude_kernel", !evsel->attr.exclude_kernel); | 293 | TEST_ASSERT_VAL("wrong exclude_kernel", !evsel->attr.exclude_kernel); |
@@ -319,75 +299,71 @@ static int test__checkevent_genhw_modifier(struct perf_evlist *evlist) | |||
319 | 299 | ||
320 | static int test__checkevent_breakpoint_modifier(struct perf_evlist *evlist) | 300 | static int test__checkevent_breakpoint_modifier(struct perf_evlist *evlist) |
321 | { | 301 | { |
322 | struct perf_evsel *evsel = list_entry(evlist->entries.next, | 302 | struct perf_evsel *evsel = perf_evlist__first(evlist); |
323 | struct perf_evsel, node); | 303 | |
324 | 304 | ||
325 | TEST_ASSERT_VAL("wrong exclude_user", !evsel->attr.exclude_user); | 305 | TEST_ASSERT_VAL("wrong exclude_user", !evsel->attr.exclude_user); |
326 | TEST_ASSERT_VAL("wrong exclude_kernel", evsel->attr.exclude_kernel); | 306 | TEST_ASSERT_VAL("wrong exclude_kernel", evsel->attr.exclude_kernel); |
327 | TEST_ASSERT_VAL("wrong exclude_hv", evsel->attr.exclude_hv); | 307 | TEST_ASSERT_VAL("wrong exclude_hv", evsel->attr.exclude_hv); |
328 | TEST_ASSERT_VAL("wrong precise_ip", !evsel->attr.precise_ip); | 308 | TEST_ASSERT_VAL("wrong precise_ip", !evsel->attr.precise_ip); |
329 | TEST_ASSERT_VAL("wrong name", | 309 | TEST_ASSERT_VAL("wrong name", |
330 | !strcmp(perf_evsel__name(evsel), "mem:0x0:rw:u")); | 310 | !strcmp(perf_evsel__name(evsel), "mem:0:u")); |
331 | 311 | ||
332 | return test__checkevent_breakpoint(evlist); | 312 | return test__checkevent_breakpoint(evlist); |
333 | } | 313 | } |
334 | 314 | ||
335 | static int test__checkevent_breakpoint_x_modifier(struct perf_evlist *evlist) | 315 | static int test__checkevent_breakpoint_x_modifier(struct perf_evlist *evlist) |
336 | { | 316 | { |
337 | struct perf_evsel *evsel = list_entry(evlist->entries.next, | 317 | struct perf_evsel *evsel = perf_evlist__first(evlist); |
338 | struct perf_evsel, node); | ||
339 | 318 | ||
340 | TEST_ASSERT_VAL("wrong exclude_user", evsel->attr.exclude_user); | 319 | TEST_ASSERT_VAL("wrong exclude_user", evsel->attr.exclude_user); |
341 | TEST_ASSERT_VAL("wrong exclude_kernel", !evsel->attr.exclude_kernel); | 320 | TEST_ASSERT_VAL("wrong exclude_kernel", !evsel->attr.exclude_kernel); |
342 | TEST_ASSERT_VAL("wrong exclude_hv", evsel->attr.exclude_hv); | 321 | TEST_ASSERT_VAL("wrong exclude_hv", evsel->attr.exclude_hv); |
343 | TEST_ASSERT_VAL("wrong precise_ip", !evsel->attr.precise_ip); | 322 | TEST_ASSERT_VAL("wrong precise_ip", !evsel->attr.precise_ip); |
344 | TEST_ASSERT_VAL("wrong name", | 323 | TEST_ASSERT_VAL("wrong name", |
345 | !strcmp(perf_evsel__name(evsel), "mem:0x0:x:k")); | 324 | !strcmp(perf_evsel__name(evsel), "mem:0:x:k")); |
346 | 325 | ||
347 | return test__checkevent_breakpoint_x(evlist); | 326 | return test__checkevent_breakpoint_x(evlist); |
348 | } | 327 | } |
349 | 328 | ||
350 | static int test__checkevent_breakpoint_r_modifier(struct perf_evlist *evlist) | 329 | static int test__checkevent_breakpoint_r_modifier(struct perf_evlist *evlist) |
351 | { | 330 | { |
352 | struct perf_evsel *evsel = list_entry(evlist->entries.next, | 331 | struct perf_evsel *evsel = perf_evlist__first(evlist); |
353 | struct perf_evsel, node); | ||
354 | 332 | ||
355 | TEST_ASSERT_VAL("wrong exclude_user", evsel->attr.exclude_user); | 333 | TEST_ASSERT_VAL("wrong exclude_user", evsel->attr.exclude_user); |
356 | TEST_ASSERT_VAL("wrong exclude_kernel", evsel->attr.exclude_kernel); | 334 | TEST_ASSERT_VAL("wrong exclude_kernel", evsel->attr.exclude_kernel); |
357 | TEST_ASSERT_VAL("wrong exclude_hv", !evsel->attr.exclude_hv); | 335 | TEST_ASSERT_VAL("wrong exclude_hv", !evsel->attr.exclude_hv); |
358 | TEST_ASSERT_VAL("wrong precise_ip", evsel->attr.precise_ip); | 336 | TEST_ASSERT_VAL("wrong precise_ip", evsel->attr.precise_ip); |
359 | TEST_ASSERT_VAL("wrong name", | 337 | TEST_ASSERT_VAL("wrong name", |
360 | !strcmp(perf_evsel__name(evsel), "mem:0x0:r:hp")); | 338 | !strcmp(perf_evsel__name(evsel), "mem:0:r:hp")); |
361 | 339 | ||
362 | return test__checkevent_breakpoint_r(evlist); | 340 | return test__checkevent_breakpoint_r(evlist); |
363 | } | 341 | } |
364 | 342 | ||
365 | static int test__checkevent_breakpoint_w_modifier(struct perf_evlist *evlist) | 343 | static int test__checkevent_breakpoint_w_modifier(struct perf_evlist *evlist) |
366 | { | 344 | { |
367 | struct perf_evsel *evsel = list_entry(evlist->entries.next, | 345 | struct perf_evsel *evsel = perf_evlist__first(evlist); |
368 | struct perf_evsel, node); | ||
369 | 346 | ||
370 | TEST_ASSERT_VAL("wrong exclude_user", !evsel->attr.exclude_user); | 347 | TEST_ASSERT_VAL("wrong exclude_user", !evsel->attr.exclude_user); |
371 | TEST_ASSERT_VAL("wrong exclude_kernel", evsel->attr.exclude_kernel); | 348 | TEST_ASSERT_VAL("wrong exclude_kernel", evsel->attr.exclude_kernel); |
372 | TEST_ASSERT_VAL("wrong exclude_hv", evsel->attr.exclude_hv); | 349 | TEST_ASSERT_VAL("wrong exclude_hv", evsel->attr.exclude_hv); |
373 | TEST_ASSERT_VAL("wrong precise_ip", evsel->attr.precise_ip); | 350 | TEST_ASSERT_VAL("wrong precise_ip", evsel->attr.precise_ip); |
374 | TEST_ASSERT_VAL("wrong name", | 351 | TEST_ASSERT_VAL("wrong name", |
375 | !strcmp(perf_evsel__name(evsel), "mem:0x0:w:up")); | 352 | !strcmp(perf_evsel__name(evsel), "mem:0:w:up")); |
376 | 353 | ||
377 | return test__checkevent_breakpoint_w(evlist); | 354 | return test__checkevent_breakpoint_w(evlist); |
378 | } | 355 | } |
379 | 356 | ||
380 | static int test__checkevent_breakpoint_rw_modifier(struct perf_evlist *evlist) | 357 | static int test__checkevent_breakpoint_rw_modifier(struct perf_evlist *evlist) |
381 | { | 358 | { |
382 | struct perf_evsel *evsel = list_entry(evlist->entries.next, | 359 | struct perf_evsel *evsel = perf_evlist__first(evlist); |
383 | struct perf_evsel, node); | ||
384 | 360 | ||
385 | TEST_ASSERT_VAL("wrong exclude_user", evsel->attr.exclude_user); | 361 | TEST_ASSERT_VAL("wrong exclude_user", evsel->attr.exclude_user); |
386 | TEST_ASSERT_VAL("wrong exclude_kernel", !evsel->attr.exclude_kernel); | 362 | TEST_ASSERT_VAL("wrong exclude_kernel", !evsel->attr.exclude_kernel); |
387 | TEST_ASSERT_VAL("wrong exclude_hv", evsel->attr.exclude_hv); | 363 | TEST_ASSERT_VAL("wrong exclude_hv", evsel->attr.exclude_hv); |
388 | TEST_ASSERT_VAL("wrong precise_ip", evsel->attr.precise_ip); | 364 | TEST_ASSERT_VAL("wrong precise_ip", evsel->attr.precise_ip); |
389 | TEST_ASSERT_VAL("wrong name", | 365 | TEST_ASSERT_VAL("wrong name", |
390 | !strcmp(perf_evsel__name(evsel), "mem:0x0:rw:kp")); | 366 | !strcmp(perf_evsel__name(evsel), "mem:0:rw:kp")); |
391 | 367 | ||
392 | return test__checkevent_breakpoint_rw(evlist); | 368 | return test__checkevent_breakpoint_rw(evlist); |
393 | } | 369 | } |
@@ -395,8 +371,7 @@ static int test__checkevent_breakpoint_rw_modifier(struct perf_evlist *evlist) | |||
395 | static int test__checkevent_pmu(struct perf_evlist *evlist) | 371 | static int test__checkevent_pmu(struct perf_evlist *evlist) |
396 | { | 372 | { |
397 | 373 | ||
398 | struct perf_evsel *evsel = list_entry(evlist->entries.next, | 374 | struct perf_evsel *evsel = perf_evlist__first(evlist); |
399 | struct perf_evsel, node); | ||
400 | 375 | ||
401 | TEST_ASSERT_VAL("wrong number of entries", 1 == evlist->nr_entries); | 376 | TEST_ASSERT_VAL("wrong number of entries", 1 == evlist->nr_entries); |
402 | TEST_ASSERT_VAL("wrong type", PERF_TYPE_RAW == evsel->attr.type); | 377 | TEST_ASSERT_VAL("wrong type", PERF_TYPE_RAW == evsel->attr.type); |
@@ -410,12 +385,11 @@ static int test__checkevent_pmu(struct perf_evlist *evlist) | |||
410 | 385 | ||
411 | static int test__checkevent_list(struct perf_evlist *evlist) | 386 | static int test__checkevent_list(struct perf_evlist *evlist) |
412 | { | 387 | { |
413 | struct perf_evsel *evsel; | 388 | struct perf_evsel *evsel = perf_evlist__first(evlist); |
414 | 389 | ||
415 | TEST_ASSERT_VAL("wrong number of entries", 3 == evlist->nr_entries); | 390 | TEST_ASSERT_VAL("wrong number of entries", 3 == evlist->nr_entries); |
416 | 391 | ||
417 | /* r1 */ | 392 | /* r1 */ |
418 | evsel = list_entry(evlist->entries.next, struct perf_evsel, node); | ||
419 | TEST_ASSERT_VAL("wrong type", PERF_TYPE_RAW == evsel->attr.type); | 393 | TEST_ASSERT_VAL("wrong type", PERF_TYPE_RAW == evsel->attr.type); |
420 | TEST_ASSERT_VAL("wrong config", 1 == evsel->attr.config); | 394 | TEST_ASSERT_VAL("wrong config", 1 == evsel->attr.config); |
421 | TEST_ASSERT_VAL("wrong config1", 0 == evsel->attr.config1); | 395 | TEST_ASSERT_VAL("wrong config1", 0 == evsel->attr.config1); |
@@ -426,7 +400,7 @@ static int test__checkevent_list(struct perf_evlist *evlist) | |||
426 | TEST_ASSERT_VAL("wrong precise_ip", !evsel->attr.precise_ip); | 400 | TEST_ASSERT_VAL("wrong precise_ip", !evsel->attr.precise_ip); |
427 | 401 | ||
428 | /* syscalls:sys_enter_open:k */ | 402 | /* syscalls:sys_enter_open:k */ |
429 | evsel = list_entry(evsel->node.next, struct perf_evsel, node); | 403 | evsel = perf_evsel__next(evsel); |
430 | TEST_ASSERT_VAL("wrong type", PERF_TYPE_TRACEPOINT == evsel->attr.type); | 404 | TEST_ASSERT_VAL("wrong type", PERF_TYPE_TRACEPOINT == evsel->attr.type); |
431 | TEST_ASSERT_VAL("wrong sample_type", | 405 | TEST_ASSERT_VAL("wrong sample_type", |
432 | PERF_TP_SAMPLE_TYPE == evsel->attr.sample_type); | 406 | PERF_TP_SAMPLE_TYPE == evsel->attr.sample_type); |
@@ -437,7 +411,7 @@ static int test__checkevent_list(struct perf_evlist *evlist) | |||
437 | TEST_ASSERT_VAL("wrong precise_ip", !evsel->attr.precise_ip); | 411 | TEST_ASSERT_VAL("wrong precise_ip", !evsel->attr.precise_ip); |
438 | 412 | ||
439 | /* 1:1:hp */ | 413 | /* 1:1:hp */ |
440 | evsel = list_entry(evsel->node.next, struct perf_evsel, node); | 414 | evsel = perf_evsel__next(evsel); |
441 | TEST_ASSERT_VAL("wrong type", 1 == evsel->attr.type); | 415 | TEST_ASSERT_VAL("wrong type", 1 == evsel->attr.type); |
442 | TEST_ASSERT_VAL("wrong config", 1 == evsel->attr.config); | 416 | TEST_ASSERT_VAL("wrong config", 1 == evsel->attr.config); |
443 | TEST_ASSERT_VAL("wrong exclude_user", evsel->attr.exclude_user); | 417 | TEST_ASSERT_VAL("wrong exclude_user", evsel->attr.exclude_user); |
@@ -450,22 +424,21 @@ static int test__checkevent_list(struct perf_evlist *evlist) | |||
450 | 424 | ||
451 | static int test__checkevent_pmu_name(struct perf_evlist *evlist) | 425 | static int test__checkevent_pmu_name(struct perf_evlist *evlist) |
452 | { | 426 | { |
453 | struct perf_evsel *evsel; | 427 | struct perf_evsel *evsel = perf_evlist__first(evlist); |
454 | 428 | ||
455 | /* cpu/config=1,name=krava/u */ | 429 | /* cpu/config=1,name=krava/u */ |
456 | evsel = list_entry(evlist->entries.next, struct perf_evsel, node); | ||
457 | TEST_ASSERT_VAL("wrong number of entries", 2 == evlist->nr_entries); | 430 | TEST_ASSERT_VAL("wrong number of entries", 2 == evlist->nr_entries); |
458 | TEST_ASSERT_VAL("wrong type", PERF_TYPE_RAW == evsel->attr.type); | 431 | TEST_ASSERT_VAL("wrong type", PERF_TYPE_RAW == evsel->attr.type); |
459 | TEST_ASSERT_VAL("wrong config", 1 == evsel->attr.config); | 432 | TEST_ASSERT_VAL("wrong config", 1 == evsel->attr.config); |
460 | TEST_ASSERT_VAL("wrong name", !strcmp(perf_evsel__name(evsel), "krava")); | 433 | TEST_ASSERT_VAL("wrong name", !strcmp(perf_evsel__name(evsel), "krava")); |
461 | 434 | ||
462 | /* cpu/config=2/u" */ | 435 | /* cpu/config=2/u" */ |
463 | evsel = list_entry(evsel->node.next, struct perf_evsel, node); | 436 | evsel = perf_evsel__next(evsel); |
464 | TEST_ASSERT_VAL("wrong number of entries", 2 == evlist->nr_entries); | 437 | TEST_ASSERT_VAL("wrong number of entries", 2 == evlist->nr_entries); |
465 | TEST_ASSERT_VAL("wrong type", PERF_TYPE_RAW == evsel->attr.type); | 438 | TEST_ASSERT_VAL("wrong type", PERF_TYPE_RAW == evsel->attr.type); |
466 | TEST_ASSERT_VAL("wrong config", 2 == evsel->attr.config); | 439 | TEST_ASSERT_VAL("wrong config", 2 == evsel->attr.config); |
467 | TEST_ASSERT_VAL("wrong name", | 440 | TEST_ASSERT_VAL("wrong name", |
468 | !strcmp(perf_evsel__name(evsel), "raw 0x2:u")); | 441 | !strcmp(perf_evsel__name(evsel), "cpu/config=2/u")); |
469 | 442 | ||
470 | return 0; | 443 | return 0; |
471 | } | 444 | } |
@@ -513,6 +486,280 @@ static int test__checkterms_simple(struct list_head *terms) | |||
513 | return 0; | 486 | return 0; |
514 | } | 487 | } |
515 | 488 | ||
489 | static int test__group1(struct perf_evlist *evlist) | ||
490 | { | ||
491 | struct perf_evsel *evsel, *leader; | ||
492 | |||
493 | TEST_ASSERT_VAL("wrong number of entries", 2 == evlist->nr_entries); | ||
494 | |||
495 | /* instructions:k */ | ||
496 | evsel = leader = perf_evlist__first(evlist); | ||
497 | TEST_ASSERT_VAL("wrong type", PERF_TYPE_HARDWARE == evsel->attr.type); | ||
498 | TEST_ASSERT_VAL("wrong config", | ||
499 | PERF_COUNT_HW_INSTRUCTIONS == evsel->attr.config); | ||
500 | TEST_ASSERT_VAL("wrong exclude_user", evsel->attr.exclude_user); | ||
501 | TEST_ASSERT_VAL("wrong exclude_kernel", !evsel->attr.exclude_kernel); | ||
502 | TEST_ASSERT_VAL("wrong exclude_hv", evsel->attr.exclude_hv); | ||
503 | TEST_ASSERT_VAL("wrong exclude guest", !evsel->attr.exclude_guest); | ||
504 | TEST_ASSERT_VAL("wrong exclude host", !evsel->attr.exclude_host); | ||
505 | TEST_ASSERT_VAL("wrong precise_ip", !evsel->attr.precise_ip); | ||
506 | TEST_ASSERT_VAL("wrong leader", evsel->leader == NULL); | ||
507 | |||
508 | /* cycles:upp */ | ||
509 | evsel = perf_evsel__next(evsel); | ||
510 | TEST_ASSERT_VAL("wrong type", PERF_TYPE_HARDWARE == evsel->attr.type); | ||
511 | TEST_ASSERT_VAL("wrong config", | ||
512 | PERF_COUNT_HW_CPU_CYCLES == evsel->attr.config); | ||
513 | TEST_ASSERT_VAL("wrong exclude_user", !evsel->attr.exclude_user); | ||
514 | TEST_ASSERT_VAL("wrong exclude_kernel", evsel->attr.exclude_kernel); | ||
515 | TEST_ASSERT_VAL("wrong exclude_hv", evsel->attr.exclude_hv); | ||
516 | TEST_ASSERT_VAL("wrong exclude guest", !evsel->attr.exclude_guest); | ||
517 | TEST_ASSERT_VAL("wrong exclude host", !evsel->attr.exclude_host); | ||
518 | TEST_ASSERT_VAL("wrong precise_ip", evsel->attr.precise_ip == 2); | ||
519 | TEST_ASSERT_VAL("wrong leader", evsel->leader == leader); | ||
520 | |||
521 | return 0; | ||
522 | } | ||
523 | |||
524 | static int test__group2(struct perf_evlist *evlist) | ||
525 | { | ||
526 | struct perf_evsel *evsel, *leader; | ||
527 | |||
528 | TEST_ASSERT_VAL("wrong number of entries", 3 == evlist->nr_entries); | ||
529 | |||
530 | /* faults + :ku modifier */ | ||
531 | evsel = leader = perf_evlist__first(evlist); | ||
532 | TEST_ASSERT_VAL("wrong type", PERF_TYPE_SOFTWARE == evsel->attr.type); | ||
533 | TEST_ASSERT_VAL("wrong config", | ||
534 | PERF_COUNT_SW_PAGE_FAULTS == evsel->attr.config); | ||
535 | TEST_ASSERT_VAL("wrong exclude_user", !evsel->attr.exclude_user); | ||
536 | TEST_ASSERT_VAL("wrong exclude_kernel", !evsel->attr.exclude_kernel); | ||
537 | TEST_ASSERT_VAL("wrong exclude_hv", evsel->attr.exclude_hv); | ||
538 | TEST_ASSERT_VAL("wrong exclude guest", !evsel->attr.exclude_guest); | ||
539 | TEST_ASSERT_VAL("wrong exclude host", !evsel->attr.exclude_host); | ||
540 | TEST_ASSERT_VAL("wrong precise_ip", !evsel->attr.precise_ip); | ||
541 | TEST_ASSERT_VAL("wrong leader", evsel->leader == NULL); | ||
542 | |||
543 | /* cache-references + :u modifier */ | ||
544 | evsel = perf_evsel__next(evsel); | ||
545 | TEST_ASSERT_VAL("wrong type", PERF_TYPE_HARDWARE == evsel->attr.type); | ||
546 | TEST_ASSERT_VAL("wrong config", | ||
547 | PERF_COUNT_HW_CACHE_REFERENCES == evsel->attr.config); | ||
548 | TEST_ASSERT_VAL("wrong exclude_user", !evsel->attr.exclude_user); | ||
549 | TEST_ASSERT_VAL("wrong exclude_kernel", evsel->attr.exclude_kernel); | ||
550 | TEST_ASSERT_VAL("wrong exclude_hv", evsel->attr.exclude_hv); | ||
551 | TEST_ASSERT_VAL("wrong exclude guest", !evsel->attr.exclude_guest); | ||
552 | TEST_ASSERT_VAL("wrong exclude host", !evsel->attr.exclude_host); | ||
553 | TEST_ASSERT_VAL("wrong precise_ip", !evsel->attr.precise_ip); | ||
554 | TEST_ASSERT_VAL("wrong leader", evsel->leader == leader); | ||
555 | |||
556 | /* cycles:k */ | ||
557 | evsel = perf_evsel__next(evsel); | ||
558 | TEST_ASSERT_VAL("wrong type", PERF_TYPE_HARDWARE == evsel->attr.type); | ||
559 | TEST_ASSERT_VAL("wrong config", | ||
560 | PERF_COUNT_HW_CPU_CYCLES == evsel->attr.config); | ||
561 | TEST_ASSERT_VAL("wrong exclude_user", evsel->attr.exclude_user); | ||
562 | TEST_ASSERT_VAL("wrong exclude_kernel", !evsel->attr.exclude_kernel); | ||
563 | TEST_ASSERT_VAL("wrong exclude_hv", evsel->attr.exclude_hv); | ||
564 | TEST_ASSERT_VAL("wrong exclude guest", !evsel->attr.exclude_guest); | ||
565 | TEST_ASSERT_VAL("wrong exclude host", !evsel->attr.exclude_host); | ||
566 | TEST_ASSERT_VAL("wrong precise_ip", !evsel->attr.precise_ip); | ||
567 | TEST_ASSERT_VAL("wrong leader", evsel->leader == NULL); | ||
568 | |||
569 | return 0; | ||
570 | } | ||
571 | |||
572 | static int test__group3(struct perf_evlist *evlist) | ||
573 | { | ||
574 | struct perf_evsel *evsel, *leader; | ||
575 | |||
576 | TEST_ASSERT_VAL("wrong number of entries", 5 == evlist->nr_entries); | ||
577 | |||
578 | /* group1 syscalls:sys_enter_open:H */ | ||
579 | evsel = leader = perf_evlist__first(evlist); | ||
580 | TEST_ASSERT_VAL("wrong type", PERF_TYPE_TRACEPOINT == evsel->attr.type); | ||
581 | TEST_ASSERT_VAL("wrong sample_type", | ||
582 | PERF_TP_SAMPLE_TYPE == evsel->attr.sample_type); | ||
583 | TEST_ASSERT_VAL("wrong sample_period", 1 == evsel->attr.sample_period); | ||
584 | TEST_ASSERT_VAL("wrong exclude_user", !evsel->attr.exclude_user); | ||
585 | TEST_ASSERT_VAL("wrong exclude_kernel", !evsel->attr.exclude_kernel); | ||
586 | TEST_ASSERT_VAL("wrong exclude_hv", !evsel->attr.exclude_hv); | ||
587 | TEST_ASSERT_VAL("wrong exclude guest", evsel->attr.exclude_guest); | ||
588 | TEST_ASSERT_VAL("wrong exclude host", !evsel->attr.exclude_host); | ||
589 | TEST_ASSERT_VAL("wrong precise_ip", !evsel->attr.precise_ip); | ||
590 | TEST_ASSERT_VAL("wrong leader", evsel->leader == NULL); | ||
591 | TEST_ASSERT_VAL("wrong group name", | ||
592 | !strcmp(leader->group_name, "group1")); | ||
593 | |||
594 | /* group1 cycles:kppp */ | ||
595 | evsel = perf_evsel__next(evsel); | ||
596 | TEST_ASSERT_VAL("wrong type", PERF_TYPE_HARDWARE == evsel->attr.type); | ||
597 | TEST_ASSERT_VAL("wrong config", | ||
598 | PERF_COUNT_HW_CPU_CYCLES == evsel->attr.config); | ||
599 | TEST_ASSERT_VAL("wrong exclude_user", evsel->attr.exclude_user); | ||
600 | TEST_ASSERT_VAL("wrong exclude_kernel", !evsel->attr.exclude_kernel); | ||
601 | TEST_ASSERT_VAL("wrong exclude_hv", evsel->attr.exclude_hv); | ||
602 | TEST_ASSERT_VAL("wrong exclude guest", !evsel->attr.exclude_guest); | ||
603 | TEST_ASSERT_VAL("wrong exclude host", !evsel->attr.exclude_host); | ||
604 | TEST_ASSERT_VAL("wrong precise_ip", evsel->attr.precise_ip == 3); | ||
605 | TEST_ASSERT_VAL("wrong leader", evsel->leader == leader); | ||
606 | TEST_ASSERT_VAL("wrong group name", !evsel->group_name); | ||
607 | |||
608 | /* group2 cycles + G modifier */ | ||
609 | evsel = leader = perf_evsel__next(evsel); | ||
610 | TEST_ASSERT_VAL("wrong type", PERF_TYPE_HARDWARE == evsel->attr.type); | ||
611 | TEST_ASSERT_VAL("wrong config", | ||
612 | PERF_COUNT_HW_CPU_CYCLES == evsel->attr.config); | ||
613 | TEST_ASSERT_VAL("wrong exclude_user", !evsel->attr.exclude_user); | ||
614 | TEST_ASSERT_VAL("wrong exclude_kernel", !evsel->attr.exclude_kernel); | ||
615 | TEST_ASSERT_VAL("wrong exclude_hv", !evsel->attr.exclude_hv); | ||
616 | TEST_ASSERT_VAL("wrong exclude guest", !evsel->attr.exclude_guest); | ||
617 | TEST_ASSERT_VAL("wrong exclude host", evsel->attr.exclude_host); | ||
618 | TEST_ASSERT_VAL("wrong precise_ip", !evsel->attr.precise_ip); | ||
619 | TEST_ASSERT_VAL("wrong leader", evsel->leader == NULL); | ||
620 | TEST_ASSERT_VAL("wrong group name", | ||
621 | !strcmp(leader->group_name, "group2")); | ||
622 | |||
623 | /* group2 1:3 + G modifier */ | ||
624 | evsel = perf_evsel__next(evsel); | ||
625 | TEST_ASSERT_VAL("wrong type", 1 == evsel->attr.type); | ||
626 | TEST_ASSERT_VAL("wrong config", 3 == evsel->attr.config); | ||
627 | TEST_ASSERT_VAL("wrong exclude_user", !evsel->attr.exclude_user); | ||
628 | TEST_ASSERT_VAL("wrong exclude_kernel", !evsel->attr.exclude_kernel); | ||
629 | TEST_ASSERT_VAL("wrong exclude_hv", !evsel->attr.exclude_hv); | ||
630 | TEST_ASSERT_VAL("wrong exclude guest", !evsel->attr.exclude_guest); | ||
631 | TEST_ASSERT_VAL("wrong exclude host", evsel->attr.exclude_host); | ||
632 | TEST_ASSERT_VAL("wrong precise_ip", !evsel->attr.precise_ip); | ||
633 | TEST_ASSERT_VAL("wrong leader", evsel->leader == leader); | ||
634 | |||
635 | /* instructions:u */ | ||
636 | evsel = perf_evsel__next(evsel); | ||
637 | TEST_ASSERT_VAL("wrong type", PERF_TYPE_HARDWARE == evsel->attr.type); | ||
638 | TEST_ASSERT_VAL("wrong config", | ||
639 | PERF_COUNT_HW_INSTRUCTIONS == evsel->attr.config); | ||
640 | TEST_ASSERT_VAL("wrong exclude_user", !evsel->attr.exclude_user); | ||
641 | TEST_ASSERT_VAL("wrong exclude_kernel", evsel->attr.exclude_kernel); | ||
642 | TEST_ASSERT_VAL("wrong exclude_hv", evsel->attr.exclude_hv); | ||
643 | TEST_ASSERT_VAL("wrong exclude guest", !evsel->attr.exclude_guest); | ||
644 | TEST_ASSERT_VAL("wrong exclude host", !evsel->attr.exclude_host); | ||
645 | TEST_ASSERT_VAL("wrong precise_ip", !evsel->attr.precise_ip); | ||
646 | TEST_ASSERT_VAL("wrong leader", evsel->leader == NULL); | ||
647 | |||
648 | return 0; | ||
649 | } | ||
650 | |||
651 | static int test__group4(struct perf_evlist *evlist __maybe_unused) | ||
652 | { | ||
653 | struct perf_evsel *evsel, *leader; | ||
654 | |||
655 | TEST_ASSERT_VAL("wrong number of entries", 2 == evlist->nr_entries); | ||
656 | |||
657 | /* cycles:u + p */ | ||
658 | evsel = leader = perf_evlist__first(evlist); | ||
659 | TEST_ASSERT_VAL("wrong type", PERF_TYPE_HARDWARE == evsel->attr.type); | ||
660 | TEST_ASSERT_VAL("wrong config", | ||
661 | PERF_COUNT_HW_CPU_CYCLES == evsel->attr.config); | ||
662 | TEST_ASSERT_VAL("wrong exclude_user", !evsel->attr.exclude_user); | ||
663 | TEST_ASSERT_VAL("wrong exclude_kernel", evsel->attr.exclude_kernel); | ||
664 | TEST_ASSERT_VAL("wrong exclude_hv", evsel->attr.exclude_hv); | ||
665 | TEST_ASSERT_VAL("wrong exclude guest", !evsel->attr.exclude_guest); | ||
666 | TEST_ASSERT_VAL("wrong exclude host", !evsel->attr.exclude_host); | ||
667 | TEST_ASSERT_VAL("wrong precise_ip", evsel->attr.precise_ip == 1); | ||
668 | TEST_ASSERT_VAL("wrong group name", !evsel->group_name); | ||
669 | TEST_ASSERT_VAL("wrong leader", evsel->leader == NULL); | ||
670 | |||
671 | /* instructions:kp + p */ | ||
672 | evsel = perf_evsel__next(evsel); | ||
673 | TEST_ASSERT_VAL("wrong type", PERF_TYPE_HARDWARE == evsel->attr.type); | ||
674 | TEST_ASSERT_VAL("wrong config", | ||
675 | PERF_COUNT_HW_INSTRUCTIONS == evsel->attr.config); | ||
676 | TEST_ASSERT_VAL("wrong exclude_user", evsel->attr.exclude_user); | ||
677 | TEST_ASSERT_VAL("wrong exclude_kernel", !evsel->attr.exclude_kernel); | ||
678 | TEST_ASSERT_VAL("wrong exclude_hv", evsel->attr.exclude_hv); | ||
679 | TEST_ASSERT_VAL("wrong exclude guest", !evsel->attr.exclude_guest); | ||
680 | TEST_ASSERT_VAL("wrong exclude host", !evsel->attr.exclude_host); | ||
681 | TEST_ASSERT_VAL("wrong precise_ip", evsel->attr.precise_ip == 2); | ||
682 | TEST_ASSERT_VAL("wrong leader", evsel->leader == leader); | ||
683 | |||
684 | return 0; | ||
685 | } | ||
686 | |||
687 | static int test__group5(struct perf_evlist *evlist __maybe_unused) | ||
688 | { | ||
689 | struct perf_evsel *evsel, *leader; | ||
690 | |||
691 | TEST_ASSERT_VAL("wrong number of entries", 5 == evlist->nr_entries); | ||
692 | |||
693 | /* cycles + G */ | ||
694 | evsel = leader = perf_evlist__first(evlist); | ||
695 | TEST_ASSERT_VAL("wrong type", PERF_TYPE_HARDWARE == evsel->attr.type); | ||
696 | TEST_ASSERT_VAL("wrong config", | ||
697 | PERF_COUNT_HW_CPU_CYCLES == evsel->attr.config); | ||
698 | TEST_ASSERT_VAL("wrong exclude_user", !evsel->attr.exclude_user); | ||
699 | TEST_ASSERT_VAL("wrong exclude_kernel", !evsel->attr.exclude_kernel); | ||
700 | TEST_ASSERT_VAL("wrong exclude_hv", !evsel->attr.exclude_hv); | ||
701 | TEST_ASSERT_VAL("wrong exclude guest", !evsel->attr.exclude_guest); | ||
702 | TEST_ASSERT_VAL("wrong exclude host", evsel->attr.exclude_host); | ||
703 | TEST_ASSERT_VAL("wrong precise_ip", !evsel->attr.precise_ip); | ||
704 | TEST_ASSERT_VAL("wrong group name", !evsel->group_name); | ||
705 | TEST_ASSERT_VAL("wrong leader", evsel->leader == NULL); | ||
706 | |||
707 | /* instructions + G */ | ||
708 | evsel = perf_evsel__next(evsel); | ||
709 | TEST_ASSERT_VAL("wrong type", PERF_TYPE_HARDWARE == evsel->attr.type); | ||
710 | TEST_ASSERT_VAL("wrong config", | ||
711 | PERF_COUNT_HW_INSTRUCTIONS == evsel->attr.config); | ||
712 | TEST_ASSERT_VAL("wrong exclude_user", !evsel->attr.exclude_user); | ||
713 | TEST_ASSERT_VAL("wrong exclude_kernel", !evsel->attr.exclude_kernel); | ||
714 | TEST_ASSERT_VAL("wrong exclude_hv", !evsel->attr.exclude_hv); | ||
715 | TEST_ASSERT_VAL("wrong exclude guest", !evsel->attr.exclude_guest); | ||
716 | TEST_ASSERT_VAL("wrong exclude host", evsel->attr.exclude_host); | ||
717 | TEST_ASSERT_VAL("wrong precise_ip", !evsel->attr.precise_ip); | ||
718 | TEST_ASSERT_VAL("wrong leader", evsel->leader == leader); | ||
719 | |||
720 | /* cycles:G */ | ||
721 | evsel = leader = perf_evsel__next(evsel); | ||
722 | TEST_ASSERT_VAL("wrong type", PERF_TYPE_HARDWARE == evsel->attr.type); | ||
723 | TEST_ASSERT_VAL("wrong config", | ||
724 | PERF_COUNT_HW_CPU_CYCLES == evsel->attr.config); | ||
725 | TEST_ASSERT_VAL("wrong exclude_user", !evsel->attr.exclude_user); | ||
726 | TEST_ASSERT_VAL("wrong exclude_kernel", !evsel->attr.exclude_kernel); | ||
727 | TEST_ASSERT_VAL("wrong exclude_hv", !evsel->attr.exclude_hv); | ||
728 | TEST_ASSERT_VAL("wrong exclude guest", !evsel->attr.exclude_guest); | ||
729 | TEST_ASSERT_VAL("wrong exclude host", evsel->attr.exclude_host); | ||
730 | TEST_ASSERT_VAL("wrong precise_ip", !evsel->attr.precise_ip); | ||
731 | TEST_ASSERT_VAL("wrong group name", !evsel->group_name); | ||
732 | TEST_ASSERT_VAL("wrong leader", evsel->leader == NULL); | ||
733 | |||
734 | /* instructions:G */ | ||
735 | evsel = perf_evsel__next(evsel); | ||
736 | TEST_ASSERT_VAL("wrong type", PERF_TYPE_HARDWARE == evsel->attr.type); | ||
737 | TEST_ASSERT_VAL("wrong config", | ||
738 | PERF_COUNT_HW_INSTRUCTIONS == evsel->attr.config); | ||
739 | TEST_ASSERT_VAL("wrong exclude_user", !evsel->attr.exclude_user); | ||
740 | TEST_ASSERT_VAL("wrong exclude_kernel", !evsel->attr.exclude_kernel); | ||
741 | TEST_ASSERT_VAL("wrong exclude_hv", !evsel->attr.exclude_hv); | ||
742 | TEST_ASSERT_VAL("wrong exclude guest", !evsel->attr.exclude_guest); | ||
743 | TEST_ASSERT_VAL("wrong exclude host", evsel->attr.exclude_host); | ||
744 | TEST_ASSERT_VAL("wrong precise_ip", !evsel->attr.precise_ip); | ||
745 | TEST_ASSERT_VAL("wrong leader", evsel->leader == leader); | ||
746 | |||
747 | /* cycles */ | ||
748 | evsel = perf_evsel__next(evsel); | ||
749 | TEST_ASSERT_VAL("wrong type", PERF_TYPE_HARDWARE == evsel->attr.type); | ||
750 | TEST_ASSERT_VAL("wrong config", | ||
751 | PERF_COUNT_HW_CPU_CYCLES == evsel->attr.config); | ||
752 | TEST_ASSERT_VAL("wrong exclude_user", !evsel->attr.exclude_user); | ||
753 | TEST_ASSERT_VAL("wrong exclude_kernel", !evsel->attr.exclude_kernel); | ||
754 | TEST_ASSERT_VAL("wrong exclude_hv", !evsel->attr.exclude_hv); | ||
755 | TEST_ASSERT_VAL("wrong exclude guest", evsel->attr.exclude_guest); | ||
756 | TEST_ASSERT_VAL("wrong exclude host", !evsel->attr.exclude_host); | ||
757 | TEST_ASSERT_VAL("wrong precise_ip", !evsel->attr.precise_ip); | ||
758 | TEST_ASSERT_VAL("wrong leader", evsel->leader == NULL); | ||
759 | |||
760 | return 0; | ||
761 | } | ||
762 | |||
516 | struct test__event_st { | 763 | struct test__event_st { |
517 | const char *name; | 764 | const char *name; |
518 | __u32 type; | 765 | __u32 type; |
@@ -632,6 +879,26 @@ static struct test__event_st test__events[] = { | |||
632 | .name = "mem:0:rw:kp", | 879 | .name = "mem:0:rw:kp", |
633 | .check = test__checkevent_breakpoint_rw_modifier, | 880 | .check = test__checkevent_breakpoint_rw_modifier, |
634 | }, | 881 | }, |
882 | [28] = { | ||
883 | .name = "{instructions:k,cycles:upp}", | ||
884 | .check = test__group1, | ||
885 | }, | ||
886 | [29] = { | ||
887 | .name = "{faults:k,cache-references}:u,cycles:k", | ||
888 | .check = test__group2, | ||
889 | }, | ||
890 | [30] = { | ||
891 | .name = "group1{syscalls:sys_enter_open:H,cycles:kppp},group2{cycles,1:3}:G,instructions:u", | ||
892 | .check = test__group3, | ||
893 | }, | ||
894 | [31] = { | ||
895 | .name = "{cycles:u,instructions:kp}:p", | ||
896 | .check = test__group4, | ||
897 | }, | ||
898 | [32] = { | ||
899 | .name = "{cycles,instructions}:G,{cycles:G,instructions:G},cycles", | ||
900 | .check = test__group5, | ||
901 | }, | ||
635 | }; | 902 | }; |
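The new table entries [28]–[32] above feed the group syntax straight through the normal event parser. A hedged usage sketch of the same entry point outside the test harness (the evlist is assumed to be created and torn down by the caller):

#include "evlist.h"
#include "parse-events.h"

/* illustrative only: exercises the new "{...}:mod" group syntax */
static int try_group_syntax(struct perf_evlist *evlist)
{
	/* two-member group, cycles:u leads; the trailing :p adds one
	 * precise level to each member, as test__group4 checks */
	return parse_events(evlist, "{cycles:u,instructions:kp}:p", 0);
}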
636 | 903 | ||
637 | static struct test__event_st test__events_pmu[] = { | 904 | static struct test__event_st test__events_pmu[] = { |
@@ -658,9 +925,6 @@ static struct test__term test__terms[] = { | |||
658 | }, | 925 | }, |
659 | }; | 926 | }; |
660 | 927 | ||
661 | #define TEST__TERMS_CNT (sizeof(test__terms) / \ | ||
662 | sizeof(struct test__term)) | ||
663 | |||
664 | static int test_event(struct test__event_st *e) | 928 | static int test_event(struct test__event_st *e) |
665 | { | 929 | { |
666 | struct perf_evlist *evlist; | 930 | struct perf_evlist *evlist; |
@@ -685,19 +949,19 @@ static int test_event(struct test__event_st *e) | |||
685 | 949 | ||
686 | static int test_events(struct test__event_st *events, unsigned cnt) | 950 | static int test_events(struct test__event_st *events, unsigned cnt) |
687 | { | 951 | { |
688 | int ret = 0; | 952 | int ret1, ret2 = 0; |
689 | unsigned i; | 953 | unsigned i; |
690 | 954 | ||
691 | for (i = 0; i < cnt; i++) { | 955 | for (i = 0; i < cnt; i++) { |
692 | struct test__event_st *e = &events[i]; | 956 | struct test__event_st *e = &events[i]; |
693 | 957 | ||
694 | pr_debug("running test %d '%s'\n", i, e->name); | 958 | pr_debug("running test %d '%s'\n", i, e->name); |
695 | ret = test_event(e); | 959 | ret1 = test_event(e); |
696 | if (ret) | 960 | if (ret1) |
697 | break; | 961 | ret2 = ret1; |
698 | } | 962 | } |
699 | 963 | ||
700 | return ret; | 964 | return ret2; |
701 | } | 965 | } |
702 | 966 | ||
703 | static int test_term(struct test__term *t) | 967 | static int test_term(struct test__term *t) |
@@ -752,19 +1016,19 @@ static int test_pmu(void) | |||
752 | 1016 | ||
753 | ret = stat(path, &st); | 1017 | ret = stat(path, &st); |
754 | if (ret) | 1018 | if (ret) |
755 | pr_debug("ommiting PMU cpu tests\n"); | 1019 | pr_debug("omitting PMU cpu tests\n"); |
756 | return !ret; | 1020 | return !ret; |
757 | } | 1021 | } |
758 | 1022 | ||
759 | int parse_events__test(void) | 1023 | int parse_events__test(void) |
760 | { | 1024 | { |
761 | int ret; | 1025 | int ret1, ret2 = 0; |
762 | 1026 | ||
763 | #define TEST_EVENTS(tests) \ | 1027 | #define TEST_EVENTS(tests) \ |
764 | do { \ | 1028 | do { \ |
765 | ret = test_events(tests, ARRAY_SIZE(tests)); \ | 1029 | ret1 = test_events(tests, ARRAY_SIZE(tests)); \ |
766 | if (ret) \ | 1030 | if (!ret2) \ |
767 | return ret; \ | 1031 | ret2 = ret1; \ |
768 | } while (0) | 1032 | } while (0) |
769 | 1033 | ||
770 | TEST_EVENTS(test__events); | 1034 | TEST_EVENTS(test__events); |
@@ -772,5 +1036,9 @@ do { \ | |||
772 | if (test_pmu()) | 1036 | if (test_pmu()) |
773 | TEST_EVENTS(test__events_pmu); | 1037 | TEST_EVENTS(test__events_pmu); |
774 | 1038 | ||
775 | return test_terms(test__terms, ARRAY_SIZE(test__terms)); | 1039 | ret1 = test_terms(test__terms, ARRAY_SIZE(test__terms)); |
1040 | if (!ret2) | ||
1041 | ret2 = ret1; | ||
1042 | |||
1043 | return ret2; | ||
776 | } | 1044 | } |
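The harness tweaks above (ret1/ret2 in test_events() and parse_events__test()) keep running the remaining entries after a failure instead of bailing out on the first one, while still returning a non-zero result at the end. A stand-alone sketch of that pattern (run_one() is a made-up placeholder):

#include <stdio.h>

static int run_one(unsigned i)
{
	return i == 2 ? -1 : 0;		/* pretend entry 2 fails */
}

int main(void)
{
	int ret1, ret2 = 0;
	unsigned i;

	for (i = 0; i < 5; i++) {
		ret1 = run_one(i);
		if (ret1)
			ret2 = ret1;	/* remember the failure, keep going */
	}
	printf("overall: %d\n", ret2);	/* non-zero if anything failed */
	return ret2 ? 1 : 0;
}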
diff --git a/tools/perf/util/parse-events.c b/tools/perf/util/parse-events.c index 74a5af4d33ec..aed38e4b9dfa 100644 --- a/tools/perf/util/parse-events.c +++ b/tools/perf/util/parse-events.c | |||
@@ -239,8 +239,11 @@ const char *event_type(int type) | |||
239 | return "unknown"; | 239 | return "unknown"; |
240 | } | 240 | } |
241 | 241 | ||
242 | static int add_event(struct list_head **_list, int *idx, | 242 | |
243 | struct perf_event_attr *attr, char *name) | 243 | |
244 | static int __add_event(struct list_head **_list, int *idx, | ||
245 | struct perf_event_attr *attr, | ||
246 | char *name, struct cpu_map *cpus) | ||
244 | { | 247 | { |
245 | struct perf_evsel *evsel; | 248 | struct perf_evsel *evsel; |
246 | struct list_head *list = *_list; | 249 | struct list_head *list = *_list; |
@@ -260,6 +263,7 @@ static int add_event(struct list_head **_list, int *idx, | |||
260 | return -ENOMEM; | 263 | return -ENOMEM; |
261 | } | 264 | } |
262 | 265 | ||
266 | evsel->cpus = cpus; | ||
263 | if (name) | 267 | if (name) |
264 | evsel->name = strdup(name); | 268 | evsel->name = strdup(name); |
265 | list_add_tail(&evsel->node, list); | 269 | list_add_tail(&evsel->node, list); |
@@ -267,6 +271,12 @@ static int add_event(struct list_head **_list, int *idx, | |||
267 | return 0; | 271 | return 0; |
268 | } | 272 | } |
269 | 273 | ||
274 | static int add_event(struct list_head **_list, int *idx, | ||
275 | struct perf_event_attr *attr, char *name) | ||
276 | { | ||
277 | return __add_event(_list, idx, attr, name, NULL); | ||
278 | } | ||
279 | |||
270 | static int parse_aliases(char *str, const char *names[][PERF_EVSEL__MAX_ALIASES], int size) | 280 | static int parse_aliases(char *str, const char *names[][PERF_EVSEL__MAX_ALIASES], int size) |
271 | { | 281 | { |
272 | int i, j; | 282 | int i, j; |
@@ -308,7 +318,7 @@ int parse_events_add_cache(struct list_head **list, int *idx, | |||
308 | for (i = 0; (i < 2) && (op_result[i]); i++) { | 318 | for (i = 0; (i < 2) && (op_result[i]); i++) { |
309 | char *str = op_result[i]; | 319 | char *str = op_result[i]; |
310 | 320 | ||
311 | snprintf(name + n, MAX_NAME_LEN - n, "-%s\n", str); | 321 | n += snprintf(name + n, MAX_NAME_LEN - n, "-%s", str); |
312 | 322 | ||
313 | if (cache_op == -1) { | 323 | if (cache_op == -1) { |
314 | cache_op = parse_aliases(str, perf_evsel__hw_cache_op, | 324 | cache_op = parse_aliases(str, perf_evsel__hw_cache_op, |
@@ -346,42 +356,28 @@ int parse_events_add_cache(struct list_head **list, int *idx, | |||
346 | return add_event(list, idx, &attr, name); | 356 | return add_event(list, idx, &attr, name); |
347 | } | 357 | } |
348 | 358 | ||
349 | static int add_tracepoint(struct list_head **list, int *idx, | 359 | static int add_tracepoint(struct list_head **listp, int *idx, |
350 | char *sys_name, char *evt_name) | 360 | char *sys_name, char *evt_name) |
351 | { | 361 | { |
352 | struct perf_event_attr attr; | 362 | struct perf_evsel *evsel; |
353 | char name[MAX_NAME_LEN]; | 363 | struct list_head *list = *listp; |
354 | char evt_path[MAXPATHLEN]; | ||
355 | char id_buf[4]; | ||
356 | u64 id; | ||
357 | int fd; | ||
358 | |||
359 | snprintf(evt_path, MAXPATHLEN, "%s/%s/%s/id", tracing_events_path, | ||
360 | sys_name, evt_name); | ||
361 | |||
362 | fd = open(evt_path, O_RDONLY); | ||
363 | if (fd < 0) | ||
364 | return -1; | ||
365 | 364 | ||
366 | if (read(fd, id_buf, sizeof(id_buf)) < 0) { | 365 | if (!list) { |
367 | close(fd); | 366 | list = malloc(sizeof(*list)); |
368 | return -1; | 367 | if (!list) |
368 | return -ENOMEM; | ||
369 | INIT_LIST_HEAD(list); | ||
369 | } | 370 | } |
370 | 371 | ||
371 | close(fd); | 372 | evsel = perf_evsel__newtp(sys_name, evt_name, (*idx)++); |
372 | id = atoll(id_buf); | 373 | if (!evsel) { |
373 | 374 | free(list); | |
374 | memset(&attr, 0, sizeof(attr)); | 375 | return -ENOMEM; |
375 | attr.config = id; | 376 | } |
376 | attr.type = PERF_TYPE_TRACEPOINT; | ||
377 | attr.sample_type |= PERF_SAMPLE_RAW; | ||
378 | attr.sample_type |= PERF_SAMPLE_TIME; | ||
379 | attr.sample_type |= PERF_SAMPLE_CPU; | ||
380 | attr.sample_type |= PERF_SAMPLE_PERIOD; | ||
381 | attr.sample_period = 1; | ||
382 | 377 | ||
383 | snprintf(name, MAX_NAME_LEN, "%s:%s", sys_name, evt_name); | 378 | list_add_tail(&evsel->node, list); |
384 | return add_event(list, idx, &attr, name); | 379 | *listp = list; |
380 | return 0; | ||
385 | } | 381 | } |
386 | 382 | ||
387 | static int add_tracepoint_multi(struct list_head **list, int *idx, | 383 | static int add_tracepoint_multi(struct list_head **list, int *idx, |
@@ -551,7 +547,7 @@ static int config_attr(struct perf_event_attr *attr, | |||
551 | } | 547 | } |
552 | 548 | ||
553 | int parse_events_add_numeric(struct list_head **list, int *idx, | 549 | int parse_events_add_numeric(struct list_head **list, int *idx, |
554 | unsigned long type, unsigned long config, | 550 | u32 type, u64 config, |
555 | struct list_head *head_config) | 551 | struct list_head *head_config) |
556 | { | 552 | { |
557 | struct perf_event_attr attr; | 553 | struct perf_event_attr attr; |
@@ -607,8 +603,23 @@ int parse_events_add_pmu(struct list_head **list, int *idx, | |||
607 | if (perf_pmu__config(pmu, &attr, head_config)) | 603 | if (perf_pmu__config(pmu, &attr, head_config)) |
608 | return -EINVAL; | 604 | return -EINVAL; |
609 | 605 | ||
610 | return add_event(list, idx, &attr, | 606 | return __add_event(list, idx, &attr, pmu_event_name(head_config), |
611 | pmu_event_name(head_config)); | 607 | pmu->cpus); |
608 | } | ||
609 | |||
610 | int parse_events__modifier_group(struct list_head *list, | ||
611 | char *event_mod) | ||
612 | { | ||
613 | return parse_events__modifier_event(list, event_mod, true); | ||
614 | } | ||
615 | |||
616 | void parse_events__set_leader(char *name, struct list_head *list) | ||
617 | { | ||
618 | struct perf_evsel *leader; | ||
619 | |||
620 | __perf_evlist__set_leader(list); | ||
621 | leader = list_entry(list->next, struct perf_evsel, node); | ||
622 | leader->group_name = name ? strdup(name) : NULL; | ||
612 | } | 623 | } |
613 | 624 | ||
614 | void parse_events_update_lists(struct list_head *list_event, | 625 | void parse_events_update_lists(struct list_head *list_event, |
@@ -616,21 +627,45 @@ void parse_events_update_lists(struct list_head *list_event, | |||
616 | { | 627 | { |
617 | /* | 628 | /* |
618 | * Called for single event definition. Update the | 629 | * Called for single event definition. Update the |
619 | * 'all event' list, and reinit the 'signle event' | 630 | * 'all event' list, and reinit the 'single event' |
620 | * list, for next event definition. | 631 | * list, for next event definition. |
621 | */ | 632 | */ |
622 | list_splice_tail(list_event, list_all); | 633 | list_splice_tail(list_event, list_all); |
623 | free(list_event); | 634 | free(list_event); |
624 | } | 635 | } |
625 | 636 | ||
626 | int parse_events_modifier(struct list_head *list, char *str) | 637 | struct event_modifier { |
638 | int eu; | ||
639 | int ek; | ||
640 | int eh; | ||
641 | int eH; | ||
642 | int eG; | ||
643 | int precise; | ||
644 | int exclude_GH; | ||
645 | }; | ||
646 | |||
647 | static int get_event_modifier(struct event_modifier *mod, char *str, | ||
648 | struct perf_evsel *evsel) | ||
627 | { | 649 | { |
628 | struct perf_evsel *evsel; | 650 | int eu = evsel ? evsel->attr.exclude_user : 0; |
629 | int exclude = 0, exclude_GH = 0; | 651 | int ek = evsel ? evsel->attr.exclude_kernel : 0; |
630 | int eu = 0, ek = 0, eh = 0, eH = 0, eG = 0, precise = 0; | 652 | int eh = evsel ? evsel->attr.exclude_hv : 0; |
653 | int eH = evsel ? evsel->attr.exclude_host : 0; | ||
654 | int eG = evsel ? evsel->attr.exclude_guest : 0; | ||
655 | int precise = evsel ? evsel->attr.precise_ip : 0; | ||
631 | 656 | ||
632 | if (str == NULL) | 657 | int exclude = eu | ek | eh; |
633 | return 0; | 658 | int exclude_GH = evsel ? evsel->exclude_GH : 0; |
659 | |||
660 | /* | ||
661 | * We are here for group and 'GH' was not set as event | ||
662 | * modifier and whatever event/group modifier override | ||
663 | * default 'GH' setup. | ||
664 | */ | ||
665 | if (evsel && !exclude_GH) | ||
666 | eH = eG = 0; | ||
667 | |||
668 | memset(mod, 0, sizeof(*mod)); | ||
634 | 669 | ||
635 | while (*str) { | 670 | while (*str) { |
636 | if (*str == 'u') { | 671 | if (*str == 'u') { |
@@ -674,13 +709,51 @@ int parse_events_modifier(struct list_head *list, char *str) | |||
674 | if (precise > 3) | 709 | if (precise > 3) |
675 | return -EINVAL; | 710 | return -EINVAL; |
676 | 711 | ||
712 | mod->eu = eu; | ||
713 | mod->ek = ek; | ||
714 | mod->eh = eh; | ||
715 | mod->eH = eH; | ||
716 | mod->eG = eG; | ||
717 | mod->precise = precise; | ||
718 | mod->exclude_GH = exclude_GH; | ||
719 | return 0; | ||
720 | } | ||
721 | |||
722 | int parse_events__modifier_event(struct list_head *list, char *str, bool add) | ||
723 | { | ||
724 | struct perf_evsel *evsel; | ||
725 | struct event_modifier mod; | ||
726 | |||
727 | if (str == NULL) | ||
728 | return 0; | ||
729 | |||
730 | if (!add && get_event_modifier(&mod, str, NULL)) | ||
731 | return -EINVAL; | ||
732 | |||
677 | list_for_each_entry(evsel, list, node) { | 733 | list_for_each_entry(evsel, list, node) { |
678 | evsel->attr.exclude_user = eu; | 734 | |
679 | evsel->attr.exclude_kernel = ek; | 735 | if (add && get_event_modifier(&mod, str, evsel)) |
680 | evsel->attr.exclude_hv = eh; | 736 | return -EINVAL; |
681 | evsel->attr.precise_ip = precise; | 737 | |
682 | evsel->attr.exclude_host = eH; | 738 | evsel->attr.exclude_user = mod.eu; |
683 | evsel->attr.exclude_guest = eG; | 739 | evsel->attr.exclude_kernel = mod.ek; |
740 | evsel->attr.exclude_hv = mod.eh; | ||
741 | evsel->attr.precise_ip = mod.precise; | ||
742 | evsel->attr.exclude_host = mod.eH; | ||
743 | evsel->attr.exclude_guest = mod.eG; | ||
744 | evsel->exclude_GH = mod.exclude_GH; | ||
745 | } | ||
746 | |||
747 | return 0; | ||
748 | } | ||
749 | |||
750 | int parse_events_name(struct list_head *list, char *name) | ||
751 | { | ||
752 | struct perf_evsel *evsel; | ||
753 | |||
754 | list_for_each_entry(evsel, list, node) { | ||
755 | if (!evsel->name) | ||
756 | evsel->name = strdup(name); | ||
684 | } | 757 | } |
685 | 758 | ||
686 | return 0; | 759 | return 0; |
@@ -730,7 +803,8 @@ int parse_events_terms(struct list_head *terms, const char *str) | |||
730 | return ret; | 803 | return ret; |
731 | } | 804 | } |
732 | 805 | ||
733 | int parse_events(struct perf_evlist *evlist, const char *str, int unset __used) | 806 | int parse_events(struct perf_evlist *evlist, const char *str, |
807 | int unset __maybe_unused) | ||
734 | { | 808 | { |
735 | struct parse_events_data__events data = { | 809 | struct parse_events_data__events data = { |
736 | .list = LIST_HEAD_INIT(data.list), | 810 | .list = LIST_HEAD_INIT(data.list), |
@@ -756,20 +830,20 @@ int parse_events(struct perf_evlist *evlist, const char *str, int unset __used) | |||
756 | } | 830 | } |
757 | 831 | ||
758 | int parse_events_option(const struct option *opt, const char *str, | 832 | int parse_events_option(const struct option *opt, const char *str, |
759 | int unset __used) | 833 | int unset __maybe_unused) |
760 | { | 834 | { |
761 | struct perf_evlist *evlist = *(struct perf_evlist **)opt->value; | 835 | struct perf_evlist *evlist = *(struct perf_evlist **)opt->value; |
762 | return parse_events(evlist, str, unset); | 836 | return parse_events(evlist, str, unset); |
763 | } | 837 | } |
764 | 838 | ||
765 | int parse_filter(const struct option *opt, const char *str, | 839 | int parse_filter(const struct option *opt, const char *str, |
766 | int unset __used) | 840 | int unset __maybe_unused) |
767 | { | 841 | { |
768 | struct perf_evlist *evlist = *(struct perf_evlist **)opt->value; | 842 | struct perf_evlist *evlist = *(struct perf_evlist **)opt->value; |
769 | struct perf_evsel *last = NULL; | 843 | struct perf_evsel *last = NULL; |
770 | 844 | ||
771 | if (evlist->nr_entries > 0) | 845 | if (evlist->nr_entries > 0) |
772 | last = list_entry(evlist->entries.prev, struct perf_evsel, node); | 846 | last = perf_evlist__last(evlist); |
773 | 847 | ||
774 | if (last == NULL || last->attr.type != PERF_TYPE_TRACEPOINT) { | 848 | if (last == NULL || last->attr.type != PERF_TYPE_TRACEPOINT) { |
775 | fprintf(stderr, | 849 | fprintf(stderr, |
@@ -799,7 +873,8 @@ static const char * const event_type_descriptors[] = { | |||
799 | * Print the events from <debugfs_mount_point>/tracing/events | 873 | * Print the events from <debugfs_mount_point>/tracing/events |
800 | */ | 874 | */ |
801 | 875 | ||
802 | void print_tracepoint_events(const char *subsys_glob, const char *event_glob) | 876 | void print_tracepoint_events(const char *subsys_glob, const char *event_glob, |
877 | bool name_only) | ||
803 | { | 878 | { |
804 | DIR *sys_dir, *evt_dir; | 879 | DIR *sys_dir, *evt_dir; |
805 | struct dirent *sys_next, *evt_next, sys_dirent, evt_dirent; | 880 | struct dirent *sys_next, *evt_next, sys_dirent, evt_dirent; |
@@ -829,6 +904,11 @@ void print_tracepoint_events(const char *subsys_glob, const char *event_glob) | |||
829 | !strglobmatch(evt_dirent.d_name, event_glob)) | 904 | !strglobmatch(evt_dirent.d_name, event_glob)) |
830 | continue; | 905 | continue; |
831 | 906 | ||
907 | if (name_only) { | ||
908 | printf("%s:%s ", sys_dirent.d_name, evt_dirent.d_name); | ||
909 | continue; | ||
910 | } | ||
911 | |||
832 | snprintf(evt_path, MAXPATHLEN, "%s:%s", | 912 | snprintf(evt_path, MAXPATHLEN, "%s:%s", |
833 | sys_dirent.d_name, evt_dirent.d_name); | 913 | sys_dirent.d_name, evt_dirent.d_name); |
834 | printf(" %-50s [%s]\n", evt_path, | 914 | printf(" %-50s [%s]\n", evt_path, |
@@ -906,7 +986,7 @@ void print_events_type(u8 type) | |||
906 | __print_events_type(type, event_symbols_hw, PERF_COUNT_HW_MAX); | 986 | __print_events_type(type, event_symbols_hw, PERF_COUNT_HW_MAX); |
907 | } | 987 | } |
908 | 988 | ||
909 | int print_hwcache_events(const char *event_glob) | 989 | int print_hwcache_events(const char *event_glob, bool name_only) |
910 | { | 990 | { |
911 | unsigned int type, op, i, printed = 0; | 991 | unsigned int type, op, i, printed = 0; |
912 | char name[64]; | 992 | char name[64]; |
@@ -923,8 +1003,11 @@ int print_hwcache_events(const char *event_glob) | |||
923 | if (event_glob != NULL && !strglobmatch(name, event_glob)) | 1003 | if (event_glob != NULL && !strglobmatch(name, event_glob)) |
924 | continue; | 1004 | continue; |
925 | 1005 | ||
926 | printf(" %-50s [%s]\n", name, | 1006 | if (name_only) |
927 | event_type_descriptors[PERF_TYPE_HW_CACHE]); | 1007 | printf("%s ", name); |
1008 | else | ||
1009 | printf(" %-50s [%s]\n", name, | ||
1010 | event_type_descriptors[PERF_TYPE_HW_CACHE]); | ||
928 | ++printed; | 1011 | ++printed; |
929 | } | 1012 | } |
930 | } | 1013 | } |
@@ -934,7 +1017,8 @@ int print_hwcache_events(const char *event_glob) | |||
934 | } | 1017 | } |
935 | 1018 | ||
936 | static void print_symbol_events(const char *event_glob, unsigned type, | 1019 | static void print_symbol_events(const char *event_glob, unsigned type, |
937 | struct event_symbol *syms, unsigned max) | 1020 | struct event_symbol *syms, unsigned max, |
1021 | bool name_only) | ||
938 | { | 1022 | { |
939 | unsigned i, printed = 0; | 1023 | unsigned i, printed = 0; |
940 | char name[MAX_NAME_LEN]; | 1024 | char name[MAX_NAME_LEN]; |
@@ -946,6 +1030,11 @@ static void print_symbol_events(const char *event_glob, unsigned type, | |||
946 | (syms->alias && strglobmatch(syms->alias, event_glob)))) | 1030 | (syms->alias && strglobmatch(syms->alias, event_glob)))) |
947 | continue; | 1031 | continue; |
948 | 1032 | ||
1033 | if (name_only) { | ||
1034 | printf("%s ", syms->symbol); | ||
1035 | continue; | ||
1036 | } | ||
1037 | |||
949 | if (strlen(syms->alias)) | 1038 | if (strlen(syms->alias)) |
950 | snprintf(name, MAX_NAME_LEN, "%s OR %s", syms->symbol, syms->alias); | 1039 | snprintf(name, MAX_NAME_LEN, "%s OR %s", syms->symbol, syms->alias); |
951 | else | 1040 | else |
@@ -963,39 +1052,42 @@ static void print_symbol_events(const char *event_glob, unsigned type, | |||
963 | /* | 1052 | /* |
964 | * Print the help text for the event symbols: | 1053 | * Print the help text for the event symbols: |
965 | */ | 1054 | */ |
966 | void print_events(const char *event_glob) | 1055 | void print_events(const char *event_glob, bool name_only) |
967 | { | 1056 | { |
968 | 1057 | if (!name_only) { | |
969 | printf("\n"); | 1058 | printf("\n"); |
970 | printf("List of pre-defined events (to be used in -e):\n"); | 1059 | printf("List of pre-defined events (to be used in -e):\n"); |
1060 | } | ||
971 | 1061 | ||
972 | print_symbol_events(event_glob, PERF_TYPE_HARDWARE, | 1062 | print_symbol_events(event_glob, PERF_TYPE_HARDWARE, |
973 | event_symbols_hw, PERF_COUNT_HW_MAX); | 1063 | event_symbols_hw, PERF_COUNT_HW_MAX, name_only); |
974 | 1064 | ||
975 | print_symbol_events(event_glob, PERF_TYPE_SOFTWARE, | 1065 | print_symbol_events(event_glob, PERF_TYPE_SOFTWARE, |
976 | event_symbols_sw, PERF_COUNT_SW_MAX); | 1066 | event_symbols_sw, PERF_COUNT_SW_MAX, name_only); |
977 | 1067 | ||
978 | print_hwcache_events(event_glob); | 1068 | print_hwcache_events(event_glob, name_only); |
979 | 1069 | ||
980 | if (event_glob != NULL) | 1070 | if (event_glob != NULL) |
981 | return; | 1071 | return; |
982 | 1072 | ||
983 | printf("\n"); | 1073 | if (!name_only) { |
984 | printf(" %-50s [%s]\n", | 1074 | printf("\n"); |
985 | "rNNN", | 1075 | printf(" %-50s [%s]\n", |
986 | event_type_descriptors[PERF_TYPE_RAW]); | 1076 | "rNNN", |
987 | printf(" %-50s [%s]\n", | 1077 | event_type_descriptors[PERF_TYPE_RAW]); |
988 | "cpu/t1=v1[,t2=v2,t3 ...]/modifier", | 1078 | printf(" %-50s [%s]\n", |
989 | event_type_descriptors[PERF_TYPE_RAW]); | 1079 | "cpu/t1=v1[,t2=v2,t3 ...]/modifier", |
990 | printf(" (see 'perf list --help' on how to encode it)\n"); | 1080 | event_type_descriptors[PERF_TYPE_RAW]); |
991 | printf("\n"); | 1081 | printf(" (see 'perf list --help' on how to encode it)\n"); |
992 | 1082 | printf("\n"); | |
993 | printf(" %-50s [%s]\n", | 1083 | |
994 | "mem:<addr>[:access]", | 1084 | printf(" %-50s [%s]\n", |
1085 | "mem:<addr>[:access]", | ||
995 | event_type_descriptors[PERF_TYPE_BREAKPOINT]); | 1086 | event_type_descriptors[PERF_TYPE_BREAKPOINT]); |
996 | printf("\n"); | 1087 | printf("\n"); |
1088 | } | ||
997 | 1089 | ||
998 | print_tracepoint_events(NULL, NULL); | 1090 | print_tracepoint_events(NULL, NULL, name_only); |
999 | } | 1091 | } |
1000 | 1092 | ||
1001 | int parse_events__is_hardcoded_term(struct parse_events__term *term) | 1093 | int parse_events__is_hardcoded_term(struct parse_events__term *term) |
@@ -1005,7 +1097,7 @@ int parse_events__is_hardcoded_term(struct parse_events__term *term) | |||
1005 | 1097 | ||
1006 | static int new_term(struct parse_events__term **_term, int type_val, | 1098 | static int new_term(struct parse_events__term **_term, int type_val, |
1007 | int type_term, char *config, | 1099 | int type_term, char *config, |
1008 | char *str, long num) | 1100 | char *str, u64 num) |
1009 | { | 1101 | { |
1010 | struct parse_events__term *term; | 1102 | struct parse_events__term *term; |
1011 | 1103 | ||
@@ -1034,7 +1126,7 @@ static int new_term(struct parse_events__term **_term, int type_val, | |||
1034 | } | 1126 | } |
1035 | 1127 | ||
1036 | int parse_events__term_num(struct parse_events__term **term, | 1128 | int parse_events__term_num(struct parse_events__term **term, |
1037 | int type_term, char *config, long num) | 1129 | int type_term, char *config, u64 num) |
1038 | { | 1130 | { |
1039 | return new_term(term, PARSE_EVENTS__TERM_TYPE_NUM, type_term, | 1131 | return new_term(term, PARSE_EVENTS__TERM_TYPE_NUM, type_term, |
1040 | config, NULL, num); | 1132 | config, NULL, num); |
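get_event_modifier() seeds its bits from the evsel when a group-level modifier is layered on top of per-event modifiers, which is why the "+ G" and "+ p" cases in the tests come out the way they do. A self-contained sketch of how a plain modifier string maps onto the exclude_*/precise bits; struct mod_bits and parse_mod() are illustrative stand-ins, not perf structures:

#include <string.h>

struct mod_bits {
	int eu, ek, eh, eH, eG;
	int precise;
	int exclude_GH;
};

static int parse_mod(struct mod_bits *m, const char *str)
{
	int exclude = 0;

	memset(m, 0, sizeof(*m));
	for (; *str; str++) {
		switch (*str) {
		case 'u':
			if (!exclude)		/* first of u/k/h excludes all */
				exclude = m->eu = m->ek = m->eh = 1;
			m->eu = 0;		/* ...then count user space */
			break;
		case 'k':
			if (!exclude)
				exclude = m->eu = m->ek = m->eh = 1;
			m->ek = 0;		/* count kernel */
			break;
		case 'h':
			if (!exclude)
				exclude = m->eu = m->ek = m->eh = 1;
			m->eh = 0;		/* count hypervisor */
			break;
		case 'G':
			if (!m->exclude_GH)
				m->exclude_GH = m->eG = m->eH = 1;
			m->eG = 0;		/* count guest */
			break;
		case 'H':
			if (!m->exclude_GH)
				m->exclude_GH = m->eG = m->eH = 1;
			m->eH = 0;		/* count host */
			break;
		case 'p':
			m->precise++;		/* up to 3 precision levels */
			break;
		default:
			return -1;
		}
	}
	return m->precise > 3 ? -1 : 0;
}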
diff --git a/tools/perf/util/parse-events.h b/tools/perf/util/parse-events.h index ee9c218a193c..c356e443448d 100644 --- a/tools/perf/util/parse-events.h +++ b/tools/perf/util/parse-events.h | |||
@@ -55,7 +55,7 @@ struct parse_events__term { | |||
55 | char *config; | 55 | char *config; |
56 | union { | 56 | union { |
57 | char *str; | 57 | char *str; |
58 | long num; | 58 | u64 num; |
59 | } val; | 59 | } val; |
60 | int type_val; | 60 | int type_val; |
61 | int type_term; | 61 | int type_term; |
@@ -73,17 +73,19 @@ struct parse_events_data__terms { | |||
73 | 73 | ||
74 | int parse_events__is_hardcoded_term(struct parse_events__term *term); | 74 | int parse_events__is_hardcoded_term(struct parse_events__term *term); |
75 | int parse_events__term_num(struct parse_events__term **_term, | 75 | int parse_events__term_num(struct parse_events__term **_term, |
76 | int type_term, char *config, long num); | 76 | int type_term, char *config, u64 num); |
77 | int parse_events__term_str(struct parse_events__term **_term, | 77 | int parse_events__term_str(struct parse_events__term **_term, |
78 | int type_term, char *config, char *str); | 78 | int type_term, char *config, char *str); |
79 | int parse_events__term_clone(struct parse_events__term **new, | 79 | int parse_events__term_clone(struct parse_events__term **new, |
80 | struct parse_events__term *term); | 80 | struct parse_events__term *term); |
81 | void parse_events__free_terms(struct list_head *terms); | 81 | void parse_events__free_terms(struct list_head *terms); |
82 | int parse_events_modifier(struct list_head *list, char *str); | 82 | int parse_events__modifier_event(struct list_head *list, char *str, bool add); |
83 | int parse_events__modifier_group(struct list_head *list, char *event_mod); | ||
84 | int parse_events_name(struct list_head *list, char *name); | ||
83 | int parse_events_add_tracepoint(struct list_head **list, int *idx, | 85 | int parse_events_add_tracepoint(struct list_head **list, int *idx, |
84 | char *sys, char *event); | 86 | char *sys, char *event); |
85 | int parse_events_add_numeric(struct list_head **list, int *idx, | 87 | int parse_events_add_numeric(struct list_head **list, int *idx, |
86 | unsigned long type, unsigned long config, | 88 | u32 type, u64 config, |
87 | struct list_head *head_config); | 89 | struct list_head *head_config); |
88 | int parse_events_add_cache(struct list_head **list, int *idx, | 90 | int parse_events_add_cache(struct list_head **list, int *idx, |
89 | char *type, char *op_result1, char *op_result2); | 91 | char *type, char *op_result1, char *op_result2); |
@@ -91,15 +93,17 @@ int parse_events_add_breakpoint(struct list_head **list, int *idx, | |||
91 | void *ptr, char *type); | 93 | void *ptr, char *type); |
92 | int parse_events_add_pmu(struct list_head **list, int *idx, | 94 | int parse_events_add_pmu(struct list_head **list, int *idx, |
93 | char *pmu , struct list_head *head_config); | 95 | char *pmu , struct list_head *head_config); |
96 | void parse_events__set_leader(char *name, struct list_head *list); | ||
94 | void parse_events_update_lists(struct list_head *list_event, | 97 | void parse_events_update_lists(struct list_head *list_event, |
95 | struct list_head *list_all); | 98 | struct list_head *list_all); |
96 | void parse_events_error(void *data, void *scanner, char const *msg); | 99 | void parse_events_error(void *data, void *scanner, char const *msg); |
97 | int parse_events__test(void); | 100 | int parse_events__test(void); |
98 | 101 | ||
99 | void print_events(const char *event_glob); | 102 | void print_events(const char *event_glob, bool name_only); |
100 | void print_events_type(u8 type); | 103 | void print_events_type(u8 type); |
101 | void print_tracepoint_events(const char *subsys_glob, const char *event_glob); | 104 | void print_tracepoint_events(const char *subsys_glob, const char *event_glob, |
102 | int print_hwcache_events(const char *event_glob); | 105 | bool name_only); |
106 | int print_hwcache_events(const char *event_glob, bool name_only); | ||
103 | extern int is_valid_tracepoint(const char *event_string); | 107 | extern int is_valid_tracepoint(const char *event_string); |
104 | 108 | ||
105 | extern int valid_debugfs_mount(const char *debugfs); | 109 | extern int valid_debugfs_mount(const char *debugfs); |
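The bool name_only threaded through the listing helpers switches them from the formatted help output to a bare, space-separated stream of names, presumably for scripted consumers. A hypothetical caller:

#include <stdbool.h>
#include "parse-events.h"

static void dump_event_names(void)
{
	/* bare names only, e.g. "cycles instructions cache-misses ..." */
	print_events(NULL, true);

	/* the usual human-readable listing with type descriptions */
	print_hwcache_events("L1-dcache-*", false);
}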
diff --git a/tools/perf/util/parse-events.l b/tools/perf/util/parse-events.l index 384ca74c6b22..c87efc12579d 100644 --- a/tools/perf/util/parse-events.l +++ b/tools/perf/util/parse-events.l | |||
@@ -15,10 +15,10 @@ YYSTYPE *parse_events_get_lval(yyscan_t yyscanner); | |||
15 | 15 | ||
16 | static int __value(YYSTYPE *yylval, char *str, int base, int token) | 16 | static int __value(YYSTYPE *yylval, char *str, int base, int token) |
17 | { | 17 | { |
18 | long num; | 18 | u64 num; |
19 | 19 | ||
20 | errno = 0; | 20 | errno = 0; |
21 | num = strtoul(str, NULL, base); | 21 | num = strtoull(str, NULL, base); |
22 | if (errno) | 22 | if (errno) |
23 | return PE_ERROR; | 23 | return PE_ERROR; |
24 | 24 | ||
@@ -70,6 +70,12 @@ static int term(yyscan_t scanner, int type) | |||
70 | %} | 70 | %} |
71 | 71 | ||
72 | %x mem | 72 | %x mem |
73 | %s config | ||
74 | %x event | ||
75 | |||
76 | group [^,{}/]*[{][^}]*[}][^,{}/]* | ||
77 | event_pmu [^,{}/]+[/][^/]*[/][^,{}/]* | ||
78 | event [^,{}/]+ | ||
73 | 79 | ||
74 | num_dec [0-9]+ | 80 | num_dec [0-9]+ |
75 | num_hex 0x[a-fA-F0-9]+ | 81 | num_hex 0x[a-fA-F0-9]+ |
@@ -84,7 +90,13 @@ modifier_bp [rwx]{1,3} | |||
84 | { | 90 | { |
85 | int start_token; | 91 | int start_token; |
86 | 92 | ||
87 | start_token = (int) parse_events_get_extra(yyscanner); | 93 | start_token = parse_events_get_extra(yyscanner); |
94 | |||
95 | if (start_token == PE_START_TERMS) | ||
96 | BEGIN(config); | ||
97 | else if (start_token == PE_START_EVENTS) | ||
98 | BEGIN(event); | ||
99 | |||
88 | if (start_token) { | 100 | if (start_token) { |
89 | parse_events_set_extra(NULL, yyscanner); | 101 | parse_events_set_extra(NULL, yyscanner); |
90 | return start_token; | 102 | return start_token; |
@@ -92,6 +104,26 @@ modifier_bp [rwx]{1,3} | |||
92 | } | 104 | } |
93 | %} | 105 | %} |
94 | 106 | ||
107 | <event>{ | ||
108 | |||
109 | {group} { | ||
110 | BEGIN(INITIAL); yyless(0); | ||
111 | } | ||
112 | |||
113 | {event_pmu} | | ||
114 | {event} { | ||
115 | str(yyscanner, PE_EVENT_NAME); | ||
116 | BEGIN(INITIAL); yyless(0); | ||
117 | return PE_EVENT_NAME; | ||
118 | } | ||
119 | |||
120 | . | | ||
121 | <<EOF>> { | ||
122 | BEGIN(INITIAL); yyless(0); | ||
123 | } | ||
124 | |||
125 | } | ||
126 | |||
95 | cpu-cycles|cycles { return sym(yyscanner, PERF_TYPE_HARDWARE, PERF_COUNT_HW_CPU_CYCLES); } | 127 | cpu-cycles|cycles { return sym(yyscanner, PERF_TYPE_HARDWARE, PERF_COUNT_HW_CPU_CYCLES); } |
96 | stalled-cycles-frontend|idle-cycles-frontend { return sym(yyscanner, PERF_TYPE_HARDWARE, PERF_COUNT_HW_STALLED_CYCLES_FRONTEND); } | 128 | stalled-cycles-frontend|idle-cycles-frontend { return sym(yyscanner, PERF_TYPE_HARDWARE, PERF_COUNT_HW_STALLED_CYCLES_FRONTEND); } |
97 | stalled-cycles-backend|idle-cycles-backend { return sym(yyscanner, PERF_TYPE_HARDWARE, PERF_COUNT_HW_STALLED_CYCLES_BACKEND); } | 129 | stalled-cycles-backend|idle-cycles-backend { return sym(yyscanner, PERF_TYPE_HARDWARE, PERF_COUNT_HW_STALLED_CYCLES_BACKEND); } |
@@ -127,18 +159,16 @@ speculative-read|speculative-load | | |||
127 | refs|Reference|ops|access | | 159 | refs|Reference|ops|access | |
128 | misses|miss { return str(yyscanner, PE_NAME_CACHE_OP_RESULT); } | 160 | misses|miss { return str(yyscanner, PE_NAME_CACHE_OP_RESULT); } |
129 | 161 | ||
130 | /* | 162 | <config>{ |
131 | * These are event config hardcoded term names to be specified | ||
132 | * within xxx/.../ syntax. So far we dont clash with other names, | ||
133 | * so we can put them here directly. In case the we have a conflict | ||
134 | * in future, this needs to go into '//' condition block. | ||
135 | */ | ||
136 | config { return term(yyscanner, PARSE_EVENTS__TERM_TYPE_CONFIG); } | 163 | config { return term(yyscanner, PARSE_EVENTS__TERM_TYPE_CONFIG); } |
137 | config1 { return term(yyscanner, PARSE_EVENTS__TERM_TYPE_CONFIG1); } | 164 | config1 { return term(yyscanner, PARSE_EVENTS__TERM_TYPE_CONFIG1); } |
138 | config2 { return term(yyscanner, PARSE_EVENTS__TERM_TYPE_CONFIG2); } | 165 | config2 { return term(yyscanner, PARSE_EVENTS__TERM_TYPE_CONFIG2); } |
139 | name { return term(yyscanner, PARSE_EVENTS__TERM_TYPE_NAME); } | 166 | name { return term(yyscanner, PARSE_EVENTS__TERM_TYPE_NAME); } |
140 | period { return term(yyscanner, PARSE_EVENTS__TERM_TYPE_SAMPLE_PERIOD); } | 167 | period { return term(yyscanner, PARSE_EVENTS__TERM_TYPE_SAMPLE_PERIOD); } |
141 | branch_type { return term(yyscanner, PARSE_EVENTS__TERM_TYPE_BRANCH_SAMPLE_TYPE); } | 168 | branch_type { return term(yyscanner, PARSE_EVENTS__TERM_TYPE_BRANCH_SAMPLE_TYPE); } |
169 | , { return ','; } | ||
170 | "/" { BEGIN(INITIAL); return '/'; } | ||
171 | } | ||
142 | 172 | ||
143 | mem: { BEGIN(mem); return PE_PREFIX_MEM; } | 173 | mem: { BEGIN(mem); return PE_PREFIX_MEM; } |
144 | r{num_raw_hex} { return raw(yyscanner); } | 174 | r{num_raw_hex} { return raw(yyscanner); } |
@@ -147,10 +177,12 @@ r{num_raw_hex} { return raw(yyscanner); } | |||
147 | 177 | ||
148 | {modifier_event} { return str(yyscanner, PE_MODIFIER_EVENT); } | 178 | {modifier_event} { return str(yyscanner, PE_MODIFIER_EVENT); } |
149 | {name} { return str(yyscanner, PE_NAME); } | 179 | {name} { return str(yyscanner, PE_NAME); } |
150 | "/" { return '/'; } | 180 | "/" { BEGIN(config); return '/'; } |
151 | - { return '-'; } | 181 | - { return '-'; } |
152 | , { return ','; } | 182 | , { BEGIN(event); return ','; } |
153 | : { return ':'; } | 183 | : { return ':'; } |
184 | "{" { BEGIN(event); return '{'; } | ||
185 | "}" { return '}'; } | ||
154 | = { return '='; } | 186 | = { return '='; } |
155 | \n { } | 187 | \n { } |
156 | 188 | ||
@@ -175,7 +207,7 @@ r{num_raw_hex} { return raw(yyscanner); } | |||
175 | 207 | ||
176 | %% | 208 | %% |
177 | 209 | ||
178 | int parse_events_wrap(void *scanner __used) | 210 | int parse_events_wrap(void *scanner __maybe_unused) |
179 | { | 211 | { |
180 | return 1; | 212 | return 1; |
181 | } | 213 | } |
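The new <event> start condition, together with the {group}, {event_pmu} and {event} patterns, lets the scanner hand the parser one complete event or brace-delimited group at a time before re-scanning it for tokens. A rough C illustration of that top-level split (not the generated scanner, just the idea):

#include <stdio.h>

static void split_events(const char *spec)
{
	const char *p = spec, *start = spec;
	int in_group = 0;

	for (;; p++) {
		if (*p == '{')
			in_group = 1;
		else if (*p == '}')
			in_group = 0;
		/* a comma only separates events outside of '{...}' */
		if ((*p == ',' && !in_group) || *p == '\0') {
			printf("event/group: %.*s\n", (int)(p - start), start);
			if (*p == '\0')
				break;
			start = p + 1;
		}
	}
}

int main(void)
{
	split_events("group1{syscalls:sys_enter_open:H,cycles:kppp},"
		     "group2{cycles,1:3}:G,instructions:u");
	return 0;
}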
diff --git a/tools/perf/util/parse-events.y b/tools/perf/util/parse-events.y index 2bc5fbff2b5d..cd88209e3c58 100644 --- a/tools/perf/util/parse-events.y +++ b/tools/perf/util/parse-events.y | |||
@@ -27,10 +27,11 @@ do { \ | |||
27 | 27 | ||
28 | %token PE_START_EVENTS PE_START_TERMS | 28 | %token PE_START_EVENTS PE_START_TERMS |
29 | %token PE_VALUE PE_VALUE_SYM_HW PE_VALUE_SYM_SW PE_RAW PE_TERM | 29 | %token PE_VALUE PE_VALUE_SYM_HW PE_VALUE_SYM_SW PE_RAW PE_TERM |
30 | %token PE_EVENT_NAME | ||
30 | %token PE_NAME | 31 | %token PE_NAME |
31 | %token PE_MODIFIER_EVENT PE_MODIFIER_BP | 32 | %token PE_MODIFIER_EVENT PE_MODIFIER_BP |
32 | %token PE_NAME_CACHE_TYPE PE_NAME_CACHE_OP_RESULT | 33 | %token PE_NAME_CACHE_TYPE PE_NAME_CACHE_OP_RESULT |
33 | %token PE_PREFIX_MEM PE_PREFIX_RAW | 34 | %token PE_PREFIX_MEM PE_PREFIX_RAW PE_PREFIX_GROUP |
34 | %token PE_ERROR | 35 | %token PE_ERROR |
35 | %type <num> PE_VALUE | 36 | %type <num> PE_VALUE |
36 | %type <num> PE_VALUE_SYM_HW | 37 | %type <num> PE_VALUE_SYM_HW |
@@ -42,6 +43,7 @@ do { \ | |||
42 | %type <str> PE_NAME_CACHE_OP_RESULT | 43 | %type <str> PE_NAME_CACHE_OP_RESULT |
43 | %type <str> PE_MODIFIER_EVENT | 44 | %type <str> PE_MODIFIER_EVENT |
44 | %type <str> PE_MODIFIER_BP | 45 | %type <str> PE_MODIFIER_BP |
46 | %type <str> PE_EVENT_NAME | ||
45 | %type <num> value_sym | 47 | %type <num> value_sym |
46 | %type <head> event_config | 48 | %type <head> event_config |
47 | %type <term> event_term | 49 | %type <term> event_term |
@@ -53,44 +55,125 @@ do { \ | |||
53 | %type <head> event_legacy_numeric | 55 | %type <head> event_legacy_numeric |
54 | %type <head> event_legacy_raw | 56 | %type <head> event_legacy_raw |
55 | %type <head> event_def | 57 | %type <head> event_def |
58 | %type <head> event_mod | ||
59 | %type <head> event_name | ||
60 | %type <head> event | ||
61 | %type <head> events | ||
62 | %type <head> group_def | ||
63 | %type <head> group | ||
64 | %type <head> groups | ||
56 | 65 | ||
57 | %union | 66 | %union |
58 | { | 67 | { |
59 | char *str; | 68 | char *str; |
60 | unsigned long num; | 69 | u64 num; |
61 | struct list_head *head; | 70 | struct list_head *head; |
62 | struct parse_events__term *term; | 71 | struct parse_events__term *term; |
63 | } | 72 | } |
64 | %% | 73 | %% |
65 | 74 | ||
66 | start: | 75 | start: |
67 | PE_START_EVENTS events | 76 | PE_START_EVENTS start_events |
68 | | | 77 | | |
69 | PE_START_TERMS terms | 78 | PE_START_TERMS start_terms |
79 | |||
80 | start_events: groups | ||
81 | { | ||
82 | struct parse_events_data__events *data = _data; | ||
83 | |||
84 | parse_events_update_lists($1, &data->list); | ||
85 | } | ||
86 | |||
87 | groups: | ||
88 | groups ',' group | ||
89 | { | ||
90 | struct list_head *list = $1; | ||
91 | struct list_head *group = $3; | ||
92 | |||
93 | parse_events_update_lists(group, list); | ||
94 | $$ = list; | ||
95 | } | ||
96 | | | ||
97 | groups ',' event | ||
98 | { | ||
99 | struct list_head *list = $1; | ||
100 | struct list_head *event = $3; | ||
101 | |||
102 | parse_events_update_lists(event, list); | ||
103 | $$ = list; | ||
104 | } | ||
105 | | | ||
106 | group | ||
107 | | | ||
108 | event | ||
109 | |||
110 | group: | ||
111 | group_def ':' PE_MODIFIER_EVENT | ||
112 | { | ||
113 | struct list_head *list = $1; | ||
114 | |||
115 | ABORT_ON(parse_events__modifier_group(list, $3)); | ||
116 | $$ = list; | ||
117 | } | ||
118 | | | ||
119 | group_def | ||
120 | |||
121 | group_def: | ||
122 | PE_NAME '{' events '}' | ||
123 | { | ||
124 | struct list_head *list = $3; | ||
125 | |||
126 | parse_events__set_leader($1, list); | ||
127 | $$ = list; | ||
128 | } | ||
129 | | | ||
130 | '{' events '}' | ||
131 | { | ||
132 | struct list_head *list = $2; | ||
133 | |||
134 | parse_events__set_leader(NULL, list); | ||
135 | $$ = list; | ||
136 | } | ||
70 | 137 | ||
71 | events: | 138 | events: |
72 | events ',' event | event | 139 | events ',' event |
140 | { | ||
141 | struct list_head *event = $3; | ||
142 | struct list_head *list = $1; | ||
73 | 143 | ||
74 | event: | 144 | parse_events_update_lists(event, list); |
75 | event_def PE_MODIFIER_EVENT | 145 | $$ = list; |
146 | } | ||
147 | | | ||
148 | event | ||
149 | |||
150 | event: event_mod | ||
151 | |||
152 | event_mod: | ||
153 | event_name PE_MODIFIER_EVENT | ||
76 | { | 154 | { |
77 | struct parse_events_data__events *data = _data; | 155 | struct list_head *list = $1; |
78 | 156 | ||
79 | /* | 157 | /* |
80 | * Apply the modifier to all events added by a single event definition | 158 | * Apply the modifier to all events added by a single event definition |
81 | * (there could be more events added for multiple tracepoint | 159 | * (there could be more events added for multiple tracepoint |
82 | * definitions via '*?'). | 160 | * definitions via '*?'). |
83 | */ | 161 | */ |
84 | ABORT_ON(parse_events_modifier($1, $2)); | 162 | ABORT_ON(parse_events__modifier_event(list, $2, false)); |
85 | parse_events_update_lists($1, &data->list); | 163 | $$ = list; |
86 | } | 164 | } |
87 | | | 165 | | |
88 | event_def | 166 | event_name |
89 | { | ||
90 | struct parse_events_data__events *data = _data; | ||
91 | 167 | ||
92 | parse_events_update_lists($1, &data->list); | 168 | event_name: |
169 | PE_EVENT_NAME event_def | ||
170 | { | ||
171 | ABORT_ON(parse_events_name($2, $1)); | ||
172 | free($1); | ||
173 | $$ = $2; | ||
93 | } | 174 | } |
175 | | | ||
176 | event_def | ||
94 | 177 | ||
95 | event_def: event_pmu | | 178 | event_def: event_pmu | |
96 | event_legacy_symbol | | 179 | event_legacy_symbol | |
@@ -207,7 +290,7 @@ PE_VALUE ':' PE_VALUE | |||
207 | struct parse_events_data__events *data = _data; | 290 | struct parse_events_data__events *data = _data; |
208 | struct list_head *list = NULL; | 291 | struct list_head *list = NULL; |
209 | 292 | ||
210 | ABORT_ON(parse_events_add_numeric(&list, &data->idx, $1, $3, NULL)); | 293 | ABORT_ON(parse_events_add_numeric(&list, &data->idx, (u32)$1, $3, NULL)); |
211 | $$ = list; | 294 | $$ = list; |
212 | } | 295 | } |
213 | 296 | ||
@@ -222,7 +305,7 @@ PE_RAW | |||
222 | $$ = list; | 305 | $$ = list; |
223 | } | 306 | } |
224 | 307 | ||
225 | terms: event_config | 308 | start_terms: event_config |
226 | { | 309 | { |
227 | struct parse_events_data__terms *data = _data; | 310 | struct parse_events_data__terms *data = _data; |
228 | data->terms = $1; | 311 | data->terms = $1; |
@@ -282,7 +365,7 @@ PE_TERM '=' PE_NAME | |||
282 | { | 365 | { |
283 | struct parse_events__term *term; | 366 | struct parse_events__term *term; |
284 | 367 | ||
285 | ABORT_ON(parse_events__term_str(&term, $1, NULL, $3)); | 368 | ABORT_ON(parse_events__term_str(&term, (int)$1, NULL, $3)); |
286 | $$ = term; | 369 | $$ = term; |
287 | } | 370 | } |
288 | | | 371 | | |
@@ -290,7 +373,7 @@ PE_TERM '=' PE_VALUE | |||
290 | { | 373 | { |
291 | struct parse_events__term *term; | 374 | struct parse_events__term *term; |
292 | 375 | ||
293 | ABORT_ON(parse_events__term_num(&term, $1, NULL, $3)); | 376 | ABORT_ON(parse_events__term_num(&term, (int)$1, NULL, $3)); |
294 | $$ = term; | 377 | $$ = term; |
295 | } | 378 | } |
296 | | | 379 | | |
@@ -298,7 +381,7 @@ PE_TERM | |||
298 | { | 381 | { |
299 | struct parse_events__term *term; | 382 | struct parse_events__term *term; |
300 | 383 | ||
301 | ABORT_ON(parse_events__term_num(&term, $1, NULL, 1)); | 384 | ABORT_ON(parse_events__term_num(&term, (int)$1, NULL, 1)); |
302 | $$ = term; | 385 | $$ = term; |
303 | } | 386 | } |
304 | 387 | ||
@@ -308,7 +391,7 @@ sep_slash_dc: '/' | ':' | | |||
308 | 391 | ||
309 | %% | 392 | %% |
310 | 393 | ||
311 | void parse_events_error(void *data __used, void *scanner __used, | 394 | void parse_events_error(void *data __maybe_unused, void *scanner __maybe_unused, |
312 | char const *msg __used) | 395 | char const *msg __maybe_unused) |
313 | { | 396 | { |
314 | } | 397 | } |
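A group_def reduces to a list whose first evsel becomes the leader via parse_events__set_leader() (see parse-events.c above); the optional PE_NAME in front of '{' becomes the group name on that leader. A simplified stand-alone model of the resulting links, with struct ev standing in for perf_evsel:

#include <stdlib.h>
#include <string.h>

struct ev {
	struct ev *leader;	/* NULL on the group leader itself */
	char *group_name;
	struct ev *next;
};

static void set_leader(struct ev *first, const char *name)
{
	struct ev *e;

	first->leader = NULL;
	first->group_name = name ? strdup(name) : NULL;
	for (e = first->next; e; e = e->next)
		e->leader = first;	/* members point back at the leader */
}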
diff --git a/tools/perf/util/parse-options.c b/tools/perf/util/parse-options.c index 594f8fad5ecd..443fc116512b 100644 --- a/tools/perf/util/parse-options.c +++ b/tools/perf/util/parse-options.c | |||
@@ -557,7 +557,8 @@ int parse_options_usage(const char * const *usagestr, | |||
557 | } | 557 | } |
558 | 558 | ||
559 | 559 | ||
560 | int parse_opt_verbosity_cb(const struct option *opt, const char *arg __used, | 560 | int parse_opt_verbosity_cb(const struct option *opt, |
561 | const char *arg __maybe_unused, | ||
561 | int unset) | 562 | int unset) |
562 | { | 563 | { |
563 | int *target = opt->value; | 564 | int *target = opt->value; |
diff --git a/tools/perf/util/perf_regs.h b/tools/perf/util/perf_regs.h new file mode 100644 index 000000000000..316dbe7f86ed --- /dev/null +++ b/tools/perf/util/perf_regs.h | |||
@@ -0,0 +1,14 @@ | |||
1 | #ifndef __PERF_REGS_H | ||
2 | #define __PERF_REGS_H | ||
3 | |||
4 | #ifndef NO_PERF_REGS | ||
5 | #include <perf_regs.h> | ||
6 | #else | ||
7 | #define PERF_REGS_MASK 0 | ||
8 | |||
9 | static inline const char *perf_reg_name(int id __maybe_unused) | ||
10 | { | ||
11 | return NULL; | ||
12 | } | ||
13 | #endif /* NO_PERF_REGS */ | ||
14 | #endif /* __PERF_REGS_H */ | ||
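The wrapper header gives every build a perf_reg_name() and a PERF_REGS_MASK, so callers need no #ifdef NO_PERF_REGS of their own. A hypothetical caller relying on the fallback:

#include <stdio.h>
#include "perf_regs.h"

static void show_reg(int id)
{
	const char *name = perf_reg_name(id);

	/* under NO_PERF_REGS this prints the fallback; PERF_REGS_MASK is 0 */
	printf("reg %d: %s\n", id, name ? name : "<no register support>");
}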
diff --git a/tools/perf/util/pmu.c b/tools/perf/util/pmu.c index 67715a42cd6d..8a2229da594f 100644 --- a/tools/perf/util/pmu.c +++ b/tools/perf/util/pmu.c | |||
@@ -9,6 +9,9 @@ | |||
9 | #include "util.h" | 9 | #include "util.h" |
10 | #include "pmu.h" | 10 | #include "pmu.h" |
11 | #include "parse-events.h" | 11 | #include "parse-events.h" |
12 | #include "cpumap.h" | ||
13 | |||
14 | #define EVENT_SOURCE_DEVICE_PATH "/bus/event_source/devices/" | ||
12 | 15 | ||
13 | int perf_pmu_parse(struct list_head *list, char *name); | 16 | int perf_pmu_parse(struct list_head *list, char *name); |
14 | extern FILE *perf_pmu_in; | 17 | extern FILE *perf_pmu_in; |
@@ -69,7 +72,7 @@ static int pmu_format(char *name, struct list_head *format) | |||
69 | return -1; | 72 | return -1; |
70 | 73 | ||
71 | snprintf(path, PATH_MAX, | 74 | snprintf(path, PATH_MAX, |
72 | "%s/bus/event_source/devices/%s/format", sysfs, name); | 75 | "%s" EVENT_SOURCE_DEVICE_PATH "%s/format", sysfs, name); |
73 | 76 | ||
74 | if (stat(path, &st) < 0) | 77 | if (stat(path, &st) < 0) |
75 | return 0; /* no error if format does not exist */ | 78 | return 0; /* no error if format does not exist */ |
@@ -206,7 +209,7 @@ static int pmu_type(char *name, __u32 *type) | |||
206 | return -1; | 209 | return -1; |
207 | 210 | ||
208 | snprintf(path, PATH_MAX, | 211 | snprintf(path, PATH_MAX, |
209 | "%s/bus/event_source/devices/%s/type", sysfs, name); | 212 | "%s" EVENT_SOURCE_DEVICE_PATH "%s/type", sysfs, name); |
210 | 213 | ||
211 | if (stat(path, &st) < 0) | 214 | if (stat(path, &st) < 0) |
212 | return -1; | 215 | return -1; |
@@ -222,6 +225,62 @@ static int pmu_type(char *name, __u32 *type) | |||
222 | return ret; | 225 | return ret; |
223 | } | 226 | } |
224 | 227 | ||
228 | /* Add all pmus in sysfs to pmu list: */ | ||
229 | static void pmu_read_sysfs(void) | ||
230 | { | ||
231 | char path[PATH_MAX]; | ||
232 | const char *sysfs; | ||
233 | DIR *dir; | ||
234 | struct dirent *dent; | ||
235 | |||
236 | sysfs = sysfs_find_mountpoint(); | ||
237 | if (!sysfs) | ||
238 | return; | ||
239 | |||
240 | snprintf(path, PATH_MAX, | ||
241 | "%s" EVENT_SOURCE_DEVICE_PATH, sysfs); | ||
242 | |||
243 | dir = opendir(path); | ||
244 | if (!dir) | ||
245 | return; | ||
246 | |||
247 | while ((dent = readdir(dir))) { | ||
248 | if (!strcmp(dent->d_name, ".") || !strcmp(dent->d_name, "..")) | ||
249 | continue; | ||
250 | /* add to static LIST_HEAD(pmus): */ | ||
251 | perf_pmu__find(dent->d_name); | ||
252 | } | ||
253 | |||
254 | closedir(dir); | ||
255 | } | ||
256 | |||
257 | static struct cpu_map *pmu_cpumask(char *name) | ||
258 | { | ||
259 | struct stat st; | ||
260 | char path[PATH_MAX]; | ||
261 | const char *sysfs; | ||
262 | FILE *file; | ||
263 | struct cpu_map *cpus; | ||
264 | |||
265 | sysfs = sysfs_find_mountpoint(); | ||
266 | if (!sysfs) | ||
267 | return NULL; | ||
268 | |||
269 | snprintf(path, PATH_MAX, | ||
270 | "%s/bus/event_source/devices/%s/cpumask", sysfs, name); | ||
271 | |||
272 | if (stat(path, &st) < 0) | ||
273 | return NULL; | ||
274 | |||
275 | file = fopen(path, "r"); | ||
276 | if (!file) | ||
277 | return NULL; | ||
278 | |||
279 | cpus = cpu_map__read(file); | ||
280 | fclose(file); | ||
281 | return cpus; | ||
282 | } | ||
283 | |||
225 | static struct perf_pmu *pmu_lookup(char *name) | 284 | static struct perf_pmu *pmu_lookup(char *name) |
226 | { | 285 | { |
227 | struct perf_pmu *pmu; | 286 | struct perf_pmu *pmu; |
@@ -244,6 +303,8 @@ static struct perf_pmu *pmu_lookup(char *name) | |||
244 | if (!pmu) | 303 | if (!pmu) |
245 | return NULL; | 304 | return NULL; |
246 | 305 | ||
306 | pmu->cpus = pmu_cpumask(name); | ||
307 | |||
247 | pmu_aliases(name, &aliases); | 308 | pmu_aliases(name, &aliases); |
248 | 309 | ||
249 | INIT_LIST_HEAD(&pmu->format); | 310 | INIT_LIST_HEAD(&pmu->format); |
@@ -267,6 +328,21 @@ static struct perf_pmu *pmu_find(char *name) | |||
267 | return NULL; | 328 | return NULL; |
268 | } | 329 | } |
269 | 330 | ||
331 | struct perf_pmu *perf_pmu__scan(struct perf_pmu *pmu) | ||
332 | { | ||
333 | /* | ||
334 | * pmu iterator: If pmu is NULL, we start at the beginning, | ||
335 | * otherwise return the next pmu. Returns NULL on end. | ||
336 | */ | ||
337 | if (!pmu) { | ||
338 | pmu_read_sysfs(); | ||
339 | pmu = list_prepare_entry(pmu, &pmus, list); | ||
340 | } | ||
341 | list_for_each_entry_continue(pmu, &pmus, list) | ||
342 | return pmu; | ||
343 | return NULL; | ||
344 | } | ||
345 | |||
270 | struct perf_pmu *perf_pmu__find(char *name) | 346 | struct perf_pmu *perf_pmu__find(char *name) |
271 | { | 347 | { |
272 | struct perf_pmu *pmu; | 348 | struct perf_pmu *pmu; |
diff --git a/tools/perf/util/pmu.h b/tools/perf/util/pmu.h index 535f2c5258ab..53c7794fc4be 100644 --- a/tools/perf/util/pmu.h +++ b/tools/perf/util/pmu.h | |||
@@ -28,6 +28,7 @@ struct perf_pmu__alias { | |||
28 | struct perf_pmu { | 28 | struct perf_pmu { |
29 | char *name; | 29 | char *name; |
30 | __u32 type; | 30 | __u32 type; |
31 | struct cpu_map *cpus; | ||
31 | struct list_head format; | 32 | struct list_head format; |
32 | struct list_head aliases; | 33 | struct list_head aliases; |
33 | struct list_head list; | 34 | struct list_head list; |
@@ -46,5 +47,7 @@ int perf_pmu__new_format(struct list_head *list, char *name, | |||
46 | int config, unsigned long *bits); | 47 | int config, unsigned long *bits); |
47 | void perf_pmu__set_format(unsigned long *bits, long from, long to); | 48 | void perf_pmu__set_format(unsigned long *bits, long from, long to); |
48 | 49 | ||
50 | struct perf_pmu *perf_pmu__scan(struct perf_pmu *pmu); | ||
51 | |||
49 | int perf_pmu__test(void); | 52 | int perf_pmu__test(void); |
50 | #endif /* __PMU_H */ | 53 | #endif /* __PMU_H */ |
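perf_pmu__scan() is a cursor-style iterator: passing NULL triggers the sysfs scan and returns the first PMU, passing the previous result returns the next one, and NULL marks the end. A hedged usage sketch:

#include <stdio.h>
#include "pmu.h"

static void list_pmus(void)
{
	struct perf_pmu *pmu = NULL;

	/* NULL starts the walk; the helper returns NULL once the list ends */
	while ((pmu = perf_pmu__scan(pmu)) != NULL)
		printf("pmu: %s (type %u)\n", pmu->name, pmu->type);
}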
diff --git a/tools/perf/util/pmu.y b/tools/perf/util/pmu.y index 20ea77e93169..ec898047ebb9 100644 --- a/tools/perf/util/pmu.y +++ b/tools/perf/util/pmu.y | |||
@@ -86,8 +86,8 @@ PP_VALUE | |||
86 | 86 | ||
87 | %% | 87 | %% |
88 | 88 | ||
89 | void perf_pmu_error(struct list_head *list __used, | 89 | void perf_pmu_error(struct list_head *list __maybe_unused, |
90 | char *name __used, | 90 | char *name __maybe_unused, |
91 | char const *msg __used) | 91 | char const *msg __maybe_unused) |
92 | { | 92 | { |
93 | } | 93 | } |
diff --git a/tools/perf/util/probe-event.c b/tools/perf/util/probe-event.c index 0dda25d82d06..49a256e6e0a2 100644 --- a/tools/perf/util/probe-event.c +++ b/tools/perf/util/probe-event.c | |||
@@ -41,7 +41,7 @@ | |||
41 | #include "symbol.h" | 41 | #include "symbol.h" |
42 | #include "thread.h" | 42 | #include "thread.h" |
43 | #include "debugfs.h" | 43 | #include "debugfs.h" |
44 | #include "trace-event.h" /* For __unused */ | 44 | #include "trace-event.h" /* For __maybe_unused */ |
45 | #include "probe-event.h" | 45 | #include "probe-event.h" |
46 | #include "probe-finder.h" | 46 | #include "probe-finder.h" |
47 | #include "session.h" | 47 | #include "session.h" |
@@ -647,8 +647,8 @@ static int kprobe_convert_to_perf_probe(struct probe_trace_point *tp, | |||
647 | } | 647 | } |
648 | 648 | ||
649 | static int try_to_find_probe_trace_events(struct perf_probe_event *pev, | 649 | static int try_to_find_probe_trace_events(struct perf_probe_event *pev, |
650 | struct probe_trace_event **tevs __unused, | 650 | struct probe_trace_event **tevs __maybe_unused, |
651 | int max_tevs __unused, const char *target) | 651 | int max_tevs __maybe_unused, const char *target) |
652 | { | 652 | { |
653 | if (perf_probe_event_need_dwarf(pev)) { | 653 | if (perf_probe_event_need_dwarf(pev)) { |
654 | pr_warning("Debuginfo-analysis is not supported.\n"); | 654 | pr_warning("Debuginfo-analysis is not supported.\n"); |
@@ -661,17 +661,18 @@ static int try_to_find_probe_trace_events(struct perf_probe_event *pev, | |||
661 | return 0; | 661 | return 0; |
662 | } | 662 | } |
663 | 663 | ||
664 | int show_line_range(struct line_range *lr __unused, const char *module __unused) | 664 | int show_line_range(struct line_range *lr __maybe_unused, |
665 | const char *module __maybe_unused) | ||
665 | { | 666 | { |
666 | pr_warning("Debuginfo-analysis is not supported.\n"); | 667 | pr_warning("Debuginfo-analysis is not supported.\n"); |
667 | return -ENOSYS; | 668 | return -ENOSYS; |
668 | } | 669 | } |
669 | 670 | ||
670 | int show_available_vars(struct perf_probe_event *pevs __unused, | 671 | int show_available_vars(struct perf_probe_event *pevs __maybe_unused, |
671 | int npevs __unused, int max_vls __unused, | 672 | int npevs __maybe_unused, int max_vls __maybe_unused, |
672 | const char *module __unused, | 673 | const char *module __maybe_unused, |
673 | struct strfilter *filter __unused, | 674 | struct strfilter *filter __maybe_unused, |
674 | bool externs __unused) | 675 | bool externs __maybe_unused) |
675 | { | 676 | { |
676 | pr_warning("Debuginfo-analysis is not supported.\n"); | 677 | pr_warning("Debuginfo-analysis is not supported.\n"); |
677 | return -ENOSYS; | 678 | return -ENOSYS; |
@@ -1099,6 +1100,7 @@ static int parse_probe_trace_command(const char *cmd, | |||
1099 | struct probe_trace_point *tp = &tev->point; | 1100 | struct probe_trace_point *tp = &tev->point; |
1100 | char pr; | 1101 | char pr; |
1101 | char *p; | 1102 | char *p; |
1103 | char *argv0_str = NULL, *fmt, *fmt1_str, *fmt2_str, *fmt3_str; | ||
1102 | int ret, i, argc; | 1104 | int ret, i, argc; |
1103 | char **argv; | 1105 | char **argv; |
1104 | 1106 | ||
@@ -1115,14 +1117,27 @@ static int parse_probe_trace_command(const char *cmd, | |||
1115 | } | 1117 | } |
1116 | 1118 | ||
1117 | /* Scan event and group name. */ | 1119 | /* Scan event and group name. */ |
1118 | ret = sscanf(argv[0], "%c:%a[^/ \t]/%a[^ \t]", | 1120 | argv0_str = strdup(argv[0]); |
1119 | &pr, (float *)(void *)&tev->group, | 1121 | if (argv0_str == NULL) { |
1120 | (float *)(void *)&tev->event); | 1122 | ret = -ENOMEM; |
1121 | if (ret != 3) { | 1123 | goto out; |
1124 | } | ||
1125 | fmt1_str = strtok_r(argv0_str, ":", &fmt); | ||
1126 | fmt2_str = strtok_r(NULL, "/", &fmt); | ||
1127 | fmt3_str = strtok_r(NULL, " \t", &fmt); | ||
1128 | if (fmt1_str == NULL || strlen(fmt1_str) != 1 || fmt2_str == NULL | ||
1129 | || fmt3_str == NULL) { | ||
1122 | semantic_error("Failed to parse event name: %s\n", argv[0]); | 1130 | semantic_error("Failed to parse event name: %s\n", argv[0]); |
1123 | ret = -EINVAL; | 1131 | ret = -EINVAL; |
1124 | goto out; | 1132 | goto out; |
1125 | } | 1133 | } |
1134 | pr = fmt1_str[0]; | ||
1135 | tev->group = strdup(fmt2_str); | ||
1136 | tev->event = strdup(fmt3_str); | ||
1137 | if (tev->group == NULL || tev->event == NULL) { | ||
1138 | ret = -ENOMEM; | ||
1139 | goto out; | ||
1140 | } | ||
1126 | pr_debug("Group:%s Event:%s probe:%c\n", tev->group, tev->event, pr); | 1141 | pr_debug("Group:%s Event:%s probe:%c\n", tev->group, tev->event, pr); |
1127 | 1142 | ||
1128 | tp->retprobe = (pr == 'r'); | 1143 | tp->retprobe = (pr == 'r'); |
@@ -1134,10 +1149,17 @@ static int parse_probe_trace_command(const char *cmd, | |||
1134 | p++; | 1149 | p++; |
1135 | } else | 1150 | } else |
1136 | p = argv[1]; | 1151 | p = argv[1]; |
1137 | ret = sscanf(p, "%a[^+]+%lu", (float *)(void *)&tp->symbol, | 1152 | fmt1_str = strtok_r(p, "+", &fmt); |
1138 | &tp->offset); | 1153 | tp->symbol = strdup(fmt1_str); |
1139 | if (ret == 1) | 1154 | if (tp->symbol == NULL) { |
1155 | ret = -ENOMEM; | ||
1156 | goto out; | ||
1157 | } | ||
1158 | fmt2_str = strtok_r(NULL, "", &fmt); | ||
1159 | if (fmt2_str == NULL) | ||
1140 | tp->offset = 0; | 1160 | tp->offset = 0; |
1161 | else | ||
1162 | tp->offset = strtoul(fmt2_str, NULL, 10); | ||
1141 | 1163 | ||
1142 | tev->nargs = argc - 2; | 1164 | tev->nargs = argc - 2; |
1143 | tev->args = zalloc(sizeof(struct probe_trace_arg) * tev->nargs); | 1165 | tev->args = zalloc(sizeof(struct probe_trace_arg) * tev->nargs); |
@@ -1161,6 +1183,7 @@ static int parse_probe_trace_command(const char *cmd, | |||
1161 | } | 1183 | } |
1162 | ret = 0; | 1184 | ret = 0; |
1163 | out: | 1185 | out: |
1186 | free(argv0_str); | ||
1164 | argv_free(argv); | 1187 | argv_free(argv); |
1165 | return ret; | 1188 | return ret; |
1166 | } | 1189 | } |
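The hunks above drop the glibc-only "%a" sscanf() allocation in favour of strtok_r() plus strdup()/strtoul(). A standalone sketch of the same tokenizing flow on made-up probe strings (the field values here are only examples):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int main(void)
{
        /* strtok_r() writes into its argument, so use writable buffers. */
        char ev[]  = "p:probe_group/probe_event";
        char loc[] = "do_sys_open+8";
        char *save, *type, *group, *event, *sym, *off;

        type  = strtok_r(ev, ":", &save);       /* "p"           */
        group = strtok_r(NULL, "/", &save);     /* "probe_group" */
        event = strtok_r(NULL, " \t", &save);   /* "probe_event" */

        sym = strtok_r(loc, "+", &save);        /* "do_sys_open" */
        off = strtok_r(NULL, "", &save);        /* "8", or NULL when no offset */

        printf("%s %s/%s at %s+%lu\n", type, group, event, sym,
               off ? strtoul(off, NULL, 10) : 0UL);
        return 0;
}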
@@ -2183,7 +2206,7 @@ static struct strfilter *available_func_filter; | |||
2183 | * If a symbol corresponds to a function with global binding and | 2206 | * If a symbol corresponds to a function with global binding and |
2184 | * matches filter return 0. For all others return 1. | 2207 | * matches filter return 0. For all others return 1. |
2185 | */ | 2208 | */ |
2186 | static int filter_available_functions(struct map *map __unused, | 2209 | static int filter_available_functions(struct map *map __maybe_unused, |
2187 | struct symbol *sym) | 2210 | struct symbol *sym) |
2188 | { | 2211 | { |
2189 | if (sym->binding == STB_GLOBAL && | 2212 | if (sym->binding == STB_GLOBAL && |
@@ -2307,10 +2330,17 @@ static int convert_name_to_addr(struct perf_probe_event *pev, const char *exec) | |||
2307 | function = NULL; | 2330 | function = NULL; |
2308 | } | 2331 | } |
2309 | if (!pev->group) { | 2332 | if (!pev->group) { |
2310 | char *ptr1, *ptr2; | 2333 | char *ptr1, *ptr2, *exec_copy; |
2311 | 2334 | ||
2312 | pev->group = zalloc(sizeof(char *) * 64); | 2335 | pev->group = zalloc(sizeof(char *) * 64); |
2313 | ptr1 = strdup(basename(exec)); | 2336 | exec_copy = strdup(exec); |
2337 | if (!exec_copy) { | ||
2338 | ret = -ENOMEM; | ||
2339 | pr_warning("Failed to copy exec string.\n"); | ||
2340 | goto out; | ||
2341 | } | ||
2342 | |||
2343 | ptr1 = strdup(basename(exec_copy)); | ||
2314 | if (ptr1) { | 2344 | if (ptr1) { |
2315 | ptr2 = strpbrk(ptr1, "-._"); | 2345 | ptr2 = strpbrk(ptr1, "-._"); |
2316 | if (ptr2) | 2346 | if (ptr2) |
@@ -2319,6 +2349,7 @@ static int convert_name_to_addr(struct perf_probe_event *pev, const char *exec) | |||
2319 | ptr1); | 2349 | ptr1); |
2320 | free(ptr1); | 2350 | free(ptr1); |
2321 | } | 2351 | } |
2352 | free(exec_copy); | ||
2322 | } | 2353 | } |
2323 | free(pp->function); | 2354 | free(pp->function); |
2324 | pp->function = zalloc(sizeof(char *) * MAX_PROBE_ARGS); | 2355 | pp->function = zalloc(sizeof(char *) * MAX_PROBE_ARGS); |
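The exec_copy change above exists because POSIX basename() may modify the string it is given, so the const exec path is duplicated first. A minimal standalone sketch of that pattern, including the strpbrk() trim used to derive a group name (the path is made up):

#include <libgen.h>     /* POSIX basename() */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int main(void)
{
        const char *exec = "/usr/bin/perf-3.6";
        char *copy, *base, *sep;

        copy = strdup(exec);            /* basename() may scribble on this */
        if (!copy)
                return 1;
        base = strdup(basename(copy));
        free(copy);
        if (!base)
                return 1;

        sep = strpbrk(base, "-._");     /* cut at the first '-', '.' or '_' */
        if (sep)
                *sep = '\0';
        printf("group: %s\n", base);    /* -> "perf" */
        free(base);
        return 0;
}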
diff --git a/tools/perf/util/probe-finder.c b/tools/perf/util/probe-finder.c index d448984ed789..1daf5c14e751 100644 --- a/tools/perf/util/probe-finder.c +++ b/tools/perf/util/probe-finder.c | |||
@@ -207,7 +207,7 @@ static int debuginfo__init_online_kernel_dwarf(struct debuginfo *self, | |||
207 | #else | 207 | #else |
208 | /* With older elfutils, this just support kernel module... */ | 208 | /* With older elfutils, this just support kernel module... */ |
209 | static int debuginfo__init_online_kernel_dwarf(struct debuginfo *self, | 209 | static int debuginfo__init_online_kernel_dwarf(struct debuginfo *self, |
210 | Dwarf_Addr addr __used) | 210 | Dwarf_Addr addr __maybe_unused) |
211 | { | 211 | { |
212 | const char *path = kernel_get_module_path("kernel"); | 212 | const char *path = kernel_get_module_path("kernel"); |
213 | 213 | ||
@@ -525,8 +525,10 @@ static int convert_variable_fields(Dwarf_Die *vr_die, const char *varname, | |||
525 | return -ENOENT; | 525 | return -ENOENT; |
526 | } | 526 | } |
527 | /* Verify it is a data structure */ | 527 | /* Verify it is a data structure */ |
528 | if (dwarf_tag(&type) != DW_TAG_structure_type) { | 528 | tag = dwarf_tag(&type); |
529 | pr_warning("%s is not a data structure.\n", varname); | 529 | if (tag != DW_TAG_structure_type && tag != DW_TAG_union_type) { |
530 | pr_warning("%s is not a data structure nor a union.\n", | ||
531 | varname); | ||
530 | return -EINVAL; | 532 | return -EINVAL; |
531 | } | 533 | } |
532 | 534 | ||
@@ -539,8 +541,9 @@ static int convert_variable_fields(Dwarf_Die *vr_die, const char *varname, | |||
539 | *ref_ptr = ref; | 541 | *ref_ptr = ref; |
540 | } else { | 542 | } else { |
541 | /* Verify it is a data structure */ | 543 | /* Verify it is a data structure */ |
542 | if (tag != DW_TAG_structure_type) { | 544 | if (tag != DW_TAG_structure_type && tag != DW_TAG_union_type) { |
543 | pr_warning("%s is not a data structure.\n", varname); | 545 | pr_warning("%s is not a data structure nor a union.\n",
546 | varname); | ||
544 | return -EINVAL; | 547 | return -EINVAL; |
545 | } | 548 | } |
546 | if (field->name[0] == '[') { | 549 | if (field->name[0] == '[') { |
@@ -567,10 +570,15 @@ static int convert_variable_fields(Dwarf_Die *vr_die, const char *varname, | |||
567 | } | 570 | } |
568 | 571 | ||
569 | /* Get the offset of the field */ | 572 | /* Get the offset of the field */ |
570 | ret = die_get_data_member_location(die_mem, &offs); | 573 | if (tag == DW_TAG_union_type) { |
571 | if (ret < 0) { | 574 | offs = 0; |
572 | pr_warning("Failed to get the offset of %s.\n", field->name); | 575 | } else { |
573 | return ret; | 576 | ret = die_get_data_member_location(die_mem, &offs); |
577 | if (ret < 0) { | ||
578 | pr_warning("Failed to get the offset of %s.\n", | ||
579 | field->name); | ||
580 | return ret; | ||
581 | } | ||
574 | } | 582 | } |
575 | ref->offset += (long)offs; | 583 | ref->offset += (long)offs; |
576 | 584 | ||
@@ -1419,7 +1427,7 @@ static int line_range_add_line(const char *src, unsigned int lineno, | |||
1419 | } | 1427 | } |
1420 | 1428 | ||
1421 | static int line_range_walk_cb(const char *fname, int lineno, | 1429 | static int line_range_walk_cb(const char *fname, int lineno, |
1422 | Dwarf_Addr addr __used, | 1430 | Dwarf_Addr addr __maybe_unused, |
1423 | void *data) | 1431 | void *data) |
1424 | { | 1432 | { |
1425 | struct line_finder *lf = data; | 1433 | struct line_finder *lf = data; |
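The union handling above can hard-code an offset of zero because every member of a C union starts at the beginning of the object, unlike struct members. offsetof() shows the difference directly:

#include <stddef.h>
#include <stdio.h>

struct s { int a; long b; };
union  u { int a; long b; };

int main(void)
{
        printf("struct: a at %zu, b at %zu\n",
               offsetof(struct s, a), offsetof(struct s, b));
        printf("union:  a at %zu, b at %zu\n",
               offsetof(union u, a), offsetof(union u, b));     /* both 0 */
        return 0;
}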
diff --git a/tools/perf/util/python-ext-sources b/tools/perf/util/python-ext-sources index 213362850abd..c40c2d33199e 100644 --- a/tools/perf/util/python-ext-sources +++ b/tools/perf/util/python-ext-sources | |||
@@ -1,5 +1,5 @@ | |||
1 | # | 1 | # |
2 | # List of files needed by perf python extention | 2 | # List of files needed by perf python extension |
3 | # | 3 | # |
4 | # Each source file must be placed on its own line so that it can be | 4 | # Each source file must be placed on its own line so that it can be |
5 | # processed by Makefile and util/setup.py accordingly. | 5 | # processed by Makefile and util/setup.py accordingly. |
diff --git a/tools/perf/util/python.c b/tools/perf/util/python.c index 0688bfb6d280..9181bf212fb9 100644 --- a/tools/perf/util/python.c +++ b/tools/perf/util/python.c | |||
@@ -627,7 +627,7 @@ static PyObject *pyrf_evsel__open(struct pyrf_evsel *pevsel, | |||
627 | * This will group just the fds for this single evsel, to group | 627 | * This will group just the fds for this single evsel, to group |
628 | * multiple events, use evlist.open(). | 628 | * multiple events, use evlist.open(). |
629 | */ | 629 | */ |
630 | if (perf_evsel__open(evsel, cpus, threads, group, NULL) < 0) { | 630 | if (perf_evsel__open(evsel, cpus, threads) < 0) { |
631 | PyErr_SetFromErrno(PyExc_OSError); | 631 | PyErr_SetFromErrno(PyExc_OSError); |
632 | return NULL; | 632 | return NULL; |
633 | } | 633 | } |
@@ -672,7 +672,7 @@ struct pyrf_evlist { | |||
672 | }; | 672 | }; |
673 | 673 | ||
674 | static int pyrf_evlist__init(struct pyrf_evlist *pevlist, | 674 | static int pyrf_evlist__init(struct pyrf_evlist *pevlist, |
675 | PyObject *args, PyObject *kwargs __used) | 675 | PyObject *args, PyObject *kwargs __maybe_unused) |
676 | { | 676 | { |
677 | PyObject *pcpus = NULL, *pthreads = NULL; | 677 | PyObject *pcpus = NULL, *pthreads = NULL; |
678 | struct cpu_map *cpus; | 678 | struct cpu_map *cpus; |
@@ -733,7 +733,8 @@ static PyObject *pyrf_evlist__poll(struct pyrf_evlist *pevlist, | |||
733 | } | 733 | } |
734 | 734 | ||
735 | static PyObject *pyrf_evlist__get_pollfd(struct pyrf_evlist *pevlist, | 735 | static PyObject *pyrf_evlist__get_pollfd(struct pyrf_evlist *pevlist, |
736 | PyObject *args __used, PyObject *kwargs __used) | 736 | PyObject *args __maybe_unused, |
737 | PyObject *kwargs __maybe_unused) | ||
737 | { | 738 | { |
738 | struct perf_evlist *evlist = &pevlist->evlist; | 739 | struct perf_evlist *evlist = &pevlist->evlist; |
739 | PyObject *list = PyList_New(0); | 740 | PyObject *list = PyList_New(0); |
@@ -765,7 +766,8 @@ free_list: | |||
765 | 766 | ||
766 | 767 | ||
767 | static PyObject *pyrf_evlist__add(struct pyrf_evlist *pevlist, | 768 | static PyObject *pyrf_evlist__add(struct pyrf_evlist *pevlist, |
768 | PyObject *args, PyObject *kwargs __used) | 769 | PyObject *args, |
770 | PyObject *kwargs __maybe_unused) | ||
769 | { | 771 | { |
770 | struct perf_evlist *evlist = &pevlist->evlist; | 772 | struct perf_evlist *evlist = &pevlist->evlist; |
771 | PyObject *pevsel; | 773 | PyObject *pevsel; |
@@ -803,7 +805,7 @@ static PyObject *pyrf_evlist__read_on_cpu(struct pyrf_evlist *pevlist, | |||
803 | if (pyevent == NULL) | 805 | if (pyevent == NULL) |
804 | return PyErr_NoMemory(); | 806 | return PyErr_NoMemory(); |
805 | 807 | ||
806 | err = perf_evlist__parse_sample(evlist, event, &pevent->sample, false); | 808 | err = perf_evlist__parse_sample(evlist, event, &pevent->sample); |
807 | if (err) | 809 | if (err) |
808 | return PyErr_Format(PyExc_OSError, | 810 | return PyErr_Format(PyExc_OSError, |
809 | "perf: can't parse sample, err=%d", err); | 811 | "perf: can't parse sample, err=%d", err); |
@@ -824,7 +826,10 @@ static PyObject *pyrf_evlist__open(struct pyrf_evlist *pevlist, | |||
824 | if (!PyArg_ParseTupleAndKeywords(args, kwargs, "|OOii", kwlist, &group)) | 826 | if (!PyArg_ParseTupleAndKeywords(args, kwargs, "|OOii", kwlist, &group)) |
825 | return NULL; | 827 | return NULL; |
826 | 828 | ||
827 | if (perf_evlist__open(evlist, group) < 0) { | 829 | if (group) |
830 | perf_evlist__set_leader(evlist); | ||
831 | |||
832 | if (perf_evlist__open(evlist) < 0) { | ||
828 | PyErr_SetFromErrno(PyExc_OSError); | 833 | PyErr_SetFromErrno(PyExc_OSError); |
829 | return NULL; | 834 | return NULL; |
830 | } | 835 | } |
diff --git a/tools/perf/util/scripting-engines/trace-event-perl.c b/tools/perf/util/scripting-engines/trace-event-perl.c index 02dfa19a467f..f80605eb1855 100644 --- a/tools/perf/util/scripting-engines/trace-event-perl.c +++ b/tools/perf/util/scripting-engines/trace-event-perl.c | |||
@@ -25,16 +25,16 @@ | |||
25 | #include <ctype.h> | 25 | #include <ctype.h> |
26 | #include <errno.h> | 26 | #include <errno.h> |
27 | 27 | ||
28 | #include "../../perf.h" | ||
29 | #include "../util.h" | 28 | #include "../util.h" |
29 | #include <EXTERN.h> | ||
30 | #include <perl.h> | ||
31 | |||
32 | #include "../../perf.h" | ||
30 | #include "../thread.h" | 33 | #include "../thread.h" |
31 | #include "../event.h" | 34 | #include "../event.h" |
32 | #include "../trace-event.h" | 35 | #include "../trace-event.h" |
33 | #include "../evsel.h" | 36 | #include "../evsel.h" |
34 | 37 | ||
35 | #include <EXTERN.h> | ||
36 | #include <perl.h> | ||
37 | |||
38 | void boot_Perf__Trace__Context(pTHX_ CV *cv); | 38 | void boot_Perf__Trace__Context(pTHX_ CV *cv); |
39 | void boot_DynaLoader(pTHX_ CV *cv); | 39 | void boot_DynaLoader(pTHX_ CV *cv); |
40 | typedef PerlInterpreter * INTERP; | 40 | typedef PerlInterpreter * INTERP; |
@@ -237,16 +237,16 @@ static void define_event_symbols(struct event_format *event, | |||
237 | define_event_symbols(event, ev_name, args->next); | 237 | define_event_symbols(event, ev_name, args->next); |
238 | } | 238 | } |
239 | 239 | ||
240 | static inline | 240 | static inline struct event_format *find_cache_event(struct perf_evsel *evsel) |
241 | struct event_format *find_cache_event(struct pevent *pevent, int type) | ||
242 | { | 241 | { |
243 | static char ev_name[256]; | 242 | static char ev_name[256]; |
244 | struct event_format *event; | 243 | struct event_format *event; |
244 | int type = evsel->attr.config; | ||
245 | 245 | ||
246 | if (events[type]) | 246 | if (events[type]) |
247 | return events[type]; | 247 | return events[type]; |
248 | 248 | ||
249 | events[type] = event = pevent_find_event(pevent, type); | 249 | events[type] = event = evsel->tp_format; |
250 | if (!event) | 250 | if (!event) |
251 | return NULL; | 251 | return NULL; |
252 | 252 | ||
@@ -257,23 +257,22 @@ struct event_format *find_cache_event(struct pevent *pevent, int type) | |||
257 | return event; | 257 | return event; |
258 | } | 258 | } |
259 | 259 | ||
260 | static void perl_process_tracepoint(union perf_event *perf_event __unused, | 260 | static void perl_process_tracepoint(union perf_event *perf_event __maybe_unused, |
261 | struct pevent *pevent, | ||
262 | struct perf_sample *sample, | 261 | struct perf_sample *sample, |
263 | struct perf_evsel *evsel, | 262 | struct perf_evsel *evsel, |
264 | struct machine *machine __unused, | 263 | struct machine *machine __maybe_unused, |
265 | struct thread *thread) | 264 | struct addr_location *al) |
266 | { | 265 | { |
267 | struct format_field *field; | 266 | struct format_field *field; |
268 | static char handler[256]; | 267 | static char handler[256]; |
269 | unsigned long long val; | 268 | unsigned long long val; |
270 | unsigned long s, ns; | 269 | unsigned long s, ns; |
271 | struct event_format *event; | 270 | struct event_format *event; |
272 | int type; | ||
273 | int pid; | 271 | int pid; |
274 | int cpu = sample->cpu; | 272 | int cpu = sample->cpu; |
275 | void *data = sample->raw_data; | 273 | void *data = sample->raw_data; |
276 | unsigned long long nsecs = sample->time; | 274 | unsigned long long nsecs = sample->time; |
275 | struct thread *thread = al->thread; | ||
277 | char *comm = thread->comm; | 276 | char *comm = thread->comm; |
278 | 277 | ||
279 | dSP; | 278 | dSP; |
@@ -281,13 +280,11 @@ static void perl_process_tracepoint(union perf_event *perf_event __unused, | |||
281 | if (evsel->attr.type != PERF_TYPE_TRACEPOINT) | 280 | if (evsel->attr.type != PERF_TYPE_TRACEPOINT) |
282 | return; | 281 | return; |
283 | 282 | ||
284 | type = trace_parse_common_type(pevent, data); | 283 | event = find_cache_event(evsel); |
285 | |||
286 | event = find_cache_event(pevent, type); | ||
287 | if (!event) | 284 | if (!event) |
288 | die("ug! no event found for type %d", type); | 285 | die("ug! no event found for type %" PRIu64, evsel->attr.config); |
289 | 286 | ||
290 | pid = trace_parse_common_pid(pevent, data); | 287 | pid = raw_field_value(event, "common_pid", data); |
291 | 288 | ||
292 | sprintf(handler, "%s::%s", event->system, event->name); | 289 | sprintf(handler, "%s::%s", event->system, event->name); |
293 | 290 | ||
@@ -320,7 +317,7 @@ static void perl_process_tracepoint(union perf_event *perf_event __unused, | |||
320 | offset = field->offset; | 317 | offset = field->offset; |
321 | XPUSHs(sv_2mortal(newSVpv((char *)data + offset, 0))); | 318 | XPUSHs(sv_2mortal(newSVpv((char *)data + offset, 0))); |
322 | } else { /* FIELD_IS_NUMERIC */ | 319 | } else { /* FIELD_IS_NUMERIC */ |
323 | val = read_size(pevent, data + field->offset, | 320 | val = read_size(event, data + field->offset, |
324 | field->size); | 321 | field->size); |
325 | if (field->flags & FIELD_IS_SIGNED) { | 322 | if (field->flags & FIELD_IS_SIGNED) { |
326 | XPUSHs(sv_2mortal(newSViv(val))); | 323 | XPUSHs(sv_2mortal(newSViv(val))); |
@@ -349,11 +346,11 @@ static void perl_process_tracepoint(union perf_event *perf_event __unused, | |||
349 | LEAVE; | 346 | LEAVE; |
350 | } | 347 | } |
351 | 348 | ||
352 | static void perl_process_event_generic(union perf_event *pevent __unused, | 349 | static void perl_process_event_generic(union perf_event *event, |
353 | struct perf_sample *sample, | 350 | struct perf_sample *sample, |
354 | struct perf_evsel *evsel __unused, | 351 | struct perf_evsel *evsel, |
355 | struct machine *machine __unused, | 352 | struct machine *machine __maybe_unused, |
356 | struct thread *thread __unused) | 353 | struct addr_location *al __maybe_unused) |
357 | { | 354 | { |
358 | dSP; | 355 | dSP; |
359 | 356 | ||
@@ -363,7 +360,7 @@ static void perl_process_event_generic(union perf_event *pevent __unused, | |||
363 | ENTER; | 360 | ENTER; |
364 | SAVETMPS; | 361 | SAVETMPS; |
365 | PUSHMARK(SP); | 362 | PUSHMARK(SP); |
366 | XPUSHs(sv_2mortal(newSVpvn((const char *)pevent, pevent->header.size))); | 363 | XPUSHs(sv_2mortal(newSVpvn((const char *)event, event->header.size))); |
367 | XPUSHs(sv_2mortal(newSVpvn((const char *)&evsel->attr, sizeof(evsel->attr)))); | 364 | XPUSHs(sv_2mortal(newSVpvn((const char *)&evsel->attr, sizeof(evsel->attr)))); |
368 | XPUSHs(sv_2mortal(newSVpvn((const char *)sample, sizeof(*sample)))); | 365 | XPUSHs(sv_2mortal(newSVpvn((const char *)sample, sizeof(*sample)))); |
369 | XPUSHs(sv_2mortal(newSVpvn((const char *)sample->raw_data, sample->raw_size))); | 366 | XPUSHs(sv_2mortal(newSVpvn((const char *)sample->raw_data, sample->raw_size))); |
@@ -376,14 +373,13 @@ static void perl_process_event_generic(union perf_event *pevent __unused, | |||
376 | } | 373 | } |
377 | 374 | ||
378 | static void perl_process_event(union perf_event *event, | 375 | static void perl_process_event(union perf_event *event, |
379 | struct pevent *pevent, | ||
380 | struct perf_sample *sample, | 376 | struct perf_sample *sample, |
381 | struct perf_evsel *evsel, | 377 | struct perf_evsel *evsel, |
382 | struct machine *machine, | 378 | struct machine *machine, |
383 | struct thread *thread) | 379 | struct addr_location *al) |
384 | { | 380 | { |
385 | perl_process_tracepoint(event, pevent, sample, evsel, machine, thread); | 381 | perl_process_tracepoint(event, sample, evsel, machine, al); |
386 | perl_process_event_generic(event, sample, evsel, machine, thread); | 382 | perl_process_event_generic(event, sample, evsel, machine, al); |
387 | } | 383 | } |
388 | 384 | ||
389 | static void run_start_sub(void) | 385 | static void run_start_sub(void) |
diff --git a/tools/perf/util/scripting-engines/trace-event-python.c b/tools/perf/util/scripting-engines/trace-event-python.c index ce4d1b0c3862..730c6630cba5 100644 --- a/tools/perf/util/scripting-engines/trace-event-python.c +++ b/tools/perf/util/scripting-engines/trace-event-python.c | |||
@@ -27,10 +27,12 @@ | |||
27 | #include <errno.h> | 27 | #include <errno.h> |
28 | 28 | ||
29 | #include "../../perf.h" | 29 | #include "../../perf.h" |
30 | #include "../evsel.h" | ||
30 | #include "../util.h" | 31 | #include "../util.h" |
31 | #include "../event.h" | 32 | #include "../event.h" |
32 | #include "../thread.h" | 33 | #include "../thread.h" |
33 | #include "../trace-event.h" | 34 | #include "../trace-event.h" |
35 | #include "../evsel.h" | ||
34 | 36 | ||
35 | PyMODINIT_FUNC initperf_trace_context(void); | 37 | PyMODINIT_FUNC initperf_trace_context(void); |
36 | 38 | ||
@@ -194,16 +196,21 @@ static void define_event_symbols(struct event_format *event, | |||
194 | define_event_symbols(event, ev_name, args->next); | 196 | define_event_symbols(event, ev_name, args->next); |
195 | } | 197 | } |
196 | 198 | ||
197 | static inline | 199 | static inline struct event_format *find_cache_event(struct perf_evsel *evsel) |
198 | struct event_format *find_cache_event(struct pevent *pevent, int type) | ||
199 | { | 200 | { |
200 | static char ev_name[256]; | 201 | static char ev_name[256]; |
201 | struct event_format *event; | 202 | struct event_format *event; |
203 | int type = evsel->attr.config; | ||
202 | 204 | ||
205 | /* | ||
206 | * XXX: Do we really need to cache this since now we have evsel->tp_format | ||
207 | * cached already? Need to re-read this "cache" routine, which also calls | ||
208 | * define_event_symbols() :-\ | ||
209 | */ | ||
203 | if (events[type]) | 210 | if (events[type]) |
204 | return events[type]; | 211 | return events[type]; |
205 | 212 | ||
206 | events[type] = event = pevent_find_event(pevent, type); | 213 | events[type] = event = evsel->tp_format; |
207 | if (!event) | 214 | if (!event) |
208 | return NULL; | 215 | return NULL; |
209 | 216 | ||
@@ -214,12 +221,12 @@ struct event_format *find_cache_event(struct pevent *pevent, int type) | |||
214 | return event; | 221 | return event; |
215 | } | 222 | } |
216 | 223 | ||
217 | static void python_process_event(union perf_event *perf_event __unused, | 224 | static void python_process_tracepoint(union perf_event *perf_event |
218 | struct pevent *pevent, | 225 | __maybe_unused, |
219 | struct perf_sample *sample, | 226 | struct perf_sample *sample, |
220 | struct perf_evsel *evsel __unused, | 227 | struct perf_evsel *evsel, |
221 | struct machine *machine __unused, | 228 | struct machine *machine __maybe_unused, |
222 | struct thread *thread) | 229 | struct addr_location *al) |
223 | { | 230 | { |
224 | PyObject *handler, *retval, *context, *t, *obj, *dict = NULL; | 231 | PyObject *handler, *retval, *context, *t, *obj, *dict = NULL; |
225 | static char handler_name[256]; | 232 | static char handler_name[256]; |
@@ -228,24 +235,22 @@ static void python_process_event(union perf_event *perf_event __unused, | |||
228 | unsigned long s, ns; | 235 | unsigned long s, ns; |
229 | struct event_format *event; | 236 | struct event_format *event; |
230 | unsigned n = 0; | 237 | unsigned n = 0; |
231 | int type; | ||
232 | int pid; | 238 | int pid; |
233 | int cpu = sample->cpu; | 239 | int cpu = sample->cpu; |
234 | void *data = sample->raw_data; | 240 | void *data = sample->raw_data; |
235 | unsigned long long nsecs = sample->time; | 241 | unsigned long long nsecs = sample->time; |
242 | struct thread *thread = al->thread; | ||
236 | char *comm = thread->comm; | 243 | char *comm = thread->comm; |
237 | 244 | ||
238 | t = PyTuple_New(MAX_FIELDS); | 245 | t = PyTuple_New(MAX_FIELDS); |
239 | if (!t) | 246 | if (!t) |
240 | Py_FatalError("couldn't create Python tuple"); | 247 | Py_FatalError("couldn't create Python tuple"); |
241 | 248 | ||
242 | type = trace_parse_common_type(pevent, data); | 249 | event = find_cache_event(evsel); |
243 | |||
244 | event = find_cache_event(pevent, type); | ||
245 | if (!event) | 250 | if (!event) |
246 | die("ug! no event found for type %d", type); | 251 | die("ug! no event found for type %d", (int)evsel->attr.config); |
247 | 252 | ||
248 | pid = trace_parse_common_pid(pevent, data); | 253 | pid = raw_field_value(event, "common_pid", data); |
249 | 254 | ||
250 | sprintf(handler_name, "%s__%s", event->system, event->name); | 255 | sprintf(handler_name, "%s__%s", event->system, event->name); |
251 | 256 | ||
@@ -290,7 +295,7 @@ static void python_process_event(union perf_event *perf_event __unused, | |||
290 | offset = field->offset; | 295 | offset = field->offset; |
291 | obj = PyString_FromString((char *)data + offset); | 296 | obj = PyString_FromString((char *)data + offset); |
292 | } else { /* FIELD_IS_NUMERIC */ | 297 | } else { /* FIELD_IS_NUMERIC */ |
293 | val = read_size(pevent, data + field->offset, | 298 | val = read_size(event, data + field->offset, |
294 | field->size); | 299 | field->size); |
295 | if (field->flags & FIELD_IS_SIGNED) { | 300 | if (field->flags & FIELD_IS_SIGNED) { |
296 | if ((long long)val >= LONG_MIN && | 301 | if ((long long)val >= LONG_MIN && |
@@ -335,6 +340,84 @@ static void python_process_event(union perf_event *perf_event __unused, | |||
335 | Py_DECREF(t); | 340 | Py_DECREF(t); |
336 | } | 341 | } |
337 | 342 | ||
343 | static void python_process_general_event(union perf_event *perf_event | ||
344 | __maybe_unused, | ||
345 | struct perf_sample *sample, | ||
346 | struct perf_evsel *evsel, | ||
347 | struct machine *machine __maybe_unused, | ||
348 | struct addr_location *al) | ||
349 | { | ||
350 | PyObject *handler, *retval, *t, *dict; | ||
351 | static char handler_name[64]; | ||
352 | unsigned n = 0; | ||
353 | struct thread *thread = al->thread; | ||
354 | |||
355 | /* | ||
356 | * Use the MAX_FIELDS to make the function expandable, though | ||
357 | * currently there is only one item for the tuple. | ||
358 | */ | ||
359 | t = PyTuple_New(MAX_FIELDS); | ||
360 | if (!t) | ||
361 | Py_FatalError("couldn't create Python tuple"); | ||
362 | |||
363 | dict = PyDict_New(); | ||
364 | if (!dict) | ||
365 | Py_FatalError("couldn't create Python dictionary"); | ||
366 | |||
367 | snprintf(handler_name, sizeof(handler_name), "%s", "process_event"); | ||
368 | |||
369 | handler = PyDict_GetItemString(main_dict, handler_name); | ||
370 | if (!handler || !PyCallable_Check(handler)) | ||
371 | goto exit; | ||
372 | |||
373 | PyDict_SetItemString(dict, "ev_name", PyString_FromString(perf_evsel__name(evsel))); | ||
374 | PyDict_SetItemString(dict, "attr", PyString_FromStringAndSize( | ||
375 | (const char *)&evsel->attr, sizeof(evsel->attr))); | ||
376 | PyDict_SetItemString(dict, "sample", PyString_FromStringAndSize( | ||
377 | (const char *)sample, sizeof(*sample))); | ||
378 | PyDict_SetItemString(dict, "raw_buf", PyString_FromStringAndSize( | ||
379 | (const char *)sample->raw_data, sample->raw_size)); | ||
380 | PyDict_SetItemString(dict, "comm", | ||
381 | PyString_FromString(thread->comm)); | ||
382 | if (al->map) { | ||
383 | PyDict_SetItemString(dict, "dso", | ||
384 | PyString_FromString(al->map->dso->name)); | ||
385 | } | ||
386 | if (al->sym) { | ||
387 | PyDict_SetItemString(dict, "symbol", | ||
388 | PyString_FromString(al->sym->name)); | ||
389 | } | ||
390 | |||
391 | PyTuple_SetItem(t, n++, dict); | ||
392 | if (_PyTuple_Resize(&t, n) == -1) | ||
393 | Py_FatalError("error resizing Python tuple"); | ||
394 | |||
395 | retval = PyObject_CallObject(handler, t); | ||
396 | if (retval == NULL) | ||
397 | handler_call_die(handler_name); | ||
398 | exit: | ||
399 | Py_DECREF(dict); | ||
400 | Py_DECREF(t); | ||
401 | } | ||
402 | |||
403 | static void python_process_event(union perf_event *perf_event, | ||
404 | struct perf_sample *sample, | ||
405 | struct perf_evsel *evsel, | ||
406 | struct machine *machine, | ||
407 | struct addr_location *al) | ||
408 | { | ||
409 | switch (evsel->attr.type) { | ||
410 | case PERF_TYPE_TRACEPOINT: | ||
411 | python_process_tracepoint(perf_event, sample, evsel, | ||
412 | machine, al); | ||
413 | break; | ||
414 | /* Reserve for future process_hw/sw/raw APIs */ | ||
415 | default: | ||
416 | python_process_general_event(perf_event, sample, evsel, | ||
417 | machine, al); | ||
418 | } | ||
419 | } | ||
420 | |||
338 | static int run_start_sub(void) | 421 | static int run_start_sub(void) |
339 | { | 422 | { |
340 | PyObject *handler, *retval; | 423 | PyObject *handler, *retval; |
diff --git a/tools/perf/util/session.c b/tools/perf/util/session.c index 2437fb0b463a..8cdd23239c90 100644 --- a/tools/perf/util/session.c +++ b/tools/perf/util/session.c | |||
@@ -15,6 +15,9 @@ | |||
15 | #include "util.h" | 15 | #include "util.h" |
16 | #include "cpumap.h" | 16 | #include "cpumap.h" |
17 | #include "event-parse.h" | 17 | #include "event-parse.h" |
18 | #include "perf_regs.h" | ||
19 | #include "unwind.h" | ||
20 | #include "vdso.h" | ||
18 | 21 | ||
19 | static int perf_session__open(struct perf_session *self, bool force) | 22 | static int perf_session__open(struct perf_session *self, bool force) |
20 | { | 23 | { |
@@ -209,6 +212,7 @@ void perf_session__delete(struct perf_session *self) | |||
209 | machine__exit(&self->host_machine); | 212 | machine__exit(&self->host_machine); |
210 | close(self->fd); | 213 | close(self->fd); |
211 | free(self); | 214 | free(self); |
215 | vdso__exit(); | ||
212 | } | 216 | } |
213 | 217 | ||
214 | void machine__remove_thread(struct machine *self, struct thread *th) | 218 | void machine__remove_thread(struct machine *self, struct thread *th) |
@@ -288,10 +292,11 @@ struct branch_info *machine__resolve_bstack(struct machine *self, | |||
288 | return bi; | 292 | return bi; |
289 | } | 293 | } |
290 | 294 | ||
291 | int machine__resolve_callchain(struct machine *self, | 295 | static int machine__resolve_callchain_sample(struct machine *machine, |
292 | struct thread *thread, | 296 | struct thread *thread, |
293 | struct ip_callchain *chain, | 297 | struct ip_callchain *chain, |
294 | struct symbol **parent) | 298 | struct symbol **parent) |
299 | |||
295 | { | 300 | { |
296 | u8 cpumode = PERF_RECORD_MISC_USER; | 301 | u8 cpumode = PERF_RECORD_MISC_USER; |
297 | unsigned int i; | 302 | unsigned int i; |
@@ -316,11 +321,14 @@ int machine__resolve_callchain(struct machine *self, | |||
316 | if (ip >= PERF_CONTEXT_MAX) { | 321 | if (ip >= PERF_CONTEXT_MAX) { |
317 | switch (ip) { | 322 | switch (ip) { |
318 | case PERF_CONTEXT_HV: | 323 | case PERF_CONTEXT_HV: |
319 | cpumode = PERF_RECORD_MISC_HYPERVISOR; break; | 324 | cpumode = PERF_RECORD_MISC_HYPERVISOR; |
325 | break; | ||
320 | case PERF_CONTEXT_KERNEL: | 326 | case PERF_CONTEXT_KERNEL: |
321 | cpumode = PERF_RECORD_MISC_KERNEL; break; | 327 | cpumode = PERF_RECORD_MISC_KERNEL; |
328 | break; | ||
322 | case PERF_CONTEXT_USER: | 329 | case PERF_CONTEXT_USER: |
323 | cpumode = PERF_RECORD_MISC_USER; break; | 330 | cpumode = PERF_RECORD_MISC_USER; |
331 | break; | ||
324 | default: | 332 | default: |
325 | pr_debug("invalid callchain context: " | 333 | pr_debug("invalid callchain context: " |
326 | "%"PRId64"\n", (s64) ip); | 334 | "%"PRId64"\n", (s64) ip); |
@@ -335,7 +343,7 @@ int machine__resolve_callchain(struct machine *self, | |||
335 | } | 343 | } |
336 | 344 | ||
337 | al.filtered = false; | 345 | al.filtered = false; |
338 | thread__find_addr_location(thread, self, cpumode, | 346 | thread__find_addr_location(thread, machine, cpumode, |
339 | MAP__FUNCTION, ip, &al, NULL); | 347 | MAP__FUNCTION, ip, &al, NULL); |
340 | if (al.sym != NULL) { | 348 | if (al.sym != NULL) { |
341 | if (sort__has_parent && !*parent && | 349 | if (sort__has_parent && !*parent && |
@@ -354,49 +362,92 @@ int machine__resolve_callchain(struct machine *self, | |||
354 | return 0; | 362 | return 0; |
355 | } | 363 | } |
356 | 364 | ||
357 | static int process_event_synth_tracing_data_stub(union perf_event *event __used, | 365 | static int unwind_entry(struct unwind_entry *entry, void *arg) |
358 | struct perf_session *session __used) | 366 | { |
367 | struct callchain_cursor *cursor = arg; | ||
368 | return callchain_cursor_append(cursor, entry->ip, | ||
369 | entry->map, entry->sym); | ||
370 | } | ||
371 | |||
372 | int machine__resolve_callchain(struct machine *machine, | ||
373 | struct perf_evsel *evsel, | ||
374 | struct thread *thread, | ||
375 | struct perf_sample *sample, | ||
376 | struct symbol **parent) | ||
377 | |||
378 | { | ||
379 | int ret; | ||
380 | |||
381 | callchain_cursor_reset(&callchain_cursor); | ||
382 | |||
383 | ret = machine__resolve_callchain_sample(machine, thread, | ||
384 | sample->callchain, parent); | ||
385 | if (ret) | ||
386 | return ret; | ||
387 | |||
388 | /* Can we do dwarf post unwind? */ | ||
389 | if (!((evsel->attr.sample_type & PERF_SAMPLE_REGS_USER) && | ||
390 | (evsel->attr.sample_type & PERF_SAMPLE_STACK_USER))) | ||
391 | return 0; | ||
392 | |||
393 | /* Bail out if nothing was captured. */ | ||
394 | if ((!sample->user_regs.regs) || | ||
395 | (!sample->user_stack.size)) | ||
396 | return 0; | ||
397 | |||
398 | return unwind__get_entries(unwind_entry, &callchain_cursor, machine, | ||
399 | thread, evsel->attr.sample_regs_user, | ||
400 | sample); | ||
401 | |||
402 | } | ||
403 | |||
404 | static int process_event_synth_tracing_data_stub(union perf_event *event | ||
405 | __maybe_unused, | ||
406 | struct perf_session *session | ||
407 | __maybe_unused) | ||
359 | { | 408 | { |
360 | dump_printf(": unhandled!\n"); | 409 | dump_printf(": unhandled!\n"); |
361 | return 0; | 410 | return 0; |
362 | } | 411 | } |
363 | 412 | ||
364 | static int process_event_synth_attr_stub(union perf_event *event __used, | 413 | static int process_event_synth_attr_stub(union perf_event *event __maybe_unused, |
365 | struct perf_evlist **pevlist __used) | 414 | struct perf_evlist **pevlist |
415 | __maybe_unused) | ||
366 | { | 416 | { |
367 | dump_printf(": unhandled!\n"); | 417 | dump_printf(": unhandled!\n"); |
368 | return 0; | 418 | return 0; |
369 | } | 419 | } |
370 | 420 | ||
371 | static int process_event_sample_stub(struct perf_tool *tool __used, | 421 | static int process_event_sample_stub(struct perf_tool *tool __maybe_unused, |
372 | union perf_event *event __used, | 422 | union perf_event *event __maybe_unused, |
373 | struct perf_sample *sample __used, | 423 | struct perf_sample *sample __maybe_unused, |
374 | struct perf_evsel *evsel __used, | 424 | struct perf_evsel *evsel __maybe_unused, |
375 | struct machine *machine __used) | 425 | struct machine *machine __maybe_unused) |
376 | { | 426 | { |
377 | dump_printf(": unhandled!\n"); | 427 | dump_printf(": unhandled!\n"); |
378 | return 0; | 428 | return 0; |
379 | } | 429 | } |
380 | 430 | ||
381 | static int process_event_stub(struct perf_tool *tool __used, | 431 | static int process_event_stub(struct perf_tool *tool __maybe_unused, |
382 | union perf_event *event __used, | 432 | union perf_event *event __maybe_unused, |
383 | struct perf_sample *sample __used, | 433 | struct perf_sample *sample __maybe_unused, |
384 | struct machine *machine __used) | 434 | struct machine *machine __maybe_unused) |
385 | { | 435 | { |
386 | dump_printf(": unhandled!\n"); | 436 | dump_printf(": unhandled!\n"); |
387 | return 0; | 437 | return 0; |
388 | } | 438 | } |
389 | 439 | ||
390 | static int process_finished_round_stub(struct perf_tool *tool __used, | 440 | static int process_finished_round_stub(struct perf_tool *tool __maybe_unused, |
391 | union perf_event *event __used, | 441 | union perf_event *event __maybe_unused, |
392 | struct perf_session *perf_session __used) | 442 | struct perf_session *perf_session |
443 | __maybe_unused) | ||
393 | { | 444 | { |
394 | dump_printf(": unhandled!\n"); | 445 | dump_printf(": unhandled!\n"); |
395 | return 0; | 446 | return 0; |
396 | } | 447 | } |
397 | 448 | ||
398 | static int process_event_type_stub(struct perf_tool *tool __used, | 449 | static int process_event_type_stub(struct perf_tool *tool __maybe_unused, |
399 | union perf_event *event __used) | 450 | union perf_event *event __maybe_unused) |
400 | { | 451 | { |
401 | dump_printf(": unhandled!\n"); | 452 | dump_printf(": unhandled!\n"); |
402 | return 0; | 453 | return 0; |
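machine__resolve_callchain() above only attempts the DWARF post-unwind when the event asked for both user registers and the user stack, and when the sample actually carries them. A standalone sketch of that precondition; the two bit values are assumptions here, the authoritative definitions live in the kernel's linux/perf_event.h:

#include <stdint.h>
#include <stdio.h>

#define PERF_SAMPLE_REGS_USER   (1U << 12)      /* assumed values; see linux/perf_event.h */
#define PERF_SAMPLE_STACK_USER  (1U << 13)

static int can_dwarf_unwind(uint64_t sample_type,
                            const uint64_t *user_regs, uint64_t stack_size)
{
        if (!(sample_type & PERF_SAMPLE_REGS_USER) ||
            !(sample_type & PERF_SAMPLE_STACK_USER))
                return 0;                       /* not requested at record time */
        return user_regs != NULL && stack_size != 0;    /* and actually captured */
}

int main(void)
{
        uint64_t regs[2] = { 0, 0 };

        printf("%d\n", can_dwarf_unwind(PERF_SAMPLE_REGS_USER | PERF_SAMPLE_STACK_USER,
                                        regs, 512));            /* 1 */
        printf("%d\n", can_dwarf_unwind(PERF_SAMPLE_REGS_USER, regs, 512));     /* 0 */
        return 0;
}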
@@ -473,7 +524,7 @@ static void swap_sample_id_all(union perf_event *event, void *data) | |||
473 | } | 524 | } |
474 | 525 | ||
475 | static void perf_event__all64_swap(union perf_event *event, | 526 | static void perf_event__all64_swap(union perf_event *event, |
476 | bool sample_id_all __used) | 527 | bool sample_id_all __maybe_unused) |
477 | { | 528 | { |
478 | struct perf_event_header *hdr = &event->header; | 529 | struct perf_event_header *hdr = &event->header; |
479 | mem_bswap_64(hdr + 1, event->header.size - sizeof(*hdr)); | 530 | mem_bswap_64(hdr + 1, event->header.size - sizeof(*hdr)); |
@@ -487,7 +538,7 @@ static void perf_event__comm_swap(union perf_event *event, bool sample_id_all) | |||
487 | if (sample_id_all) { | 538 | if (sample_id_all) { |
488 | void *data = &event->comm.comm; | 539 | void *data = &event->comm.comm; |
489 | 540 | ||
490 | data += ALIGN(strlen(data) + 1, sizeof(u64)); | 541 | data += PERF_ALIGN(strlen(data) + 1, sizeof(u64)); |
491 | swap_sample_id_all(event, data); | 542 | swap_sample_id_all(event, data); |
492 | } | 543 | } |
493 | } | 544 | } |
@@ -504,7 +555,7 @@ static void perf_event__mmap_swap(union perf_event *event, | |||
504 | if (sample_id_all) { | 555 | if (sample_id_all) { |
505 | void *data = &event->mmap.filename; | 556 | void *data = &event->mmap.filename; |
506 | 557 | ||
507 | data += ALIGN(strlen(data) + 1, sizeof(u64)); | 558 | data += PERF_ALIGN(strlen(data) + 1, sizeof(u64)); |
508 | swap_sample_id_all(event, data); | 559 | swap_sample_id_all(event, data); |
509 | } | 560 | } |
510 | } | 561 | } |
@@ -584,7 +635,7 @@ void perf_event__attr_swap(struct perf_event_attr *attr) | |||
584 | } | 635 | } |
585 | 636 | ||
586 | static void perf_event__hdr_attr_swap(union perf_event *event, | 637 | static void perf_event__hdr_attr_swap(union perf_event *event, |
587 | bool sample_id_all __used) | 638 | bool sample_id_all __maybe_unused) |
588 | { | 639 | { |
589 | size_t size; | 640 | size_t size; |
590 | 641 | ||
@@ -596,14 +647,14 @@ static void perf_event__hdr_attr_swap(union perf_event *event, | |||
596 | } | 647 | } |
597 | 648 | ||
598 | static void perf_event__event_type_swap(union perf_event *event, | 649 | static void perf_event__event_type_swap(union perf_event *event, |
599 | bool sample_id_all __used) | 650 | bool sample_id_all __maybe_unused) |
600 | { | 651 | { |
601 | event->event_type.event_type.event_id = | 652 | event->event_type.event_type.event_id = |
602 | bswap_64(event->event_type.event_type.event_id); | 653 | bswap_64(event->event_type.event_type.event_id); |
603 | } | 654 | } |
604 | 655 | ||
605 | static void perf_event__tracing_data_swap(union perf_event *event, | 656 | static void perf_event__tracing_data_swap(union perf_event *event, |
606 | bool sample_id_all __used) | 657 | bool sample_id_all __maybe_unused) |
607 | { | 658 | { |
608 | event->tracing_data.size = bswap_32(event->tracing_data.size); | 659 | event->tracing_data.size = bswap_32(event->tracing_data.size); |
609 | } | 660 | } |
@@ -652,7 +703,7 @@ static int perf_session_deliver_event(struct perf_session *session, | |||
652 | struct perf_tool *tool, | 703 | struct perf_tool *tool, |
653 | u64 file_offset); | 704 | u64 file_offset); |
654 | 705 | ||
655 | static void flush_sample_queue(struct perf_session *s, | 706 | static int flush_sample_queue(struct perf_session *s, |
656 | struct perf_tool *tool) | 707 | struct perf_tool *tool) |
657 | { | 708 | { |
658 | struct ordered_samples *os = &s->ordered_samples; | 709 | struct ordered_samples *os = &s->ordered_samples; |
@@ -665,19 +716,21 @@ static void flush_sample_queue(struct perf_session *s, | |||
665 | int ret; | 716 | int ret; |
666 | 717 | ||
667 | if (!tool->ordered_samples || !limit) | 718 | if (!tool->ordered_samples || !limit) |
668 | return; | 719 | return 0; |
669 | 720 | ||
670 | list_for_each_entry_safe(iter, tmp, head, list) { | 721 | list_for_each_entry_safe(iter, tmp, head, list) { |
671 | if (iter->timestamp > limit) | 722 | if (iter->timestamp > limit) |
672 | break; | 723 | break; |
673 | 724 | ||
674 | ret = perf_evlist__parse_sample(s->evlist, iter->event, &sample, | 725 | ret = perf_evlist__parse_sample(s->evlist, iter->event, &sample); |
675 | s->header.needs_swap); | ||
676 | if (ret) | 726 | if (ret) |
677 | pr_err("Can't parse sample, err = %d\n", ret); | 727 | pr_err("Can't parse sample, err = %d\n", ret); |
678 | else | 728 | else { |
679 | perf_session_deliver_event(s, iter->event, &sample, tool, | 729 | ret = perf_session_deliver_event(s, iter->event, &sample, tool, |
680 | iter->file_offset); | 730 | iter->file_offset); |
731 | if (ret) | ||
732 | return ret; | ||
733 | } | ||
681 | 734 | ||
682 | os->last_flush = iter->timestamp; | 735 | os->last_flush = iter->timestamp; |
683 | list_del(&iter->list); | 736 | list_del(&iter->list); |
@@ -697,6 +750,8 @@ static void flush_sample_queue(struct perf_session *s, | |||
697 | } | 750 | } |
698 | 751 | ||
699 | os->nr_samples = 0; | 752 | os->nr_samples = 0; |
753 | |||
754 | return 0; | ||
700 | } | 755 | } |
701 | 756 | ||
702 | /* | 757 | /* |
@@ -739,13 +794,14 @@ static void flush_sample_queue(struct perf_session *s, | |||
739 | * etc... | 794 | * etc... |
740 | */ | 795 | */ |
741 | static int process_finished_round(struct perf_tool *tool, | 796 | static int process_finished_round(struct perf_tool *tool, |
742 | union perf_event *event __used, | 797 | union perf_event *event __maybe_unused, |
743 | struct perf_session *session) | 798 | struct perf_session *session) |
744 | { | 799 | { |
745 | flush_sample_queue(session, tool); | 800 | int ret = flush_sample_queue(session, tool); |
746 | session->ordered_samples.next_flush = session->ordered_samples.max_timestamp; | 801 | if (!ret) |
802 | session->ordered_samples.next_flush = session->ordered_samples.max_timestamp; | ||
747 | 803 | ||
748 | return 0; | 804 | return ret; |
749 | } | 805 | } |
750 | 806 | ||
751 | /* The queue is ordered by time */ | 807 | /* The queue is ordered by time */ |
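flush_sample_queue() now returns an error instead of silently dropping it, and process_finished_round() only advances next_flush on success. The shape of that change, reduced to a standalone sketch with a dummy deliver() step:

#include <stdio.h>

static int deliver(int v)
{
        return v == 3 ? -1 : 0;         /* pretend delivery of item 3 fails */
}

/* Flush queued items in order; stop at the first error and hand it back. */
static int flush_queue(const int *items, int n)
{
        for (int i = 0; i < n; i++) {
                int ret = deliver(items[i]);
                if (ret)
                        return ret;
        }
        return 0;
}

int main(void)
{
        int q[] = { 1, 2, 3, 4 };

        printf("flush -> %d\n", flush_queue(q, 4));     /* -1, item 4 never delivered */
        return 0;
}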
@@ -860,6 +916,34 @@ static void branch_stack__printf(struct perf_sample *sample) | |||
860 | sample->branch_stack->entries[i].to); | 916 | sample->branch_stack->entries[i].to); |
861 | } | 917 | } |
862 | 918 | ||
919 | static void regs_dump__printf(u64 mask, u64 *regs) | ||
920 | { | ||
921 | unsigned rid, i = 0; | ||
922 | |||
923 | for_each_set_bit(rid, (unsigned long *) &mask, sizeof(mask) * 8) { | ||
924 | u64 val = regs[i++]; | ||
925 | |||
926 | printf(".... %-5s 0x%" PRIx64 "\n", | ||
927 | perf_reg_name(rid), val); | ||
928 | } | ||
929 | } | ||
930 | |||
931 | static void regs_user__printf(struct perf_sample *sample, u64 mask) | ||
932 | { | ||
933 | struct regs_dump *user_regs = &sample->user_regs; | ||
934 | |||
935 | if (user_regs->regs) { | ||
936 | printf("... user regs: mask 0x%" PRIx64 "\n", mask); | ||
937 | regs_dump__printf(mask, user_regs->regs); | ||
938 | } | ||
939 | } | ||
940 | |||
941 | static void stack_user__printf(struct stack_dump *dump) | ||
942 | { | ||
943 | printf("... ustack: size %" PRIu64 ", offset 0x%x\n", | ||
944 | dump->size, dump->offset); | ||
945 | } | ||
946 | |||
863 | static void perf_session__print_tstamp(struct perf_session *session, | 947 | static void perf_session__print_tstamp(struct perf_session *session, |
864 | union perf_event *event, | 948 | union perf_event *event, |
865 | struct perf_sample *sample) | 949 | struct perf_sample *sample) |
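regs_dump__printf() above walks the set bits of the sample_regs_user mask and consumes one packed value per bit. A portable stand-in for that for_each_set_bit() walk; the mask and values are made up, and register names are left as bit indices because perf_reg_name() is arch-specific:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
        /* Sampled registers are stored packed, in ascending bit order of the mask. */
        uint64_t mask   = (1ULL << 0) | (1ULL << 3) | (1ULL << 6);
        uint64_t regs[] = { 0xdeadbeef, 0xcafe, 0x1234 };
        unsigned int i = 0;

        for (unsigned int bit = 0; bit < 64; bit++) {
                if (!(mask & (1ULL << bit)))
                        continue;
                printf(".... reg%-2u 0x%" PRIx64 "\n", bit, regs[i++]);
        }
        return 0;
}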
@@ -897,7 +981,7 @@ static void dump_event(struct perf_session *session, union perf_event *event, | |||
897 | event->header.size, perf_event__name(event->header.type)); | 981 | event->header.size, perf_event__name(event->header.type)); |
898 | } | 982 | } |
899 | 983 | ||
900 | static void dump_sample(struct perf_session *session, union perf_event *event, | 984 | static void dump_sample(struct perf_evsel *evsel, union perf_event *event, |
901 | struct perf_sample *sample) | 985 | struct perf_sample *sample) |
902 | { | 986 | { |
903 | u64 sample_type; | 987 | u64 sample_type; |
@@ -909,13 +993,19 @@ static void dump_sample(struct perf_session *session, union perf_event *event, | |||
909 | event->header.misc, sample->pid, sample->tid, sample->ip, | 993 | event->header.misc, sample->pid, sample->tid, sample->ip, |
910 | sample->period, sample->addr); | 994 | sample->period, sample->addr); |
911 | 995 | ||
912 | sample_type = perf_evlist__sample_type(session->evlist); | 996 | sample_type = evsel->attr.sample_type; |
913 | 997 | ||
914 | if (sample_type & PERF_SAMPLE_CALLCHAIN) | 998 | if (sample_type & PERF_SAMPLE_CALLCHAIN) |
915 | callchain__printf(sample); | 999 | callchain__printf(sample); |
916 | 1000 | ||
917 | if (sample_type & PERF_SAMPLE_BRANCH_STACK) | 1001 | if (sample_type & PERF_SAMPLE_BRANCH_STACK) |
918 | branch_stack__printf(sample); | 1002 | branch_stack__printf(sample); |
1003 | |||
1004 | if (sample_type & PERF_SAMPLE_REGS_USER) | ||
1005 | regs_user__printf(sample, evsel->attr.sample_regs_user); | ||
1006 | |||
1007 | if (sample_type & PERF_SAMPLE_STACK_USER) | ||
1008 | stack_user__printf(&sample->user_stack); | ||
919 | } | 1009 | } |
920 | 1010 | ||
921 | static struct machine * | 1011 | static struct machine * |
@@ -973,7 +1063,7 @@ static int perf_session_deliver_event(struct perf_session *session, | |||
973 | 1063 | ||
974 | switch (event->header.type) { | 1064 | switch (event->header.type) { |
975 | case PERF_RECORD_SAMPLE: | 1065 | case PERF_RECORD_SAMPLE: |
976 | dump_sample(session, event, sample); | 1066 | dump_sample(evsel, event, sample); |
977 | if (evsel == NULL) { | 1067 | if (evsel == NULL) { |
978 | ++session->hists.stats.nr_unknown_id; | 1068 | ++session->hists.stats.nr_unknown_id; |
979 | return 0; | 1069 | return 0; |
@@ -1083,8 +1173,7 @@ static int perf_session__process_event(struct perf_session *session, | |||
1083 | /* | 1173 | /* |
1084 | * For all kernel events we get the sample data | 1174 | * For all kernel events we get the sample data |
1085 | */ | 1175 | */ |
1086 | ret = perf_evlist__parse_sample(session->evlist, event, &sample, | 1176 | ret = perf_evlist__parse_sample(session->evlist, event, &sample); |
1087 | session->header.needs_swap); | ||
1088 | if (ret) | 1177 | if (ret) |
1089 | return ret; | 1178 | return ret; |
1090 | 1179 | ||
@@ -1369,7 +1458,7 @@ more: | |||
1369 | err = 0; | 1458 | err = 0; |
1370 | /* do the final flush for ordered samples */ | 1459 | /* do the final flush for ordered samples */ |
1371 | session->ordered_samples.next_flush = ULLONG_MAX; | 1460 | session->ordered_samples.next_flush = ULLONG_MAX; |
1372 | flush_sample_queue(session, tool); | 1461 | err = flush_sample_queue(session, tool); |
1373 | out_err: | 1462 | out_err: |
1374 | perf_session__warn_about_errors(session, tool); | 1463 | perf_session__warn_about_errors(session, tool); |
1375 | perf_session_free_sample_buffers(session); | 1464 | perf_session_free_sample_buffers(session); |
@@ -1498,9 +1587,9 @@ struct perf_evsel *perf_session__find_first_evtype(struct perf_session *session, | |||
1498 | return NULL; | 1587 | return NULL; |
1499 | } | 1588 | } |
1500 | 1589 | ||
1501 | void perf_event__print_ip(union perf_event *event, struct perf_sample *sample, | 1590 | void perf_evsel__print_ip(struct perf_evsel *evsel, union perf_event *event, |
1502 | struct machine *machine, int print_sym, | 1591 | struct perf_sample *sample, struct machine *machine, |
1503 | int print_dso, int print_symoffset) | 1592 | int print_sym, int print_dso, int print_symoffset) |
1504 | { | 1593 | { |
1505 | struct addr_location al; | 1594 | struct addr_location al; |
1506 | struct callchain_cursor_node *node; | 1595 | struct callchain_cursor_node *node; |
@@ -1514,8 +1603,9 @@ void perf_event__print_ip(union perf_event *event, struct perf_sample *sample, | |||
1514 | 1603 | ||
1515 | if (symbol_conf.use_callchain && sample->callchain) { | 1604 | if (symbol_conf.use_callchain && sample->callchain) { |
1516 | 1605 | ||
1517 | if (machine__resolve_callchain(machine, al.thread, | 1606 | |
1518 | sample->callchain, NULL) != 0) { | 1607 | if (machine__resolve_callchain(machine, evsel, al.thread, |
1608 | sample, NULL) != 0) { | ||
1519 | if (verbose) | 1609 | if (verbose) |
1520 | error("Failed to resolve callchain. Skipping\n"); | 1610 | error("Failed to resolve callchain. Skipping\n"); |
1521 | return; | 1611 | return; |
diff --git a/tools/perf/util/session.h b/tools/perf/util/session.h index 1f7ec87db7d7..aab414fbb64b 100644 --- a/tools/perf/util/session.h +++ b/tools/perf/util/session.h | |||
@@ -36,9 +36,7 @@ struct perf_session { | |||
36 | struct pevent *pevent; | 36 | struct pevent *pevent; |
37 | /* | 37 | /* |
38 | * FIXME: Need to split this up further, we need global | 38 | * FIXME: Need to split this up further, we need global |
39 | * stats + per event stats. 'perf diff' also needs | 39 | * stats + per event stats. |
40 | * to properly support multiple events in a single | ||
41 | * perf.data file. | ||
42 | */ | 40 | */ |
43 | struct hists hists; | 41 | struct hists hists; |
44 | int fd; | 42 | int fd; |
@@ -129,9 +127,9 @@ size_t perf_session__fprintf_nr_events(struct perf_session *session, FILE *fp); | |||
129 | struct perf_evsel *perf_session__find_first_evtype(struct perf_session *session, | 127 | struct perf_evsel *perf_session__find_first_evtype(struct perf_session *session, |
130 | unsigned int type); | 128 | unsigned int type); |
131 | 129 | ||
132 | void perf_event__print_ip(union perf_event *event, struct perf_sample *sample, | 130 | void perf_evsel__print_ip(struct perf_evsel *evsel, union perf_event *event, |
133 | struct machine *machine, int print_sym, | 131 | struct perf_sample *sample, struct machine *machine, |
134 | int print_dso, int print_symoffset); | 132 | int print_sym, int print_dso, int print_symoffset); |
135 | 133 | ||
136 | int perf_session__cpu_bitmap(struct perf_session *session, | 134 | int perf_session__cpu_bitmap(struct perf_session *session, |
137 | const char *cpu_list, unsigned long *cpu_bitmap); | 135 | const char *cpu_list, unsigned long *cpu_bitmap); |
diff --git a/tools/perf/util/sort.c b/tools/perf/util/sort.c index 0f5a0a496bc4..b5b1b9211960 100644 --- a/tools/perf/util/sort.c +++ b/tools/perf/util/sort.c | |||
@@ -8,12 +8,11 @@ const char default_sort_order[] = "comm,dso,symbol"; | |||
8 | const char *sort_order = default_sort_order; | 8 | const char *sort_order = default_sort_order; |
9 | int sort__need_collapse = 0; | 9 | int sort__need_collapse = 0; |
10 | int sort__has_parent = 0; | 10 | int sort__has_parent = 0; |
11 | int sort__has_sym = 0; | ||
11 | int sort__branch_mode = -1; /* -1 = means not set */ | 12 | int sort__branch_mode = -1; /* -1 = means not set */ |
12 | 13 | ||
13 | enum sort_type sort__first_dimension; | 14 | enum sort_type sort__first_dimension; |
14 | 15 | ||
15 | char * field_sep; | ||
16 | |||
17 | LIST_HEAD(hist_entry__sort_list); | 16 | LIST_HEAD(hist_entry__sort_list); |
18 | 17 | ||
19 | static int repsep_snprintf(char *bf, size_t size, const char *fmt, ...) | 18 | static int repsep_snprintf(char *bf, size_t size, const char *fmt, ...) |
@@ -23,11 +22,11 @@ static int repsep_snprintf(char *bf, size_t size, const char *fmt, ...) | |||
23 | 22 | ||
24 | va_start(ap, fmt); | 23 | va_start(ap, fmt); |
25 | n = vsnprintf(bf, size, fmt, ap); | 24 | n = vsnprintf(bf, size, fmt, ap); |
26 | if (field_sep && n > 0) { | 25 | if (symbol_conf.field_sep && n > 0) { |
27 | char *sep = bf; | 26 | char *sep = bf; |
28 | 27 | ||
29 | while (1) { | 28 | while (1) { |
30 | sep = strchr(sep, *field_sep); | 29 | sep = strchr(sep, *symbol_conf.field_sep); |
31 | if (sep == NULL) | 30 | if (sep == NULL) |
32 | break; | 31 | break; |
33 | *sep = '.'; | 32 | *sep = '.'; |
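repsep_snprintf() keeps columned output parseable by rewriting any occurrence of the user's field separator inside a field to '.'; it now reads that separator from symbol_conf.field_sep instead of a file-local global. The escaping step on its own, with an example value:

#include <stdio.h>
#include <string.h>

/* Replace every occurrence of the chosen field separator with '.'. */
static void repsep(char *buf, char field_sep)
{
        char *sep = buf;

        while ((sep = strchr(sep, field_sep)) != NULL)
                *sep = '.';
}

int main(void)
{
        char buf[64];

        snprintf(buf, sizeof(buf), "%s", "some,comm,name");     /* ',' is the separator */
        repsep(buf, ',');
        printf("%s\n", buf);    /* -> "some.comm.name" */
        return 0;
}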
@@ -172,7 +171,7 @@ static int hist_entry__dso_snprintf(struct hist_entry *self, char *bf, | |||
172 | 171 | ||
173 | static int _hist_entry__sym_snprintf(struct map *map, struct symbol *sym, | 172 | static int _hist_entry__sym_snprintf(struct map *map, struct symbol *sym, |
174 | u64 ip, char level, char *bf, size_t size, | 173 | u64 ip, char level, char *bf, size_t size, |
175 | unsigned int width __used) | 174 | unsigned int width __maybe_unused) |
176 | { | 175 | { |
177 | size_t ret = 0; | 176 | size_t ret = 0; |
178 | 177 | ||
@@ -207,7 +206,8 @@ struct sort_entry sort_dso = { | |||
207 | }; | 206 | }; |
208 | 207 | ||
209 | static int hist_entry__sym_snprintf(struct hist_entry *self, char *bf, | 208 | static int hist_entry__sym_snprintf(struct hist_entry *self, char *bf, |
210 | size_t size, unsigned int width __used) | 209 | size_t size, |
210 | unsigned int width __maybe_unused) | ||
211 | { | 211 | { |
212 | return _hist_entry__sym_snprintf(self->ms.map, self->ms.sym, self->ip, | 212 | return _hist_entry__sym_snprintf(self->ms.map, self->ms.sym, self->ip, |
213 | self->level, bf, size, width); | 213 | self->level, bf, size, width); |
@@ -250,7 +250,8 @@ sort__srcline_cmp(struct hist_entry *left, struct hist_entry *right) | |||
250 | } | 250 | } |
251 | 251 | ||
252 | static int hist_entry__srcline_snprintf(struct hist_entry *self, char *bf, | 252 | static int hist_entry__srcline_snprintf(struct hist_entry *self, char *bf, |
253 | size_t size, unsigned int width __used) | 253 | size_t size, |
254 | unsigned int width __maybe_unused) | ||
254 | { | 255 | { |
255 | FILE *fp; | 256 | FILE *fp; |
256 | char cmd[PATH_MAX + 2], *path = self->srcline, *nl; | 257 | char cmd[PATH_MAX + 2], *path = self->srcline, *nl; |
@@ -399,7 +400,8 @@ sort__sym_to_cmp(struct hist_entry *left, struct hist_entry *right) | |||
399 | } | 400 | } |
400 | 401 | ||
401 | static int hist_entry__sym_from_snprintf(struct hist_entry *self, char *bf, | 402 | static int hist_entry__sym_from_snprintf(struct hist_entry *self, char *bf, |
402 | size_t size, unsigned int width __used) | 403 | size_t size, |
404 | unsigned int width __maybe_unused) | ||
403 | { | 405 | { |
404 | struct addr_map_symbol *from = &self->branch_info->from; | 406 | struct addr_map_symbol *from = &self->branch_info->from; |
405 | return _hist_entry__sym_snprintf(from->map, from->sym, from->addr, | 407 | return _hist_entry__sym_snprintf(from->map, from->sym, from->addr, |
@@ -408,7 +410,8 @@ static int hist_entry__sym_from_snprintf(struct hist_entry *self, char *bf, | |||
408 | } | 410 | } |
409 | 411 | ||
410 | static int hist_entry__sym_to_snprintf(struct hist_entry *self, char *bf, | 412 | static int hist_entry__sym_to_snprintf(struct hist_entry *self, char *bf, |
411 | size_t size, unsigned int width __used) | 413 | size_t size, |
414 | unsigned int width __maybe_unused) | ||
412 | { | 415 | { |
413 | struct addr_map_symbol *to = &self->branch_info->to; | 416 | struct addr_map_symbol *to = &self->branch_info->to; |
414 | return _hist_entry__sym_snprintf(to->map, to->sym, to->addr, | 417 | return _hist_entry__sym_snprintf(to->map, to->sym, to->addr, |
@@ -509,6 +512,10 @@ int sort_dimension__add(const char *tok) | |||
509 | return -EINVAL; | 512 | return -EINVAL; |
510 | } | 513 | } |
511 | sort__has_parent = 1; | 514 | sort__has_parent = 1; |
515 | } else if (sd->entry == &sort_sym || | ||
516 | sd->entry == &sort_sym_from || | ||
517 | sd->entry == &sort_sym_to) { | ||
518 | sort__has_sym = 1; | ||
512 | } | 519 | } |
513 | 520 | ||
514 | if (sd->taken) | 521 | if (sd->taken) |
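The repsep_snprintf() hunk above only moves the separator from a file-local field_sep to symbol_conf.field_sep; the escaping logic is unchanged: any occurrence of the separator character inside a formatted value is rewritten to '.', so the separator can only ever delimit columns. A minimal standalone sketch of that idea, assuming a hypothetical "," separator and made-up sketch_* names that are not part of perf:

#include <stdio.h>
#include <string.h>
#include <stdarg.h>

static const char *field_sep = ",";	/* assumed separator for the sketch */

/* Escape the field separator inside a formatted value, as repsep_snprintf()
 * does: every occurrence of *field_sep in the output is turned into '.'. */
static int sketch_snprintf(char *bf, size_t size, const char *fmt, ...)
{
	va_list ap;
	int n;

	va_start(ap, fmt);
	n = vsnprintf(bf, size, fmt, ap);
	va_end(ap);

	if (field_sep && n > 0) {
		char *sep = bf;

		while ((sep = strchr(sep, *field_sep)) != NULL)
			*sep = '.';
	}
	return n;
}

int main(void)
{
	char buf[64];

	sketch_snprintf(buf, sizeof(buf), "%s", "a,b,c");
	printf("col1%s%s\n", field_sep, buf);	/* prints: col1,a.b.c */
	return 0;
}

With that rewrite, consumers of the report output can split columns on the separator without any quoting rules.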
diff --git a/tools/perf/util/sort.h b/tools/perf/util/sort.h index e724b26acd51..12d634792de5 100644 --- a/tools/perf/util/sort.h +++ b/tools/perf/util/sort.h | |||
@@ -31,8 +31,8 @@ extern const char *parent_pattern; | |||
31 | extern const char default_sort_order[]; | 31 | extern const char default_sort_order[]; |
32 | extern int sort__need_collapse; | 32 | extern int sort__need_collapse; |
33 | extern int sort__has_parent; | 33 | extern int sort__has_parent; |
34 | extern int sort__has_sym; | ||
34 | extern int sort__branch_mode; | 35 | extern int sort__branch_mode; |
35 | extern char *field_sep; | ||
36 | extern struct sort_entry sort_comm; | 36 | extern struct sort_entry sort_comm; |
37 | extern struct sort_entry sort_dso; | 37 | extern struct sort_entry sort_dso; |
38 | extern struct sort_entry sort_sym; | 38 | extern struct sort_entry sort_sym; |
diff --git a/tools/perf/util/stat.c b/tools/perf/util/stat.c new file mode 100644 index 000000000000..23742126f47c --- /dev/null +++ b/tools/perf/util/stat.c | |||
@@ -0,0 +1,57 @@ | |||
1 | #include <math.h> | ||
2 | |||
3 | #include "stat.h" | ||
4 | |||
5 | void update_stats(struct stats *stats, u64 val) | ||
6 | { | ||
7 | double delta; | ||
8 | |||
9 | stats->n++; | ||
10 | delta = val - stats->mean; | ||
11 | stats->mean += delta / stats->n; | ||
12 | stats->M2 += delta*(val - stats->mean); | ||
13 | } | ||
14 | |||
15 | double avg_stats(struct stats *stats) | ||
16 | { | ||
17 | return stats->mean; | ||
18 | } | ||
19 | |||
20 | /* | ||
21 | * http://en.wikipedia.org/wiki/Algorithms_for_calculating_variance | ||
22 | * | ||
23 | * (\Sum n_i^2) - ((\Sum n_i)^2)/n | ||
24 | * s^2 = ------------------------------- | ||
25 | * n - 1 | ||
26 | * | ||
27 | * http://en.wikipedia.org/wiki/Stddev | ||
28 | * | ||
29 | * The std dev of the mean is related to the std dev by: | ||
30 | * | ||
31 | * s | ||
32 | * s_mean = ------- | ||
33 | * sqrt(n) | ||
34 | * | ||
35 | */ | ||
36 | double stddev_stats(struct stats *stats) | ||
37 | { | ||
38 | double variance, variance_mean; | ||
39 | |||
40 | if (!stats->n) | ||
41 | return 0.0; | ||
42 | |||
43 | variance = stats->M2 / (stats->n - 1); | ||
44 | variance_mean = variance / stats->n; | ||
45 | |||
46 | return sqrt(variance_mean); | ||
47 | } | ||
48 | |||
49 | double rel_stddev_stats(double stddev, double avg) | ||
50 | { | ||
51 | double pct = 0.0; | ||
52 | |||
53 | if (avg) | ||
54 | pct = 100.0 * stddev/avg; | ||
55 | |||
56 | return pct; | ||
57 | } | ||
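update_stats() above is the classic one-pass (Welford-style) update: the running mean and M2, the sum of squared deviations, are maintained incrementally, and stddev_stats() then derives the standard deviation of the mean from them. A self-contained sketch of the same arithmetic, using plain doubles and hypothetical sketch_* names instead of perf's struct stats and u64 (build with -lm):

#include <math.h>
#include <stdio.h>

struct sketch_stats { double n, mean, M2; };

/* Same incremental update as update_stats(): one pass, no stored samples. */
static void sketch_update(struct sketch_stats *s, double val)
{
	double delta;

	s->n++;
	delta = val - s->mean;
	s->mean += delta / s->n;
	s->M2 += delta * (val - s->mean);
}

/* Sample variance M2/(n-1), then the variance of the mean, then its square
 * root, mirroring stddev_stats(); guarded for n < 2 to avoid dividing by 0. */
static double sketch_stddev_mean(const struct sketch_stats *s)
{
	if (s->n < 2)
		return 0.0;
	return sqrt((s->M2 / (s->n - 1)) / s->n);
}

int main(void)
{
	struct sketch_stats s = { 0.0, 0.0, 0.0 };
	const double vals[] = { 10.0, 12.0, 9.0, 11.0, 13.0 };
	unsigned int i;

	for (i = 0; i < sizeof(vals) / sizeof(vals[0]); i++)
		sketch_update(&s, vals[i]);

	printf("avg = %.3f +- %.3f (stddev of the mean)\n",
	       s.mean, sketch_stddev_mean(&s));
	return 0;
}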
diff --git a/tools/perf/util/stat.h b/tools/perf/util/stat.h new file mode 100644 index 000000000000..588367c3c767 --- /dev/null +++ b/tools/perf/util/stat.h | |||
@@ -0,0 +1,16 @@ | |||
1 | #ifndef __PERF_STATS_H | ||
2 | #define __PERF_STATS_H | ||
3 | |||
4 | #include "types.h" | ||
5 | |||
6 | struct stats | ||
7 | { | ||
8 | double n, mean, M2; | ||
9 | }; | ||
10 | |||
11 | void update_stats(struct stats *stats, u64 val); | ||
12 | double avg_stats(struct stats *stats); | ||
13 | double stddev_stats(struct stats *stats); | ||
14 | double rel_stddev_stats(double stddev, double avg); | ||
15 | |||
16 | #endif | ||
diff --git a/tools/perf/util/string.c b/tools/perf/util/string.c index 199bc4d8905d..32170590892d 100644 --- a/tools/perf/util/string.c +++ b/tools/perf/util/string.c | |||
@@ -1,5 +1,5 @@ | |||
1 | #include "util.h" | 1 | #include "util.h" |
2 | #include "string.h" | 2 | #include "linux/string.h" |
3 | 3 | ||
4 | #define K 1024LL | 4 | #define K 1024LL |
5 | /* | 5 | /* |
@@ -335,3 +335,19 @@ char *rtrim(char *s) | |||
335 | 335 | ||
336 | return s; | 336 | return s; |
337 | } | 337 | } |
338 | |||
339 | /** | ||
340 | * memdup - duplicate region of memory | ||
341 | * @src: memory region to duplicate | ||
342 | * @len: memory region length | ||
343 | */ | ||
344 | void *memdup(const void *src, size_t len) | ||
345 | { | ||
346 | void *p; | ||
347 | |||
348 | p = malloc(len); | ||
349 | if (p) | ||
350 | memcpy(p, src, len); | ||
351 | |||
352 | return p; | ||
353 | } | ||
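A hypothetical usage sketch of the new memdup() helper: duplicate a fixed-size binary blob and release the copy with free(). The helper is re-declared locally only so the sketch compiles on its own.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Local copy of the helper above so the example is self-contained. */
static void *memdup(const void *src, size_t len)
{
	void *p = malloc(len);

	if (p)
		memcpy(p, src, len);
	return p;
}

int main(void)
{
	const unsigned char blob[] = { 0xde, 0xad, 0xbe, 0xef };
	unsigned char *copy = memdup(blob, sizeof(blob));

	if (copy) {
		printf("%02x %02x %02x %02x\n",
		       copy[0], copy[1], copy[2], copy[3]);
		free(copy);	/* caller owns the duplicate */
	}
	return 0;
}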
diff --git a/tools/perf/util/strlist.c b/tools/perf/util/strlist.c index 95856ff3dda4..155d8b7078a7 100644 --- a/tools/perf/util/strlist.c +++ b/tools/perf/util/strlist.c | |||
@@ -93,7 +93,7 @@ out: | |||
93 | 93 | ||
94 | void strlist__remove(struct strlist *slist, struct str_node *snode) | 94 | void strlist__remove(struct strlist *slist, struct str_node *snode) |
95 | { | 95 | { |
96 | str_node__delete(snode, slist->dupstr); | 96 | rblist__remove_node(&slist->rblist, &snode->rb_node); |
97 | } | 97 | } |
98 | 98 | ||
99 | struct str_node *strlist__find(struct strlist *slist, const char *entry) | 99 | struct str_node *strlist__find(struct strlist *slist, const char *entry) |
diff --git a/tools/perf/util/symbol-elf.c b/tools/perf/util/symbol-elf.c new file mode 100644 index 000000000000..db0cc92cf2ea --- /dev/null +++ b/tools/perf/util/symbol-elf.c | |||
@@ -0,0 +1,841 @@ | |||
1 | #include <libelf.h> | ||
2 | #include <gelf.h> | ||
3 | #include <elf.h> | ||
4 | #include <fcntl.h> | ||
5 | #include <stdio.h> | ||
6 | #include <errno.h> | ||
7 | #include <string.h> | ||
8 | #include <unistd.h> | ||
9 | #include <inttypes.h> | ||
10 | |||
11 | #include "symbol.h" | ||
12 | #include "debug.h" | ||
13 | |||
14 | #ifndef NT_GNU_BUILD_ID | ||
15 | #define NT_GNU_BUILD_ID 3 | ||
16 | #endif | ||
17 | |||
18 | /** | ||
19 | * elf_symtab__for_each_symbol - iterate through all the symbols | ||
20 | * | ||
21 | * @syms: struct elf_symtab instance to iterate | ||
22 | * @idx: uint32_t idx | ||
23 | * @sym: GElf_Sym iterator | ||
24 | */ | ||
25 | #define elf_symtab__for_each_symbol(syms, nr_syms, idx, sym) \ | ||
26 | for (idx = 0, gelf_getsym(syms, idx, &sym);\ | ||
27 | idx < nr_syms; \ | ||
28 | idx++, gelf_getsym(syms, idx, &sym)) | ||
29 | |||
30 | static inline uint8_t elf_sym__type(const GElf_Sym *sym) | ||
31 | { | ||
32 | return GELF_ST_TYPE(sym->st_info); | ||
33 | } | ||
34 | |||
35 | static inline int elf_sym__is_function(const GElf_Sym *sym) | ||
36 | { | ||
37 | return elf_sym__type(sym) == STT_FUNC && | ||
38 | sym->st_name != 0 && | ||
39 | sym->st_shndx != SHN_UNDEF; | ||
40 | } | ||
41 | |||
42 | static inline bool elf_sym__is_object(const GElf_Sym *sym) | ||
43 | { | ||
44 | return elf_sym__type(sym) == STT_OBJECT && | ||
45 | sym->st_name != 0 && | ||
46 | sym->st_shndx != SHN_UNDEF; | ||
47 | } | ||
48 | |||
49 | static inline int elf_sym__is_label(const GElf_Sym *sym) | ||
50 | { | ||
51 | return elf_sym__type(sym) == STT_NOTYPE && | ||
52 | sym->st_name != 0 && | ||
53 | sym->st_shndx != SHN_UNDEF && | ||
54 | sym->st_shndx != SHN_ABS; | ||
55 | } | ||
56 | |||
57 | static bool elf_sym__is_a(GElf_Sym *sym, enum map_type type) | ||
58 | { | ||
59 | switch (type) { | ||
60 | case MAP__FUNCTION: | ||
61 | return elf_sym__is_function(sym); | ||
62 | case MAP__VARIABLE: | ||
63 | return elf_sym__is_object(sym); | ||
64 | default: | ||
65 | return false; | ||
66 | } | ||
67 | } | ||
68 | |||
69 | static inline const char *elf_sym__name(const GElf_Sym *sym, | ||
70 | const Elf_Data *symstrs) | ||
71 | { | ||
72 | return symstrs->d_buf + sym->st_name; | ||
73 | } | ||
74 | |||
75 | static inline const char *elf_sec__name(const GElf_Shdr *shdr, | ||
76 | const Elf_Data *secstrs) | ||
77 | { | ||
78 | return secstrs->d_buf + shdr->sh_name; | ||
79 | } | ||
80 | |||
81 | static inline int elf_sec__is_text(const GElf_Shdr *shdr, | ||
82 | const Elf_Data *secstrs) | ||
83 | { | ||
84 | return strstr(elf_sec__name(shdr, secstrs), "text") != NULL; | ||
85 | } | ||
86 | |||
87 | static inline bool elf_sec__is_data(const GElf_Shdr *shdr, | ||
88 | const Elf_Data *secstrs) | ||
89 | { | ||
90 | return strstr(elf_sec__name(shdr, secstrs), "data") != NULL; | ||
91 | } | ||
92 | |||
93 | static bool elf_sec__is_a(GElf_Shdr *shdr, Elf_Data *secstrs, | ||
94 | enum map_type type) | ||
95 | { | ||
96 | switch (type) { | ||
97 | case MAP__FUNCTION: | ||
98 | return elf_sec__is_text(shdr, secstrs); | ||
99 | case MAP__VARIABLE: | ||
100 | return elf_sec__is_data(shdr, secstrs); | ||
101 | default: | ||
102 | return false; | ||
103 | } | ||
104 | } | ||
105 | |||
106 | static size_t elf_addr_to_index(Elf *elf, GElf_Addr addr) | ||
107 | { | ||
108 | Elf_Scn *sec = NULL; | ||
109 | GElf_Shdr shdr; | ||
110 | size_t cnt = 1; | ||
111 | |||
112 | while ((sec = elf_nextscn(elf, sec)) != NULL) { | ||
113 | gelf_getshdr(sec, &shdr); | ||
114 | |||
115 | if ((addr >= shdr.sh_addr) && | ||
116 | (addr < (shdr.sh_addr + shdr.sh_size))) | ||
117 | return cnt; | ||
118 | |||
119 | ++cnt; | ||
120 | } | ||
121 | |||
122 | return -1; | ||
123 | } | ||
124 | |||
125 | static Elf_Scn *elf_section_by_name(Elf *elf, GElf_Ehdr *ep, | ||
126 | GElf_Shdr *shp, const char *name, | ||
127 | size_t *idx) | ||
128 | { | ||
129 | Elf_Scn *sec = NULL; | ||
130 | size_t cnt = 1; | ||
131 | |||
132 | /* Elf is corrupted/truncated, avoid calling elf_strptr. */ | ||
133 | if (!elf_rawdata(elf_getscn(elf, ep->e_shstrndx), NULL)) | ||
134 | return NULL; | ||
135 | |||
136 | while ((sec = elf_nextscn(elf, sec)) != NULL) { | ||
137 | char *str; | ||
138 | |||
139 | gelf_getshdr(sec, shp); | ||
140 | str = elf_strptr(elf, ep->e_shstrndx, shp->sh_name); | ||
141 | if (!strcmp(name, str)) { | ||
142 | if (idx) | ||
143 | *idx = cnt; | ||
144 | break; | ||
145 | } | ||
146 | ++cnt; | ||
147 | } | ||
148 | |||
149 | return sec; | ||
150 | } | ||
151 | |||
152 | #define elf_section__for_each_rel(reldata, pos, pos_mem, idx, nr_entries) \ | ||
153 | for (idx = 0, pos = gelf_getrel(reldata, 0, &pos_mem); \ | ||
154 | idx < nr_entries; \ | ||
155 | ++idx, pos = gelf_getrel(reldata, idx, &pos_mem)) | ||
156 | |||
157 | #define elf_section__for_each_rela(reldata, pos, pos_mem, idx, nr_entries) \ | ||
158 | for (idx = 0, pos = gelf_getrela(reldata, 0, &pos_mem); \ | ||
159 | idx < nr_entries; \ | ||
160 | ++idx, pos = gelf_getrela(reldata, idx, &pos_mem)) | ||
161 | |||
162 | /* | ||
163 | * We need to check if we have a .dynsym, so that we can handle the | ||
164 | * .plt, synthesizing its symbols, that aren't on the symtabs (be it | ||
165 | * .dynsym or .symtab). | ||
166 | * And always look at the original dso, not at debuginfo packages, that | ||
167 | * have the PLT data stripped out (shdr_rel_plt.sh_type == SHT_NOBITS). | ||
168 | */ | ||
169 | int dso__synthesize_plt_symbols(struct dso *dso, struct symsrc *ss, struct map *map, | ||
170 | symbol_filter_t filter) | ||
171 | { | ||
172 | uint32_t nr_rel_entries, idx; | ||
173 | GElf_Sym sym; | ||
174 | u64 plt_offset; | ||
175 | GElf_Shdr shdr_plt; | ||
176 | struct symbol *f; | ||
177 | GElf_Shdr shdr_rel_plt, shdr_dynsym; | ||
178 | Elf_Data *reldata, *syms, *symstrs; | ||
179 | Elf_Scn *scn_plt_rel, *scn_symstrs, *scn_dynsym; | ||
180 | size_t dynsym_idx; | ||
181 | GElf_Ehdr ehdr; | ||
182 | char sympltname[1024]; | ||
183 | Elf *elf; | ||
184 | int nr = 0, symidx, err = 0; | ||
185 | |||
186 | if (!ss->dynsym) | ||
187 | return 0; | ||
188 | |||
189 | elf = ss->elf; | ||
190 | ehdr = ss->ehdr; | ||
191 | |||
192 | scn_dynsym = ss->dynsym; | ||
193 | shdr_dynsym = ss->dynshdr; | ||
194 | dynsym_idx = ss->dynsym_idx; | ||
195 | |||
196 | if (scn_dynsym == NULL) | ||
197 | goto out_elf_end; | ||
198 | |||
199 | scn_plt_rel = elf_section_by_name(elf, &ehdr, &shdr_rel_plt, | ||
200 | ".rela.plt", NULL); | ||
201 | if (scn_plt_rel == NULL) { | ||
202 | scn_plt_rel = elf_section_by_name(elf, &ehdr, &shdr_rel_plt, | ||
203 | ".rel.plt", NULL); | ||
204 | if (scn_plt_rel == NULL) | ||
205 | goto out_elf_end; | ||
206 | } | ||
207 | |||
208 | err = -1; | ||
209 | |||
210 | if (shdr_rel_plt.sh_link != dynsym_idx) | ||
211 | goto out_elf_end; | ||
212 | |||
213 | if (elf_section_by_name(elf, &ehdr, &shdr_plt, ".plt", NULL) == NULL) | ||
214 | goto out_elf_end; | ||
215 | |||
216 | /* | ||
217 | * Fetch the relocation section to find the idxes to the GOT | ||
218 | * and the symbols in the .dynsym they refer to. | ||
219 | */ | ||
220 | reldata = elf_getdata(scn_plt_rel, NULL); | ||
221 | if (reldata == NULL) | ||
222 | goto out_elf_end; | ||
223 | |||
224 | syms = elf_getdata(scn_dynsym, NULL); | ||
225 | if (syms == NULL) | ||
226 | goto out_elf_end; | ||
227 | |||
228 | scn_symstrs = elf_getscn(elf, shdr_dynsym.sh_link); | ||
229 | if (scn_symstrs == NULL) | ||
230 | goto out_elf_end; | ||
231 | |||
232 | symstrs = elf_getdata(scn_symstrs, NULL); | ||
233 | if (symstrs == NULL) | ||
234 | goto out_elf_end; | ||
235 | |||
236 | if (symstrs->d_size == 0) | ||
237 | goto out_elf_end; | ||
238 | |||
239 | nr_rel_entries = shdr_rel_plt.sh_size / shdr_rel_plt.sh_entsize; | ||
240 | plt_offset = shdr_plt.sh_offset; | ||
241 | |||
242 | if (shdr_rel_plt.sh_type == SHT_RELA) { | ||
243 | GElf_Rela pos_mem, *pos; | ||
244 | |||
245 | elf_section__for_each_rela(reldata, pos, pos_mem, idx, | ||
246 | nr_rel_entries) { | ||
247 | symidx = GELF_R_SYM(pos->r_info); | ||
248 | plt_offset += shdr_plt.sh_entsize; | ||
249 | gelf_getsym(syms, symidx, &sym); | ||
250 | snprintf(sympltname, sizeof(sympltname), | ||
251 | "%s@plt", elf_sym__name(&sym, symstrs)); | ||
252 | |||
253 | f = symbol__new(plt_offset, shdr_plt.sh_entsize, | ||
254 | STB_GLOBAL, sympltname); | ||
255 | if (!f) | ||
256 | goto out_elf_end; | ||
257 | |||
258 | if (filter && filter(map, f)) | ||
259 | symbol__delete(f); | ||
260 | else { | ||
261 | symbols__insert(&dso->symbols[map->type], f); | ||
262 | ++nr; | ||
263 | } | ||
264 | } | ||
265 | } else if (shdr_rel_plt.sh_type == SHT_REL) { | ||
266 | GElf_Rel pos_mem, *pos; | ||
267 | elf_section__for_each_rel(reldata, pos, pos_mem, idx, | ||
268 | nr_rel_entries) { | ||
269 | symidx = GELF_R_SYM(pos->r_info); | ||
270 | plt_offset += shdr_plt.sh_entsize; | ||
271 | gelf_getsym(syms, symidx, &sym); | ||
272 | snprintf(sympltname, sizeof(sympltname), | ||
273 | "%s@plt", elf_sym__name(&sym, symstrs)); | ||
274 | |||
275 | f = symbol__new(plt_offset, shdr_plt.sh_entsize, | ||
276 | STB_GLOBAL, sympltname); | ||
277 | if (!f) | ||
278 | goto out_elf_end; | ||
279 | |||
280 | if (filter && filter(map, f)) | ||
281 | symbol__delete(f); | ||
282 | else { | ||
283 | symbols__insert(&dso->symbols[map->type], f); | ||
284 | ++nr; | ||
285 | } | ||
286 | } | ||
287 | } | ||
288 | |||
289 | err = 0; | ||
290 | out_elf_end: | ||
291 | if (err == 0) | ||
292 | return nr; | ||
293 | pr_debug("%s: problems reading %s PLT info.\n", | ||
294 | __func__, dso->long_name); | ||
295 | return 0; | ||
296 | } | ||
297 | |||
298 | /* | ||
299 | * Align offset to 4 bytes as needed for note name and descriptor data. | ||
300 | */ | ||
301 | #define NOTE_ALIGN(n) (((n) + 3) & -4U) | ||
302 | |||
303 | static int elf_read_build_id(Elf *elf, void *bf, size_t size) | ||
304 | { | ||
305 | int err = -1; | ||
306 | GElf_Ehdr ehdr; | ||
307 | GElf_Shdr shdr; | ||
308 | Elf_Data *data; | ||
309 | Elf_Scn *sec; | ||
310 | Elf_Kind ek; | ||
311 | void *ptr; | ||
312 | |||
313 | if (size < BUILD_ID_SIZE) | ||
314 | goto out; | ||
315 | |||
316 | ek = elf_kind(elf); | ||
317 | if (ek != ELF_K_ELF) | ||
318 | goto out; | ||
319 | |||
320 | if (gelf_getehdr(elf, &ehdr) == NULL) { | ||
321 | pr_err("%s: cannot get elf header.\n", __func__); | ||
322 | goto out; | ||
323 | } | ||
324 | |||
325 | /* | ||
326 | * Check following sections for notes: | ||
327 | * '.note.gnu.build-id' | ||
328 | * '.notes' | ||
329 | * '.note' (VDSO specific) | ||
330 | */ | ||
331 | do { | ||
332 | sec = elf_section_by_name(elf, &ehdr, &shdr, | ||
333 | ".note.gnu.build-id", NULL); | ||
334 | if (sec) | ||
335 | break; | ||
336 | |||
337 | sec = elf_section_by_name(elf, &ehdr, &shdr, | ||
338 | ".notes", NULL); | ||
339 | if (sec) | ||
340 | break; | ||
341 | |||
342 | sec = elf_section_by_name(elf, &ehdr, &shdr, | ||
343 | ".note", NULL); | ||
344 | if (sec) | ||
345 | break; | ||
346 | |||
347 | return err; | ||
348 | |||
349 | } while (0); | ||
350 | |||
351 | data = elf_getdata(sec, NULL); | ||
352 | if (data == NULL) | ||
353 | goto out; | ||
354 | |||
355 | ptr = data->d_buf; | ||
356 | while (ptr < (data->d_buf + data->d_size)) { | ||
357 | GElf_Nhdr *nhdr = ptr; | ||
358 | size_t namesz = NOTE_ALIGN(nhdr->n_namesz), | ||
359 | descsz = NOTE_ALIGN(nhdr->n_descsz); | ||
360 | const char *name; | ||
361 | |||
362 | ptr += sizeof(*nhdr); | ||
363 | name = ptr; | ||
364 | ptr += namesz; | ||
365 | if (nhdr->n_type == NT_GNU_BUILD_ID && | ||
366 | nhdr->n_namesz == sizeof("GNU")) { | ||
367 | if (memcmp(name, "GNU", sizeof("GNU")) == 0) { | ||
368 | size_t sz = min(size, descsz); | ||
369 | memcpy(bf, ptr, sz); | ||
370 | memset(bf + sz, 0, size - sz); | ||
371 | err = descsz; | ||
372 | break; | ||
373 | } | ||
374 | } | ||
375 | ptr += descsz; | ||
376 | } | ||
377 | |||
378 | out: | ||
379 | return err; | ||
380 | } | ||
381 | |||
382 | int filename__read_build_id(const char *filename, void *bf, size_t size) | ||
383 | { | ||
384 | int fd, err = -1; | ||
385 | Elf *elf; | ||
386 | |||
387 | if (size < BUILD_ID_SIZE) | ||
388 | goto out; | ||
389 | |||
390 | fd = open(filename, O_RDONLY); | ||
391 | if (fd < 0) | ||
392 | goto out; | ||
393 | |||
394 | elf = elf_begin(fd, PERF_ELF_C_READ_MMAP, NULL); | ||
395 | if (elf == NULL) { | ||
396 | pr_debug2("%s: cannot read %s ELF file.\n", __func__, filename); | ||
397 | goto out_close; | ||
398 | } | ||
399 | |||
400 | err = elf_read_build_id(elf, bf, size); | ||
401 | |||
402 | elf_end(elf); | ||
403 | out_close: | ||
404 | close(fd); | ||
405 | out: | ||
406 | return err; | ||
407 | } | ||
408 | |||
409 | int sysfs__read_build_id(const char *filename, void *build_id, size_t size) | ||
410 | { | ||
411 | int fd, err = -1; | ||
412 | |||
413 | if (size < BUILD_ID_SIZE) | ||
414 | goto out; | ||
415 | |||
416 | fd = open(filename, O_RDONLY); | ||
417 | if (fd < 0) | ||
418 | goto out; | ||
419 | |||
420 | while (1) { | ||
421 | char bf[BUFSIZ]; | ||
422 | GElf_Nhdr nhdr; | ||
423 | size_t namesz, descsz; | ||
424 | |||
425 | if (read(fd, &nhdr, sizeof(nhdr)) != sizeof(nhdr)) | ||
426 | break; | ||
427 | |||
428 | namesz = NOTE_ALIGN(nhdr.n_namesz); | ||
429 | descsz = NOTE_ALIGN(nhdr.n_descsz); | ||
430 | if (nhdr.n_type == NT_GNU_BUILD_ID && | ||
431 | nhdr.n_namesz == sizeof("GNU")) { | ||
432 | if (read(fd, bf, namesz) != (ssize_t)namesz) | ||
433 | break; | ||
434 | if (memcmp(bf, "GNU", sizeof("GNU")) == 0) { | ||
435 | size_t sz = min(descsz, size); | ||
436 | if (read(fd, build_id, sz) == (ssize_t)sz) { | ||
437 | memset(build_id + sz, 0, size - sz); | ||
438 | err = 0; | ||
439 | break; | ||
440 | } | ||
441 | } else if (read(fd, bf, descsz) != (ssize_t)descsz) | ||
442 | break; | ||
443 | } else { | ||
444 | int n = namesz + descsz; | ||
445 | if (read(fd, bf, n) != n) | ||
446 | break; | ||
447 | } | ||
448 | } | ||
449 | close(fd); | ||
450 | out: | ||
451 | return err; | ||
452 | } | ||
453 | |||
454 | int filename__read_debuglink(const char *filename, char *debuglink, | ||
455 | size_t size) | ||
456 | { | ||
457 | int fd, err = -1; | ||
458 | Elf *elf; | ||
459 | GElf_Ehdr ehdr; | ||
460 | GElf_Shdr shdr; | ||
461 | Elf_Data *data; | ||
462 | Elf_Scn *sec; | ||
463 | Elf_Kind ek; | ||
464 | |||
465 | fd = open(filename, O_RDONLY); | ||
466 | if (fd < 0) | ||
467 | goto out; | ||
468 | |||
469 | elf = elf_begin(fd, PERF_ELF_C_READ_MMAP, NULL); | ||
470 | if (elf == NULL) { | ||
471 | pr_debug2("%s: cannot read %s ELF file.\n", __func__, filename); | ||
472 | goto out_close; | ||
473 | } | ||
474 | |||
475 | ek = elf_kind(elf); | ||
476 | if (ek != ELF_K_ELF) | ||
477 | goto out_close; | ||
478 | |||
479 | if (gelf_getehdr(elf, &ehdr) == NULL) { | ||
480 | pr_err("%s: cannot get elf header.\n", __func__); | ||
481 | goto out_close; | ||
482 | } | ||
483 | |||
484 | sec = elf_section_by_name(elf, &ehdr, &shdr, | ||
485 | ".gnu_debuglink", NULL); | ||
486 | if (sec == NULL) | ||
487 | goto out_close; | ||
488 | |||
489 | data = elf_getdata(sec, NULL); | ||
490 | if (data == NULL) | ||
491 | goto out_close; | ||
492 | |||
493 | /* the start of this section is a zero-terminated string */ | ||
494 | strncpy(debuglink, data->d_buf, size); | ||
495 | err = 0; | ||
496 | elf_end(elf); | ||
497 | |||
498 | out_close: | ||
499 | close(fd); | ||
500 | out: | ||
501 | return err; | ||
502 | } | ||
503 | |||
504 | static int dso__swap_init(struct dso *dso, unsigned char eidata) | ||
505 | { | ||
506 | static unsigned int const endian = 1; | ||
507 | |||
508 | dso->needs_swap = DSO_SWAP__NO; | ||
509 | |||
510 | switch (eidata) { | ||
511 | case ELFDATA2LSB: | ||
512 | /* We are big endian, DSO is little endian. */ | ||
513 | if (*(unsigned char const *)&endian != 1) | ||
514 | dso->needs_swap = DSO_SWAP__YES; | ||
515 | break; | ||
516 | |||
517 | case ELFDATA2MSB: | ||
518 | /* We are little endian, DSO is big endian. */ | ||
519 | if (*(unsigned char const *)&endian != 0) | ||
520 | dso->needs_swap = DSO_SWAP__YES; | ||
521 | break; | ||
522 | |||
523 | default: | ||
524 | pr_err("unrecognized DSO data encoding %d\n", eidata); | ||
525 | return -EINVAL; | ||
526 | } | ||
527 | |||
528 | return 0; | ||
529 | } | ||
530 | |||
531 | bool symsrc__possibly_runtime(struct symsrc *ss) | ||
532 | { | ||
533 | return ss->dynsym || ss->opdsec; | ||
534 | } | ||
535 | |||
536 | bool symsrc__has_symtab(struct symsrc *ss) | ||
537 | { | ||
538 | return ss->symtab != NULL; | ||
539 | } | ||
540 | |||
541 | void symsrc__destroy(struct symsrc *ss) | ||
542 | { | ||
543 | free(ss->name); | ||
544 | elf_end(ss->elf); | ||
545 | close(ss->fd); | ||
546 | } | ||
547 | |||
548 | int symsrc__init(struct symsrc *ss, struct dso *dso, const char *name, | ||
549 | enum dso_binary_type type) | ||
550 | { | ||
551 | int err = -1; | ||
552 | GElf_Ehdr ehdr; | ||
553 | Elf *elf; | ||
554 | int fd; | ||
555 | |||
556 | fd = open(name, O_RDONLY); | ||
557 | if (fd < 0) | ||
558 | return -1; | ||
559 | |||
560 | elf = elf_begin(fd, PERF_ELF_C_READ_MMAP, NULL); | ||
561 | if (elf == NULL) { | ||
562 | pr_debug("%s: cannot read %s ELF file.\n", __func__, name); | ||
563 | goto out_close; | ||
564 | } | ||
565 | |||
566 | if (gelf_getehdr(elf, &ehdr) == NULL) { | ||
567 | pr_debug("%s: cannot get elf header.\n", __func__); | ||
568 | goto out_elf_end; | ||
569 | } | ||
570 | |||
571 | if (dso__swap_init(dso, ehdr.e_ident[EI_DATA])) | ||
572 | goto out_elf_end; | ||
573 | |||
574 | /* Always reject images with a mismatched build-id: */ | ||
575 | if (dso->has_build_id) { | ||
576 | u8 build_id[BUILD_ID_SIZE]; | ||
577 | |||
578 | if (elf_read_build_id(elf, build_id, BUILD_ID_SIZE) < 0) | ||
579 | goto out_elf_end; | ||
580 | |||
581 | if (!dso__build_id_equal(dso, build_id)) | ||
582 | goto out_elf_end; | ||
583 | } | ||
584 | |||
585 | ss->symtab = elf_section_by_name(elf, &ehdr, &ss->symshdr, ".symtab", | ||
586 | NULL); | ||
587 | if (ss->symshdr.sh_type != SHT_SYMTAB) | ||
588 | ss->symtab = NULL; | ||
589 | |||
590 | ss->dynsym_idx = 0; | ||
591 | ss->dynsym = elf_section_by_name(elf, &ehdr, &ss->dynshdr, ".dynsym", | ||
592 | &ss->dynsym_idx); | ||
593 | if (ss->dynshdr.sh_type != SHT_DYNSYM) | ||
594 | ss->dynsym = NULL; | ||
595 | |||
596 | ss->opdidx = 0; | ||
597 | ss->opdsec = elf_section_by_name(elf, &ehdr, &ss->opdshdr, ".opd", | ||
598 | &ss->opdidx); | ||
599 | if (ss->opdshdr.sh_type != SHT_PROGBITS) | ||
600 | ss->opdsec = NULL; | ||
601 | |||
602 | if (dso->kernel == DSO_TYPE_USER) { | ||
603 | GElf_Shdr shdr; | ||
604 | ss->adjust_symbols = (ehdr.e_type == ET_EXEC || | ||
605 | elf_section_by_name(elf, &ehdr, &shdr, | ||
606 | ".gnu.prelink_undo", | ||
607 | NULL) != NULL); | ||
608 | } else { | ||
609 | ss->adjust_symbols = 0; | ||
610 | } | ||
611 | |||
612 | ss->name = strdup(name); | ||
613 | if (!ss->name) | ||
614 | goto out_elf_end; | ||
615 | |||
616 | ss->elf = elf; | ||
617 | ss->fd = fd; | ||
618 | ss->ehdr = ehdr; | ||
619 | ss->type = type; | ||
620 | |||
621 | return 0; | ||
622 | |||
623 | out_elf_end: | ||
624 | elf_end(elf); | ||
625 | out_close: | ||
626 | close(fd); | ||
627 | return err; | ||
628 | } | ||
629 | |||
630 | int dso__load_sym(struct dso *dso, struct map *map, | ||
631 | struct symsrc *syms_ss, struct symsrc *runtime_ss, | ||
632 | symbol_filter_t filter, int kmodule) | ||
633 | { | ||
634 | struct kmap *kmap = dso->kernel ? map__kmap(map) : NULL; | ||
635 | struct map *curr_map = map; | ||
636 | struct dso *curr_dso = dso; | ||
637 | Elf_Data *symstrs, *secstrs; | ||
638 | uint32_t nr_syms; | ||
639 | int err = -1; | ||
640 | uint32_t idx; | ||
641 | GElf_Ehdr ehdr; | ||
642 | GElf_Shdr shdr; | ||
643 | Elf_Data *syms, *opddata = NULL; | ||
644 | GElf_Sym sym; | ||
645 | Elf_Scn *sec, *sec_strndx; | ||
646 | Elf *elf; | ||
647 | int nr = 0; | ||
648 | |||
649 | dso->symtab_type = syms_ss->type; | ||
650 | |||
651 | if (!syms_ss->symtab) { | ||
652 | syms_ss->symtab = syms_ss->dynsym; | ||
653 | syms_ss->symshdr = syms_ss->dynshdr; | ||
654 | } | ||
655 | |||
656 | elf = syms_ss->elf; | ||
657 | ehdr = syms_ss->ehdr; | ||
658 | sec = syms_ss->symtab; | ||
659 | shdr = syms_ss->symshdr; | ||
660 | |||
661 | if (runtime_ss->opdsec) | ||
662 | opddata = elf_rawdata(runtime_ss->opdsec, NULL); | ||
663 | |||
664 | syms = elf_getdata(sec, NULL); | ||
665 | if (syms == NULL) | ||
666 | goto out_elf_end; | ||
667 | |||
668 | sec = elf_getscn(elf, shdr.sh_link); | ||
669 | if (sec == NULL) | ||
670 | goto out_elf_end; | ||
671 | |||
672 | symstrs = elf_getdata(sec, NULL); | ||
673 | if (symstrs == NULL) | ||
674 | goto out_elf_end; | ||
675 | |||
676 | sec_strndx = elf_getscn(elf, ehdr.e_shstrndx); | ||
677 | if (sec_strndx == NULL) | ||
678 | goto out_elf_end; | ||
679 | |||
680 | secstrs = elf_getdata(sec_strndx, NULL); | ||
681 | if (secstrs == NULL) | ||
682 | goto out_elf_end; | ||
683 | |||
684 | nr_syms = shdr.sh_size / shdr.sh_entsize; | ||
685 | |||
686 | memset(&sym, 0, sizeof(sym)); | ||
687 | dso->adjust_symbols = runtime_ss->adjust_symbols; | ||
688 | elf_symtab__for_each_symbol(syms, nr_syms, idx, sym) { | ||
689 | struct symbol *f; | ||
690 | const char *elf_name = elf_sym__name(&sym, symstrs); | ||
691 | char *demangled = NULL; | ||
692 | int is_label = elf_sym__is_label(&sym); | ||
693 | const char *section_name; | ||
694 | bool used_opd = false; | ||
695 | |||
696 | if (kmap && kmap->ref_reloc_sym && kmap->ref_reloc_sym->name && | ||
697 | strcmp(elf_name, kmap->ref_reloc_sym->name) == 0) | ||
698 | kmap->ref_reloc_sym->unrelocated_addr = sym.st_value; | ||
699 | |||
700 | if (!is_label && !elf_sym__is_a(&sym, map->type)) | ||
701 | continue; | ||
702 | |||
703 | /* Reject ARM ELF "mapping symbols": these aren't unique and | ||
704 | * don't identify functions, so will confuse the profile | ||
705 | * output: */ | ||
706 | if (ehdr.e_machine == EM_ARM) { | ||
707 | if (!strcmp(elf_name, "$a") || | ||
708 | !strcmp(elf_name, "$d") || | ||
709 | !strcmp(elf_name, "$t")) | ||
710 | continue; | ||
711 | } | ||
712 | |||
713 | if (runtime_ss->opdsec && sym.st_shndx == runtime_ss->opdidx) { | ||
714 | u32 offset = sym.st_value - syms_ss->opdshdr.sh_addr; | ||
715 | u64 *opd = opddata->d_buf + offset; | ||
716 | sym.st_value = DSO__SWAP(dso, u64, *opd); | ||
717 | sym.st_shndx = elf_addr_to_index(runtime_ss->elf, | ||
718 | sym.st_value); | ||
719 | used_opd = true; | ||
720 | } | ||
721 | |||
722 | sec = elf_getscn(runtime_ss->elf, sym.st_shndx); | ||
723 | if (!sec) | ||
724 | goto out_elf_end; | ||
725 | |||
726 | gelf_getshdr(sec, &shdr); | ||
727 | |||
728 | if (is_label && !elf_sec__is_a(&shdr, secstrs, map->type)) | ||
729 | continue; | ||
730 | |||
731 | section_name = elf_sec__name(&shdr, secstrs); | ||
732 | |||
733 | /* On ARM, symbols for thumb functions have 1 added to | ||
734 | * the symbol address as a flag - remove it */ | ||
735 | if ((ehdr.e_machine == EM_ARM) && | ||
736 | (map->type == MAP__FUNCTION) && | ||
737 | (sym.st_value & 1)) | ||
738 | --sym.st_value; | ||
739 | |||
740 | if (dso->kernel != DSO_TYPE_USER || kmodule) { | ||
741 | char dso_name[PATH_MAX]; | ||
742 | |||
743 | if (strcmp(section_name, | ||
744 | (curr_dso->short_name + | ||
745 | dso->short_name_len)) == 0) | ||
746 | goto new_symbol; | ||
747 | |||
748 | if (strcmp(section_name, ".text") == 0) { | ||
749 | curr_map = map; | ||
750 | curr_dso = dso; | ||
751 | goto new_symbol; | ||
752 | } | ||
753 | |||
754 | snprintf(dso_name, sizeof(dso_name), | ||
755 | "%s%s", dso->short_name, section_name); | ||
756 | |||
757 | curr_map = map_groups__find_by_name(kmap->kmaps, map->type, dso_name); | ||
758 | if (curr_map == NULL) { | ||
759 | u64 start = sym.st_value; | ||
760 | |||
761 | if (kmodule) | ||
762 | start += map->start + shdr.sh_offset; | ||
763 | |||
764 | curr_dso = dso__new(dso_name); | ||
765 | if (curr_dso == NULL) | ||
766 | goto out_elf_end; | ||
767 | curr_dso->kernel = dso->kernel; | ||
768 | curr_dso->long_name = dso->long_name; | ||
769 | curr_dso->long_name_len = dso->long_name_len; | ||
770 | curr_map = map__new2(start, curr_dso, | ||
771 | map->type); | ||
772 | if (curr_map == NULL) { | ||
773 | dso__delete(curr_dso); | ||
774 | goto out_elf_end; | ||
775 | } | ||
776 | curr_map->map_ip = identity__map_ip; | ||
777 | curr_map->unmap_ip = identity__map_ip; | ||
778 | curr_dso->symtab_type = dso->symtab_type; | ||
779 | map_groups__insert(kmap->kmaps, curr_map); | ||
780 | dsos__add(&dso->node, curr_dso); | ||
781 | dso__set_loaded(curr_dso, map->type); | ||
782 | } else | ||
783 | curr_dso = curr_map->dso; | ||
784 | |||
785 | goto new_symbol; | ||
786 | } | ||
787 | |||
788 | if ((used_opd && runtime_ss->adjust_symbols) | ||
789 | || (!used_opd && syms_ss->adjust_symbols)) { | ||
790 | pr_debug4("%s: adjusting symbol: st_value: %#" PRIx64 " " | ||
791 | "sh_addr: %#" PRIx64 " sh_offset: %#" PRIx64 "\n", __func__, | ||
792 | (u64)sym.st_value, (u64)shdr.sh_addr, | ||
793 | (u64)shdr.sh_offset); | ||
794 | sym.st_value -= shdr.sh_addr - shdr.sh_offset; | ||
795 | } | ||
796 | /* | ||
797 | * We need to figure out if the object was created from C++ sources. | ||
798 | * DWARF DW_compile_unit has this, but we don't always have access | ||
799 | * to it... | ||
800 | */ | ||
801 | demangled = bfd_demangle(NULL, elf_name, DMGL_PARAMS | DMGL_ANSI); | ||
802 | if (demangled != NULL) | ||
803 | elf_name = demangled; | ||
804 | new_symbol: | ||
805 | f = symbol__new(sym.st_value, sym.st_size, | ||
806 | GELF_ST_BIND(sym.st_info), elf_name); | ||
807 | free(demangled); | ||
808 | if (!f) | ||
809 | goto out_elf_end; | ||
810 | |||
811 | if (filter && filter(curr_map, f)) | ||
812 | symbol__delete(f); | ||
813 | else { | ||
814 | symbols__insert(&curr_dso->symbols[curr_map->type], f); | ||
815 | nr++; | ||
816 | } | ||
817 | } | ||
818 | |||
819 | /* | ||
820 | * For misannotated, zeroed, ASM function sizes. | ||
821 | */ | ||
822 | if (nr > 0) { | ||
823 | symbols__fixup_duplicate(&dso->symbols[map->type]); | ||
824 | symbols__fixup_end(&dso->symbols[map->type]); | ||
825 | if (kmap) { | ||
826 | /* | ||
827 | * We need to fixup this here too because we create new | ||
828 | * maps here, for things like vsyscall sections. | ||
829 | */ | ||
830 | __map_groups__fixup_end(kmap->kmaps, map->type); | ||
831 | } | ||
832 | } | ||
833 | err = nr; | ||
834 | out_elf_end: | ||
835 | return err; | ||
836 | } | ||
837 | |||
838 | void symbol__elf_init(void) | ||
839 | { | ||
840 | elf_version(EV_CURRENT); | ||
841 | } | ||
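elf_read_build_id() and sysfs__read_build_id() above both walk a chain of ELF notes: each note starts with n_namesz/n_descsz/n_type, the name and descriptor are padded to 4 bytes (NOTE_ALIGN), and the build-id is the descriptor of the note whose owner is "GNU" and whose type is NT_GNU_BUILD_ID (3). A standalone sketch of that walk over an in-memory buffer, with made-up names (note_hdr, find_build_id) and only the bounds checks the well-formed toy input needs:

#include <stdio.h>
#include <stdint.h>
#include <string.h>

#define NOTE_ALIGN(n) (((n) + 3) & ~3U)

struct note_hdr { uint32_t n_namesz, n_descsz, n_type; };

/* Scan a buffer of ELF notes and copy out the GNU build-id descriptor. */
static int find_build_id(const void *notes, size_t len, void *bf, size_t size)
{
	const char *ptr = notes;
	const char *end = ptr + len;

	while (ptr + sizeof(struct note_hdr) <= end) {
		struct note_hdr nhdr;
		size_t namesz, descsz, sz;
		const char *name, *desc;

		memcpy(&nhdr, ptr, sizeof(nhdr));	/* avoid unaligned access */
		namesz = NOTE_ALIGN(nhdr.n_namesz);
		descsz = NOTE_ALIGN(nhdr.n_descsz);
		name = ptr + sizeof(nhdr);
		desc = name + namesz;

		if (nhdr.n_type == 3 /* NT_GNU_BUILD_ID */ &&
		    nhdr.n_namesz == sizeof("GNU") &&
		    memcmp(name, "GNU", sizeof("GNU")) == 0) {
			sz = descsz < size ? descsz : size;
			memcpy(bf, desc, sz);
			return (int)sz;
		}
		ptr = desc + descsz;	/* step to the next note */
	}
	return -1;
}

int main(void)
{
	unsigned char notes[sizeof(struct note_hdr) + 4 + 20];
	struct note_hdr hdr = { sizeof("GNU"), 20, 3 };
	unsigned char id[20];
	int i, n;

	/* Build one well-formed note: header, "GNU\0", then 20 fake id bytes. */
	memcpy(notes, &hdr, sizeof(hdr));
	memcpy(notes + sizeof(hdr), "GNU", sizeof("GNU"));
	for (i = 0; i < 20; i++)
		notes[sizeof(hdr) + 4 + i] = (unsigned char)i;

	n = find_build_id(notes, sizeof(notes), id, sizeof(id));
	for (i = 0; i < n; i++)
		printf("%02x", id[i]);
	printf("\n");
	return 0;
}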
diff --git a/tools/perf/util/symbol-minimal.c b/tools/perf/util/symbol-minimal.c new file mode 100644 index 000000000000..259f8f2ea9c9 --- /dev/null +++ b/tools/perf/util/symbol-minimal.c | |||
@@ -0,0 +1,307 @@ | |||
1 | #include "symbol.h" | ||
2 | |||
3 | #include <elf.h> | ||
4 | #include <stdio.h> | ||
5 | #include <fcntl.h> | ||
6 | #include <string.h> | ||
7 | #include <byteswap.h> | ||
8 | #include <sys/stat.h> | ||
9 | |||
10 | |||
11 | static bool check_need_swap(int file_endian) | ||
12 | { | ||
13 | const int data = 1; | ||
14 | u8 *check = (u8 *)&data; | ||
15 | int host_endian; | ||
16 | |||
17 | if (check[0] == 1) | ||
18 | host_endian = ELFDATA2LSB; | ||
19 | else | ||
20 | host_endian = ELFDATA2MSB; | ||
21 | |||
22 | return host_endian != file_endian; | ||
23 | } | ||
24 | |||
25 | #define NOTE_ALIGN(sz) (((sz) + 3) & ~3) | ||
26 | |||
27 | #define NT_GNU_BUILD_ID 3 | ||
28 | |||
29 | static int read_build_id(void *note_data, size_t note_len, void *bf, | ||
30 | size_t size, bool need_swap) | ||
31 | { | ||
32 | struct { | ||
33 | u32 n_namesz; | ||
34 | u32 n_descsz; | ||
35 | u32 n_type; | ||
36 | } *nhdr; | ||
37 | void *ptr; | ||
38 | |||
39 | ptr = note_data; | ||
40 | while (ptr < (note_data + note_len)) { | ||
41 | const char *name; | ||
42 | size_t namesz, descsz; | ||
43 | |||
44 | nhdr = ptr; | ||
45 | if (need_swap) { | ||
46 | nhdr->n_namesz = bswap_32(nhdr->n_namesz); | ||
47 | nhdr->n_descsz = bswap_32(nhdr->n_descsz); | ||
48 | nhdr->n_type = bswap_32(nhdr->n_type); | ||
49 | } | ||
50 | |||
51 | namesz = NOTE_ALIGN(nhdr->n_namesz); | ||
52 | descsz = NOTE_ALIGN(nhdr->n_descsz); | ||
53 | |||
54 | ptr += sizeof(*nhdr); | ||
55 | name = ptr; | ||
56 | ptr += namesz; | ||
57 | if (nhdr->n_type == NT_GNU_BUILD_ID && | ||
58 | nhdr->n_namesz == sizeof("GNU")) { | ||
59 | if (memcmp(name, "GNU", sizeof("GNU")) == 0) { | ||
60 | size_t sz = min(size, descsz); | ||
61 | memcpy(bf, ptr, sz); | ||
62 | memset(bf + sz, 0, size - sz); | ||
63 | return 0; | ||
64 | } | ||
65 | } | ||
66 | ptr += descsz; | ||
67 | } | ||
68 | |||
69 | return -1; | ||
70 | } | ||
71 | |||
72 | int filename__read_debuglink(const char *filename __maybe_unused, | ||
73 | char *debuglink __maybe_unused, | ||
74 | size_t size __maybe_unused) | ||
75 | { | ||
76 | return -1; | ||
77 | } | ||
78 | |||
79 | /* | ||
80 | * Just try the PT_NOTE header, otherwise fail | ||
81 | */ | ||
82 | int filename__read_build_id(const char *filename, void *bf, size_t size) | ||
83 | { | ||
84 | FILE *fp; | ||
85 | int ret = -1; | ||
86 | bool need_swap = false; | ||
87 | u8 e_ident[EI_NIDENT]; | ||
88 | size_t buf_size; | ||
89 | void *buf; | ||
90 | int i; | ||
91 | |||
92 | fp = fopen(filename, "r"); | ||
93 | if (fp == NULL) | ||
94 | return -1; | ||
95 | |||
96 | if (fread(e_ident, sizeof(e_ident), 1, fp) != 1) | ||
97 | goto out; | ||
98 | |||
99 | if (memcmp(e_ident, ELFMAG, SELFMAG) || | ||
100 | e_ident[EI_VERSION] != EV_CURRENT) | ||
101 | goto out; | ||
102 | |||
103 | need_swap = check_need_swap(e_ident[EI_DATA]); | ||
104 | |||
105 | /* for simplicity */ | ||
106 | fseek(fp, 0, SEEK_SET); | ||
107 | |||
108 | if (e_ident[EI_CLASS] == ELFCLASS32) { | ||
109 | Elf32_Ehdr ehdr; | ||
110 | Elf32_Phdr *phdr; | ||
111 | |||
112 | if (fread(&ehdr, sizeof(ehdr), 1, fp) != 1) | ||
113 | goto out; | ||
114 | |||
115 | if (need_swap) { | ||
116 | ehdr.e_phoff = bswap_32(ehdr.e_phoff); | ||
117 | ehdr.e_phentsize = bswap_16(ehdr.e_phentsize); | ||
118 | ehdr.e_phnum = bswap_16(ehdr.e_phnum); | ||
119 | } | ||
120 | |||
121 | buf_size = ehdr.e_phentsize * ehdr.e_phnum; | ||
122 | buf = malloc(buf_size); | ||
123 | if (buf == NULL) | ||
124 | goto out; | ||
125 | |||
126 | fseek(fp, ehdr.e_phoff, SEEK_SET); | ||
127 | if (fread(buf, buf_size, 1, fp) != 1) | ||
128 | goto out_free; | ||
129 | |||
130 | for (i = 0, phdr = buf; i < ehdr.e_phnum; i++, phdr++) { | ||
131 | void *tmp; | ||
132 | |||
133 | if (need_swap) { | ||
134 | phdr->p_type = bswap_32(phdr->p_type); | ||
135 | phdr->p_offset = bswap_32(phdr->p_offset); | ||
136 | phdr->p_filesz = bswap_32(phdr->p_filesz); | ||
137 | } | ||
138 | |||
139 | if (phdr->p_type != PT_NOTE) | ||
140 | continue; | ||
141 | |||
142 | buf_size = phdr->p_filesz; | ||
143 | tmp = realloc(buf, buf_size); | ||
144 | if (tmp == NULL) | ||
145 | goto out_free; | ||
146 | |||
147 | buf = tmp; | ||
148 | fseek(fp, phdr->p_offset, SEEK_SET); | ||
149 | if (fread(buf, buf_size, 1, fp) != 1) | ||
150 | goto out_free; | ||
151 | |||
152 | ret = read_build_id(buf, buf_size, bf, size, need_swap); | ||
153 | if (ret == 0) | ||
154 | ret = size; | ||
155 | break; | ||
156 | } | ||
157 | } else { | ||
158 | Elf64_Ehdr ehdr; | ||
159 | Elf64_Phdr *phdr; | ||
160 | |||
161 | if (fread(&ehdr, sizeof(ehdr), 1, fp) != 1) | ||
162 | goto out; | ||
163 | |||
164 | if (need_swap) { | ||
165 | ehdr.e_phoff = bswap_64(ehdr.e_phoff); | ||
166 | ehdr.e_phentsize = bswap_16(ehdr.e_phentsize); | ||
167 | ehdr.e_phnum = bswap_16(ehdr.e_phnum); | ||
168 | } | ||
169 | |||
170 | buf_size = ehdr.e_phentsize * ehdr.e_phnum; | ||
171 | buf = malloc(buf_size); | ||
172 | if (buf == NULL) | ||
173 | goto out; | ||
174 | |||
175 | fseek(fp, ehdr.e_phoff, SEEK_SET); | ||
176 | if (fread(buf, buf_size, 1, fp) != 1) | ||
177 | goto out_free; | ||
178 | |||
179 | for (i = 0, phdr = buf; i < ehdr.e_phnum; i++, phdr++) { | ||
180 | void *tmp; | ||
181 | |||
182 | if (need_swap) { | ||
183 | phdr->p_type = bswap_32(phdr->p_type); | ||
184 | phdr->p_offset = bswap_64(phdr->p_offset); | ||
185 | phdr->p_filesz = bswap_64(phdr->p_filesz); | ||
186 | } | ||
187 | |||
188 | if (phdr->p_type != PT_NOTE) | ||
189 | continue; | ||
190 | |||
191 | buf_size = phdr->p_filesz; | ||
192 | tmp = realloc(buf, buf_size); | ||
193 | if (tmp == NULL) | ||
194 | goto out_free; | ||
195 | |||
196 | buf = tmp; | ||
197 | fseek(fp, phdr->p_offset, SEEK_SET); | ||
198 | if (fread(buf, buf_size, 1, fp) != 1) | ||
199 | goto out_free; | ||
200 | |||
201 | ret = read_build_id(buf, buf_size, bf, size, need_swap); | ||
202 | if (ret == 0) | ||
203 | ret = size; | ||
204 | break; | ||
205 | } | ||
206 | } | ||
207 | out_free: | ||
208 | free(buf); | ||
209 | out: | ||
210 | fclose(fp); | ||
211 | return ret; | ||
212 | } | ||
213 | |||
214 | int sysfs__read_build_id(const char *filename, void *build_id, size_t size) | ||
215 | { | ||
216 | int fd; | ||
217 | int ret = -1; | ||
218 | struct stat stbuf; | ||
219 | size_t buf_size; | ||
220 | void *buf; | ||
221 | |||
222 | fd = open(filename, O_RDONLY); | ||
223 | if (fd < 0) | ||
224 | return -1; | ||
225 | |||
226 | if (fstat(fd, &stbuf) < 0) | ||
227 | goto out; | ||
228 | |||
229 | buf_size = stbuf.st_size; | ||
230 | buf = malloc(buf_size); | ||
231 | if (buf == NULL) | ||
232 | goto out; | ||
233 | |||
234 | if (read(fd, buf, buf_size) != (ssize_t) buf_size) | ||
235 | goto out_free; | ||
236 | |||
237 | ret = read_build_id(buf, buf_size, build_id, size, false); | ||
238 | out_free: | ||
239 | free(buf); | ||
240 | out: | ||
241 | close(fd); | ||
242 | return ret; | ||
243 | } | ||
244 | |||
245 | int symsrc__init(struct symsrc *ss, struct dso *dso __maybe_unused, | ||
246 | const char *name, | ||
247 | enum dso_binary_type type) | ||
248 | { | ||
249 | int fd = open(name, O_RDONLY); | ||
250 | if (fd < 0) | ||
251 | return -1; | ||
252 | |||
253 | ss->name = strdup(name); | ||
254 | if (!ss->name) | ||
255 | goto out_close; | ||
256 | |||
257 | ss->type = type; | ||
258 | ss->fd = fd; | ||
259 | return 0; | ||
260 | out_close: | ||
261 | close(fd); | ||
262 | return -1; | ||
263 | } | ||
264 | |||
265 | bool symsrc__possibly_runtime(struct symsrc *ss __maybe_unused) | ||
266 | { | ||
267 | /* Assume all sym sources could be a runtime image. */ | ||
268 | return true; | ||
269 | } | ||
270 | |||
271 | bool symsrc__has_symtab(struct symsrc *ss __maybe_unused) | ||
272 | { | ||
273 | return false; | ||
274 | } | ||
275 | |||
276 | void symsrc__destroy(struct symsrc *ss) | ||
277 | { | ||
278 | free(ss->name); | ||
279 | close(ss->fd); | ||
280 | } | ||
281 | |||
282 | int dso__synthesize_plt_symbols(struct dso *dso __maybe_unused, | ||
283 | struct symsrc *ss __maybe_unused, | ||
284 | struct map *map __maybe_unused, | ||
285 | symbol_filter_t filter __maybe_unused) | ||
286 | { | ||
287 | return 0; | ||
288 | } | ||
289 | |||
290 | int dso__load_sym(struct dso *dso, struct map *map __maybe_unused, | ||
291 | struct symsrc *ss, | ||
292 | struct symsrc *runtime_ss __maybe_unused, | ||
293 | symbol_filter_t filter __maybe_unused, | ||
294 | int kmodule __maybe_unused) | ||
295 | { | ||
296 | unsigned char build_id[BUILD_ID_SIZE]; | ||
297 | |||
298 | if (filename__read_build_id(ss->name, build_id, BUILD_ID_SIZE) > 0) { | ||
299 | dso__set_build_id(dso, build_id); | ||
300 | return 1; | ||
301 | } | ||
302 | return 0; | ||
303 | } | ||
304 | |||
305 | void symbol__elf_init(void) | ||
306 | { | ||
307 | } | ||
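check_need_swap() in this libelf-free fallback decides whether the program-header fields must be byte-swapped by probing the host byte order and comparing it with e_ident[EI_DATA] from the file. A standalone sketch of that probe, where host_elf_data is a made-up name and ELFDATA2LSB/ELFDATA2MSB come from <elf.h>:

#include <elf.h>
#include <stdio.h>

/* Look at the first byte of a known integer to learn the host byte order,
 * expressed in the same ELFDATA* terms the file header uses. */
static int host_elf_data(void)
{
	const int probe = 1;

	return *(const unsigned char *)&probe == 1 ? ELFDATA2LSB : ELFDATA2MSB;
}

int main(void)
{
	int file_endian = ELFDATA2MSB;	/* e.g. a big-endian DSO */

	printf("need byte swap: %s\n",
	       host_elf_data() != file_endian ? "yes" : "no");
	return 0;
}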
diff --git a/tools/perf/util/symbol.c b/tools/perf/util/symbol.c index 8b63b678e127..e2e8c697cffe 100644 --- a/tools/perf/util/symbol.c +++ b/tools/perf/util/symbol.c | |||
@@ -15,8 +15,6 @@ | |||
15 | #include "symbol.h" | 15 | #include "symbol.h" |
16 | #include "strlist.h" | 16 | #include "strlist.h" |
17 | 17 | ||
18 | #include <libelf.h> | ||
19 | #include <gelf.h> | ||
20 | #include <elf.h> | 18 | #include <elf.h> |
21 | #include <limits.h> | 19 | #include <limits.h> |
22 | #include <sys/utsname.h> | 20 | #include <sys/utsname.h> |
@@ -25,15 +23,7 @@ | |||
25 | #define KSYM_NAME_LEN 256 | 23 | #define KSYM_NAME_LEN 256 |
26 | #endif | 24 | #endif |
27 | 25 | ||
28 | #ifndef NT_GNU_BUILD_ID | ||
29 | #define NT_GNU_BUILD_ID 3 | ||
30 | #endif | ||
31 | |||
32 | static void dso_cache__free(struct rb_root *root); | 26 | static void dso_cache__free(struct rb_root *root); |
33 | static bool dso__build_id_equal(const struct dso *dso, u8 *build_id); | ||
34 | static int elf_read_build_id(Elf *elf, void *bf, size_t size); | ||
35 | static void dsos__add(struct list_head *head, struct dso *dso); | ||
36 | static struct map *map__new2(u64 start, struct dso *dso, enum map_type type); | ||
37 | static int dso__load_kernel_sym(struct dso *dso, struct map *map, | 27 | static int dso__load_kernel_sym(struct dso *dso, struct map *map, |
38 | symbol_filter_t filter); | 28 | symbol_filter_t filter); |
39 | static int dso__load_guest_kernel_sym(struct dso *dso, struct map *map, | 29 | static int dso__load_guest_kernel_sym(struct dso *dso, struct map *map, |
@@ -170,7 +160,7 @@ static int choose_best_symbol(struct symbol *syma, struct symbol *symb) | |||
170 | return SYMBOL_B; | 160 | return SYMBOL_B; |
171 | } | 161 | } |
172 | 162 | ||
173 | static void symbols__fixup_duplicate(struct rb_root *symbols) | 163 | void symbols__fixup_duplicate(struct rb_root *symbols) |
174 | { | 164 | { |
175 | struct rb_node *nd; | 165 | struct rb_node *nd; |
176 | struct symbol *curr, *next; | 166 | struct symbol *curr, *next; |
@@ -199,7 +189,7 @@ again: | |||
199 | } | 189 | } |
200 | } | 190 | } |
201 | 191 | ||
202 | static void symbols__fixup_end(struct rb_root *symbols) | 192 | void symbols__fixup_end(struct rb_root *symbols) |
203 | { | 193 | { |
204 | struct rb_node *nd, *prevnd = rb_first(symbols); | 194 | struct rb_node *nd, *prevnd = rb_first(symbols); |
205 | struct symbol *curr, *prev; | 195 | struct symbol *curr, *prev; |
@@ -222,7 +212,7 @@ static void symbols__fixup_end(struct rb_root *symbols) | |||
222 | curr->end = roundup(curr->start, 4096); | 212 | curr->end = roundup(curr->start, 4096); |
223 | } | 213 | } |
224 | 214 | ||
225 | static void __map_groups__fixup_end(struct map_groups *mg, enum map_type type) | 215 | void __map_groups__fixup_end(struct map_groups *mg, enum map_type type) |
226 | { | 216 | { |
227 | struct map *prev, *curr; | 217 | struct map *prev, *curr; |
228 | struct rb_node *nd, *prevnd = rb_first(&mg->maps[type]); | 218 | struct rb_node *nd, *prevnd = rb_first(&mg->maps[type]); |
@@ -252,8 +242,7 @@ static void map_groups__fixup_end(struct map_groups *mg) | |||
252 | __map_groups__fixup_end(mg, i); | 242 | __map_groups__fixup_end(mg, i); |
253 | } | 243 | } |
254 | 244 | ||
255 | static struct symbol *symbol__new(u64 start, u64 len, u8 binding, | 245 | struct symbol *symbol__new(u64 start, u64 len, u8 binding, const char *name) |
256 | const char *name) | ||
257 | { | 246 | { |
258 | size_t namelen = strlen(name) + 1; | 247 | size_t namelen = strlen(name) + 1; |
259 | struct symbol *sym = calloc(1, (symbol_conf.priv_size + | 248 | struct symbol *sym = calloc(1, (symbol_conf.priv_size + |
@@ -390,7 +379,7 @@ void dso__set_build_id(struct dso *dso, void *build_id) | |||
390 | dso->has_build_id = 1; | 379 | dso->has_build_id = 1; |
391 | } | 380 | } |
392 | 381 | ||
393 | static void symbols__insert(struct rb_root *symbols, struct symbol *sym) | 382 | void symbols__insert(struct rb_root *symbols, struct symbol *sym) |
394 | { | 383 | { |
395 | struct rb_node **p = &symbols->rb_node; | 384 | struct rb_node **p = &symbols->rb_node; |
396 | struct rb_node *parent = NULL; | 385 | struct rb_node *parent = NULL; |
@@ -574,7 +563,7 @@ size_t dso__fprintf(struct dso *dso, enum map_type type, FILE *fp) | |||
574 | 563 | ||
575 | int kallsyms__parse(const char *filename, void *arg, | 564 | int kallsyms__parse(const char *filename, void *arg, |
576 | int (*process_symbol)(void *arg, const char *name, | 565 | int (*process_symbol)(void *arg, const char *name, |
577 | char type, u64 start, u64 end)) | 566 | char type, u64 start)) |
578 | { | 567 | { |
579 | char *line = NULL; | 568 | char *line = NULL; |
580 | size_t n; | 569 | size_t n; |
@@ -614,13 +603,8 @@ int kallsyms__parse(const char *filename, void *arg, | |||
614 | break; | 603 | break; |
615 | } | 604 | } |
616 | 605 | ||
617 | /* | ||
618 | * module symbols are not sorted so we add all | ||
619 | * symbols with zero length and rely on | ||
620 | * symbols__fixup_end() to fix it up. | ||
621 | */ | ||
622 | err = process_symbol(arg, symbol_name, | 606 | err = process_symbol(arg, symbol_name, |
623 | symbol_type, start, start); | 607 | symbol_type, start); |
624 | if (err) | 608 | if (err) |
625 | break; | 609 | break; |
626 | } | 610 | } |
@@ -647,7 +631,7 @@ static u8 kallsyms2elf_type(char type) | |||
647 | } | 631 | } |
648 | 632 | ||
649 | static int map__process_kallsym_symbol(void *arg, const char *name, | 633 | static int map__process_kallsym_symbol(void *arg, const char *name, |
650 | char type, u64 start, u64 end) | 634 | char type, u64 start) |
651 | { | 635 | { |
652 | struct symbol *sym; | 636 | struct symbol *sym; |
653 | struct process_kallsyms_args *a = arg; | 637 | struct process_kallsyms_args *a = arg; |
@@ -656,8 +640,12 @@ static int map__process_kallsym_symbol(void *arg, const char *name, | |||
656 | if (!symbol_type__is_a(type, a->map->type)) | 640 | if (!symbol_type__is_a(type, a->map->type)) |
657 | return 0; | 641 | return 0; |
658 | 642 | ||
659 | sym = symbol__new(start, end - start + 1, | 643 | /* |
660 | kallsyms2elf_type(type), name); | 644 | * module symbols are not sorted so we add all |
645 | * symbols, setting length to 0, and rely on | ||
646 | * symbols__fixup_end() to fix it up. | ||
647 | */ | ||
648 | sym = symbol__new(start, 0, kallsyms2elf_type(type), name); | ||
661 | if (sym == NULL) | 649 | if (sym == NULL) |
662 | return -ENOMEM; | 650 | return -ENOMEM; |
663 | /* | 651 | /* |
@@ -904,556 +892,7 @@ out_failure: | |||
904 | return -1; | 892 | return -1; |
905 | } | 893 | } |
906 | 894 | ||
907 | /** | 895 | bool dso__build_id_equal(const struct dso *dso, u8 *build_id) |
908 | * elf_symtab__for_each_symbol - iterate thru all the symbols | ||
909 | * | ||
910 | * @syms: struct elf_symtab instance to iterate | ||
911 | * @idx: uint32_t idx | ||
912 | * @sym: GElf_Sym iterator | ||
913 | */ | ||
914 | #define elf_symtab__for_each_symbol(syms, nr_syms, idx, sym) \ | ||
915 | for (idx = 0, gelf_getsym(syms, idx, &sym);\ | ||
916 | idx < nr_syms; \ | ||
917 | idx++, gelf_getsym(syms, idx, &sym)) | ||
918 | |||
919 | static inline uint8_t elf_sym__type(const GElf_Sym *sym) | ||
920 | { | ||
921 | return GELF_ST_TYPE(sym->st_info); | ||
922 | } | ||
923 | |||
924 | static inline int elf_sym__is_function(const GElf_Sym *sym) | ||
925 | { | ||
926 | return elf_sym__type(sym) == STT_FUNC && | ||
927 | sym->st_name != 0 && | ||
928 | sym->st_shndx != SHN_UNDEF; | ||
929 | } | ||
930 | |||
931 | static inline bool elf_sym__is_object(const GElf_Sym *sym) | ||
932 | { | ||
933 | return elf_sym__type(sym) == STT_OBJECT && | ||
934 | sym->st_name != 0 && | ||
935 | sym->st_shndx != SHN_UNDEF; | ||
936 | } | ||
937 | |||
938 | static inline int elf_sym__is_label(const GElf_Sym *sym) | ||
939 | { | ||
940 | return elf_sym__type(sym) == STT_NOTYPE && | ||
941 | sym->st_name != 0 && | ||
942 | sym->st_shndx != SHN_UNDEF && | ||
943 | sym->st_shndx != SHN_ABS; | ||
944 | } | ||
945 | |||
946 | static inline const char *elf_sec__name(const GElf_Shdr *shdr, | ||
947 | const Elf_Data *secstrs) | ||
948 | { | ||
949 | return secstrs->d_buf + shdr->sh_name; | ||
950 | } | ||
951 | |||
952 | static inline int elf_sec__is_text(const GElf_Shdr *shdr, | ||
953 | const Elf_Data *secstrs) | ||
954 | { | ||
955 | return strstr(elf_sec__name(shdr, secstrs), "text") != NULL; | ||
956 | } | ||
957 | |||
958 | static inline bool elf_sec__is_data(const GElf_Shdr *shdr, | ||
959 | const Elf_Data *secstrs) | ||
960 | { | ||
961 | return strstr(elf_sec__name(shdr, secstrs), "data") != NULL; | ||
962 | } | ||
963 | |||
964 | static inline const char *elf_sym__name(const GElf_Sym *sym, | ||
965 | const Elf_Data *symstrs) | ||
966 | { | ||
967 | return symstrs->d_buf + sym->st_name; | ||
968 | } | ||
969 | |||
970 | static Elf_Scn *elf_section_by_name(Elf *elf, GElf_Ehdr *ep, | ||
971 | GElf_Shdr *shp, const char *name, | ||
972 | size_t *idx) | ||
973 | { | ||
974 | Elf_Scn *sec = NULL; | ||
975 | size_t cnt = 1; | ||
976 | |||
977 | while ((sec = elf_nextscn(elf, sec)) != NULL) { | ||
978 | char *str; | ||
979 | |||
980 | gelf_getshdr(sec, shp); | ||
981 | str = elf_strptr(elf, ep->e_shstrndx, shp->sh_name); | ||
982 | if (!strcmp(name, str)) { | ||
983 | if (idx) | ||
984 | *idx = cnt; | ||
985 | break; | ||
986 | } | ||
987 | ++cnt; | ||
988 | } | ||
989 | |||
990 | return sec; | ||
991 | } | ||
992 | |||
993 | #define elf_section__for_each_rel(reldata, pos, pos_mem, idx, nr_entries) \ | ||
994 | for (idx = 0, pos = gelf_getrel(reldata, 0, &pos_mem); \ | ||
995 | idx < nr_entries; \ | ||
996 | ++idx, pos = gelf_getrel(reldata, idx, &pos_mem)) | ||
997 | |||
998 | #define elf_section__for_each_rela(reldata, pos, pos_mem, idx, nr_entries) \ | ||
999 | for (idx = 0, pos = gelf_getrela(reldata, 0, &pos_mem); \ | ||
1000 | idx < nr_entries; \ | ||
1001 | ++idx, pos = gelf_getrela(reldata, idx, &pos_mem)) | ||
1002 | |||
1003 | /* | ||
1004 | * We need to check if we have a .dynsym, so that we can handle the | ||
1005 | * .plt, synthesizing its symbols, that aren't on the symtabs (be it | ||
1006 | * .dynsym or .symtab). | ||
1007 | * And always look at the original dso, not at debuginfo packages, that | ||
1008 | * have the PLT data stripped out (shdr_rel_plt.sh_type == SHT_NOBITS). | ||
1009 | */ | ||
1010 | static int | ||
1011 | dso__synthesize_plt_symbols(struct dso *dso, char *name, struct map *map, | ||
1012 | symbol_filter_t filter) | ||
1013 | { | ||
1014 | uint32_t nr_rel_entries, idx; | ||
1015 | GElf_Sym sym; | ||
1016 | u64 plt_offset; | ||
1017 | GElf_Shdr shdr_plt; | ||
1018 | struct symbol *f; | ||
1019 | GElf_Shdr shdr_rel_plt, shdr_dynsym; | ||
1020 | Elf_Data *reldata, *syms, *symstrs; | ||
1021 | Elf_Scn *scn_plt_rel, *scn_symstrs, *scn_dynsym; | ||
1022 | size_t dynsym_idx; | ||
1023 | GElf_Ehdr ehdr; | ||
1024 | char sympltname[1024]; | ||
1025 | Elf *elf; | ||
1026 | int nr = 0, symidx, fd, err = 0; | ||
1027 | |||
1028 | fd = open(name, O_RDONLY); | ||
1029 | if (fd < 0) | ||
1030 | goto out; | ||
1031 | |||
1032 | elf = elf_begin(fd, PERF_ELF_C_READ_MMAP, NULL); | ||
1033 | if (elf == NULL) | ||
1034 | goto out_close; | ||
1035 | |||
1036 | if (gelf_getehdr(elf, &ehdr) == NULL) | ||
1037 | goto out_elf_end; | ||
1038 | |||
1039 | scn_dynsym = elf_section_by_name(elf, &ehdr, &shdr_dynsym, | ||
1040 | ".dynsym", &dynsym_idx); | ||
1041 | if (scn_dynsym == NULL) | ||
1042 | goto out_elf_end; | ||
1043 | |||
1044 | scn_plt_rel = elf_section_by_name(elf, &ehdr, &shdr_rel_plt, | ||
1045 | ".rela.plt", NULL); | ||
1046 | if (scn_plt_rel == NULL) { | ||
1047 | scn_plt_rel = elf_section_by_name(elf, &ehdr, &shdr_rel_plt, | ||
1048 | ".rel.plt", NULL); | ||
1049 | if (scn_plt_rel == NULL) | ||
1050 | goto out_elf_end; | ||
1051 | } | ||
1052 | |||
1053 | err = -1; | ||
1054 | |||
1055 | if (shdr_rel_plt.sh_link != dynsym_idx) | ||
1056 | goto out_elf_end; | ||
1057 | |||
1058 | if (elf_section_by_name(elf, &ehdr, &shdr_plt, ".plt", NULL) == NULL) | ||
1059 | goto out_elf_end; | ||
1060 | |||
1061 | /* | ||
1062 | * Fetch the relocation section to find the idxes to the GOT | ||
1063 | * and the symbols in the .dynsym they refer to. | ||
1064 | */ | ||
1065 | reldata = elf_getdata(scn_plt_rel, NULL); | ||
1066 | if (reldata == NULL) | ||
1067 | goto out_elf_end; | ||
1068 | |||
1069 | syms = elf_getdata(scn_dynsym, NULL); | ||
1070 | if (syms == NULL) | ||
1071 | goto out_elf_end; | ||
1072 | |||
1073 | scn_symstrs = elf_getscn(elf, shdr_dynsym.sh_link); | ||
1074 | if (scn_symstrs == NULL) | ||
1075 | goto out_elf_end; | ||
1076 | |||
1077 | symstrs = elf_getdata(scn_symstrs, NULL); | ||
1078 | if (symstrs == NULL) | ||
1079 | goto out_elf_end; | ||
1080 | |||
1081 | nr_rel_entries = shdr_rel_plt.sh_size / shdr_rel_plt.sh_entsize; | ||
1082 | plt_offset = shdr_plt.sh_offset; | ||
1083 | |||
1084 | if (shdr_rel_plt.sh_type == SHT_RELA) { | ||
1085 | GElf_Rela pos_mem, *pos; | ||
1086 | |||
1087 | elf_section__for_each_rela(reldata, pos, pos_mem, idx, | ||
1088 | nr_rel_entries) { | ||
1089 | symidx = GELF_R_SYM(pos->r_info); | ||
1090 | plt_offset += shdr_plt.sh_entsize; | ||
1091 | gelf_getsym(syms, symidx, &sym); | ||
1092 | snprintf(sympltname, sizeof(sympltname), | ||
1093 | "%s@plt", elf_sym__name(&sym, symstrs)); | ||
1094 | |||
1095 | f = symbol__new(plt_offset, shdr_plt.sh_entsize, | ||
1096 | STB_GLOBAL, sympltname); | ||
1097 | if (!f) | ||
1098 | goto out_elf_end; | ||
1099 | |||
1100 | if (filter && filter(map, f)) | ||
1101 | symbol__delete(f); | ||
1102 | else { | ||
1103 | symbols__insert(&dso->symbols[map->type], f); | ||
1104 | ++nr; | ||
1105 | } | ||
1106 | } | ||
1107 | } else if (shdr_rel_plt.sh_type == SHT_REL) { | ||
1108 | GElf_Rel pos_mem, *pos; | ||
1109 | elf_section__for_each_rel(reldata, pos, pos_mem, idx, | ||
1110 | nr_rel_entries) { | ||
1111 | symidx = GELF_R_SYM(pos->r_info); | ||
1112 | plt_offset += shdr_plt.sh_entsize; | ||
1113 | gelf_getsym(syms, symidx, &sym); | ||
1114 | snprintf(sympltname, sizeof(sympltname), | ||
1115 | "%s@plt", elf_sym__name(&sym, symstrs)); | ||
1116 | |||
1117 | f = symbol__new(plt_offset, shdr_plt.sh_entsize, | ||
1118 | STB_GLOBAL, sympltname); | ||
1119 | if (!f) | ||
1120 | goto out_elf_end; | ||
1121 | |||
1122 | if (filter && filter(map, f)) | ||
1123 | symbol__delete(f); | ||
1124 | else { | ||
1125 | symbols__insert(&dso->symbols[map->type], f); | ||
1126 | ++nr; | ||
1127 | } | ||
1128 | } | ||
1129 | } | ||
1130 | |||
1131 | err = 0; | ||
1132 | out_elf_end: | ||
1133 | elf_end(elf); | ||
1134 | out_close: | ||
1135 | close(fd); | ||
1136 | |||
1137 | if (err == 0) | ||
1138 | return nr; | ||
1139 | out: | ||
1140 | pr_debug("%s: problems reading %s PLT info.\n", | ||
1141 | __func__, dso->long_name); | ||
1142 | return 0; | ||
1143 | } | ||
1144 | |||
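The hunk above synthesizes one pseudo-symbol per PLT slot because PLT stubs never appear in .symtab: each .rela.plt/.rel.plt entry names the .dynsym symbol its slot resolves, and the slot address is derived by stepping through .plt in sh_entsize-sized strides. A minimal standalone sketch of that naming logic, assuming the relevant sections and their data have already been located (the helper name and parameters are illustrative, not perf API):

#include <gelf.h>
#include <stdio.h>

static void print_plt_symbols(Elf_Data *reldata, Elf_Data *dynsyms,
			      Elf_Data *dynstr, GElf_Shdr *shdr_rel_plt,
			      GElf_Shdr *shdr_plt)
{
	size_t i, nr = shdr_rel_plt->sh_size / shdr_rel_plt->sh_entsize;
	GElf_Addr plt_offset = shdr_plt->sh_offset;

	for (i = 0; i < nr; i++) {
		GElf_Rela rela;
		GElf_Sym sym;

		if (gelf_getrela(reldata, i, &rela) == NULL)
			break;
		/* slot 0 of .plt is reserved, so advance before naming */
		plt_offset += shdr_plt->sh_entsize;
		if (gelf_getsym(dynsyms, GELF_R_SYM(rela.r_info), &sym) == NULL)
			continue;
		printf("%#llx %s@plt\n", (unsigned long long)plt_offset,
		       (const char *)dynstr->d_buf + sym.st_name);
	}
}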
1145 | static bool elf_sym__is_a(GElf_Sym *sym, enum map_type type) | ||
1146 | { | ||
1147 | switch (type) { | ||
1148 | case MAP__FUNCTION: | ||
1149 | return elf_sym__is_function(sym); | ||
1150 | case MAP__VARIABLE: | ||
1151 | return elf_sym__is_object(sym); | ||
1152 | default: | ||
1153 | return false; | ||
1154 | } | ||
1155 | } | ||
1156 | |||
1157 | static bool elf_sec__is_a(GElf_Shdr *shdr, Elf_Data *secstrs, | ||
1158 | enum map_type type) | ||
1159 | { | ||
1160 | switch (type) { | ||
1161 | case MAP__FUNCTION: | ||
1162 | return elf_sec__is_text(shdr, secstrs); | ||
1163 | case MAP__VARIABLE: | ||
1164 | return elf_sec__is_data(shdr, secstrs); | ||
1165 | default: | ||
1166 | return false; | ||
1167 | } | ||
1168 | } | ||
1169 | |||
1170 | static size_t elf_addr_to_index(Elf *elf, GElf_Addr addr) | ||
1171 | { | ||
1172 | Elf_Scn *sec = NULL; | ||
1173 | GElf_Shdr shdr; | ||
1174 | size_t cnt = 1; | ||
1175 | |||
1176 | while ((sec = elf_nextscn(elf, sec)) != NULL) { | ||
1177 | gelf_getshdr(sec, &shdr); | ||
1178 | |||
1179 | if ((addr >= shdr.sh_addr) && | ||
1180 | (addr < (shdr.sh_addr + shdr.sh_size))) | ||
1181 | return cnt; | ||
1182 | |||
1183 | ++cnt; | ||
1184 | } | ||
1185 | |||
1186 | return -1; | ||
1187 | } | ||
1188 | |||
1189 | static int dso__swap_init(struct dso *dso, unsigned char eidata) | ||
1190 | { | ||
1191 | static unsigned int const endian = 1; | ||
1192 | |||
1193 | dso->needs_swap = DSO_SWAP__NO; | ||
1194 | |||
1195 | switch (eidata) { | ||
1196 | case ELFDATA2LSB: | ||
1197 | /* We are big endian, DSO is little endian. */ | ||
1198 | if (*(unsigned char const *)&endian != 1) | ||
1199 | dso->needs_swap = DSO_SWAP__YES; | ||
1200 | break; | ||
1201 | |||
1202 | case ELFDATA2MSB: | ||
1203 | /* We are little endian, DSO is big endian. */ | ||
1204 | if (*(unsigned char const *)&endian != 0) | ||
1205 | dso->needs_swap = DSO_SWAP__YES; | ||
1206 | break; | ||
1207 | |||
1208 | default: | ||
1209 | pr_err("unrecognized DSO data encoding %d\n", eidata); | ||
1210 | return -EINVAL; | ||
1211 | } | ||
1212 | |||
1213 | return 0; | ||
1214 | } | ||
1215 | |||
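dso__swap_init() above compares the DSO's EI_DATA byte against the host's own byte order, which it probes with the classic one-word trick. The same probe pulled out as a self-contained helper (illustrative only):

#include <stdbool.h>

static bool host_is_little_endian(void)
{
	static const unsigned int probe = 1;

	/* on a little-endian host the low-order byte is stored first,
	 * so the first byte of the value 1 reads back as 1 */
	return *(const unsigned char *)&probe == 1;
}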
1216 | static int dso__load_sym(struct dso *dso, struct map *map, const char *name, | ||
1217 | int fd, symbol_filter_t filter, int kmodule, | ||
1218 | int want_symtab) | ||
1219 | { | ||
1220 | struct kmap *kmap = dso->kernel ? map__kmap(map) : NULL; | ||
1221 | struct map *curr_map = map; | ||
1222 | struct dso *curr_dso = dso; | ||
1223 | Elf_Data *symstrs, *secstrs; | ||
1224 | uint32_t nr_syms; | ||
1225 | int err = -1; | ||
1226 | uint32_t idx; | ||
1227 | GElf_Ehdr ehdr; | ||
1228 | GElf_Shdr shdr, opdshdr; | ||
1229 | Elf_Data *syms, *opddata = NULL; | ||
1230 | GElf_Sym sym; | ||
1231 | Elf_Scn *sec, *sec_strndx, *opdsec; | ||
1232 | Elf *elf; | ||
1233 | int nr = 0; | ||
1234 | size_t opdidx = 0; | ||
1235 | |||
1236 | elf = elf_begin(fd, PERF_ELF_C_READ_MMAP, NULL); | ||
1237 | if (elf == NULL) { | ||
1238 | pr_debug("%s: cannot read %s ELF file.\n", __func__, name); | ||
1239 | goto out_close; | ||
1240 | } | ||
1241 | |||
1242 | if (gelf_getehdr(elf, &ehdr) == NULL) { | ||
1243 | pr_debug("%s: cannot get elf header.\n", __func__); | ||
1244 | goto out_elf_end; | ||
1245 | } | ||
1246 | |||
1247 | if (dso__swap_init(dso, ehdr.e_ident[EI_DATA])) | ||
1248 | goto out_elf_end; | ||
1249 | |||
1250 | /* Always reject images with a mismatched build-id: */ | ||
1251 | if (dso->has_build_id) { | ||
1252 | u8 build_id[BUILD_ID_SIZE]; | ||
1253 | |||
1254 | if (elf_read_build_id(elf, build_id, BUILD_ID_SIZE) < 0) | ||
1255 | goto out_elf_end; | ||
1256 | |||
1257 | if (!dso__build_id_equal(dso, build_id)) | ||
1258 | goto out_elf_end; | ||
1259 | } | ||
1260 | |||
1261 | sec = elf_section_by_name(elf, &ehdr, &shdr, ".symtab", NULL); | ||
1262 | if (sec == NULL) { | ||
1263 | if (want_symtab) | ||
1264 | goto out_elf_end; | ||
1265 | |||
1266 | sec = elf_section_by_name(elf, &ehdr, &shdr, ".dynsym", NULL); | ||
1267 | if (sec == NULL) | ||
1268 | goto out_elf_end; | ||
1269 | } | ||
1270 | |||
1271 | opdsec = elf_section_by_name(elf, &ehdr, &opdshdr, ".opd", &opdidx); | ||
1272 | if (opdshdr.sh_type != SHT_PROGBITS) | ||
1273 | opdsec = NULL; | ||
1274 | if (opdsec) | ||
1275 | opddata = elf_rawdata(opdsec, NULL); | ||
1276 | |||
1277 | syms = elf_getdata(sec, NULL); | ||
1278 | if (syms == NULL) | ||
1279 | goto out_elf_end; | ||
1280 | |||
1281 | sec = elf_getscn(elf, shdr.sh_link); | ||
1282 | if (sec == NULL) | ||
1283 | goto out_elf_end; | ||
1284 | |||
1285 | symstrs = elf_getdata(sec, NULL); | ||
1286 | if (symstrs == NULL) | ||
1287 | goto out_elf_end; | ||
1288 | |||
1289 | sec_strndx = elf_getscn(elf, ehdr.e_shstrndx); | ||
1290 | if (sec_strndx == NULL) | ||
1291 | goto out_elf_end; | ||
1292 | |||
1293 | secstrs = elf_getdata(sec_strndx, NULL); | ||
1294 | if (secstrs == NULL) | ||
1295 | goto out_elf_end; | ||
1296 | |||
1297 | nr_syms = shdr.sh_size / shdr.sh_entsize; | ||
1298 | |||
1299 | memset(&sym, 0, sizeof(sym)); | ||
1300 | if (dso->kernel == DSO_TYPE_USER) { | ||
1301 | dso->adjust_symbols = (ehdr.e_type == ET_EXEC || | ||
1302 | elf_section_by_name(elf, &ehdr, &shdr, | ||
1303 | ".gnu.prelink_undo", | ||
1304 | NULL) != NULL); | ||
1305 | } else { | ||
1306 | dso->adjust_symbols = 0; | ||
1307 | } | ||
1308 | elf_symtab__for_each_symbol(syms, nr_syms, idx, sym) { | ||
1309 | struct symbol *f; | ||
1310 | const char *elf_name = elf_sym__name(&sym, symstrs); | ||
1311 | char *demangled = NULL; | ||
1312 | int is_label = elf_sym__is_label(&sym); | ||
1313 | const char *section_name; | ||
1314 | |||
1315 | if (kmap && kmap->ref_reloc_sym && kmap->ref_reloc_sym->name && | ||
1316 | strcmp(elf_name, kmap->ref_reloc_sym->name) == 0) | ||
1317 | kmap->ref_reloc_sym->unrelocated_addr = sym.st_value; | ||
1318 | |||
1319 | if (!is_label && !elf_sym__is_a(&sym, map->type)) | ||
1320 | continue; | ||
1321 | |||
1322 | /* Reject ARM ELF "mapping symbols": these aren't unique and | ||
1323 | * don't identify functions, so will confuse the profile | ||
1324 | * output: */ | ||
1325 | if (ehdr.e_machine == EM_ARM) { | ||
1326 | if (!strcmp(elf_name, "$a") || | ||
1327 | !strcmp(elf_name, "$d") || | ||
1328 | !strcmp(elf_name, "$t")) | ||
1329 | continue; | ||
1330 | } | ||
1331 | |||
1332 | if (opdsec && sym.st_shndx == opdidx) { | ||
1333 | u32 offset = sym.st_value - opdshdr.sh_addr; | ||
1334 | u64 *opd = opddata->d_buf + offset; | ||
1335 | sym.st_value = DSO__SWAP(dso, u64, *opd); | ||
1336 | sym.st_shndx = elf_addr_to_index(elf, sym.st_value); | ||
1337 | } | ||
1338 | |||
1339 | sec = elf_getscn(elf, sym.st_shndx); | ||
1340 | if (!sec) | ||
1341 | goto out_elf_end; | ||
1342 | |||
1343 | gelf_getshdr(sec, &shdr); | ||
1344 | |||
1345 | if (is_label && !elf_sec__is_a(&shdr, secstrs, map->type)) | ||
1346 | continue; | ||
1347 | |||
1348 | section_name = elf_sec__name(&shdr, secstrs); | ||
1349 | |||
1350 | /* On ARM, symbols for thumb functions have 1 added to | ||
1351 | * the symbol address as a flag - remove it */ | ||
1352 | if ((ehdr.e_machine == EM_ARM) && | ||
1353 | (map->type == MAP__FUNCTION) && | ||
1354 | (sym.st_value & 1)) | ||
1355 | --sym.st_value; | ||
1356 | |||
1357 | if (dso->kernel != DSO_TYPE_USER || kmodule) { | ||
1358 | char dso_name[PATH_MAX]; | ||
1359 | |||
1360 | if (strcmp(section_name, | ||
1361 | (curr_dso->short_name + | ||
1362 | dso->short_name_len)) == 0) | ||
1363 | goto new_symbol; | ||
1364 | |||
1365 | if (strcmp(section_name, ".text") == 0) { | ||
1366 | curr_map = map; | ||
1367 | curr_dso = dso; | ||
1368 | goto new_symbol; | ||
1369 | } | ||
1370 | |||
1371 | snprintf(dso_name, sizeof(dso_name), | ||
1372 | "%s%s", dso->short_name, section_name); | ||
1373 | |||
1374 | curr_map = map_groups__find_by_name(kmap->kmaps, map->type, dso_name); | ||
1375 | if (curr_map == NULL) { | ||
1376 | u64 start = sym.st_value; | ||
1377 | |||
1378 | if (kmodule) | ||
1379 | start += map->start + shdr.sh_offset; | ||
1380 | |||
1381 | curr_dso = dso__new(dso_name); | ||
1382 | if (curr_dso == NULL) | ||
1383 | goto out_elf_end; | ||
1384 | curr_dso->kernel = dso->kernel; | ||
1385 | curr_dso->long_name = dso->long_name; | ||
1386 | curr_dso->long_name_len = dso->long_name_len; | ||
1387 | curr_map = map__new2(start, curr_dso, | ||
1388 | map->type); | ||
1389 | if (curr_map == NULL) { | ||
1390 | dso__delete(curr_dso); | ||
1391 | goto out_elf_end; | ||
1392 | } | ||
1393 | curr_map->map_ip = identity__map_ip; | ||
1394 | curr_map->unmap_ip = identity__map_ip; | ||
1395 | curr_dso->symtab_type = dso->symtab_type; | ||
1396 | map_groups__insert(kmap->kmaps, curr_map); | ||
1397 | dsos__add(&dso->node, curr_dso); | ||
1398 | dso__set_loaded(curr_dso, map->type); | ||
1399 | } else | ||
1400 | curr_dso = curr_map->dso; | ||
1401 | |||
1402 | goto new_symbol; | ||
1403 | } | ||
1404 | |||
1405 | if (curr_dso->adjust_symbols) { | ||
1406 | pr_debug4("%s: adjusting symbol: st_value: %#" PRIx64 " " | ||
1407 | "sh_addr: %#" PRIx64 " sh_offset: %#" PRIx64 "\n", __func__, | ||
1408 | (u64)sym.st_value, (u64)shdr.sh_addr, | ||
1409 | (u64)shdr.sh_offset); | ||
1410 | sym.st_value -= shdr.sh_addr - shdr.sh_offset; | ||
1411 | } | ||
1412 | /* | ||
1413 | * We need to figure out if the object was created from C++ sources | ||
1414 | * DWARF DW_compile_unit has this, but we don't always have access | ||
1415 | * to it... | ||
1416 | */ | ||
1417 | demangled = bfd_demangle(NULL, elf_name, DMGL_PARAMS | DMGL_ANSI); | ||
1418 | if (demangled != NULL) | ||
1419 | elf_name = demangled; | ||
1420 | new_symbol: | ||
1421 | f = symbol__new(sym.st_value, sym.st_size, | ||
1422 | GELF_ST_BIND(sym.st_info), elf_name); | ||
1423 | free(demangled); | ||
1424 | if (!f) | ||
1425 | goto out_elf_end; | ||
1426 | |||
1427 | if (filter && filter(curr_map, f)) | ||
1428 | symbol__delete(f); | ||
1429 | else { | ||
1430 | symbols__insert(&curr_dso->symbols[curr_map->type], f); | ||
1431 | nr++; | ||
1432 | } | ||
1433 | } | ||
1434 | |||
1435 | /* | ||
1436 | * For misannotated, zeroed, ASM function sizes. | ||
1437 | */ | ||
1438 | if (nr > 0) { | ||
1439 | symbols__fixup_duplicate(&dso->symbols[map->type]); | ||
1440 | symbols__fixup_end(&dso->symbols[map->type]); | ||
1441 | if (kmap) { | ||
1442 | /* | ||
1443 | * We need to fixup this here too because we create new | ||
1444 | * maps here, for things like vsyscall sections. | ||
1445 | */ | ||
1446 | __map_groups__fixup_end(kmap->kmaps, map->type); | ||
1447 | } | ||
1448 | } | ||
1449 | err = nr; | ||
1450 | out_elf_end: | ||
1451 | elf_end(elf); | ||
1452 | out_close: | ||
1453 | return err; | ||
1454 | } | ||
1455 | |||
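One detail worth noting in dso__load_sym() above: for ET_EXEC or prelinked user-space images the symbol values are virtual addresses, so the adjust_symbols branch rebases each st_value by the containing section's sh_addr/sh_offset delta before the symbol is inserted. A small helper capturing just that arithmetic (the helper name is illustrative, not perf API):

#include <gelf.h>

static GElf_Addr sym_to_file_offset(const GElf_Sym *sym, const GElf_Shdr *shdr)
{
	/* subtract the section's load bias: virtual address -> file offset */
	return sym->st_value - (shdr->sh_addr - shdr->sh_offset);
}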
1456 | static bool dso__build_id_equal(const struct dso *dso, u8 *build_id) | ||
1457 | { | 896 | { |
1458 | return memcmp(dso->build_id, build_id, sizeof(dso->build_id)) == 0; | 897 | return memcmp(dso->build_id, build_id, sizeof(dso->build_id)) == 0; |
1459 | } | 898 | } |
@@ -1480,216 +919,11 @@ bool __dsos__read_build_ids(struct list_head *head, bool with_hits) | |||
1480 | return have_build_id; | 919 | return have_build_id; |
1481 | } | 920 | } |
1482 | 921 | ||
1483 | /* | ||
1484 | * Align offset to 4 bytes as needed for note name and descriptor data. | ||
1485 | */ | ||
1486 | #define NOTE_ALIGN(n) (((n) + 3) & -4U) | ||
1487 | |||
1488 | static int elf_read_build_id(Elf *elf, void *bf, size_t size) | ||
1489 | { | ||
1490 | int err = -1; | ||
1491 | GElf_Ehdr ehdr; | ||
1492 | GElf_Shdr shdr; | ||
1493 | Elf_Data *data; | ||
1494 | Elf_Scn *sec; | ||
1495 | Elf_Kind ek; | ||
1496 | void *ptr; | ||
1497 | |||
1498 | if (size < BUILD_ID_SIZE) | ||
1499 | goto out; | ||
1500 | |||
1501 | ek = elf_kind(elf); | ||
1502 | if (ek != ELF_K_ELF) | ||
1503 | goto out; | ||
1504 | |||
1505 | if (gelf_getehdr(elf, &ehdr) == NULL) { | ||
1506 | pr_err("%s: cannot get elf header.\n", __func__); | ||
1507 | goto out; | ||
1508 | } | ||
1509 | |||
1510 | /* | ||
1511 | * Check following sections for notes: | ||
1512 | * '.note.gnu.build-id' | ||
1513 | * '.notes' | ||
1514 | * '.note' (VDSO specific) | ||
1515 | */ | ||
1516 | do { | ||
1517 | sec = elf_section_by_name(elf, &ehdr, &shdr, | ||
1518 | ".note.gnu.build-id", NULL); | ||
1519 | if (sec) | ||
1520 | break; | ||
1521 | |||
1522 | sec = elf_section_by_name(elf, &ehdr, &shdr, | ||
1523 | ".notes", NULL); | ||
1524 | if (sec) | ||
1525 | break; | ||
1526 | |||
1527 | sec = elf_section_by_name(elf, &ehdr, &shdr, | ||
1528 | ".note", NULL); | ||
1529 | if (sec) | ||
1530 | break; | ||
1531 | |||
1532 | return err; | ||
1533 | |||
1534 | } while (0); | ||
1535 | |||
1536 | data = elf_getdata(sec, NULL); | ||
1537 | if (data == NULL) | ||
1538 | goto out; | ||
1539 | |||
1540 | ptr = data->d_buf; | ||
1541 | while (ptr < (data->d_buf + data->d_size)) { | ||
1542 | GElf_Nhdr *nhdr = ptr; | ||
1543 | size_t namesz = NOTE_ALIGN(nhdr->n_namesz), | ||
1544 | descsz = NOTE_ALIGN(nhdr->n_descsz); | ||
1545 | const char *name; | ||
1546 | |||
1547 | ptr += sizeof(*nhdr); | ||
1548 | name = ptr; | ||
1549 | ptr += namesz; | ||
1550 | if (nhdr->n_type == NT_GNU_BUILD_ID && | ||
1551 | nhdr->n_namesz == sizeof("GNU")) { | ||
1552 | if (memcmp(name, "GNU", sizeof("GNU")) == 0) { | ||
1553 | size_t sz = min(size, descsz); | ||
1554 | memcpy(bf, ptr, sz); | ||
1555 | memset(bf + sz, 0, size - sz); | ||
1556 | err = descsz; | ||
1557 | break; | ||
1558 | } | ||
1559 | } | ||
1560 | ptr += descsz; | ||
1561 | } | ||
1562 | |||
1563 | out: | ||
1564 | return err; | ||
1565 | } | ||
1566 | |||
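elf_read_build_id() above walks the note section as a sequence of Nhdr records whose name and descriptor payloads are each padded to 4 bytes, stopping at the NT_GNU_BUILD_ID note owned by "GNU". A standalone sketch of that walk over an in-memory note buffer (assumes a well-formed buffer; not the perf code):

#include <elf.h>
#include <stdio.h>

#define NOTE_ALIGN(n) (((n) + 3) & -4U)

static void dump_notes(const void *buf, size_t size)
{
	const char *p = buf, *end = p + size;

	while (p + sizeof(Elf64_Nhdr) <= end) {
		const Elf64_Nhdr *nhdr = (const void *)p;
		const char *name = p + sizeof(*nhdr);
		const char *desc = name + NOTE_ALIGN(nhdr->n_namesz);

		printf("type %u, owner %.*s, %u byte descriptor\n",
		       nhdr->n_type, (int)nhdr->n_namesz, name,
		       nhdr->n_descsz);
		p = desc + NOTE_ALIGN(nhdr->n_descsz);
	}
}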
1567 | int filename__read_build_id(const char *filename, void *bf, size_t size) | ||
1568 | { | ||
1569 | int fd, err = -1; | ||
1570 | Elf *elf; | ||
1571 | |||
1572 | if (size < BUILD_ID_SIZE) | ||
1573 | goto out; | ||
1574 | |||
1575 | fd = open(filename, O_RDONLY); | ||
1576 | if (fd < 0) | ||
1577 | goto out; | ||
1578 | |||
1579 | elf = elf_begin(fd, PERF_ELF_C_READ_MMAP, NULL); | ||
1580 | if (elf == NULL) { | ||
1581 | pr_debug2("%s: cannot read %s ELF file.\n", __func__, filename); | ||
1582 | goto out_close; | ||
1583 | } | ||
1584 | |||
1585 | err = elf_read_build_id(elf, bf, size); | ||
1586 | |||
1587 | elf_end(elf); | ||
1588 | out_close: | ||
1589 | close(fd); | ||
1590 | out: | ||
1591 | return err; | ||
1592 | } | ||
1593 | |||
1594 | int sysfs__read_build_id(const char *filename, void *build_id, size_t size) | ||
1595 | { | ||
1596 | int fd, err = -1; | ||
1597 | |||
1598 | if (size < BUILD_ID_SIZE) | ||
1599 | goto out; | ||
1600 | |||
1601 | fd = open(filename, O_RDONLY); | ||
1602 | if (fd < 0) | ||
1603 | goto out; | ||
1604 | |||
1605 | while (1) { | ||
1606 | char bf[BUFSIZ]; | ||
1607 | GElf_Nhdr nhdr; | ||
1608 | size_t namesz, descsz; | ||
1609 | |||
1610 | if (read(fd, &nhdr, sizeof(nhdr)) != sizeof(nhdr)) | ||
1611 | break; | ||
1612 | |||
1613 | namesz = NOTE_ALIGN(nhdr.n_namesz); | ||
1614 | descsz = NOTE_ALIGN(nhdr.n_descsz); | ||
1615 | if (nhdr.n_type == NT_GNU_BUILD_ID && | ||
1616 | nhdr.n_namesz == sizeof("GNU")) { | ||
1617 | if (read(fd, bf, namesz) != (ssize_t)namesz) | ||
1618 | break; | ||
1619 | if (memcmp(bf, "GNU", sizeof("GNU")) == 0) { | ||
1620 | size_t sz = min(descsz, size); | ||
1621 | if (read(fd, build_id, sz) == (ssize_t)sz) { | ||
1622 | memset(build_id + sz, 0, size - sz); | ||
1623 | err = 0; | ||
1624 | break; | ||
1625 | } | ||
1626 | } else if (read(fd, bf, descsz) != (ssize_t)descsz) | ||
1627 | break; | ||
1628 | } else { | ||
1629 | int n = namesz + descsz; | ||
1630 | if (read(fd, bf, n) != n) | ||
1631 | break; | ||
1632 | } | ||
1633 | } | ||
1634 | close(fd); | ||
1635 | out: | ||
1636 | return err; | ||
1637 | } | ||
1638 | |||
1639 | static int filename__read_debuglink(const char *filename, | ||
1640 | char *debuglink, size_t size) | ||
1641 | { | ||
1642 | int fd, err = -1; | ||
1643 | Elf *elf; | ||
1644 | GElf_Ehdr ehdr; | ||
1645 | GElf_Shdr shdr; | ||
1646 | Elf_Data *data; | ||
1647 | Elf_Scn *sec; | ||
1648 | Elf_Kind ek; | ||
1649 | |||
1650 | fd = open(filename, O_RDONLY); | ||
1651 | if (fd < 0) | ||
1652 | goto out; | ||
1653 | |||
1654 | elf = elf_begin(fd, PERF_ELF_C_READ_MMAP, NULL); | ||
1655 | if (elf == NULL) { | ||
1656 | pr_debug2("%s: cannot read %s ELF file.\n", __func__, filename); | ||
1657 | goto out_close; | ||
1658 | } | ||
1659 | |||
1660 | ek = elf_kind(elf); | ||
1661 | if (ek != ELF_K_ELF) | ||
1662 | goto out_close; | ||
1663 | |||
1664 | if (gelf_getehdr(elf, &ehdr) == NULL) { | ||
1665 | pr_err("%s: cannot get elf header.\n", __func__); | ||
1666 | goto out_close; | ||
1667 | } | ||
1668 | |||
1669 | sec = elf_section_by_name(elf, &ehdr, &shdr, | ||
1670 | ".gnu_debuglink", NULL); | ||
1671 | if (sec == NULL) | ||
1672 | goto out_close; | ||
1673 | |||
1674 | data = elf_getdata(sec, NULL); | ||
1675 | if (data == NULL) | ||
1676 | goto out_close; | ||
1677 | |||
1678 | /* the start of this section is a zero-terminated string */ | ||
1679 | strncpy(debuglink, data->d_buf, size); | ||
1680 | |||
1681 | elf_end(elf); | ||
1682 | |||
1683 | out_close: | ||
1684 | close(fd); | ||
1685 | out: | ||
1686 | return err; | ||
1687 | } | ||
1688 | |||
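For context on the data filename__read_debuglink() copies out above: the .gnu_debuglink payload is a NUL-terminated file name, padded to a 4-byte boundary, followed by a CRC32 of the separate debug file (stored in the object's byte order). A hypothetical helper splitting the raw section data into those two pieces:

#include <stdint.h>
#include <string.h>

static uint32_t debuglink_crc(const unsigned char *d_buf, size_t d_size,
			      const char **name)
{
	/* file name, then padding up to the next 4-byte boundary */
	size_t off = (strlen((const char *)d_buf) + 4) & ~(size_t)3;
	uint32_t crc = 0;

	*name = (const char *)d_buf;
	if (off + sizeof(crc) <= d_size)
		memcpy(&crc, d_buf + off, sizeof(crc));
	return crc;
}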
1689 | char dso__symtab_origin(const struct dso *dso) | 922 | char dso__symtab_origin(const struct dso *dso) |
1690 | { | 923 | { |
1691 | static const char origin[] = { | 924 | static const char origin[] = { |
1692 | [DSO_BINARY_TYPE__KALLSYMS] = 'k', | 925 | [DSO_BINARY_TYPE__KALLSYMS] = 'k', |
926 | [DSO_BINARY_TYPE__VMLINUX] = 'v', | ||
1693 | [DSO_BINARY_TYPE__JAVA_JIT] = 'j', | 927 | [DSO_BINARY_TYPE__JAVA_JIT] = 'j', |
1694 | [DSO_BINARY_TYPE__DEBUGLINK] = 'l', | 928 | [DSO_BINARY_TYPE__DEBUGLINK] = 'l', |
1695 | [DSO_BINARY_TYPE__BUILD_ID_CACHE] = 'B', | 929 | [DSO_BINARY_TYPE__BUILD_ID_CACHE] = 'B', |
@@ -1700,6 +934,7 @@ char dso__symtab_origin(const struct dso *dso) | |||
1700 | [DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE] = 'K', | 934 | [DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE] = 'K', |
1701 | [DSO_BINARY_TYPE__GUEST_KALLSYMS] = 'g', | 935 | [DSO_BINARY_TYPE__GUEST_KALLSYMS] = 'g', |
1702 | [DSO_BINARY_TYPE__GUEST_KMODULE] = 'G', | 936 | [DSO_BINARY_TYPE__GUEST_KMODULE] = 'G', |
937 | [DSO_BINARY_TYPE__GUEST_VMLINUX] = 'V', | ||
1703 | }; | 938 | }; |
1704 | 939 | ||
1705 | if (dso == NULL || dso->symtab_type == DSO_BINARY_TYPE__NOT_FOUND) | 940 | if (dso == NULL || dso->symtab_type == DSO_BINARY_TYPE__NOT_FOUND) |
@@ -1775,7 +1010,9 @@ int dso__binary_type_file(struct dso *dso, enum dso_binary_type type, | |||
1775 | 1010 | ||
1776 | default: | 1011 | default: |
1777 | case DSO_BINARY_TYPE__KALLSYMS: | 1012 | case DSO_BINARY_TYPE__KALLSYMS: |
1013 | case DSO_BINARY_TYPE__VMLINUX: | ||
1778 | case DSO_BINARY_TYPE__GUEST_KALLSYMS: | 1014 | case DSO_BINARY_TYPE__GUEST_KALLSYMS: |
1015 | case DSO_BINARY_TYPE__GUEST_VMLINUX: | ||
1779 | case DSO_BINARY_TYPE__JAVA_JIT: | 1016 | case DSO_BINARY_TYPE__JAVA_JIT: |
1780 | case DSO_BINARY_TYPE__NOT_FOUND: | 1017 | case DSO_BINARY_TYPE__NOT_FOUND: |
1781 | ret = -1; | 1018 | ret = -1; |
@@ -1789,11 +1026,12 @@ int dso__load(struct dso *dso, struct map *map, symbol_filter_t filter) | |||
1789 | { | 1026 | { |
1790 | char *name; | 1027 | char *name; |
1791 | int ret = -1; | 1028 | int ret = -1; |
1792 | int fd; | ||
1793 | u_int i; | 1029 | u_int i; |
1794 | struct machine *machine; | 1030 | struct machine *machine; |
1795 | char *root_dir = (char *) ""; | 1031 | char *root_dir = (char *) ""; |
1796 | int want_symtab; | 1032 | int ss_pos = 0; |
1033 | struct symsrc ss_[2]; | ||
1034 | struct symsrc *syms_ss = NULL, *runtime_ss = NULL; | ||
1797 | 1035 | ||
1798 | dso__set_loaded(dso, map->type); | 1036 | dso__set_loaded(dso, map->type); |
1799 | 1037 | ||
@@ -1835,54 +1073,69 @@ int dso__load(struct dso *dso, struct map *map, symbol_filter_t filter) | |||
1835 | root_dir = machine->root_dir; | 1073 | root_dir = machine->root_dir; |
1836 | 1074 | ||
1837 | /* Iterate over candidate debug images. | 1075 | /* Iterate over candidate debug images. |
1838 | * On the first pass, only load images if they have a full symtab. | 1076 | * Keep track of "interesting" ones (those which have a symtab, dynsym, |
1839 | * Failing that, do a second pass where we accept .dynsym also | 1077 | * and/or opd section) for processing. |
1840 | */ | 1078 | */ |
1841 | want_symtab = 1; | ||
1842 | restart: | ||
1843 | for (i = 0; i < DSO_BINARY_TYPE__SYMTAB_CNT; i++) { | 1079 | for (i = 0; i < DSO_BINARY_TYPE__SYMTAB_CNT; i++) { |
1080 | struct symsrc *ss = &ss_[ss_pos]; | ||
1081 | bool next_slot = false; | ||
1844 | 1082 | ||
1845 | dso->symtab_type = binary_type_symtab[i]; | 1083 | enum dso_binary_type symtab_type = binary_type_symtab[i]; |
1846 | 1084 | ||
1847 | if (dso__binary_type_file(dso, dso->symtab_type, | 1085 | if (dso__binary_type_file(dso, symtab_type, |
1848 | root_dir, name, PATH_MAX)) | 1086 | root_dir, name, PATH_MAX)) |
1849 | continue; | 1087 | continue; |
1850 | 1088 | ||
1851 | /* Name is now the name of the next image to try */ | 1089 | /* Name is now the name of the next image to try */ |
1852 | fd = open(name, O_RDONLY); | 1090 | if (symsrc__init(ss, dso, name, symtab_type) < 0) |
1853 | if (fd < 0) | ||
1854 | continue; | 1091 | continue; |
1855 | 1092 | ||
1856 | ret = dso__load_sym(dso, map, name, fd, filter, 0, | 1093 | if (!syms_ss && symsrc__has_symtab(ss)) { |
1857 | want_symtab); | 1094 | syms_ss = ss; |
1858 | close(fd); | 1095 | next_slot = true; |
1096 | } | ||
1859 | 1097 | ||
1860 | /* | 1098 | if (!runtime_ss && symsrc__possibly_runtime(ss)) { |
1861 | * Some people seem to have debuginfo files _WITHOUT_ debug | 1099 | runtime_ss = ss; |
1862 | * info!?!? | 1100 | next_slot = true; |
1863 | */ | 1101 | } |
1864 | if (!ret) | ||
1865 | continue; | ||
1866 | 1102 | ||
1867 | if (ret > 0) { | 1103 | if (next_slot) { |
1868 | int nr_plt; | 1104 | ss_pos++; |
1869 | 1105 | ||
1870 | nr_plt = dso__synthesize_plt_symbols(dso, name, map, filter); | 1106 | if (syms_ss && runtime_ss) |
1871 | if (nr_plt > 0) | 1107 | break; |
1872 | ret += nr_plt; | ||
1873 | break; | ||
1874 | } | 1108 | } |
1109 | |||
1875 | } | 1110 | } |
1876 | 1111 | ||
1877 | /* | 1112 | if (!runtime_ss && !syms_ss) |
1878 | * If we wanted a full symtab but no image had one, | 1113 | goto out_free; |
1879 | * relax our requirements and repeat the search. | 1114 | |
1880 | */ | 1115 | if (runtime_ss && !syms_ss) { |
1881 | if (ret <= 0 && want_symtab) { | 1116 | syms_ss = runtime_ss; |
1882 | want_symtab = 0; | 1117 | } |
1883 | goto restart; | 1118 | |
1119 | /* We'll have to hope for the best */ | ||
1120 | if (!runtime_ss && syms_ss) | ||
1121 | runtime_ss = syms_ss; | ||
1122 | |||
1123 | if (syms_ss) | ||
1124 | ret = dso__load_sym(dso, map, syms_ss, runtime_ss, filter, 0); | ||
1125 | else | ||
1126 | ret = -1; | ||
1127 | |||
1128 | if (ret > 0) { | ||
1129 | int nr_plt; | ||
1130 | |||
1131 | nr_plt = dso__synthesize_plt_symbols(dso, runtime_ss, map, filter); | ||
1132 | if (nr_plt > 0) | ||
1133 | ret += nr_plt; | ||
1884 | } | 1134 | } |
1885 | 1135 | ||
1136 | for (; ss_pos > 0; ss_pos--) | ||
1137 | symsrc__destroy(&ss_[ss_pos - 1]); | ||
1138 | out_free: | ||
1886 | free(name); | 1139 | free(name); |
1887 | if (ret < 0 && strstr(dso->name, " (deleted)") != NULL) | 1140 | if (ret < 0 && strstr(dso->name, " (deleted)") != NULL) |
1888 | return 0; | 1141 | return 0; |
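The reworked loop above replaces the old two-pass want_symtab search with a single pass that keeps at most two symbol sources: the first image carrying a full .symtab (syms_ss) and the first image usable as the runtime object (runtime_ss), each falling back to the other when only one kind is found. A condensed model of that selection, using stand-in types rather than the perf structures:

#include <stdbool.h>
#include <stddef.h>

struct candidate {
	const char *name;
	bool has_symtab;	/* image carries a full .symtab     */
	bool possibly_runtime;	/* image has .dynsym and/or an .opd */
};

static void pick_sources(struct candidate *c, size_t n,
			 struct candidate **syms_ss,
			 struct candidate **runtime_ss)
{
	size_t i;

	*syms_ss = *runtime_ss = NULL;
	for (i = 0; i < n && !(*syms_ss && *runtime_ss); i++) {
		if (!*syms_ss && c[i].has_symtab)
			*syms_ss = &c[i];
		if (!*runtime_ss && c[i].possibly_runtime)
			*runtime_ss = &c[i];
	}
	if (!*syms_ss)		/* debuginfo never found: use the runtime image */
		*syms_ss = *runtime_ss;
	if (!*runtime_ss)	/* only a debug file found: hope for the best */
		*runtime_ss = *syms_ss;
}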
@@ -2030,25 +1283,6 @@ static int machine__set_modules_path(struct machine *machine) | |||
2030 | return map_groups__set_modules_path_dir(&machine->kmaps, modules_path); | 1283 | return map_groups__set_modules_path_dir(&machine->kmaps, modules_path); |
2031 | } | 1284 | } |
2032 | 1285 | ||
2033 | /* | ||
2034 | * Constructor variant for modules (where we know from /proc/modules where | ||
2035 | * they are loaded) and for vmlinux, where only after we load all the | ||
2036 | * symbols we'll know where it starts and ends. | ||
2037 | */ | ||
2038 | static struct map *map__new2(u64 start, struct dso *dso, enum map_type type) | ||
2039 | { | ||
2040 | struct map *map = calloc(1, (sizeof(*map) + | ||
2041 | (dso->kernel ? sizeof(struct kmap) : 0))); | ||
2042 | if (map != NULL) { | ||
2043 | /* | ||
2044 | * ->end will be filled after we load all the symbols | ||
2045 | */ | ||
2046 | map__init(map, type, start, 0, 0, dso); | ||
2047 | } | ||
2048 | |||
2049 | return map; | ||
2050 | } | ||
2051 | |||
2052 | struct map *machine__new_module(struct machine *machine, u64 start, | 1286 | struct map *machine__new_module(struct machine *machine, u64 start, |
2053 | const char *filename) | 1287 | const char *filename) |
2054 | { | 1288 | { |
@@ -2141,22 +1375,30 @@ out_failure: | |||
2141 | int dso__load_vmlinux(struct dso *dso, struct map *map, | 1375 | int dso__load_vmlinux(struct dso *dso, struct map *map, |
2142 | const char *vmlinux, symbol_filter_t filter) | 1376 | const char *vmlinux, symbol_filter_t filter) |
2143 | { | 1377 | { |
2144 | int err = -1, fd; | 1378 | int err = -1; |
1379 | struct symsrc ss; | ||
2145 | char symfs_vmlinux[PATH_MAX]; | 1380 | char symfs_vmlinux[PATH_MAX]; |
1381 | enum dso_binary_type symtab_type; | ||
2146 | 1382 | ||
2147 | snprintf(symfs_vmlinux, sizeof(symfs_vmlinux), "%s%s", | 1383 | snprintf(symfs_vmlinux, sizeof(symfs_vmlinux), "%s%s", |
2148 | symbol_conf.symfs, vmlinux); | 1384 | symbol_conf.symfs, vmlinux); |
2149 | fd = open(symfs_vmlinux, O_RDONLY); | 1385 | |
2150 | if (fd < 0) | 1386 | if (dso->kernel == DSO_TYPE_GUEST_KERNEL) |
1387 | symtab_type = DSO_BINARY_TYPE__GUEST_VMLINUX; | ||
1388 | else | ||
1389 | symtab_type = DSO_BINARY_TYPE__VMLINUX; | ||
1390 | |||
1391 | if (symsrc__init(&ss, dso, symfs_vmlinux, symtab_type)) | ||
2151 | return -1; | 1392 | return -1; |
2152 | 1393 | ||
2153 | dso__set_long_name(dso, (char *)vmlinux); | 1394 | err = dso__load_sym(dso, map, &ss, &ss, filter, 0); |
2154 | dso__set_loaded(dso, map->type); | 1395 | symsrc__destroy(&ss); |
2155 | err = dso__load_sym(dso, map, symfs_vmlinux, fd, filter, 0, 0); | ||
2156 | close(fd); | ||
2157 | 1396 | ||
2158 | if (err > 0) | 1397 | if (err > 0) { |
1398 | dso__set_long_name(dso, (char *)vmlinux); | ||
1399 | dso__set_loaded(dso, map->type); | ||
2159 | pr_debug("Using %s for symbols\n", symfs_vmlinux); | 1400 | pr_debug("Using %s for symbols\n", symfs_vmlinux); |
1401 | } | ||
2160 | 1402 | ||
2161 | return err; | 1403 | return err; |
2162 | } | 1404 | } |
@@ -2173,10 +1415,8 @@ int dso__load_vmlinux_path(struct dso *dso, struct map *map, | |||
2173 | filename = dso__build_id_filename(dso, NULL, 0); | 1415 | filename = dso__build_id_filename(dso, NULL, 0); |
2174 | if (filename != NULL) { | 1416 | if (filename != NULL) { |
2175 | err = dso__load_vmlinux(dso, map, filename, filter); | 1417 | err = dso__load_vmlinux(dso, map, filename, filter); |
2176 | if (err > 0) { | 1418 | if (err > 0) |
2177 | dso__set_long_name(dso, filename); | ||
2178 | goto out; | 1419 | goto out; |
2179 | } | ||
2180 | free(filename); | 1420 | free(filename); |
2181 | } | 1421 | } |
2182 | 1422 | ||
@@ -2291,9 +1531,8 @@ do_kallsyms: | |||
2291 | free(kallsyms_allocated_filename); | 1531 | free(kallsyms_allocated_filename); |
2292 | 1532 | ||
2293 | if (err > 0) { | 1533 | if (err > 0) { |
1534 | dso__set_long_name(dso, strdup("[kernel.kallsyms]")); | ||
2294 | out_fixup: | 1535 | out_fixup: |
2295 | if (kallsyms_filename != NULL) | ||
2296 | dso__set_long_name(dso, strdup("[kernel.kallsyms]")); | ||
2297 | map__fixup_start(map); | 1536 | map__fixup_start(map); |
2298 | map__fixup_end(map); | 1537 | map__fixup_end(map); |
2299 | } | 1538 | } |
@@ -2352,12 +1591,12 @@ out_try_fixup: | |||
2352 | return err; | 1591 | return err; |
2353 | } | 1592 | } |
2354 | 1593 | ||
2355 | static void dsos__add(struct list_head *head, struct dso *dso) | 1594 | void dsos__add(struct list_head *head, struct dso *dso) |
2356 | { | 1595 | { |
2357 | list_add_tail(&dso->node, head); | 1596 | list_add_tail(&dso->node, head); |
2358 | } | 1597 | } |
2359 | 1598 | ||
2360 | static struct dso *dsos__find(struct list_head *head, const char *name) | 1599 | struct dso *dsos__find(struct list_head *head, const char *name) |
2361 | { | 1600 | { |
2362 | struct dso *pos; | 1601 | struct dso *pos; |
2363 | 1602 | ||
@@ -2516,7 +1755,7 @@ struct process_args { | |||
2516 | }; | 1755 | }; |
2517 | 1756 | ||
2518 | static int symbol__in_kernel(void *arg, const char *name, | 1757 | static int symbol__in_kernel(void *arg, const char *name, |
2519 | char type __used, u64 start, u64 end __used) | 1758 | char type __maybe_unused, u64 start) |
2520 | { | 1759 | { |
2521 | struct process_args *args = arg; | 1760 | struct process_args *args = arg; |
2522 | 1761 | ||
@@ -2752,9 +1991,10 @@ int symbol__init(void) | |||
2752 | if (symbol_conf.initialized) | 1991 | if (symbol_conf.initialized) |
2753 | return 0; | 1992 | return 0; |
2754 | 1993 | ||
2755 | symbol_conf.priv_size = ALIGN(symbol_conf.priv_size, sizeof(u64)); | 1994 | symbol_conf.priv_size = PERF_ALIGN(symbol_conf.priv_size, sizeof(u64)); |
1995 | |||
1996 | symbol__elf_init(); | ||
2756 | 1997 | ||
2757 | elf_version(EV_CURRENT); | ||
2758 | if (symbol_conf.sort_by_name) | 1998 | if (symbol_conf.sort_by_name) |
2759 | symbol_conf.priv_size += (sizeof(struct symbol_name_rb_node) - | 1999 | symbol_conf.priv_size += (sizeof(struct symbol_name_rb_node) - |
2760 | sizeof(struct symbol)); | 2000 | sizeof(struct symbol)); |
diff --git a/tools/perf/util/symbol.h b/tools/perf/util/symbol.h index 1fe733a1e21f..b441b07172b7 100644 --- a/tools/perf/util/symbol.h +++ b/tools/perf/util/symbol.h | |||
@@ -10,22 +10,31 @@ | |||
10 | #include <linux/rbtree.h> | 10 | #include <linux/rbtree.h> |
11 | #include <stdio.h> | 11 | #include <stdio.h> |
12 | #include <byteswap.h> | 12 | #include <byteswap.h> |
13 | #include <libgen.h> | ||
14 | |||
15 | #ifndef NO_LIBELF_SUPPORT | ||
16 | #include <libelf.h> | ||
17 | #include <gelf.h> | ||
18 | #include <elf.h> | ||
19 | #endif | ||
13 | 20 | ||
14 | #ifdef HAVE_CPLUS_DEMANGLE | 21 | #ifdef HAVE_CPLUS_DEMANGLE |
15 | extern char *cplus_demangle(const char *, int); | 22 | extern char *cplus_demangle(const char *, int); |
16 | 23 | ||
17 | static inline char *bfd_demangle(void __used *v, const char *c, int i) | 24 | static inline char *bfd_demangle(void __maybe_unused *v, const char *c, int i) |
18 | { | 25 | { |
19 | return cplus_demangle(c, i); | 26 | return cplus_demangle(c, i); |
20 | } | 27 | } |
21 | #else | 28 | #else |
22 | #ifdef NO_DEMANGLE | 29 | #ifdef NO_DEMANGLE |
23 | static inline char *bfd_demangle(void __used *v, const char __used *c, | 30 | static inline char *bfd_demangle(void __maybe_unused *v, |
24 | int __used i) | 31 | const char __maybe_unused *c, |
32 | int __maybe_unused i) | ||
25 | { | 33 | { |
26 | return NULL; | 34 | return NULL; |
27 | } | 35 | } |
28 | #else | 36 | #else |
37 | #define PACKAGE 'perf' | ||
29 | #include <bfd.h> | 38 | #include <bfd.h> |
30 | #endif | 39 | #endif |
31 | #endif | 40 | #endif |
@@ -158,6 +167,8 @@ struct addr_location { | |||
158 | enum dso_binary_type { | 167 | enum dso_binary_type { |
159 | DSO_BINARY_TYPE__KALLSYMS = 0, | 168 | DSO_BINARY_TYPE__KALLSYMS = 0, |
160 | DSO_BINARY_TYPE__GUEST_KALLSYMS, | 169 | DSO_BINARY_TYPE__GUEST_KALLSYMS, |
170 | DSO_BINARY_TYPE__VMLINUX, | ||
171 | DSO_BINARY_TYPE__GUEST_VMLINUX, | ||
161 | DSO_BINARY_TYPE__JAVA_JIT, | 172 | DSO_BINARY_TYPE__JAVA_JIT, |
162 | DSO_BINARY_TYPE__DEBUGLINK, | 173 | DSO_BINARY_TYPE__DEBUGLINK, |
163 | DSO_BINARY_TYPE__BUILD_ID_CACHE, | 174 | DSO_BINARY_TYPE__BUILD_ID_CACHE, |
@@ -217,6 +228,36 @@ struct dso { | |||
217 | char name[0]; | 228 | char name[0]; |
218 | }; | 229 | }; |
219 | 230 | ||
231 | struct symsrc { | ||
232 | char *name; | ||
233 | int fd; | ||
234 | enum dso_binary_type type; | ||
235 | |||
236 | #ifndef NO_LIBELF_SUPPORT | ||
237 | Elf *elf; | ||
238 | GElf_Ehdr ehdr; | ||
239 | |||
240 | Elf_Scn *opdsec; | ||
241 | size_t opdidx; | ||
242 | GElf_Shdr opdshdr; | ||
243 | |||
244 | Elf_Scn *symtab; | ||
245 | GElf_Shdr symshdr; | ||
246 | |||
247 | Elf_Scn *dynsym; | ||
248 | size_t dynsym_idx; | ||
249 | GElf_Shdr dynshdr; | ||
250 | |||
251 | bool adjust_symbols; | ||
252 | #endif | ||
253 | }; | ||
254 | |||
255 | void symsrc__destroy(struct symsrc *ss); | ||
256 | int symsrc__init(struct symsrc *ss, struct dso *dso, const char *name, | ||
257 | enum dso_binary_type type); | ||
258 | bool symsrc__has_symtab(struct symsrc *ss); | ||
259 | bool symsrc__possibly_runtime(struct symsrc *ss); | ||
260 | |||
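Roughly how a caller is expected to drive these new entry points. This is a usage sketch rather than a compilable unit: error handling is trimmed, the debug-file path is made up, and dso, map and filter come from the surrounding perf code.

struct symsrc ss;

if (symsrc__init(&ss, dso, "/usr/lib/debug/foo.debug",
		 DSO_BINARY_TYPE__DEBUGLINK) == 0) {
	if (symsrc__has_symtab(&ss) || symsrc__possibly_runtime(&ss))
		dso__load_sym(dso, map, &ss, &ss, filter, 0);
	symsrc__destroy(&ss);
}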
220 | #define DSO__SWAP(dso, type, val) \ | 261 | #define DSO__SWAP(dso, type, val) \ |
221 | ({ \ | 262 | ({ \ |
222 | type ____r = val; \ | 263 | type ____r = val; \ |
@@ -254,6 +295,8 @@ static inline void dso__set_loaded(struct dso *dso, enum map_type type) | |||
254 | 295 | ||
255 | void dso__sort_by_name(struct dso *dso, enum map_type type); | 296 | void dso__sort_by_name(struct dso *dso, enum map_type type); |
256 | 297 | ||
298 | void dsos__add(struct list_head *head, struct dso *dso); | ||
299 | struct dso *dsos__find(struct list_head *head, const char *name); | ||
257 | struct dso *__dsos__findnew(struct list_head *head, const char *name); | 300 | struct dso *__dsos__findnew(struct list_head *head, const char *name); |
258 | 301 | ||
259 | int dso__load(struct dso *dso, struct map *map, symbol_filter_t filter); | 302 | int dso__load(struct dso *dso, struct map *map, symbol_filter_t filter); |
@@ -283,6 +326,7 @@ size_t dso__fprintf(struct dso *dso, enum map_type type, FILE *fp); | |||
283 | char dso__symtab_origin(const struct dso *dso); | 326 | char dso__symtab_origin(const struct dso *dso); |
284 | void dso__set_long_name(struct dso *dso, char *name); | 327 | void dso__set_long_name(struct dso *dso, char *name); |
285 | void dso__set_build_id(struct dso *dso, void *build_id); | 328 | void dso__set_build_id(struct dso *dso, void *build_id); |
329 | bool dso__build_id_equal(const struct dso *dso, u8 *build_id); | ||
286 | void dso__read_running_kernel_build_id(struct dso *dso, | 330 | void dso__read_running_kernel_build_id(struct dso *dso, |
287 | struct machine *machine); | 331 | struct machine *machine); |
288 | struct map *dso__new_map(const char *name); | 332 | struct map *dso__new_map(const char *name); |
@@ -297,7 +341,9 @@ bool __dsos__read_build_ids(struct list_head *head, bool with_hits); | |||
297 | int build_id__sprintf(const u8 *build_id, int len, char *bf); | 341 | int build_id__sprintf(const u8 *build_id, int len, char *bf); |
298 | int kallsyms__parse(const char *filename, void *arg, | 342 | int kallsyms__parse(const char *filename, void *arg, |
299 | int (*process_symbol)(void *arg, const char *name, | 343 | int (*process_symbol)(void *arg, const char *name, |
300 | char type, u64 start, u64 end)); | 344 | char type, u64 start)); |
345 | int filename__read_debuglink(const char *filename, char *debuglink, | ||
346 | size_t size); | ||
301 | 347 | ||
302 | void machine__destroy_kernel_maps(struct machine *machine); | 348 | void machine__destroy_kernel_maps(struct machine *machine); |
303 | int __machine__create_kernel_maps(struct machine *machine, struct dso *kernel); | 349 | int __machine__create_kernel_maps(struct machine *machine, struct dso *kernel); |
@@ -309,6 +355,8 @@ void machines__destroy_guest_kernel_maps(struct rb_root *machines); | |||
309 | 355 | ||
310 | int symbol__init(void); | 356 | int symbol__init(void); |
311 | void symbol__exit(void); | 357 | void symbol__exit(void); |
358 | void symbol__elf_init(void); | ||
359 | struct symbol *symbol__new(u64 start, u64 len, u8 binding, const char *name); | ||
312 | size_t symbol__fprintf_symname_offs(const struct symbol *sym, | 360 | size_t symbol__fprintf_symname_offs(const struct symbol *sym, |
313 | const struct addr_location *al, FILE *fp); | 361 | const struct addr_location *al, FILE *fp); |
314 | size_t symbol__fprintf_symname(const struct symbol *sym, FILE *fp); | 362 | size_t symbol__fprintf_symname(const struct symbol *sym, FILE *fp); |
@@ -326,4 +374,15 @@ ssize_t dso__data_read_addr(struct dso *dso, struct map *map, | |||
326 | struct machine *machine, u64 addr, | 374 | struct machine *machine, u64 addr, |
327 | u8 *data, ssize_t size); | 375 | u8 *data, ssize_t size); |
328 | int dso__test_data(void); | 376 | int dso__test_data(void); |
377 | int dso__load_sym(struct dso *dso, struct map *map, struct symsrc *syms_ss, | ||
378 | struct symsrc *runtime_ss, symbol_filter_t filter, | ||
379 | int kmodule); | ||
380 | int dso__synthesize_plt_symbols(struct dso *dso, struct symsrc *ss, | ||
381 | struct map *map, symbol_filter_t filter); | ||
382 | |||
383 | void symbols__insert(struct rb_root *symbols, struct symbol *sym); | ||
384 | void symbols__fixup_duplicate(struct rb_root *symbols); | ||
385 | void symbols__fixup_end(struct rb_root *symbols); | ||
386 | void __map_groups__fixup_end(struct map_groups *mg, enum map_type type); | ||
387 | |||
329 | #endif /* __PERF_SYMBOL */ | 388 | #endif /* __PERF_SYMBOL */ |
diff --git a/tools/perf/util/target.c b/tools/perf/util/target.c index 051eaa68095e..065528b7563e 100644 --- a/tools/perf/util/target.c +++ b/tools/perf/util/target.c | |||
@@ -117,8 +117,8 @@ int perf_target__strerror(struct perf_target *target, int errnum, | |||
117 | 117 | ||
118 | if (err != buf) { | 118 | if (err != buf) { |
119 | size_t len = strlen(err); | 119 | size_t len = strlen(err); |
120 | char *c = mempcpy(buf, err, min(buflen - 1, len)); | 120 | memcpy(buf, err, min(buflen - 1, len)); |
121 | *c = '\0'; | 121 | *(buf + min(buflen - 1, len)) = '\0'; |
122 | } | 122 | } |
123 | 123 | ||
124 | return 0; | 124 | return 0; |
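The change above swaps the GNU-specific mempcpy() for plain memcpy() plus an explicit terminator. Factored into a helper, the bounded copy-and-terminate it now performs looks like this (illustrative; assumes dstlen >= 1):

#include <string.h>

static void copy_truncated(char *dst, size_t dstlen,
			   const char *src, size_t srclen)
{
	size_t n = srclen < dstlen - 1 ? srclen : dstlen - 1;

	memcpy(dst, src, n);	/* copy at most dstlen - 1 bytes */
	dst[n] = '\0';		/* and always NUL-terminate      */
}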
diff --git a/tools/perf/util/thread.h b/tools/perf/util/thread.h index 70c2c13ff679..f66610b7bacf 100644 --- a/tools/perf/util/thread.h +++ b/tools/perf/util/thread.h | |||
@@ -16,6 +16,8 @@ struct thread { | |||
16 | bool comm_set; | 16 | bool comm_set; |
17 | char *comm; | 17 | char *comm; |
18 | int comm_len; | 18 | int comm_len; |
19 | |||
20 | void *priv; | ||
19 | }; | 21 | }; |
20 | 22 | ||
21 | struct machine; | 23 | struct machine; |
diff --git a/tools/perf/util/top.c b/tools/perf/util/top.c index 7eeebcee291c..884dde9b9bc1 100644 --- a/tools/perf/util/top.c +++ b/tools/perf/util/top.c | |||
@@ -58,8 +58,7 @@ size_t perf_top__header_snprintf(struct perf_top *top, char *bf, size_t size) | |||
58 | } | 58 | } |
59 | 59 | ||
60 | if (top->evlist->nr_entries == 1) { | 60 | if (top->evlist->nr_entries == 1) { |
61 | struct perf_evsel *first; | 61 | struct perf_evsel *first = perf_evlist__first(top->evlist); |
62 | first = list_entry(top->evlist->entries.next, struct perf_evsel, node); | ||
63 | ret += SNPRINTF(bf + ret, size - ret, "%" PRIu64 "%s ", | 62 | ret += SNPRINTF(bf + ret, size - ret, "%" PRIu64 "%s ", |
64 | (uint64_t)first->attr.sample_period, | 63 | (uint64_t)first->attr.sample_period, |
65 | top->freq ? "Hz" : ""); | 64 | top->freq ? "Hz" : ""); |
diff --git a/tools/perf/util/top.h b/tools/perf/util/top.h index 33347ca89ee4..86ff1b15059b 100644 --- a/tools/perf/util/top.h +++ b/tools/perf/util/top.h | |||
@@ -5,6 +5,7 @@ | |||
5 | #include "types.h" | 5 | #include "types.h" |
6 | #include <stddef.h> | 6 | #include <stddef.h> |
7 | #include <stdbool.h> | 7 | #include <stdbool.h> |
8 | #include <termios.h> | ||
8 | 9 | ||
9 | struct perf_evlist; | 10 | struct perf_evlist; |
10 | struct perf_evsel; | 11 | struct perf_evsel; |
diff --git a/tools/perf/util/trace-event-parse.c b/tools/perf/util/trace-event-parse.c index 0715c843c2e7..3aabcd687cd5 100644 --- a/tools/perf/util/trace-event-parse.c +++ b/tools/perf/util/trace-event-parse.c | |||
@@ -162,25 +162,16 @@ int trace_parse_common_pid(struct pevent *pevent, void *data) | |||
162 | return pevent_data_pid(pevent, &record); | 162 | return pevent_data_pid(pevent, &record); |
163 | } | 163 | } |
164 | 164 | ||
165 | unsigned long long read_size(struct pevent *pevent, void *ptr, int size) | 165 | unsigned long long read_size(struct event_format *event, void *ptr, int size) |
166 | { | 166 | { |
167 | return pevent_read_number(pevent, ptr, size); | 167 | return pevent_read_number(event->pevent, ptr, size); |
168 | } | 168 | } |
169 | 169 | ||
170 | void print_trace_event(struct pevent *pevent, int cpu, void *data, int size) | 170 | void event_format__print(struct event_format *event, |
171 | int cpu, void *data, int size) | ||
171 | { | 172 | { |
172 | struct event_format *event; | ||
173 | struct pevent_record record; | 173 | struct pevent_record record; |
174 | struct trace_seq s; | 174 | struct trace_seq s; |
175 | int type; | ||
176 | |||
177 | type = trace_parse_common_type(pevent, data); | ||
178 | |||
179 | event = pevent_find_event(pevent, type); | ||
180 | if (!event) { | ||
181 | warning("ug! no event found for type %d", type); | ||
182 | return; | ||
183 | } | ||
184 | 175 | ||
185 | memset(&record, 0, sizeof(record)); | 176 | memset(&record, 0, sizeof(record)); |
186 | record.cpu = cpu; | 177 | record.cpu = cpu; |
@@ -192,6 +183,19 @@ void print_trace_event(struct pevent *pevent, int cpu, void *data, int size) | |||
192 | trace_seq_do_printf(&s); | 183 | trace_seq_do_printf(&s); |
193 | } | 184 | } |
194 | 185 | ||
186 | void print_trace_event(struct pevent *pevent, int cpu, void *data, int size) | ||
187 | { | ||
188 | int type = trace_parse_common_type(pevent, data); | ||
189 | struct event_format *event = pevent_find_event(pevent, type); | ||
190 | |||
191 | if (!event) { | ||
192 | warning("ug! no event found for type %d", type); | ||
193 | return; | ||
194 | } | ||
195 | |||
196 | event_format__print(event, cpu, data, size); | ||
197 | } | ||
198 | |||
195 | void print_event(struct pevent *pevent, int cpu, void *data, int size, | 199 | void print_event(struct pevent *pevent, int cpu, void *data, int size, |
196 | unsigned long long nsecs, char *comm) | 200 | unsigned long long nsecs, char *comm) |
197 | { | 201 | { |
@@ -217,7 +221,7 @@ void print_event(struct pevent *pevent, int cpu, void *data, int size, | |||
217 | } | 221 | } |
218 | 222 | ||
219 | void parse_proc_kallsyms(struct pevent *pevent, | 223 | void parse_proc_kallsyms(struct pevent *pevent, |
220 | char *file, unsigned int size __unused) | 224 | char *file, unsigned int size __maybe_unused) |
221 | { | 225 | { |
222 | unsigned long long addr; | 226 | unsigned long long addr; |
223 | char *func; | 227 | char *func; |
@@ -225,31 +229,29 @@ void parse_proc_kallsyms(struct pevent *pevent, | |||
225 | char *next = NULL; | 229 | char *next = NULL; |
226 | char *addr_str; | 230 | char *addr_str; |
227 | char *mod; | 231 | char *mod; |
228 | char ch; | 232 | char *fmt; |
229 | 233 | ||
230 | line = strtok_r(file, "\n", &next); | 234 | line = strtok_r(file, "\n", &next); |
231 | while (line) { | 235 | while (line) { |
232 | mod = NULL; | 236 | mod = NULL; |
233 | sscanf(line, "%as %c %as\t[%as", | 237 | addr_str = strtok_r(line, " ", &fmt); |
234 | (float *)(void *)&addr_str, /* workaround gcc warning */ | ||
235 | &ch, (float *)(void *)&func, (float *)(void *)&mod); | ||
236 | addr = strtoull(addr_str, NULL, 16); | 238 | addr = strtoull(addr_str, NULL, 16); |
237 | free(addr_str); | 239 | /* skip character */ |
238 | 240 | strtok_r(NULL, " ", &fmt); | |
239 | /* truncate the extra ']' */ | 241 | func = strtok_r(NULL, "\t", &fmt); |
242 | mod = strtok_r(NULL, "]", &fmt); | ||
243 | /* truncate the extra '[' */ | ||
240 | if (mod) | 244 | if (mod) |
241 | mod[strlen(mod) - 1] = 0; | 245 | mod = mod + 1; |
242 | 246 | ||
243 | pevent_register_function(pevent, func, addr, mod); | 247 | pevent_register_function(pevent, func, addr, mod); |
244 | free(func); | ||
245 | free(mod); | ||
246 | 248 | ||
247 | line = strtok_r(NULL, "\n", &next); | 249 | line = strtok_r(NULL, "\n", &next); |
248 | } | 250 | } |
249 | } | 251 | } |
250 | 252 | ||
251 | void parse_ftrace_printk(struct pevent *pevent, | 253 | void parse_ftrace_printk(struct pevent *pevent, |
252 | char *file, unsigned int size __unused) | 254 | char *file, unsigned int size __maybe_unused) |
253 | { | 255 | { |
254 | unsigned long long addr; | 256 | unsigned long long addr; |
255 | char *printk; | 257 | char *printk; |
@@ -289,7 +291,7 @@ struct event_format *trace_find_next_event(struct pevent *pevent, | |||
289 | { | 291 | { |
290 | static int idx; | 292 | static int idx; |
291 | 293 | ||
292 | if (!pevent->events) | 294 | if (!pevent || !pevent->events) |
293 | return NULL; | 295 | return NULL; |
294 | 296 | ||
295 | if (!event) { | 297 | if (!event) { |
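The parse_proc_kallsyms() hunk above drops the glibc-only "%as" sscanf() allocation in favour of strtok_r(), which also avoids the per-line malloc/free churn. A standalone sketch of that tokenising for one well-formed /proc/kallsyms line of the form "<addr> <type> <name>[\t[module]]":

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static void parse_kallsyms_line(char *line)
{
	char *fmt = NULL;
	char *addr_str = strtok_r(line, " ", &fmt);
	unsigned long long addr = strtoull(addr_str, NULL, 16);
	char *type = strtok_r(NULL, " ", &fmt);	 /* single-letter symbol type */
	char *func = strtok_r(NULL, "\t", &fmt);
	char *mod  = strtok_r(NULL, "]", &fmt);	 /* NULL when not in a module */

	if (mod)
		mod++;			/* step past the leading '[' */
	printf("%llx %c %s %s\n", addr, *type, func, mod ? mod : "-");
}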
diff --git a/tools/perf/util/trace-event-scripting.c b/tools/perf/util/trace-event-scripting.c index 474aa7a7df43..8715a1006d00 100644 --- a/tools/perf/util/trace-event-scripting.c +++ b/tools/perf/util/trace-event-scripting.c | |||
@@ -35,12 +35,11 @@ static int stop_script_unsupported(void) | |||
35 | return 0; | 35 | return 0; |
36 | } | 36 | } |
37 | 37 | ||
38 | static void process_event_unsupported(union perf_event *event __unused, | 38 | static void process_event_unsupported(union perf_event *event __maybe_unused, |
39 | struct pevent *pevent __unused, | 39 | struct perf_sample *sample __maybe_unused, |
40 | struct perf_sample *sample __unused, | 40 | struct perf_evsel *evsel __maybe_unused, |
41 | struct perf_evsel *evsel __unused, | 41 | struct machine *machine __maybe_unused, |
42 | struct machine *machine __unused, | 42 | struct addr_location *al __maybe_unused) |
43 | struct thread *thread __unused) | ||
44 | { | 43 | { |
45 | } | 44 | } |
46 | 45 | ||
@@ -53,17 +52,19 @@ static void print_python_unsupported_msg(void) | |||
53 | "\n etc.\n"); | 52 | "\n etc.\n"); |
54 | } | 53 | } |
55 | 54 | ||
56 | static int python_start_script_unsupported(const char *script __unused, | 55 | static int python_start_script_unsupported(const char *script __maybe_unused, |
57 | int argc __unused, | 56 | int argc __maybe_unused, |
58 | const char **argv __unused) | 57 | const char **argv __maybe_unused) |
59 | { | 58 | { |
60 | print_python_unsupported_msg(); | 59 | print_python_unsupported_msg(); |
61 | 60 | ||
62 | return -1; | 61 | return -1; |
63 | } | 62 | } |
64 | 63 | ||
65 | static int python_generate_script_unsupported(struct pevent *pevent __unused, | 64 | static int python_generate_script_unsupported(struct pevent *pevent |
66 | const char *outfile __unused) | 65 | __maybe_unused, |
66 | const char *outfile | ||
67 | __maybe_unused) | ||
67 | { | 68 | { |
68 | print_python_unsupported_msg(); | 69 | print_python_unsupported_msg(); |
69 | 70 | ||
@@ -115,17 +116,18 @@ static void print_perl_unsupported_msg(void) | |||
115 | "\n etc.\n"); | 116 | "\n etc.\n"); |
116 | } | 117 | } |
117 | 118 | ||
118 | static int perl_start_script_unsupported(const char *script __unused, | 119 | static int perl_start_script_unsupported(const char *script __maybe_unused, |
119 | int argc __unused, | 120 | int argc __maybe_unused, |
120 | const char **argv __unused) | 121 | const char **argv __maybe_unused) |
121 | { | 122 | { |
122 | print_perl_unsupported_msg(); | 123 | print_perl_unsupported_msg(); |
123 | 124 | ||
124 | return -1; | 125 | return -1; |
125 | } | 126 | } |
126 | 127 | ||
127 | static int perl_generate_script_unsupported(struct pevent *pevent __unused, | 128 | static int perl_generate_script_unsupported(struct pevent *pevent |
128 | const char *outfile __unused) | 129 | __maybe_unused, |
130 | const char *outfile __maybe_unused) | ||
129 | { | 131 | { |
130 | print_perl_unsupported_msg(); | 132 | print_perl_unsupported_msg(); |
131 | 133 | ||
diff --git a/tools/perf/util/trace-event.h b/tools/perf/util/trace-event.h index 8fef1d6687b7..a55fd37ffea1 100644 --- a/tools/perf/util/trace-event.h +++ b/tools/perf/util/trace-event.h | |||
@@ -9,7 +9,6 @@ struct machine; | |||
9 | struct perf_sample; | 9 | struct perf_sample; |
10 | union perf_event; | 10 | union perf_event; |
11 | struct perf_tool; | 11 | struct perf_tool; |
12 | struct thread; | ||
13 | 12 | ||
14 | extern int header_page_size_size; | 13 | extern int header_page_size_size; |
15 | extern int header_page_ts_size; | 14 | extern int header_page_ts_size; |
@@ -32,6 +31,8 @@ int bigendian(void); | |||
32 | 31 | ||
33 | struct pevent *read_trace_init(int file_bigendian, int host_bigendian); | 32 | struct pevent *read_trace_init(int file_bigendian, int host_bigendian); |
34 | void print_trace_event(struct pevent *pevent, int cpu, void *data, int size); | 33 | void print_trace_event(struct pevent *pevent, int cpu, void *data, int size); |
34 | void event_format__print(struct event_format *event, | ||
35 | int cpu, void *data, int size); | ||
35 | 36 | ||
36 | void print_event(struct pevent *pevent, int cpu, void *data, int size, | 37 | void print_event(struct pevent *pevent, int cpu, void *data, int size, |
37 | unsigned long long nsecs, char *comm); | 38 | unsigned long long nsecs, char *comm); |
@@ -56,7 +57,7 @@ int trace_parse_common_pid(struct pevent *pevent, void *data); | |||
56 | 57 | ||
57 | struct event_format *trace_find_next_event(struct pevent *pevent, | 58 | struct event_format *trace_find_next_event(struct pevent *pevent, |
58 | struct event_format *event); | 59 | struct event_format *event); |
59 | unsigned long long read_size(struct pevent *pevent, void *ptr, int size); | 60 | unsigned long long read_size(struct event_format *event, void *ptr, int size); |
60 | unsigned long long eval_flag(const char *flag); | 61 | unsigned long long eval_flag(const char *flag); |
61 | 62 | ||
62 | struct pevent_record *trace_read_data(struct pevent *pevent, int cpu); | 63 | struct pevent_record *trace_read_data(struct pevent *pevent, int cpu); |
@@ -74,16 +75,19 @@ struct tracing_data *tracing_data_get(struct list_head *pattrs, | |||
74 | void tracing_data_put(struct tracing_data *tdata); | 75 | void tracing_data_put(struct tracing_data *tdata); |
75 | 76 | ||
76 | 77 | ||
78 | struct addr_location; | ||
79 | |||
80 | struct perf_session; | ||
81 | |||
77 | struct scripting_ops { | 82 | struct scripting_ops { |
78 | const char *name; | 83 | const char *name; |
79 | int (*start_script) (const char *script, int argc, const char **argv); | 84 | int (*start_script) (const char *script, int argc, const char **argv); |
80 | int (*stop_script) (void); | 85 | int (*stop_script) (void); |
81 | void (*process_event) (union perf_event *event, | 86 | void (*process_event) (union perf_event *event, |
82 | struct pevent *pevent, | ||
83 | struct perf_sample *sample, | 87 | struct perf_sample *sample, |
84 | struct perf_evsel *evsel, | 88 | struct perf_evsel *evsel, |
85 | struct machine *machine, | 89 | struct machine *machine, |
86 | struct thread *thread); | 90 | struct addr_location *al); |
87 | int (*generate_script) (struct pevent *pevent, const char *outfile); | 91 | int (*generate_script) (struct pevent *pevent, const char *outfile); |
88 | }; | 92 | }; |
89 | 93 | ||
diff --git a/tools/perf/util/unwind.c b/tools/perf/util/unwind.c new file mode 100644 index 000000000000..958723ba3d2e --- /dev/null +++ b/tools/perf/util/unwind.c | |||
@@ -0,0 +1,571 @@ | |||
1 | /* | ||
2 | * Post mortem Dwarf CFI based unwinding on top of regs and stack dumps. | ||
3 | * | ||
4 | * Lots of this code has been borrowed or heavily inspired from parts of | ||
5 | * the libunwind 0.99 code which are (amongst other contributors I may have | ||
6 | * forgotten): | ||
7 | * | ||
8 | * Copyright (C) 2002-2007 Hewlett-Packard Co | ||
9 | * Contributed by David Mosberger-Tang <davidm@hpl.hp.com> | ||
10 | * | ||
11 | * And the bugs have been added by: | ||
12 | * | ||
13 | * Copyright (C) 2010, Frederic Weisbecker <fweisbec@gmail.com> | ||
14 | * Copyright (C) 2012, Jiri Olsa <jolsa@redhat.com> | ||
15 | * | ||
16 | */ | ||
17 | |||
18 | #include <elf.h> | ||
19 | #include <gelf.h> | ||
20 | #include <fcntl.h> | ||
21 | #include <string.h> | ||
22 | #include <unistd.h> | ||
23 | #include <sys/mman.h> | ||
24 | #include <linux/list.h> | ||
25 | #include <libunwind.h> | ||
26 | #include <libunwind-ptrace.h> | ||
27 | #include "thread.h" | ||
28 | #include "session.h" | ||
29 | #include "perf_regs.h" | ||
30 | #include "unwind.h" | ||
31 | #include "util.h" | ||
32 | |||
33 | extern int | ||
34 | UNW_OBJ(dwarf_search_unwind_table) (unw_addr_space_t as, | ||
35 | unw_word_t ip, | ||
36 | unw_dyn_info_t *di, | ||
37 | unw_proc_info_t *pi, | ||
38 | int need_unwind_info, void *arg); | ||
39 | |||
40 | #define dwarf_search_unwind_table UNW_OBJ(dwarf_search_unwind_table) | ||
41 | |||
42 | #define DW_EH_PE_FORMAT_MASK 0x0f /* format of the encoded value */ | ||
43 | #define DW_EH_PE_APPL_MASK 0x70 /* how the value is to be applied */ | ||
44 | |||
45 | /* Pointer-encoding formats: */ | ||
46 | #define DW_EH_PE_omit 0xff | ||
47 | #define DW_EH_PE_ptr 0x00 /* pointer-sized unsigned value */ | ||
48 | #define DW_EH_PE_udata4 0x03 /* unsigned 32-bit value */ | ||
49 | #define DW_EH_PE_udata8 0x04 /* unsigned 64-bit value */ | ||
50 | #define DW_EH_PE_sdata4 0x0b /* signed 32-bit value */ | ||
51 | #define DW_EH_PE_sdata8 0x0c /* signed 64-bit value */ | ||
52 | |||
53 | /* Pointer-encoding application: */ | ||
54 | #define DW_EH_PE_absptr 0x00 /* absolute value */ | ||
55 | #define DW_EH_PE_pcrel 0x10 /* rel. to addr. of encoded value */ | ||
56 | |||
57 | /* | ||
58 | * The following are not documented by LSB v1.3, yet they are used by | ||
59 | * GCC, presumably they aren't documented by LSB since they aren't | ||
60 | * used on Linux: | ||
61 | */ | ||
62 | #define DW_EH_PE_funcrel 0x40 /* start-of-procedure-relative */ | ||
63 | #define DW_EH_PE_aligned 0x50 /* aligned pointer */ | ||
64 | |||
65 | /* Flags intentionally not handled, since they're not needed: | ||
66 | * #define DW_EH_PE_indirect 0x80 | ||
67 | * #define DW_EH_PE_uleb128 0x01 | ||
68 | * #define DW_EH_PE_udata2 0x02 | ||
69 | * #define DW_EH_PE_sleb128 0x09 | ||
70 | * #define DW_EH_PE_sdata2 0x0a | ||
71 | * #define DW_EH_PE_textrel 0x20 | ||
72 | * #define DW_EH_PE_datarel 0x30 | ||
73 | */ | ||
74 | |||
75 | struct unwind_info { | ||
76 | struct perf_sample *sample; | ||
77 | struct machine *machine; | ||
78 | struct thread *thread; | ||
79 | u64 sample_uregs; | ||
80 | }; | ||
81 | |||
82 | #define dw_read(ptr, type, end) ({ \ | ||
83 | type *__p = (type *) ptr; \ | ||
84 | type __v; \ | ||
85 | if ((__p + 1) > (type *) end) \ | ||
86 | return -EINVAL; \ | ||
87 | __v = *__p++; \ | ||
88 | ptr = (typeof(ptr)) __p; \ | ||
89 | __v; \ | ||
90 | }) | ||
91 | |||
92 | static int __dw_read_encoded_value(u8 **p, u8 *end, u64 *val, | ||
93 | u8 encoding) | ||
94 | { | ||
95 | u8 *cur = *p; | ||
96 | *val = 0; | ||
97 | |||
98 | switch (encoding) { | ||
99 | case DW_EH_PE_omit: | ||
100 | *val = 0; | ||
101 | goto out; | ||
102 | case DW_EH_PE_ptr: | ||
103 | *val = dw_read(cur, unsigned long, end); | ||
104 | goto out; | ||
105 | default: | ||
106 | break; | ||
107 | } | ||
108 | |||
109 | switch (encoding & DW_EH_PE_APPL_MASK) { | ||
110 | case DW_EH_PE_absptr: | ||
111 | break; | ||
112 | case DW_EH_PE_pcrel: | ||
113 | *val = (unsigned long) cur; | ||
114 | break; | ||
115 | default: | ||
116 | return -EINVAL; | ||
117 | } | ||
118 | |||
119 | if ((encoding & 0x07) == 0x00) | ||
120 | encoding |= DW_EH_PE_udata4; | ||
121 | |||
122 | switch (encoding & DW_EH_PE_FORMAT_MASK) { | ||
123 | case DW_EH_PE_sdata4: | ||
124 | *val += dw_read(cur, s32, end); | ||
125 | break; | ||
126 | case DW_EH_PE_udata4: | ||
127 | *val += dw_read(cur, u32, end); | ||
128 | break; | ||
129 | case DW_EH_PE_sdata8: | ||
130 | *val += dw_read(cur, s64, end); | ||
131 | break; | ||
132 | case DW_EH_PE_udata8: | ||
133 | *val += dw_read(cur, u64, end); | ||
134 | break; | ||
135 | default: | ||
136 | return -EINVAL; | ||
137 | } | ||
138 | |||
139 | out: | ||
140 | *p = cur; | ||
141 | return 0; | ||
142 | } | ||
143 | |||
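As a concrete reading of the encoding byte handled above: GCC commonly emits 0x1b for eh_frame_ptr, i.e. DW_EH_PE_pcrel in the application nibble and DW_EH_PE_sdata4 in the format nibble, so the stored value is a signed 32-bit offset added to the address it was read from. A tiny check using the constants defined earlier in this file (illustrative only):

#include <assert.h>

static void check_typical_encoding(void)
{
	unsigned char enc = 0x1b;	/* what GCC commonly uses */

	assert((enc & DW_EH_PE_APPL_MASK)   == DW_EH_PE_pcrel);   /* 0x10 */
	assert((enc & DW_EH_PE_FORMAT_MASK) == DW_EH_PE_sdata4);  /* 0x0b */
}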
144 | #define dw_read_encoded_value(ptr, end, enc) ({ \ | ||
145 | u64 __v; \ | ||
146 | if (__dw_read_encoded_value(&ptr, end, &__v, enc)) { \ | ||
147 | return -EINVAL; \ | ||
148 | } \ | ||
149 | __v; \ | ||
150 | }) | ||
151 | |||
152 | static Elf_Scn *elf_section_by_name(Elf *elf, GElf_Ehdr *ep, | ||
153 | GElf_Shdr *shp, const char *name) | ||
154 | { | ||
155 | Elf_Scn *sec = NULL; | ||
156 | |||
157 | while ((sec = elf_nextscn(elf, sec)) != NULL) { | ||
158 | char *str; | ||
159 | |||
160 | gelf_getshdr(sec, shp); | ||
161 | str = elf_strptr(elf, ep->e_shstrndx, shp->sh_name); | ||
162 | if (!strcmp(name, str)) | ||
163 | break; | ||
164 | } | ||
165 | |||
166 | return sec; | ||
167 | } | ||
168 | |||
169 | static u64 elf_section_offset(int fd, const char *name) | ||
170 | { | ||
171 | Elf *elf; | ||
172 | GElf_Ehdr ehdr; | ||
173 | GElf_Shdr shdr; | ||
174 | u64 offset = 0; | ||
175 | |||
176 | elf = elf_begin(fd, PERF_ELF_C_READ_MMAP, NULL); | ||
177 | if (elf == NULL) | ||
178 | return 0; | ||
179 | |||
180 | do { | ||
181 | if (gelf_getehdr(elf, &ehdr) == NULL) | ||
182 | break; | ||
183 | |||
184 | if (!elf_section_by_name(elf, &ehdr, &shdr, name)) | ||
185 | break; | ||
186 | |||
187 | offset = shdr.sh_offset; | ||
188 | } while (0); | ||
189 | |||
190 | elf_end(elf); | ||
191 | return offset; | ||
192 | } | ||
193 | |||
194 | struct table_entry { | ||
195 | u32 start_ip_offset; | ||
196 | u32 fde_offset; | ||
197 | }; | ||
198 | |||
199 | struct eh_frame_hdr { | ||
200 | unsigned char version; | ||
201 | unsigned char eh_frame_ptr_enc; | ||
202 | unsigned char fde_count_enc; | ||
203 | unsigned char table_enc; | ||
204 | |||
205 | /* | ||
206 | * The rest of the header is variable-length and consists of the | ||
207 | * following members: | ||
208 | * | ||
209 | * encoded_t eh_frame_ptr; | ||
210 | * encoded_t fde_count; | ||
211 | */ | ||
212 | |||
213 | /* A single encoded pointer should not be more than 8 bytes. */ | ||
214 | u64 enc[2]; | ||
215 | |||
216 | /* | ||
217 | * struct { | ||
218 | * encoded_t start_ip; | ||
219 | * encoded_t fde_addr; | ||
220 | * } binary_search_table[fde_count]; | ||
221 | */ | ||
222 | char data[0]; | ||
223 | } __packed; | ||
224 | |||
225 | static int unwind_spec_ehframe(struct dso *dso, struct machine *machine, | ||
226 | u64 offset, u64 *table_data, u64 *segbase, | ||
227 | u64 *fde_count) | ||
228 | { | ||
229 | struct eh_frame_hdr hdr; | ||
230 | u8 *enc = (u8 *) &hdr.enc; | ||
231 | u8 *end = (u8 *) &hdr.data; | ||
232 | ssize_t r; | ||
233 | |||
234 | r = dso__data_read_offset(dso, machine, offset, | ||
235 | (u8 *) &hdr, sizeof(hdr)); | ||
236 | if (r != sizeof(hdr)) | ||
237 | return -EINVAL; | ||
238 | |||
239 | /* We don't need eh_frame_ptr, just skip it. */ | ||
240 | dw_read_encoded_value(enc, end, hdr.eh_frame_ptr_enc); | ||
241 | |||
242 | *fde_count = dw_read_encoded_value(enc, end, hdr.fde_count_enc); | ||
243 | *segbase = offset; | ||
244 | *table_data = (enc - (u8 *) &hdr) + offset; | ||
245 | return 0; | ||
246 | } | ||
247 | |||
248 | static int read_unwind_spec(struct dso *dso, struct machine *machine, | ||
249 | u64 *table_data, u64 *segbase, u64 *fde_count) | ||
250 | { | ||
251 | int ret = -EINVAL, fd; | ||
252 | u64 offset; | ||
253 | |||
254 | fd = dso__data_fd(dso, machine); | ||
255 | if (fd < 0) | ||
256 | return -EINVAL; | ||
257 | |||
258 | offset = elf_section_offset(fd, ".eh_frame_hdr"); | ||
259 | close(fd); | ||
260 | |||
261 | if (offset) | ||
262 | ret = unwind_spec_ehframe(dso, machine, offset, | ||
263 | table_data, segbase, | ||
264 | fde_count); | ||
265 | |||
266 | /* TODO .debug_frame check if eh_frame_hdr fails */ | ||
267 | return ret; | ||
268 | } | ||
269 | |||
270 | static struct map *find_map(unw_word_t ip, struct unwind_info *ui) | ||
271 | { | ||
272 | struct addr_location al; | ||
273 | |||
274 | thread__find_addr_map(ui->thread, ui->machine, PERF_RECORD_MISC_USER, | ||
275 | MAP__FUNCTION, ip, &al); | ||
276 | return al.map; | ||
277 | } | ||
278 | |||
279 | static int | ||
280 | find_proc_info(unw_addr_space_t as, unw_word_t ip, unw_proc_info_t *pi, | ||
281 | int need_unwind_info, void *arg) | ||
282 | { | ||
283 | struct unwind_info *ui = arg; | ||
284 | struct map *map; | ||
285 | unw_dyn_info_t di; | ||
286 | u64 table_data, segbase, fde_count; | ||
287 | |||
288 | map = find_map(ip, ui); | ||
289 | if (!map || !map->dso) | ||
290 | return -EINVAL; | ||
291 | |||
292 | pr_debug("unwind: find_proc_info dso %s\n", map->dso->name); | ||
293 | |||
294 | if (read_unwind_spec(map->dso, ui->machine, | ||
295 | &table_data, &segbase, &fde_count)) | ||
296 | return -EINVAL; | ||
297 | |||
298 | memset(&di, 0, sizeof(di)); | ||
299 | di.format = UNW_INFO_FORMAT_REMOTE_TABLE; | ||
300 | di.start_ip = map->start; | ||
301 | di.end_ip = map->end; | ||
302 | di.u.rti.segbase = map->start + segbase; | ||
303 | di.u.rti.table_data = map->start + table_data; | ||
304 | di.u.rti.table_len = fde_count * sizeof(struct table_entry) | ||
305 | / sizeof(unw_word_t); | ||
306 | return dwarf_search_unwind_table(as, ip, &di, pi, | ||
307 | need_unwind_info, arg); | ||
308 | } | ||
309 | |||
310 | static int access_fpreg(unw_addr_space_t __maybe_unused as, | ||
311 | unw_regnum_t __maybe_unused num, | ||
312 | unw_fpreg_t __maybe_unused *val, | ||
313 | int __maybe_unused __write, | ||
314 | void __maybe_unused *arg) | ||
315 | { | ||
316 | pr_err("unwind: access_fpreg unsupported\n"); | ||
317 | return -UNW_EINVAL; | ||
318 | } | ||
319 | |||
320 | static int get_dyn_info_list_addr(unw_addr_space_t __maybe_unused as, | ||
321 | unw_word_t __maybe_unused *dil_addr, | ||
322 | void __maybe_unused *arg) | ||
323 | { | ||
324 | return -UNW_ENOINFO; | ||
325 | } | ||
326 | |||
327 | static int resume(unw_addr_space_t __maybe_unused as, | ||
328 | unw_cursor_t __maybe_unused *cu, | ||
329 | void __maybe_unused *arg) | ||
330 | { | ||
331 | pr_err("unwind: resume unsupported\n"); | ||
332 | return -UNW_EINVAL; | ||
333 | } | ||
334 | |||
335 | static int | ||
336 | get_proc_name(unw_addr_space_t __maybe_unused as, | ||
337 | unw_word_t __maybe_unused addr, | ||
338 | char __maybe_unused *bufp, size_t __maybe_unused buf_len, | ||
339 | unw_word_t __maybe_unused *offp, void __maybe_unused *arg) | ||
340 | { | ||
341 | pr_err("unwind: get_proc_name unsupported\n"); | ||
342 | return -UNW_EINVAL; | ||
343 | } | ||
344 | |||
345 | static int access_dso_mem(struct unwind_info *ui, unw_word_t addr, | ||
346 | unw_word_t *data) | ||
347 | { | ||
348 | struct addr_location al; | ||
349 | ssize_t size; | ||
350 | |||
351 | thread__find_addr_map(ui->thread, ui->machine, PERF_RECORD_MISC_USER, | ||
352 | MAP__FUNCTION, addr, &al); | ||
353 | if (!al.map) { | ||
354 | pr_debug("unwind: no map for %lx\n", (unsigned long)addr); | ||
355 | return -1; | ||
356 | } | ||
357 | |||
358 | if (!al.map->dso) | ||
359 | return -1; | ||
360 | |||
361 | size = dso__data_read_addr(al.map->dso, al.map, ui->machine, | ||
362 | addr, (u8 *) data, sizeof(*data)); | ||
363 | |||
364 | return !(size == sizeof(*data)); | ||
365 | } | ||
366 | |||
367 | static int reg_value(unw_word_t *valp, struct regs_dump *regs, int id, | ||
368 | u64 sample_regs) | ||
369 | { | ||
370 | int i, idx = 0; | ||
371 | |||
372 | if (!(sample_regs & (1 << id))) | ||
373 | return -EINVAL; | ||
374 | |||
375 | for (i = 0; i < id; i++) { | ||
376 | if (sample_regs & (1 << i)) | ||
377 | idx++; | ||
378 | } | ||
379 | |||
380 | *valp = regs->regs[idx]; | ||
381 | return 0; | ||
382 | } | ||
383 | |||
384 | static int access_mem(unw_addr_space_t __maybe_unused as, | ||
385 | unw_word_t addr, unw_word_t *valp, | ||
386 | int __write, void *arg) | ||
387 | { | ||
388 | struct unwind_info *ui = arg; | ||
389 | struct stack_dump *stack = &ui->sample->user_stack; | ||
390 | unw_word_t start, end; | ||
391 | int offset; | ||
392 | int ret; | ||
393 | |||
394 | /* Don't support write, probably not needed. */ | ||
395 | if (__write || !stack || !ui->sample->user_regs.regs) { | ||
396 | *valp = 0; | ||
397 | return 0; | ||
398 | } | ||
399 | |||
400 | ret = reg_value(&start, &ui->sample->user_regs, PERF_REG_SP, | ||
401 | ui->sample_uregs); | ||
402 | if (ret) | ||
403 | return ret; | ||
404 | |||
405 | end = start + stack->size; | ||
406 | |||
407 | /* Check overflow. */ | ||
408 | if (addr + sizeof(unw_word_t) < addr) | ||
409 | return -EINVAL; | ||
410 | |||
411 | if (addr < start || addr + sizeof(unw_word_t) >= end) { | ||
412 | ret = access_dso_mem(ui, addr, valp); | ||
413 | if (ret) { | ||
414 | pr_debug("unwind: access_mem %p not inside range %p-%p\n", | ||
415 | (void *)addr, (void *)start, (void *)end); | ||
416 | *valp = 0; | ||
417 | return ret; | ||
418 | } | ||
419 | return 0; | ||
420 | } | ||
421 | |||
422 | offset = addr - start; | ||
423 | *valp = *(unw_word_t *)&stack->data[offset]; | ||
424 | pr_debug("unwind: access_mem addr %p, val %lx, offset %d\n", | ||
425 | (void *)addr, (unsigned long)*valp, offset); | ||
426 | return 0; | ||
427 | } | ||
428 | |||
429 | static int access_reg(unw_addr_space_t __maybe_unused as, | ||
430 | unw_regnum_t regnum, unw_word_t *valp, | ||
431 | int __write, void *arg) | ||
432 | { | ||
433 | struct unwind_info *ui = arg; | ||
434 | int id, ret; | ||
435 | |||
436 | /* Don't support write, I suspect we don't need it. */ | ||
437 | if (__write) { | ||
438 | pr_err("unwind: access_reg w %d\n", regnum); | ||
439 | return 0; | ||
440 | } | ||
441 | |||
442 | if (!ui->sample->user_regs.regs) { | ||
443 | *valp = 0; | ||
444 | return 0; | ||
445 | } | ||
446 | |||
447 | id = unwind__arch_reg_id(regnum); | ||
448 | if (id < 0) | ||
449 | return -EINVAL; | ||
450 | |||
451 | ret = reg_value(valp, &ui->sample->user_regs, id, ui->sample_uregs); | ||
452 | if (ret) { | ||
453 | pr_err("unwind: can't read reg %d\n", regnum); | ||
454 | return ret; | ||
455 | } | ||
456 | |||
457 | pr_debug("unwind: reg %d, val %lx\n", regnum, (unsigned long)*valp); | ||
458 | return 0; | ||
459 | } | ||
460 | |||
461 | static void put_unwind_info(unw_addr_space_t __maybe_unused as, | ||
462 | unw_proc_info_t *pi __maybe_unused, | ||
463 | void *arg __maybe_unused) | ||
464 | { | ||
465 | pr_debug("unwind: put_unwind_info called\n"); | ||
466 | } | ||
467 | |||
468 | static int entry(u64 ip, struct thread *thread, struct machine *machine, | ||
469 | unwind_entry_cb_t cb, void *arg) | ||
470 | { | ||
471 | struct unwind_entry e; | ||
472 | struct addr_location al; | ||
473 | |||
474 | thread__find_addr_location(thread, machine, | ||
475 | PERF_RECORD_MISC_USER, | ||
476 | MAP__FUNCTION, ip, &al, NULL); | ||
477 | |||
478 | e.ip = ip; | ||
479 | e.map = al.map; | ||
480 | e.sym = al.sym; | ||
481 | |||
482 | pr_debug("unwind: %s:ip = 0x%" PRIx64 " (0x%" PRIx64 ")\n", | ||
483 | al.sym ? al.sym->name : "''", | ||
484 | ip, | ||
485 | al.map ? al.map->map_ip(al.map, ip) : (u64) 0); | ||
486 | |||
487 | return cb(&e, arg); | ||
488 | } | ||
489 | |||
490 | static void display_error(int err) | ||
491 | { | ||
492 | switch (err) { | ||
493 | case UNW_EINVAL: | ||
494 | pr_err("unwind: Only supports local.\n"); | ||
495 | break; | ||
496 | case UNW_EUNSPEC: | ||
497 | pr_err("unwind: Unspecified error.\n"); | ||
498 | break; | ||
499 | case UNW_EBADREG: | ||
500 | pr_err("unwind: Register unavailable.\n"); | ||
501 | break; | ||
502 | default: | ||
503 | break; | ||
504 | } | ||
505 | } | ||
506 | |||
507 | static unw_accessors_t accessors = { | ||
508 | .find_proc_info = find_proc_info, | ||
509 | .put_unwind_info = put_unwind_info, | ||
510 | .get_dyn_info_list_addr = get_dyn_info_list_addr, | ||
511 | .access_mem = access_mem, | ||
512 | .access_reg = access_reg, | ||
513 | .access_fpreg = access_fpreg, | ||
514 | .resume = resume, | ||
515 | .get_proc_name = get_proc_name, | ||
516 | }; | ||
517 | |||
518 | static int get_entries(struct unwind_info *ui, unwind_entry_cb_t cb, | ||
519 | void *arg) | ||
520 | { | ||
521 | unw_addr_space_t addr_space; | ||
522 | unw_cursor_t c; | ||
523 | int ret; | ||
524 | |||
525 | addr_space = unw_create_addr_space(&accessors, 0); | ||
526 | if (!addr_space) { | ||
527 | pr_err("unwind: Can't create unwind address space.\n"); | ||
528 | return -ENOMEM; | ||
529 | } | ||
530 | |||
531 | ret = unw_init_remote(&c, addr_space, ui); | ||
532 | if (ret) | ||
533 | display_error(ret); | ||
534 | |||
535 | while (!ret && (unw_step(&c) > 0)) { | ||
536 | unw_word_t ip; | ||
537 | |||
538 | unw_get_reg(&c, UNW_REG_IP, &ip); | ||
539 | ret = entry(ip, ui->thread, ui->machine, cb, arg); | ||
540 | } | ||
541 | |||
542 | unw_destroy_addr_space(addr_space); | ||
543 | return ret; | ||
544 | } | ||
545 | |||
546 | int unwind__get_entries(unwind_entry_cb_t cb, void *arg, | ||
547 | struct machine *machine, struct thread *thread, | ||
548 | u64 sample_uregs, struct perf_sample *data) | ||
549 | { | ||
550 | unw_word_t ip; | ||
551 | struct unwind_info ui = { | ||
552 | .sample = data, | ||
553 | .sample_uregs = sample_uregs, | ||
554 | .thread = thread, | ||
555 | .machine = machine, | ||
556 | }; | ||
557 | int ret; | ||
558 | |||
559 | if (!data->user_regs.regs) | ||
560 | return -EINVAL; | ||
561 | |||
562 | ret = reg_value(&ip, &data->user_regs, PERF_REG_IP, sample_uregs); | ||
563 | if (ret) | ||
564 | return ret; | ||
565 | |||
566 | ret = entry(ip, thread, machine, cb, arg); | ||
567 | if (ret) | ||
568 | return -ENOMEM; | ||
569 | |||
570 | return get_entries(&ui, cb, arg); | ||
571 | } | ||
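
As context for reg_value() above: the sampled user registers arrive packed, holding only the registers selected in the sample mask, in ascending register-number order, so a register's slot is found by counting the mask bits below its id. A minimal standalone sketch of that mapping (illustrative only, hypothetical names):

    #include <stdint.h>
    #include <stdio.h>

    /* Map a register id onto its slot in a packed register dump by
     * counting the mask bits below it, mirroring reg_value(). */
    static int packed_reg_index(uint64_t mask, int id)
    {
            int i, idx = 0;

            if (!(mask & (1ULL << id)))
                    return -1;      /* register was not sampled */
            for (i = 0; i < id; i++)
                    if (mask & (1ULL << i))
                            idx++;
            return idx;
    }

    int main(void)
    {
            /* Suppose registers 0, 3 and 7 were sampled. */
            uint64_t mask = (1ULL << 0) | (1ULL << 3) | (1ULL << 7);

            printf("reg 3 -> slot %d\n", packed_reg_index(mask, 3));  /* 1 */
            printf("reg 7 -> slot %d\n", packed_reg_index(mask, 7));  /* 2 */
            return 0;
    }
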
diff --git a/tools/perf/util/unwind.h b/tools/perf/util/unwind.h new file mode 100644 index 000000000000..a78c8b303bb5 --- /dev/null +++ b/tools/perf/util/unwind.h | |||
@@ -0,0 +1,35 @@ | |||
1 | #ifndef __UNWIND_H | ||
2 | #define __UNWIND_H | ||
3 | |||
4 | #include "types.h" | ||
5 | #include "event.h" | ||
6 | #include "symbol.h" | ||
7 | |||
8 | struct unwind_entry { | ||
9 | struct map *map; | ||
10 | struct symbol *sym; | ||
11 | u64 ip; | ||
12 | }; | ||
13 | |||
14 | typedef int (*unwind_entry_cb_t)(struct unwind_entry *entry, void *arg); | ||
15 | |||
16 | #ifndef NO_LIBUNWIND_SUPPORT | ||
17 | int unwind__get_entries(unwind_entry_cb_t cb, void *arg, | ||
18 | struct machine *machine, | ||
19 | struct thread *thread, | ||
20 | u64 sample_uregs, | ||
21 | struct perf_sample *data); | ||
22 | int unwind__arch_reg_id(int regnum); | ||
23 | #else | ||
24 | static inline int | ||
25 | unwind__get_entries(unwind_entry_cb_t cb __maybe_unused, | ||
26 | void *arg __maybe_unused, | ||
27 | struct machine *machine __maybe_unused, | ||
28 | struct thread *thread __maybe_unused, | ||
29 | u64 sample_uregs __maybe_unused, | ||
30 | struct perf_sample *data __maybe_unused) | ||
31 | { | ||
32 | return 0; | ||
33 | } | ||
34 | #endif /* NO_LIBUNWIND_SUPPORT */ | ||
35 | #endif /* __UNWIND_H */ | ||
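
unwind__get_entries() hands each resolved frame to the unwind_entry_cb_t callback rather than returning a list. The sketch below illustrates only the callback side; the perf types are stubbed locally and the driving loop is a stand-in for the real unwinder, so treat it as an assumption-laden example rather than actual perf code:

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Local stand-ins for the perf types declared above; real callers use
     * struct unwind_entry and unwind_entry_cb_t from unwind.h. */
    struct unwind_entry { void *map; void *sym; uint64_t ip; };
    typedef int (*unwind_entry_cb_t)(struct unwind_entry *entry, void *arg);

    /* Print each frame's instruction pointer; a non-zero return would tell
     * the unwinder to stop walking. */
    static int print_frame(struct unwind_entry *entry, void *arg)
    {
            unsigned int *depth = arg;

            printf("#%u ip=0x%" PRIx64 "\n", (*depth)++, entry->ip);
            return 0;
    }

    int main(void)
    {
            struct unwind_entry fake[2] = { { 0, 0, 0x400123 }, { 0, 0, 0x400456 } };
            unwind_entry_cb_t cb = print_frame;
            unsigned int depth = 0;
            int i;

            for (i = 0; i < 2; i++)         /* stand-in for the unwinder's loop */
                    cb(&fake[i], &depth);
            return 0;
    }
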
diff --git a/tools/perf/util/util.c b/tools/perf/util/util.c index d03599fbe78b..2055cf38041c 100644 --- a/tools/perf/util/util.c +++ b/tools/perf/util/util.c | |||
@@ -1,6 +1,11 @@ | |||
1 | #include "../perf.h" | 1 | #include "../perf.h" |
2 | #include "util.h" | 2 | #include "util.h" |
3 | #include <sys/mman.h> | 3 | #include <sys/mman.h> |
4 | #ifndef NO_BACKTRACE | ||
5 | #include <execinfo.h> | ||
6 | #endif | ||
7 | #include <stdio.h> | ||
8 | #include <stdlib.h> | ||
4 | 9 | ||
5 | /* | 10 | /* |
6 | * XXX We need to find a better place for these things... | 11 | * XXX We need to find a better place for these things... |
@@ -158,3 +163,23 @@ size_t hex_width(u64 v) | |||
158 | 163 | ||
159 | return n; | 164 | return n; |
160 | } | 165 | } |
166 | |||
167 | /* Obtain a backtrace and print it to stdout. */ | ||
168 | #ifndef NO_BACKTRACE | ||
169 | void dump_stack(void) | ||
170 | { | ||
171 | void *array[16]; | ||
172 | size_t size = backtrace(array, ARRAY_SIZE(array)); | ||
173 | char **strings = backtrace_symbols(array, size); | ||
174 | size_t i; | ||
175 | |||
176 | printf("Obtained %zu stack frames.\n", size); | ||
177 | |||
178 | for (i = 0; i < size; i++) | ||
179 | printf("%s\n", strings[i]); | ||
180 | |||
181 | free(strings); | ||
182 | } | ||
183 | #else | ||
184 | void dump_stack(void) {} | ||
185 | #endif | ||
diff --git a/tools/perf/util/util.h b/tools/perf/util/util.h index b13c7331eaf8..70fa70b535b2 100644 --- a/tools/perf/util/util.h +++ b/tools/perf/util/util.h | |||
@@ -69,13 +69,8 @@ | |||
69 | #include <sys/poll.h> | 69 | #include <sys/poll.h> |
70 | #include <sys/socket.h> | 70 | #include <sys/socket.h> |
71 | #include <sys/ioctl.h> | 71 | #include <sys/ioctl.h> |
72 | #include <sys/select.h> | ||
73 | #include <netinet/in.h> | ||
74 | #include <netinet/tcp.h> | ||
75 | #include <arpa/inet.h> | ||
76 | #include <netdb.h> | ||
77 | #include <inttypes.h> | 72 | #include <inttypes.h> |
78 | #include "../../../include/linux/magic.h" | 73 | #include <linux/magic.h> |
79 | #include "types.h" | 74 | #include "types.h" |
80 | #include <sys/ttydefaults.h> | 75 | #include <sys/ttydefaults.h> |
81 | 76 | ||
@@ -266,4 +261,6 @@ size_t hex_width(u64 v); | |||
266 | 261 | ||
267 | char *rtrim(char *s); | 262 | char *rtrim(char *s); |
268 | 263 | ||
264 | void dump_stack(void); | ||
265 | |||
269 | #endif | 266 | #endif |
diff --git a/tools/perf/util/vdso.c b/tools/perf/util/vdso.c new file mode 100644 index 000000000000..e60951fcdb12 --- /dev/null +++ b/tools/perf/util/vdso.c | |||
@@ -0,0 +1,111 @@ | |||
1 | |||
2 | #include <unistd.h> | ||
3 | #include <stdio.h> | ||
4 | #include <string.h> | ||
5 | #include <sys/types.h> | ||
6 | #include <sys/stat.h> | ||
7 | #include <fcntl.h> | ||
8 | #include <stdlib.h> | ||
9 | #include <linux/kernel.h> | ||
10 | |||
11 | #include "vdso.h" | ||
12 | #include "util.h" | ||
13 | #include "symbol.h" | ||
14 | #include "linux/string.h" | ||
15 | |||
16 | static bool vdso_found; | ||
17 | static char vdso_file[] = "/tmp/perf-vdso.so-XXXXXX"; | ||
18 | |||
19 | static int find_vdso_map(void **start, void **end) | ||
20 | { | ||
21 | FILE *maps; | ||
22 | char line[128]; | ||
23 | int found = 0; | ||
24 | |||
25 | maps = fopen("/proc/self/maps", "r"); | ||
26 | if (!maps) { | ||
27 | pr_err("vdso: cannot open maps\n"); | ||
28 | return -1; | ||
29 | } | ||
30 | |||
31 | while (!found && fgets(line, sizeof(line), maps)) { | ||
32 | int m = -1; | ||
33 | |||
34 | /* We care only about private r-x mappings. */ | ||
35 | if (2 != sscanf(line, "%p-%p r-xp %*x %*x:%*x %*u %n", | ||
36 | start, end, &m)) | ||
37 | continue; | ||
38 | if (m < 0) | ||
39 | continue; | ||
40 | |||
41 | if (!strncmp(&line[m], VDSO__MAP_NAME, | ||
42 | sizeof(VDSO__MAP_NAME) - 1)) | ||
43 | found = 1; | ||
44 | } | ||
45 | |||
46 | fclose(maps); | ||
47 | return !found; | ||
48 | } | ||
49 | |||
50 | static char *get_file(void) | ||
51 | { | ||
52 | char *vdso = NULL; | ||
53 | char *buf = NULL; | ||
54 | void *start, *end; | ||
55 | size_t size; | ||
56 | int fd; | ||
57 | |||
58 | if (vdso_found) | ||
59 | return vdso_file; | ||
60 | |||
61 | if (find_vdso_map(&start, &end)) | ||
62 | return NULL; | ||
63 | |||
64 | size = end - start; | ||
65 | |||
66 | buf = memdup(start, size); | ||
67 | if (!buf) | ||
68 | return NULL; | ||
69 | |||
70 | fd = mkstemp(vdso_file); | ||
71 | if (fd < 0) | ||
72 | goto out; | ||
73 | |||
74 | if (size == (size_t) write(fd, buf, size)) | ||
75 | vdso = vdso_file; | ||
76 | |||
77 | close(fd); | ||
78 | |||
79 | out: | ||
80 | free(buf); | ||
81 | |||
82 | vdso_found = (vdso != NULL); | ||
83 | return vdso; | ||
84 | } | ||
85 | |||
86 | void vdso__exit(void) | ||
87 | { | ||
88 | if (vdso_found) | ||
89 | unlink(vdso_file); | ||
90 | } | ||
91 | |||
92 | struct dso *vdso__dso_findnew(struct list_head *head) | ||
93 | { | ||
94 | struct dso *dso = dsos__find(head, VDSO__MAP_NAME); | ||
95 | |||
96 | if (!dso) { | ||
97 | char *file; | ||
98 | |||
99 | file = get_file(); | ||
100 | if (!file) | ||
101 | return NULL; | ||
102 | |||
103 | dso = dso__new(VDSO__MAP_NAME); | ||
104 | if (dso != NULL) { | ||
105 | dsos__add(head, dso); | ||
106 | dso__set_long_name(dso, file); | ||
107 | } | ||
108 | } | ||
109 | |||
110 | return dso; | ||
111 | } | ||
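
find_vdso_map() works by scanning /proc/self/maps for the "[vdso]" pseudo-entry and recording its address range. A stripped-down standalone version of that scan (Linux-only; it omits the private r-xp check the patch performs) might look like:

    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
            char line[256];
            void *start, *end;
            FILE *maps = fopen("/proc/self/maps", "r");

            if (!maps) {
                    perror("fopen");
                    return 1;
            }

            while (fgets(line, sizeof(line), maps)) {
                    /* Address range first, then look for the [vdso] name. */
                    if (sscanf(line, "%p-%p", &start, &end) != 2)
                            continue;
                    if (strstr(line, "[vdso]")) {
                            printf("vdso mapped at %p-%p (%zu bytes)\n", start, end,
                                   (size_t)((char *)end - (char *)start));
                            break;
                    }
            }

            fclose(maps);
            return 0;
    }
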
diff --git a/tools/perf/util/vdso.h b/tools/perf/util/vdso.h new file mode 100644 index 000000000000..0f76e7caf6f8 --- /dev/null +++ b/tools/perf/util/vdso.h | |||
@@ -0,0 +1,18 @@ | |||
1 | #ifndef __PERF_VDSO__ | ||
2 | #define __PERF_VDSO__ | ||
3 | |||
4 | #include <linux/types.h> | ||
5 | #include <string.h> | ||
6 | #include <stdbool.h> | ||
7 | |||
8 | #define VDSO__MAP_NAME "[vdso]" | ||
9 | |||
10 | static inline bool is_vdso_map(const char *filename) | ||
11 | { | ||
12 | return !strcmp(filename, VDSO__MAP_NAME); | ||
13 | } | ||
14 | |||
15 | struct dso *vdso__dso_findnew(struct list_head *head); | ||
16 | void vdso__exit(void); | ||
17 | |||
18 | #endif /* __PERF_VDSO__ */ | ||
diff --git a/tools/perf/util/wrapper.c b/tools/perf/util/wrapper.c index 73e900edb5a2..19f15b650703 100644 --- a/tools/perf/util/wrapper.c +++ b/tools/perf/util/wrapper.c | |||
@@ -7,7 +7,8 @@ | |||
7 | * There's no pack memory to release - but stay close to the Git | 7 | * There's no pack memory to release - but stay close to the Git |
8 | * version so wrap this away: | 8 | * version so wrap this away: |
9 | */ | 9 | */ |
10 | static inline void release_pack_memory(size_t size __used, int flag __used) | 10 | static inline void release_pack_memory(size_t size __maybe_unused, |
11 | int flag __maybe_unused) | ||
11 | { | 12 | { |
12 | } | 13 | } |
13 | 14 | ||
diff --git a/tools/scripts/Makefile.include b/tools/scripts/Makefile.include index bde8521d56bb..96ce80a3743b 100644 --- a/tools/scripts/Makefile.include +++ b/tools/scripts/Makefile.include | |||
@@ -1,6 +1,8 @@ | |||
1 | ifeq ("$(origin O)", "command line") | 1 | ifeq ("$(origin O)", "command line") |
2 | OUTPUT := $(O)/ | 2 | dummy := $(if $(shell test -d $(O) || echo $(O)),$(error O=$(O) does not exist),) |
3 | COMMAND_O := O=$(O) | 3 | ABSOLUTE_O := $(shell cd $(O) ; pwd) |
4 | OUTPUT := $(ABSOLUTE_O)/ | ||
5 | COMMAND_O := O=$(ABSOLUTE_O) | ||
4 | endif | 6 | endif |
5 | 7 | ||
6 | ifneq ($(OUTPUT),) | 8 | ifneq ($(OUTPUT),) |
diff --git a/tools/testing/ktest/examples/include/defaults.conf b/tools/testing/ktest/examples/include/defaults.conf index 323a552ce642..63a1a83f4f0b 100644 --- a/tools/testing/ktest/examples/include/defaults.conf +++ b/tools/testing/ktest/examples/include/defaults.conf | |||
@@ -33,7 +33,7 @@ DEFAULTS | |||
33 | THIS_DIR := ${PWD} | 33 | THIS_DIR := ${PWD} |
34 | 34 | ||
35 | 35 | ||
36 | # to orginize your configs, having each machine save their configs | 36 | # to organize your configs, having each machine save their configs |
37 | # into a separate directory is useful. | 37 | # into a separate directory is useful. |
38 | CONFIG_DIR := ${THIS_DIR}/configs/${MACHINE} | 38 | CONFIG_DIR := ${THIS_DIR}/configs/${MACHINE} |
39 | 39 | ||
diff --git a/tools/testing/ktest/examples/include/tests.conf b/tools/testing/ktest/examples/include/tests.conf index 4fdb811bd810..60cedb1a1154 100644 --- a/tools/testing/ktest/examples/include/tests.conf +++ b/tools/testing/ktest/examples/include/tests.conf | |||
@@ -47,7 +47,7 @@ BUILD_NOCLEAN = 1 | |||
47 | # Build, install, boot and test with a randconfig 10 times. | 47 | # Build, install, boot and test with a randconfig 10 times. |
48 | # It is important that you have set MIN_CONFIG in the config | 48 | # It is important that you have set MIN_CONFIG in the config |
49 | # that includes this file otherwise it is likely that the | 49 | # that includes this file otherwise it is likely that the |
50 | # randconfig will not have the neccessary configs needed to | 50 | # randconfig will not have the necessary configs needed to |
51 | # boot your box. This version of the test requires a min | 51 | # boot your box. This version of the test requires a min |
52 | # config that has enough to make sure the target has network | 52 | # config that has enough to make sure the target has network |
53 | # working. | 53 | # working. |
diff --git a/tools/testing/ktest/ktest.pl b/tools/testing/ktest/ktest.pl index 52b7959cd513..c05bcd293d8c 100755 --- a/tools/testing/ktest/ktest.pl +++ b/tools/testing/ktest/ktest.pl | |||
@@ -840,7 +840,9 @@ sub __read_config { | |||
840 | 840 | ||
841 | if ($rest =~ /\sIF\s+(.*)/) { | 841 | if ($rest =~ /\sIF\s+(.*)/) { |
842 | # May be a ELSE IF section. | 842 | # May be a ELSE IF section. |
843 | if (!process_if($name, $1)) { | 843 | if (process_if($name, $1)) { |
844 | $if_set = 1; | ||
845 | } else { | ||
844 | $skip = 1; | 846 | $skip = 1; |
845 | } | 847 | } |
846 | $rest = ""; | 848 | $rest = ""; |
diff --git a/tools/testing/selftests/Makefile b/tools/testing/selftests/Makefile index 85baf11e2acd..43480149119e 100644 --- a/tools/testing/selftests/Makefile +++ b/tools/testing/selftests/Makefile | |||
@@ -1,4 +1,4 @@ | |||
1 | TARGETS = breakpoints kcmp mqueue vm cpu-hotplug memory-hotplug | 1 | TARGETS = breakpoints kcmp mqueue vm cpu-hotplug memory-hotplug epoll |
2 | 2 | ||
3 | all: | 3 | all: |
4 | for TARGET in $(TARGETS); do \ | 4 | for TARGET in $(TARGETS); do \ |
diff --git a/tools/testing/selftests/epoll/Makefile b/tools/testing/selftests/epoll/Makefile new file mode 100644 index 000000000000..19806ed62f50 --- /dev/null +++ b/tools/testing/selftests/epoll/Makefile | |||
@@ -0,0 +1,11 @@ | |||
1 | # Makefile for epoll selftests | ||
2 | |||
3 | all: test_epoll | ||
4 | %: %.c | ||
5 | gcc -pthread -g -o $@ $^ | ||
6 | |||
7 | run_tests: all | ||
8 | ./test_epoll | ||
9 | |||
10 | clean: | ||
11 | $(RM) test_epoll | ||
diff --git a/tools/testing/selftests/epoll/test_epoll.c b/tools/testing/selftests/epoll/test_epoll.c new file mode 100644 index 000000000000..e0fcff1e8331 --- /dev/null +++ b/tools/testing/selftests/epoll/test_epoll.c | |||
@@ -0,0 +1,344 @@ | |||
1 | /* | ||
2 | * tools/testing/selftests/epoll/test_epoll.c | ||
3 | * | ||
4 | * Copyright 2012 Adobe Systems Incorporated | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify | ||
7 | * it under the terms of the GNU General Public License as published by | ||
8 | * the Free Software Foundation; either version 2 of the License, or | ||
9 | * (at your option) any later version. | ||
10 | * | ||
11 | * Paton J. Lewis <palewis@adobe.com> | ||
12 | * | ||
13 | */ | ||
14 | |||
15 | #include <errno.h> | ||
16 | #include <fcntl.h> | ||
17 | #include <pthread.h> | ||
18 | #include <stdio.h> | ||
19 | #include <stdlib.h> | ||
20 | #include <unistd.h> | ||
21 | #include <sys/epoll.h> | ||
22 | #include <sys/socket.h> | ||
23 | |||
24 | /* | ||
25 | * A pointer to an epoll_item_private structure will be stored in the epoll | ||
26 | * item's event structure so that we can get access to the epoll_item_private | ||
27 | * data after calling epoll_wait: | ||
28 | */ | ||
29 | struct epoll_item_private { | ||
30 | int index; /* Position of this struct within the epoll_items array. */ | ||
31 | int fd; | ||
32 | uint32_t events; | ||
33 | pthread_mutex_t mutex; /* Guards the following variables... */ | ||
34 | int stop; | ||
35 | int status; /* Stores any error encountered while handling item. */ | ||
36 | /* The following variable allows us to test whether we have encountered | ||
37 | a problem while attempting to cancel and delete the associated | ||
38 | event. When the test program exits, 'deleted' should be exactly | ||
39 | one. If it is greater than one, then the failed test reflects a real | ||
40 | world situation where we would have tried to access the epoll item's | ||
41 | private data after deleting it: */ | ||
42 | int deleted; | ||
43 | }; | ||
44 | |||
45 | struct epoll_item_private *epoll_items; | ||
46 | |||
47 | /* | ||
48 | * Delete the specified item from the epoll set. In a real-world scenario this | ||
49 | * is where we would free the associated data structure, but in this testing | ||
50 | * environment we retain the structure so that we can test for double-deletion: | ||
51 | */ | ||
52 | void delete_item(int index) | ||
53 | { | ||
54 | __sync_fetch_and_add(&epoll_items[index].deleted, 1); | ||
55 | } | ||
56 | |||
57 | /* | ||
58 | * A pointer to a read_thread_data structure will be passed as the argument to | ||
59 | * each read thread: | ||
60 | */ | ||
61 | struct read_thread_data { | ||
62 | int stop; | ||
63 | int status; /* Indicates any error encountered by the read thread. */ | ||
64 | int epoll_set; | ||
65 | }; | ||
66 | |||
67 | /* | ||
68 | * The function executed by the read threads: | ||
69 | */ | ||
70 | void *read_thread_function(void *function_data) | ||
71 | { | ||
72 | struct read_thread_data *thread_data = | ||
73 | (struct read_thread_data *)function_data; | ||
74 | struct epoll_event event_data; | ||
75 | struct epoll_item_private *item_data; | ||
76 | char socket_data; | ||
77 | |||
78 | /* Handle events until we encounter an error or this thread's 'stop' | ||
79 | condition is set: */ | ||
80 | while (1) { | ||
81 | int result = epoll_wait(thread_data->epoll_set, | ||
82 | &event_data, | ||
83 | 1, /* Number of desired events */ | ||
84 | 1000); /* Timeout in ms */ | ||
85 | if (result < 0) { | ||
86 | /* Breakpoints signal all threads. Ignore that while | ||
87 | debugging: */ | ||
88 | if (errno == EINTR) | ||
89 | continue; | ||
90 | thread_data->status = errno; | ||
91 | return 0; | ||
92 | } else if (thread_data->stop) | ||
93 | return 0; | ||
94 | else if (result == 0) /* Timeout */ | ||
95 | continue; | ||
96 | |||
97 | /* We need the mutex here because checking for the stop | ||
98 | condition and re-enabling the epoll item need to be done | ||
99 | together as one atomic operation when EPOLL_CTL_DISABLE is | ||
100 | available: */ | ||
101 | item_data = (struct epoll_item_private *)event_data.data.ptr; | ||
102 | pthread_mutex_lock(&item_data->mutex); | ||
103 | |||
104 | /* Remove the item from the epoll set if we want to stop | ||
105 | handling that event: */ | ||
106 | if (item_data->stop) | ||
107 | delete_item(item_data->index); | ||
108 | else { | ||
109 | /* Clear the data that was written to the other end of | ||
110 | our non-blocking socket: */ | ||
111 | do { | ||
112 | if (read(item_data->fd, &socket_data, 1) < 1) { | ||
113 | if ((errno == EAGAIN) || | ||
114 | (errno == EWOULDBLOCK)) | ||
115 | break; | ||
116 | else | ||
117 | goto error_unlock; | ||
118 | } | ||
119 | } while (item_data->events & EPOLLET); | ||
120 | |||
121 | /* The item was one-shot, so re-enable it: */ | ||
122 | event_data.events = item_data->events; | ||
123 | if (epoll_ctl(thread_data->epoll_set, | ||
124 | EPOLL_CTL_MOD, | ||
125 | item_data->fd, | ||
126 | &event_data) < 0) | ||
127 | goto error_unlock; | ||
128 | } | ||
129 | |||
130 | pthread_mutex_unlock(&item_data->mutex); | ||
131 | } | ||
132 | |||
133 | error_unlock: | ||
134 | thread_data->status = item_data->status = errno; | ||
135 | pthread_mutex_unlock(&item_data->mutex); | ||
136 | return 0; | ||
137 | } | ||
138 | |||
139 | /* | ||
140 | * A pointer to a write_thread_data structure will be passed as the argument to | ||
141 | * the write thread: | ||
142 | */ | ||
143 | struct write_thread_data { | ||
144 | int stop; | ||
145 | int status; /* Indicates any error encountered by the write thread. */ | ||
146 | int n_fds; | ||
147 | int *fds; | ||
148 | }; | ||
149 | |||
150 | /* | ||
151 | * The function executed by the write thread. It writes a single byte to each | ||
152 | * socket in turn until the stop condition for this thread is set. If writing to | ||
153 | * a socket would block (i.e. errno was EAGAIN), we leave that socket alone for | ||
154 | * the moment and just move on to the next socket in the list. We don't care | ||
155 | * about the order in which we deliver events to the epoll set. In fact we don't | ||
156 | * care about the data we're writing to the pipes at all; we just want to | ||
157 | * trigger epoll events: | ||
158 | */ | ||
159 | void *write_thread_function(void *function_data) | ||
160 | { | ||
161 | const char data = 'X'; | ||
162 | int index; | ||
163 | struct write_thread_data *thread_data = | ||
164 | (struct write_thread_data *)function_data; | ||
165 | while (!thread_data->stop) | ||
166 | for (index = 0; | ||
167 | !thread_data->stop && (index < thread_data->n_fds); | ||
168 | ++index) | ||
169 | if ((write(thread_data->fds[index], &data, 1) < 1) && | ||
170 | (errno != EAGAIN) && | ||
171 | (errno != EWOULDBLOCK)) { | ||
172 | thread_data->status = errno; | ||
173 | return 0; | ||
174 | } | ||
175 | } | ||
176 | |||
177 | /* | ||
178 | * Arguments are currently ignored: | ||
179 | */ | ||
180 | int main(int argc, char **argv) | ||
181 | { | ||
182 | const int n_read_threads = 100; | ||
183 | const int n_epoll_items = 500; | ||
184 | int index; | ||
185 | int epoll_set = epoll_create1(0); | ||
186 | struct write_thread_data write_thread_data = { | ||
187 | 0, 0, n_epoll_items, malloc(n_epoll_items * sizeof(int)) | ||
188 | }; | ||
189 | struct read_thread_data *read_thread_data = | ||
190 | malloc(n_read_threads * sizeof(struct read_thread_data)); | ||
191 | pthread_t *read_threads = malloc(n_read_threads * sizeof(pthread_t)); | ||
192 | pthread_t write_thread; | ||
193 | |||
194 | printf("-----------------\n"); | ||
195 | printf("Runing test_epoll\n"); | ||
196 | printf("-----------------\n"); | ||
197 | |||
198 | epoll_items = malloc(n_epoll_items * sizeof(struct epoll_item_private)); | ||
199 | |||
200 | if (epoll_set < 0 || epoll_items == 0 || write_thread_data.fds == 0 || | ||
201 | read_thread_data == 0 || read_threads == 0) | ||
202 | goto error; | ||
203 | |||
204 | if (sysconf(_SC_NPROCESSORS_ONLN) < 2) { | ||
205 | printf("Error: please run this test on a multi-core system.\n"); | ||
206 | goto error; | ||
207 | } | ||
208 | |||
209 | /* Create the socket pairs and epoll items: */ | ||
210 | for (index = 0; index < n_epoll_items; ++index) { | ||
211 | int socket_pair[2]; | ||
212 | struct epoll_event event_data; | ||
213 | if (socketpair(AF_UNIX, | ||
214 | SOCK_STREAM | SOCK_NONBLOCK, | ||
215 | 0, | ||
216 | socket_pair) < 0) | ||
217 | goto error; | ||
218 | write_thread_data.fds[index] = socket_pair[0]; | ||
219 | epoll_items[index].index = index; | ||
220 | epoll_items[index].fd = socket_pair[1]; | ||
221 | if (pthread_mutex_init(&epoll_items[index].mutex, NULL) != 0) | ||
222 | goto error; | ||
223 | /* We always use EPOLLONESHOT because this test is currently | ||
224 | structured to demonstrate the need for EPOLL_CTL_DISABLE, | ||
225 | which only produces useful information in the EPOLLONESHOT | ||
226 | case (without EPOLLONESHOT, calling epoll_ctl with | ||
227 | EPOLL_CTL_DISABLE will never return EBUSY). If support for | ||
228 | testing events without EPOLLONESHOT is desired, it should | ||
229 | probably be implemented in a separate unit test. */ | ||
230 | epoll_items[index].events = EPOLLIN | EPOLLONESHOT; | ||
231 | if (index < n_epoll_items / 2) | ||
232 | epoll_items[index].events |= EPOLLET; | ||
233 | epoll_items[index].stop = 0; | ||
234 | epoll_items[index].status = 0; | ||
235 | epoll_items[index].deleted = 0; | ||
236 | event_data.events = epoll_items[index].events; | ||
237 | event_data.data.ptr = &epoll_items[index]; | ||
238 | if (epoll_ctl(epoll_set, | ||
239 | EPOLL_CTL_ADD, | ||
240 | epoll_items[index].fd, | ||
241 | &event_data) < 0) | ||
242 | goto error; | ||
243 | } | ||
244 | |||
245 | /* Create and start the read threads: */ | ||
246 | for (index = 0; index < n_read_threads; ++index) { | ||
247 | read_thread_data[index].stop = 0; | ||
248 | read_thread_data[index].status = 0; | ||
249 | read_thread_data[index].epoll_set = epoll_set; | ||
250 | if (pthread_create(&read_threads[index], | ||
251 | NULL, | ||
252 | read_thread_function, | ||
253 | &read_thread_data[index]) != 0) | ||
254 | goto error; | ||
255 | } | ||
256 | |||
257 | if (pthread_create(&write_thread, | ||
258 | NULL, | ||
259 | write_thread_function, | ||
260 | &write_thread_data) != 0) | ||
261 | goto error; | ||
262 | |||
263 | /* Cancel all event pollers: */ | ||
264 | #ifdef EPOLL_CTL_DISABLE | ||
265 | for (index = 0; index < n_epoll_items; ++index) { | ||
266 | pthread_mutex_lock(&epoll_items[index].mutex); | ||
267 | ++epoll_items[index].stop; | ||
268 | if (epoll_ctl(epoll_set, | ||
269 | EPOLL_CTL_DISABLE, | ||
270 | epoll_items[index].fd, | ||
271 | NULL) == 0) | ||
272 | delete_item(index); | ||
273 | else if (errno != EBUSY) { | ||
274 | pthread_mutex_unlock(&epoll_items[index].mutex); | ||
275 | goto error; | ||
276 | } | ||
277 | /* EBUSY means events were being handled; allow the other thread | ||
278 | to delete the item. */ | ||
279 | pthread_mutex_unlock(&epoll_items[index].mutex); | ||
280 | } | ||
281 | #else | ||
282 | for (index = 0; index < n_epoll_items; ++index) { | ||
283 | pthread_mutex_lock(&epoll_items[index].mutex); | ||
284 | ++epoll_items[index].stop; | ||
285 | pthread_mutex_unlock(&epoll_items[index].mutex); | ||
286 | /* Wait in case a thread running read_thread_function is | ||
287 | currently executing code between epoll_wait and | ||
288 | pthread_mutex_lock with this item. Note that a longer delay | ||
289 | would make double-deletion less likely (at the expense of | ||
290 | performance), but there is no guarantee that any delay would | ||
291 | ever be sufficient. Note also that we delete all event | ||
292 | pollers at once for testing purposes, but in a real-world | ||
293 | environment we are likely to want to be able to cancel event | ||
294 | pollers at arbitrary times. Therefore we can't improve this | ||
295 | situation by just splitting this loop into two loops | ||
296 | (i.e. signal 'stop' for all items, sleep, and then delete all | ||
297 | items). We also can't fix the problem via EPOLL_CTL_DEL | ||
298 | because that command can't prevent the case where some other | ||
299 | thread is executing read_thread_function within the region | ||
300 | mentioned above: */ | ||
301 | usleep(1); | ||
302 | pthread_mutex_lock(&epoll_items[index].mutex); | ||
303 | if (!epoll_items[index].deleted) | ||
304 | delete_item(index); | ||
305 | pthread_mutex_unlock(&epoll_items[index].mutex); | ||
306 | } | ||
307 | #endif | ||
308 | |||
309 | /* Shut down the read threads: */ | ||
310 | for (index = 0; index < n_read_threads; ++index) | ||
311 | __sync_fetch_and_add(&read_thread_data[index].stop, 1); | ||
312 | for (index = 0; index < n_read_threads; ++index) { | ||
313 | if (pthread_join(read_threads[index], NULL) != 0) | ||
314 | goto error; | ||
315 | if (read_thread_data[index].status) | ||
316 | goto error; | ||
317 | } | ||
318 | |||
319 | /* Shut down the write thread: */ | ||
320 | __sync_fetch_and_add(&write_thread_data.stop, 1); | ||
321 | if ((pthread_join(write_thread, NULL) != 0) || write_thread_data.status) | ||
322 | goto error; | ||
323 | |||
324 | /* Check for final error conditions: */ | ||
325 | for (index = 0; index < n_epoll_items; ++index) { | ||
326 | if (epoll_items[index].status != 0) | ||
327 | goto error; | ||
328 | if (pthread_mutex_destroy(&epoll_items[index].mutex) < 0) | ||
329 | goto error; | ||
330 | } | ||
331 | for (index = 0; index < n_epoll_items; ++index) | ||
332 | if (epoll_items[index].deleted != 1) { | ||
333 | printf("Error: item data deleted %1d times.\n", | ||
334 | epoll_items[index].deleted); | ||
335 | goto error; | ||
336 | } | ||
337 | |||
338 | printf("[PASS]\n"); | ||
339 | return 0; | ||
340 | |||
341 | error: | ||
342 | printf("[FAIL]\n"); | ||
343 | return errno; | ||
344 | } | ||
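
The test hinges on EPOLLONESHOT semantics: once an event is delivered, the descriptor stays disarmed until EPOLL_CTL_MOD re-enables it, which is what creates the cancel/re-enable race the patch probes. A small self-contained sketch of that re-arm pattern (illustrative only, not part of the selftest):

    #include <stdio.h>
    #include <unistd.h>
    #include <sys/epoll.h>
    #include <sys/socket.h>

    int main(void)
    {
            struct epoll_event ev, out;
            int sp[2], ep, n;
            char c;

            if (socketpair(AF_UNIX, SOCK_STREAM, 0, sp) < 0)
                    return 1;
            ep = epoll_create1(0);
            if (ep < 0)
                    return 1;

            ev.events = EPOLLIN | EPOLLONESHOT;
            ev.data.fd = sp[1];
            if (epoll_ctl(ep, EPOLL_CTL_ADD, sp[1], &ev) < 0)
                    return 1;

            if (write(sp[0], "X", 1) != 1)          /* trigger one event */
                    return 1;
            n = epoll_wait(ep, &out, 1, 1000);
            printf("first wait: %d event(s)\n", n);         /* 1 */
            if (read(sp[1], &c, 1) != 1)
                    return 1;

            if (write(sp[0], "X", 1) != 1)          /* fd is disarmed now... */
                    return 1;
            n = epoll_wait(ep, &out, 1, 100);
            printf("while disarmed: %d event(s)\n", n);     /* 0 */

            epoll_ctl(ep, EPOLL_CTL_MOD, sp[1], &ev);       /* ...re-arm it */
            n = epoll_wait(ep, &out, 1, 1000);
            printf("after re-arm: %d event(s)\n", n);       /* 1 */
            return 0;
    }
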
diff --git a/tools/testing/selftests/vm/run_vmtests b/tools/testing/selftests/vm/run_vmtests index 8b40bd5e5cc2..4c53cae6c273 100644 --- a/tools/testing/selftests/vm/run_vmtests +++ b/tools/testing/selftests/vm/run_vmtests | |||
@@ -36,7 +36,7 @@ mkdir $mnt | |||
36 | mount -t hugetlbfs none $mnt | 36 | mount -t hugetlbfs none $mnt |
37 | 37 | ||
38 | echo "--------------------" | 38 | echo "--------------------" |
39 | echo "runing hugepage-mmap" | 39 | echo "running hugepage-mmap" |
40 | echo "--------------------" | 40 | echo "--------------------" |
41 | ./hugepage-mmap | 41 | ./hugepage-mmap |
42 | if [ $? -ne 0 ]; then | 42 | if [ $? -ne 0 ]; then |
@@ -50,7 +50,7 @@ shmall=`cat /proc/sys/kernel/shmall` | |||
50 | echo 268435456 > /proc/sys/kernel/shmmax | 50 | echo 268435456 > /proc/sys/kernel/shmmax |
51 | echo 4194304 > /proc/sys/kernel/shmall | 51 | echo 4194304 > /proc/sys/kernel/shmall |
52 | echo "--------------------" | 52 | echo "--------------------" |
53 | echo "runing hugepage-shm" | 53 | echo "running hugepage-shm" |
54 | echo "--------------------" | 54 | echo "--------------------" |
55 | ./hugepage-shm | 55 | ./hugepage-shm |
56 | if [ $? -ne 0 ]; then | 56 | if [ $? -ne 0 ]; then |
@@ -62,7 +62,7 @@ echo $shmmax > /proc/sys/kernel/shmmax | |||
62 | echo $shmall > /proc/sys/kernel/shmall | 62 | echo $shmall > /proc/sys/kernel/shmall |
63 | 63 | ||
64 | echo "--------------------" | 64 | echo "--------------------" |
65 | echo "runing map_hugetlb" | 65 | echo "running map_hugetlb" |
66 | echo "--------------------" | 66 | echo "--------------------" |
67 | ./map_hugetlb | 67 | ./map_hugetlb |
68 | if [ $? -ne 0 ]; then | 68 | if [ $? -ne 0 ]; then |
diff --git a/tools/usb/testusb.c b/tools/usb/testusb.c index b0adb2710c02..68d0734b2081 100644 --- a/tools/usb/testusb.c +++ b/tools/usb/testusb.c | |||
@@ -253,9 +253,6 @@ static int find_testdev(const char *name, const struct stat *sb, int flag) | |||
253 | 253 | ||
254 | if (flag != FTW_F) | 254 | if (flag != FTW_F) |
255 | return 0; | 255 | return 0; |
256 | /* ignore /proc/bus/usb/{devices,drivers} */ | ||
257 | if (strrchr(name, '/')[1] == 'd') | ||
258 | return 0; | ||
259 | 256 | ||
260 | fd = fopen(name, "rb"); | 257 | fd = fopen(name, "rb"); |
261 | if (!fd) { | 258 | if (!fd) { |
@@ -356,28 +353,8 @@ restart: | |||
356 | 353 | ||
357 | static const char *usbfs_dir_find(void) | 354 | static const char *usbfs_dir_find(void) |
358 | { | 355 | { |
359 | static char usbfs_path_0[] = "/dev/usb/devices"; | ||
360 | static char usbfs_path_1[] = "/proc/bus/usb/devices"; | ||
361 | static char udev_usb_path[] = "/dev/bus/usb"; | 356 | static char udev_usb_path[] = "/dev/bus/usb"; |
362 | 357 | ||
363 | static char *const usbfs_paths[] = { | ||
364 | usbfs_path_0, usbfs_path_1 | ||
365 | }; | ||
366 | |||
367 | static char *const * | ||
368 | end = usbfs_paths + sizeof usbfs_paths / sizeof *usbfs_paths; | ||
369 | |||
370 | char *const *it = usbfs_paths; | ||
371 | do { | ||
372 | int fd = open(*it, O_RDONLY); | ||
373 | close(fd); | ||
374 | if (fd >= 0) { | ||
375 | strrchr(*it, '/')[0] = '\0'; | ||
376 | return *it; | ||
377 | } | ||
378 | } while (++it != end); | ||
379 | |||
380 | /* real device-nodes managed by udev */ | ||
381 | if (access(udev_usb_path, F_OK) == 0) | 358 | if (access(udev_usb_path, F_OK) == 0) |
382 | return udev_usb_path; | 359 | return udev_usb_path; |
383 | 360 | ||
@@ -489,7 +466,7 @@ usage: | |||
489 | goto usage; | 466 | goto usage; |
490 | if (!all && !device) { | 467 | if (!all && !device) { |
491 | fprintf (stderr, "must specify '-a' or '-D dev', " | 468 | fprintf (stderr, "must specify '-a' or '-D dev', " |
492 | "or DEVICE=/proc/bus/usb/BBB/DDD in env\n"); | 469 | "or DEVICE=/dev/bus/usb/BBB/DDD in env\n"); |
493 | goto usage; | 470 | goto usage; |
494 | } | 471 | } |
495 | 472 | ||