summaryrefslogtreecommitdiffstats
path: root/tools/perf/builtin-stat.c
diff options
context:
space:
mode:
authorLinus Torvalds <torvalds@linux-foundation.org>2019-09-16 20:06:21 -0400
committerLinus Torvalds <torvalds@linux-foundation.org>2019-09-16 20:06:21 -0400
commit772c1d06bd402f7ee72c61a18c2db74cd74b6758 (patch)
treee362fc7e158b3580d810a26189ecf91ec8a4f141 /tools/perf/builtin-stat.c
parentc7eba51cfdf9cd1ca7ed4201b30be8b2bef15ff5 (diff)
parente336b4027775cb458dc713745e526fa1a1996b2a (diff)
Merge branch 'perf-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull perf updates from Ingo Molnar: "Kernel side changes: - Improved kprobes robustness - Intel PEBS support for PT hardware tracing - Other Intel PT improvements: high order pages memory footprint reduction and various related cleanups - Misc cleanups The perf tooling side has been very busy in this cycle, with over 300 commits. This is an incomplete high-level summary of the many improvements done by over 30 developers: - Lots of updates to the following tools: 'perf c2c' 'perf config' 'perf record' 'perf report' 'perf script' 'perf test' 'perf top' 'perf trace' - Updates to libperf and libtraceevent, and a consolidation of the proliferation of x86 instruction decoder libraries. - Vendor event updates for Intel and PowerPC CPUs, - Updates to hardware tracing tooling for ARM and Intel CPUs, - ... and lots of other changes and cleanups - see the shortlog and Git log for details" * 'perf-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (322 commits) kprobes: Prohibit probing on BUG() and WARN() address perf/x86: Make more stuff static x86, perf: Fix the dependency of the x86 insn decoder selftest objtool: Ignore intentional differences for the x86 insn decoder objtool: Update sync-check.sh from perf's check-headers.sh perf build: Ignore intentional differences for the x86 insn decoder perf intel-pt: Use shared x86 insn decoder perf intel-pt: Remove inat.c from build dependency list perf: Update .gitignore file objtool: Move x86 insn decoder to a common location perf metricgroup: Support multiple events for metricgroup perf metricgroup: Scale the metric result perf pmu: Change convert_scale from static to global perf symbols: Move mem_info and branch_info out of symbol.h perf auxtrace: Uninline functions that touch perf_session perf tools: Remove needless evlist.h include directives perf tools: Remove needless evlist.h include directives perf tools: Remove needless thread_map.h include directives perf tools: Remove needless thread.h include 
directives perf tools: Remove needless map.h include directives ...
Diffstat (limited to 'tools/perf/builtin-stat.c')
-rw-r--r--tools/perf/builtin-stat.c146
1 file changed, 74 insertions, 72 deletions
diff --git a/tools/perf/builtin-stat.c b/tools/perf/builtin-stat.c
index 352cf39d7c2f..7e17bf9f700a 100644
--- a/tools/perf/builtin-stat.c
+++ b/tools/perf/builtin-stat.c
@@ -40,8 +40,8 @@
40 * Jaswinder Singh Rajput <jaswinder@kernel.org> 40 * Jaswinder Singh Rajput <jaswinder@kernel.org>
41 */ 41 */
42 42
43#include "perf.h"
44#include "builtin.h" 43#include "builtin.h"
44#include "perf.h"
45#include "util/cgroup.h" 45#include "util/cgroup.h"
46#include <subcmd/parse-options.h> 46#include <subcmd/parse-options.h>
47#include "util/parse-events.h" 47#include "util/parse-events.h"
@@ -54,7 +54,6 @@
54#include "util/stat.h" 54#include "util/stat.h"
55#include "util/header.h" 55#include "util/header.h"
56#include "util/cpumap.h" 56#include "util/cpumap.h"
57#include "util/thread.h"
58#include "util/thread_map.h" 57#include "util/thread_map.h"
59#include "util/counts.h" 58#include "util/counts.h"
60#include "util/group.h" 59#include "util/group.h"
@@ -62,6 +61,8 @@
62#include "util/tool.h" 61#include "util/tool.h"
63#include "util/string2.h" 62#include "util/string2.h"
64#include "util/metricgroup.h" 63#include "util/metricgroup.h"
64#include "util/target.h"
65#include "util/time-utils.h"
65#include "util/top.h" 66#include "util/top.h"
66#include "asm/bug.h" 67#include "asm/bug.h"
67 68
@@ -83,6 +84,7 @@
83#include <sys/resource.h> 84#include <sys/resource.h>
84 85
85#include <linux/ctype.h> 86#include <linux/ctype.h>
87#include <perf/evlist.h>
86 88
87#define DEFAULT_SEPARATOR " " 89#define DEFAULT_SEPARATOR " "
88#define FREEZE_ON_SMI_PATH "devices/cpu/freeze_on_smi" 90#define FREEZE_ON_SMI_PATH "devices/cpu/freeze_on_smi"
@@ -130,7 +132,7 @@ static const char *smi_cost_attrs = {
130 "}" 132 "}"
131}; 133};
132 134
133static struct perf_evlist *evsel_list; 135static struct evlist *evsel_list;
134 136
135static struct target target = { 137static struct target target = {
136 .uid = UINT_MAX, 138 .uid = UINT_MAX,
@@ -164,8 +166,8 @@ struct perf_stat {
164 u64 bytes_written; 166 u64 bytes_written;
165 struct perf_tool tool; 167 struct perf_tool tool;
166 bool maps_allocated; 168 bool maps_allocated;
167 struct cpu_map *cpus; 169 struct perf_cpu_map *cpus;
168 struct thread_map *threads; 170 struct perf_thread_map *threads;
169 enum aggr_mode aggr_mode; 171 enum aggr_mode aggr_mode;
170}; 172};
171 173
@@ -234,7 +236,7 @@ static int write_stat_round_event(u64 tm, u64 type)
234#define SID(e, x, y) xyarray__entry(e->sample_id, x, y) 236#define SID(e, x, y) xyarray__entry(e->sample_id, x, y)
235 237
236static int 238static int
237perf_evsel__write_stat_event(struct perf_evsel *counter, u32 cpu, u32 thread, 239perf_evsel__write_stat_event(struct evsel *counter, u32 cpu, u32 thread,
238 struct perf_counts_values *count) 240 struct perf_counts_values *count)
239{ 241{
240 struct perf_sample_id *sid = SID(counter, cpu, thread); 242 struct perf_sample_id *sid = SID(counter, cpu, thread);
@@ -243,7 +245,7 @@ perf_evsel__write_stat_event(struct perf_evsel *counter, u32 cpu, u32 thread,
243 process_synthesized_event, NULL); 245 process_synthesized_event, NULL);
244} 246}
245 247
246static int read_single_counter(struct perf_evsel *counter, int cpu, 248static int read_single_counter(struct evsel *counter, int cpu,
247 int thread, struct timespec *rs) 249 int thread, struct timespec *rs)
248{ 250{
249 if (counter->tool_event == PERF_TOOL_DURATION_TIME) { 251 if (counter->tool_event == PERF_TOOL_DURATION_TIME) {
@@ -261,9 +263,9 @@ static int read_single_counter(struct perf_evsel *counter, int cpu,
261 * Read out the results of a single counter: 263 * Read out the results of a single counter:
262 * do not aggregate counts across CPUs in system-wide mode 264 * do not aggregate counts across CPUs in system-wide mode
263 */ 265 */
264static int read_counter(struct perf_evsel *counter, struct timespec *rs) 266static int read_counter(struct evsel *counter, struct timespec *rs)
265{ 267{
266 int nthreads = thread_map__nr(evsel_list->threads); 268 int nthreads = perf_thread_map__nr(evsel_list->core.threads);
267 int ncpus, cpu, thread; 269 int ncpus, cpu, thread;
268 270
269 if (target__has_cpu(&target) && !target__has_per_thread(&target)) 271 if (target__has_cpu(&target) && !target__has_per_thread(&target))
@@ -287,7 +289,7 @@ static int read_counter(struct perf_evsel *counter, struct timespec *rs)
287 * The leader's group read loads data into its group members 289 * The leader's group read loads data into its group members
288 * (via perf_evsel__read_counter) and sets threir count->loaded. 290 * (via perf_evsel__read_counter) and sets threir count->loaded.
289 */ 291 */
290 if (!count->loaded && 292 if (!perf_counts__is_loaded(counter->counts, cpu, thread) &&
291 read_single_counter(counter, cpu, thread, rs)) { 293 read_single_counter(counter, cpu, thread, rs)) {
292 counter->counts->scaled = -1; 294 counter->counts->scaled = -1;
293 perf_counts(counter->counts, cpu, thread)->ena = 0; 295 perf_counts(counter->counts, cpu, thread)->ena = 0;
@@ -295,7 +297,7 @@ static int read_counter(struct perf_evsel *counter, struct timespec *rs)
295 return -1; 297 return -1;
296 } 298 }
297 299
298 count->loaded = false; 300 perf_counts__set_loaded(counter->counts, cpu, thread, false);
299 301
300 if (STAT_RECORD) { 302 if (STAT_RECORD) {
301 if (perf_evsel__write_stat_event(counter, cpu, thread, count)) { 303 if (perf_evsel__write_stat_event(counter, cpu, thread, count)) {
@@ -319,7 +321,7 @@ static int read_counter(struct perf_evsel *counter, struct timespec *rs)
319 321
320static void read_counters(struct timespec *rs) 322static void read_counters(struct timespec *rs)
321{ 323{
322 struct perf_evsel *counter; 324 struct evsel *counter;
323 int ret; 325 int ret;
324 326
325 evlist__for_each_entry(evsel_list, counter) { 327 evlist__for_each_entry(evsel_list, counter) {
@@ -362,7 +364,7 @@ static void enable_counters(void)
362 * - we have initial delay configured 364 * - we have initial delay configured
363 */ 365 */
364 if (!target__none(&target) || stat_config.initial_delay) 366 if (!target__none(&target) || stat_config.initial_delay)
365 perf_evlist__enable(evsel_list); 367 evlist__enable(evsel_list);
366} 368}
367 369
368static void disable_counters(void) 370static void disable_counters(void)
@@ -373,7 +375,7 @@ static void disable_counters(void)
373 * from counting before reading their constituent counters. 375 * from counting before reading their constituent counters.
374 */ 376 */
375 if (!target__none(&target)) 377 if (!target__none(&target))
376 perf_evlist__disable(evsel_list); 378 evlist__disable(evsel_list);
377} 379}
378 380
379static volatile int workload_exec_errno; 381static volatile int workload_exec_errno;
@@ -389,13 +391,13 @@ static void workload_exec_failed_signal(int signo __maybe_unused, siginfo_t *inf
389 workload_exec_errno = info->si_value.sival_int; 391 workload_exec_errno = info->si_value.sival_int;
390} 392}
391 393
392static bool perf_evsel__should_store_id(struct perf_evsel *counter) 394static bool perf_evsel__should_store_id(struct evsel *counter)
393{ 395{
394 return STAT_RECORD || counter->attr.read_format & PERF_FORMAT_ID; 396 return STAT_RECORD || counter->core.attr.read_format & PERF_FORMAT_ID;
395} 397}
396 398
397static bool is_target_alive(struct target *_target, 399static bool is_target_alive(struct target *_target,
398 struct thread_map *threads) 400 struct perf_thread_map *threads)
399{ 401{
400 struct stat st; 402 struct stat st;
401 int i; 403 int i;
@@ -423,7 +425,7 @@ static int __run_perf_stat(int argc, const char **argv, int run_idx)
423 int timeout = stat_config.timeout; 425 int timeout = stat_config.timeout;
424 char msg[BUFSIZ]; 426 char msg[BUFSIZ];
425 unsigned long long t0, t1; 427 unsigned long long t0, t1;
426 struct perf_evsel *counter; 428 struct evsel *counter;
427 struct timespec ts; 429 struct timespec ts;
428 size_t l; 430 size_t l;
429 int status = 0; 431 int status = 0;
@@ -478,22 +480,22 @@ try_again:
478 counter->supported = false; 480 counter->supported = false;
479 481
480 if ((counter->leader != counter) || 482 if ((counter->leader != counter) ||
481 !(counter->leader->nr_members > 1)) 483 !(counter->leader->core.nr_members > 1))
482 continue; 484 continue;
483 } else if (perf_evsel__fallback(counter, errno, msg, sizeof(msg))) { 485 } else if (perf_evsel__fallback(counter, errno, msg, sizeof(msg))) {
484 if (verbose > 0) 486 if (verbose > 0)
485 ui__warning("%s\n", msg); 487 ui__warning("%s\n", msg);
486 goto try_again; 488 goto try_again;
487 } else if (target__has_per_thread(&target) && 489 } else if (target__has_per_thread(&target) &&
488 evsel_list->threads && 490 evsel_list->core.threads &&
489 evsel_list->threads->err_thread != -1) { 491 evsel_list->core.threads->err_thread != -1) {
490 /* 492 /*
491 * For global --per-thread case, skip current 493 * For global --per-thread case, skip current
492 * error thread. 494 * error thread.
493 */ 495 */
494 if (!thread_map__remove(evsel_list->threads, 496 if (!thread_map__remove(evsel_list->core.threads,
495 evsel_list->threads->err_thread)) { 497 evsel_list->core.threads->err_thread)) {
496 evsel_list->threads->err_thread = -1; 498 evsel_list->core.threads->err_thread = -1;
497 goto try_again; 499 goto try_again;
498 } 500 }
499 } 501 }
@@ -579,7 +581,7 @@ try_again:
579 enable_counters(); 581 enable_counters();
580 while (!done) { 582 while (!done) {
581 nanosleep(&ts, NULL); 583 nanosleep(&ts, NULL);
582 if (!is_target_alive(&target, evsel_list->threads)) 584 if (!is_target_alive(&target, evsel_list->core.threads))
583 break; 585 break;
584 if (timeout) 586 if (timeout)
585 break; 587 break;
@@ -613,7 +615,7 @@ try_again:
613 * later the evsel_list will be closed after. 615 * later the evsel_list will be closed after.
614 */ 616 */
615 if (!STAT_RECORD) 617 if (!STAT_RECORD)
616 perf_evlist__close(evsel_list); 618 evlist__close(evsel_list);
617 619
618 return WEXITSTATUS(status); 620 return WEXITSTATUS(status);
619} 621}
@@ -803,24 +805,24 @@ static struct option stat_options[] = {
803}; 805};
804 806
805static int perf_stat__get_socket(struct perf_stat_config *config __maybe_unused, 807static int perf_stat__get_socket(struct perf_stat_config *config __maybe_unused,
806 struct cpu_map *map, int cpu) 808 struct perf_cpu_map *map, int cpu)
807{ 809{
808 return cpu_map__get_socket(map, cpu, NULL); 810 return cpu_map__get_socket(map, cpu, NULL);
809} 811}
810 812
811static int perf_stat__get_die(struct perf_stat_config *config __maybe_unused, 813static int perf_stat__get_die(struct perf_stat_config *config __maybe_unused,
812 struct cpu_map *map, int cpu) 814 struct perf_cpu_map *map, int cpu)
813{ 815{
814 return cpu_map__get_die(map, cpu, NULL); 816 return cpu_map__get_die(map, cpu, NULL);
815} 817}
816 818
817static int perf_stat__get_core(struct perf_stat_config *config __maybe_unused, 819static int perf_stat__get_core(struct perf_stat_config *config __maybe_unused,
818 struct cpu_map *map, int cpu) 820 struct perf_cpu_map *map, int cpu)
819{ 821{
820 return cpu_map__get_core(map, cpu, NULL); 822 return cpu_map__get_core(map, cpu, NULL);
821} 823}
822 824
823static int cpu_map__get_max(struct cpu_map *map) 825static int cpu_map__get_max(struct perf_cpu_map *map)
824{ 826{
825 int i, max = -1; 827 int i, max = -1;
826 828
@@ -833,7 +835,7 @@ static int cpu_map__get_max(struct cpu_map *map)
833} 835}
834 836
835static int perf_stat__get_aggr(struct perf_stat_config *config, 837static int perf_stat__get_aggr(struct perf_stat_config *config,
836 aggr_get_id_t get_id, struct cpu_map *map, int idx) 838 aggr_get_id_t get_id, struct perf_cpu_map *map, int idx)
837{ 839{
838 int cpu; 840 int cpu;
839 841
@@ -849,26 +851,26 @@ static int perf_stat__get_aggr(struct perf_stat_config *config,
849} 851}
850 852
851static int perf_stat__get_socket_cached(struct perf_stat_config *config, 853static int perf_stat__get_socket_cached(struct perf_stat_config *config,
852 struct cpu_map *map, int idx) 854 struct perf_cpu_map *map, int idx)
853{ 855{
854 return perf_stat__get_aggr(config, perf_stat__get_socket, map, idx); 856 return perf_stat__get_aggr(config, perf_stat__get_socket, map, idx);
855} 857}
856 858
857static int perf_stat__get_die_cached(struct perf_stat_config *config, 859static int perf_stat__get_die_cached(struct perf_stat_config *config,
858 struct cpu_map *map, int idx) 860 struct perf_cpu_map *map, int idx)
859{ 861{
860 return perf_stat__get_aggr(config, perf_stat__get_die, map, idx); 862 return perf_stat__get_aggr(config, perf_stat__get_die, map, idx);
861} 863}
862 864
863static int perf_stat__get_core_cached(struct perf_stat_config *config, 865static int perf_stat__get_core_cached(struct perf_stat_config *config,
864 struct cpu_map *map, int idx) 866 struct perf_cpu_map *map, int idx)
865{ 867{
866 return perf_stat__get_aggr(config, perf_stat__get_core, map, idx); 868 return perf_stat__get_aggr(config, perf_stat__get_core, map, idx);
867} 869}
868 870
869static bool term_percore_set(void) 871static bool term_percore_set(void)
870{ 872{
871 struct perf_evsel *counter; 873 struct evsel *counter;
872 874
873 evlist__for_each_entry(evsel_list, counter) { 875 evlist__for_each_entry(evsel_list, counter) {
874 if (counter->percore) 876 if (counter->percore)
@@ -884,21 +886,21 @@ static int perf_stat_init_aggr_mode(void)
884 886
885 switch (stat_config.aggr_mode) { 887 switch (stat_config.aggr_mode) {
886 case AGGR_SOCKET: 888 case AGGR_SOCKET:
887 if (cpu_map__build_socket_map(evsel_list->cpus, &stat_config.aggr_map)) { 889 if (cpu_map__build_socket_map(evsel_list->core.cpus, &stat_config.aggr_map)) {
888 perror("cannot build socket map"); 890 perror("cannot build socket map");
889 return -1; 891 return -1;
890 } 892 }
891 stat_config.aggr_get_id = perf_stat__get_socket_cached; 893 stat_config.aggr_get_id = perf_stat__get_socket_cached;
892 break; 894 break;
893 case AGGR_DIE: 895 case AGGR_DIE:
894 if (cpu_map__build_die_map(evsel_list->cpus, &stat_config.aggr_map)) { 896 if (cpu_map__build_die_map(evsel_list->core.cpus, &stat_config.aggr_map)) {
895 perror("cannot build die map"); 897 perror("cannot build die map");
896 return -1; 898 return -1;
897 } 899 }
898 stat_config.aggr_get_id = perf_stat__get_die_cached; 900 stat_config.aggr_get_id = perf_stat__get_die_cached;
899 break; 901 break;
900 case AGGR_CORE: 902 case AGGR_CORE:
901 if (cpu_map__build_core_map(evsel_list->cpus, &stat_config.aggr_map)) { 903 if (cpu_map__build_core_map(evsel_list->core.cpus, &stat_config.aggr_map)) {
902 perror("cannot build core map"); 904 perror("cannot build core map");
903 return -1; 905 return -1;
904 } 906 }
@@ -906,7 +908,7 @@ static int perf_stat_init_aggr_mode(void)
906 break; 908 break;
907 case AGGR_NONE: 909 case AGGR_NONE:
908 if (term_percore_set()) { 910 if (term_percore_set()) {
909 if (cpu_map__build_core_map(evsel_list->cpus, 911 if (cpu_map__build_core_map(evsel_list->core.cpus,
910 &stat_config.aggr_map)) { 912 &stat_config.aggr_map)) {
911 perror("cannot build core map"); 913 perror("cannot build core map");
912 return -1; 914 return -1;
@@ -926,20 +928,20 @@ static int perf_stat_init_aggr_mode(void)
926 * taking the highest cpu number to be the size of 928 * taking the highest cpu number to be the size of
927 * the aggregation translate cpumap. 929 * the aggregation translate cpumap.
928 */ 930 */
929 nr = cpu_map__get_max(evsel_list->cpus); 931 nr = cpu_map__get_max(evsel_list->core.cpus);
930 stat_config.cpus_aggr_map = cpu_map__empty_new(nr + 1); 932 stat_config.cpus_aggr_map = perf_cpu_map__empty_new(nr + 1);
931 return stat_config.cpus_aggr_map ? 0 : -ENOMEM; 933 return stat_config.cpus_aggr_map ? 0 : -ENOMEM;
932} 934}
933 935
934static void perf_stat__exit_aggr_mode(void) 936static void perf_stat__exit_aggr_mode(void)
935{ 937{
936 cpu_map__put(stat_config.aggr_map); 938 perf_cpu_map__put(stat_config.aggr_map);
937 cpu_map__put(stat_config.cpus_aggr_map); 939 perf_cpu_map__put(stat_config.cpus_aggr_map);
938 stat_config.aggr_map = NULL; 940 stat_config.aggr_map = NULL;
939 stat_config.cpus_aggr_map = NULL; 941 stat_config.cpus_aggr_map = NULL;
940} 942}
941 943
942static inline int perf_env__get_cpu(struct perf_env *env, struct cpu_map *map, int idx) 944static inline int perf_env__get_cpu(struct perf_env *env, struct perf_cpu_map *map, int idx)
943{ 945{
944 int cpu; 946 int cpu;
945 947
@@ -954,7 +956,7 @@ static inline int perf_env__get_cpu(struct perf_env *env, struct cpu_map *map, i
954 return cpu; 956 return cpu;
955} 957}
956 958
957static int perf_env__get_socket(struct cpu_map *map, int idx, void *data) 959static int perf_env__get_socket(struct perf_cpu_map *map, int idx, void *data)
958{ 960{
959 struct perf_env *env = data; 961 struct perf_env *env = data;
960 int cpu = perf_env__get_cpu(env, map, idx); 962 int cpu = perf_env__get_cpu(env, map, idx);
@@ -962,7 +964,7 @@ static int perf_env__get_socket(struct cpu_map *map, int idx, void *data)
962 return cpu == -1 ? -1 : env->cpu[cpu].socket_id; 964 return cpu == -1 ? -1 : env->cpu[cpu].socket_id;
963} 965}
964 966
965static int perf_env__get_die(struct cpu_map *map, int idx, void *data) 967static int perf_env__get_die(struct perf_cpu_map *map, int idx, void *data)
966{ 968{
967 struct perf_env *env = data; 969 struct perf_env *env = data;
968 int die_id = -1, cpu = perf_env__get_cpu(env, map, idx); 970 int die_id = -1, cpu = perf_env__get_cpu(env, map, idx);
@@ -986,7 +988,7 @@ static int perf_env__get_die(struct cpu_map *map, int idx, void *data)
986 return die_id; 988 return die_id;
987} 989}
988 990
989static int perf_env__get_core(struct cpu_map *map, int idx, void *data) 991static int perf_env__get_core(struct perf_cpu_map *map, int idx, void *data)
990{ 992{
991 struct perf_env *env = data; 993 struct perf_env *env = data;
992 int core = -1, cpu = perf_env__get_cpu(env, map, idx); 994 int core = -1, cpu = perf_env__get_cpu(env, map, idx);
@@ -1016,37 +1018,37 @@ static int perf_env__get_core(struct cpu_map *map, int idx, void *data)
1016 return core; 1018 return core;
1017} 1019}
1018 1020
1019static int perf_env__build_socket_map(struct perf_env *env, struct cpu_map *cpus, 1021static int perf_env__build_socket_map(struct perf_env *env, struct perf_cpu_map *cpus,
1020 struct cpu_map **sockp) 1022 struct perf_cpu_map **sockp)
1021{ 1023{
1022 return cpu_map__build_map(cpus, sockp, perf_env__get_socket, env); 1024 return cpu_map__build_map(cpus, sockp, perf_env__get_socket, env);
1023} 1025}
1024 1026
1025static int perf_env__build_die_map(struct perf_env *env, struct cpu_map *cpus, 1027static int perf_env__build_die_map(struct perf_env *env, struct perf_cpu_map *cpus,
1026 struct cpu_map **diep) 1028 struct perf_cpu_map **diep)
1027{ 1029{
1028 return cpu_map__build_map(cpus, diep, perf_env__get_die, env); 1030 return cpu_map__build_map(cpus, diep, perf_env__get_die, env);
1029} 1031}
1030 1032
1031static int perf_env__build_core_map(struct perf_env *env, struct cpu_map *cpus, 1033static int perf_env__build_core_map(struct perf_env *env, struct perf_cpu_map *cpus,
1032 struct cpu_map **corep) 1034 struct perf_cpu_map **corep)
1033{ 1035{
1034 return cpu_map__build_map(cpus, corep, perf_env__get_core, env); 1036 return cpu_map__build_map(cpus, corep, perf_env__get_core, env);
1035} 1037}
1036 1038
1037static int perf_stat__get_socket_file(struct perf_stat_config *config __maybe_unused, 1039static int perf_stat__get_socket_file(struct perf_stat_config *config __maybe_unused,
1038 struct cpu_map *map, int idx) 1040 struct perf_cpu_map *map, int idx)
1039{ 1041{
1040 return perf_env__get_socket(map, idx, &perf_stat.session->header.env); 1042 return perf_env__get_socket(map, idx, &perf_stat.session->header.env);
1041} 1043}
1042static int perf_stat__get_die_file(struct perf_stat_config *config __maybe_unused, 1044static int perf_stat__get_die_file(struct perf_stat_config *config __maybe_unused,
1043 struct cpu_map *map, int idx) 1045 struct perf_cpu_map *map, int idx)
1044{ 1046{
1045 return perf_env__get_die(map, idx, &perf_stat.session->header.env); 1047 return perf_env__get_die(map, idx, &perf_stat.session->header.env);
1046} 1048}
1047 1049
1048static int perf_stat__get_core_file(struct perf_stat_config *config __maybe_unused, 1050static int perf_stat__get_core_file(struct perf_stat_config *config __maybe_unused,
1049 struct cpu_map *map, int idx) 1051 struct perf_cpu_map *map, int idx)
1050{ 1052{
1051 return perf_env__get_core(map, idx, &perf_stat.session->header.env); 1053 return perf_env__get_core(map, idx, &perf_stat.session->header.env);
1052} 1054}
@@ -1057,21 +1059,21 @@ static int perf_stat_init_aggr_mode_file(struct perf_stat *st)
1057 1059
1058 switch (stat_config.aggr_mode) { 1060 switch (stat_config.aggr_mode) {
1059 case AGGR_SOCKET: 1061 case AGGR_SOCKET:
1060 if (perf_env__build_socket_map(env, evsel_list->cpus, &stat_config.aggr_map)) { 1062 if (perf_env__build_socket_map(env, evsel_list->core.cpus, &stat_config.aggr_map)) {
1061 perror("cannot build socket map"); 1063 perror("cannot build socket map");
1062 return -1; 1064 return -1;
1063 } 1065 }
1064 stat_config.aggr_get_id = perf_stat__get_socket_file; 1066 stat_config.aggr_get_id = perf_stat__get_socket_file;
1065 break; 1067 break;
1066 case AGGR_DIE: 1068 case AGGR_DIE:
1067 if (perf_env__build_die_map(env, evsel_list->cpus, &stat_config.aggr_map)) { 1069 if (perf_env__build_die_map(env, evsel_list->core.cpus, &stat_config.aggr_map)) {
1068 perror("cannot build die map"); 1070 perror("cannot build die map");
1069 return -1; 1071 return -1;
1070 } 1072 }
1071 stat_config.aggr_get_id = perf_stat__get_die_file; 1073 stat_config.aggr_get_id = perf_stat__get_die_file;
1072 break; 1074 break;
1073 case AGGR_CORE: 1075 case AGGR_CORE:
1074 if (perf_env__build_core_map(env, evsel_list->cpus, &stat_config.aggr_map)) { 1076 if (perf_env__build_core_map(env, evsel_list->core.cpus, &stat_config.aggr_map)) {
1075 perror("cannot build core map"); 1077 perror("cannot build core map");
1076 return -1; 1078 return -1;
1077 } 1079 }
@@ -1366,7 +1368,7 @@ static int add_default_attributes(void)
1366 free(str); 1368 free(str);
1367 } 1369 }
1368 1370
1369 if (!evsel_list->nr_entries) { 1371 if (!evsel_list->core.nr_entries) {
1370 if (target__has_cpu(&target)) 1372 if (target__has_cpu(&target))
1371 default_attrs0[0].config = PERF_COUNT_SW_CPU_CLOCK; 1373 default_attrs0[0].config = PERF_COUNT_SW_CPU_CLOCK;
1372 1374
@@ -1461,8 +1463,8 @@ static int __cmd_record(int argc, const char **argv)
1461static int process_stat_round_event(struct perf_session *session, 1463static int process_stat_round_event(struct perf_session *session,
1462 union perf_event *event) 1464 union perf_event *event)
1463{ 1465{
1464 struct stat_round_event *stat_round = &event->stat_round; 1466 struct perf_record_stat_round *stat_round = &event->stat_round;
1465 struct perf_evsel *counter; 1467 struct evsel *counter;
1466 struct timespec tsh, *ts = NULL; 1468 struct timespec tsh, *ts = NULL;
1467 const char **argv = session->header.env.cmdline_argv; 1469 const char **argv = session->header.env.cmdline_argv;
1468 int argc = session->header.env.nr_cmdline; 1470 int argc = session->header.env.nr_cmdline;
@@ -1492,7 +1494,7 @@ int process_stat_config_event(struct perf_session *session,
1492 1494
1493 perf_event__read_stat_config(&stat_config, &event->stat_config); 1495 perf_event__read_stat_config(&stat_config, &event->stat_config);
1494 1496
1495 if (cpu_map__empty(st->cpus)) { 1497 if (perf_cpu_map__empty(st->cpus)) {
1496 if (st->aggr_mode != AGGR_UNSET) 1498 if (st->aggr_mode != AGGR_UNSET)
1497 pr_warning("warning: processing task data, aggregation mode not set\n"); 1499 pr_warning("warning: processing task data, aggregation mode not set\n");
1498 return 0; 1500 return 0;
@@ -1517,7 +1519,7 @@ static int set_maps(struct perf_stat *st)
1517 if (WARN_ONCE(st->maps_allocated, "stats double allocation\n")) 1519 if (WARN_ONCE(st->maps_allocated, "stats double allocation\n"))
1518 return -EINVAL; 1520 return -EINVAL;
1519 1521
1520 perf_evlist__set_maps(evsel_list, st->cpus, st->threads); 1522 perf_evlist__set_maps(&evsel_list->core, st->cpus, st->threads);
1521 1523
1522 if (perf_evlist__alloc_stats(evsel_list, true)) 1524 if (perf_evlist__alloc_stats(evsel_list, true))
1523 return -ENOMEM; 1525 return -ENOMEM;
@@ -1551,7 +1553,7 @@ int process_cpu_map_event(struct perf_session *session,
1551{ 1553{
1552 struct perf_tool *tool = session->tool; 1554 struct perf_tool *tool = session->tool;
1553 struct perf_stat *st = container_of(tool, struct perf_stat, tool); 1555 struct perf_stat *st = container_of(tool, struct perf_stat, tool);
1554 struct cpu_map *cpus; 1556 struct perf_cpu_map *cpus;
1555 1557
1556 if (st->cpus) { 1558 if (st->cpus) {
1557 pr_warning("Extra cpu map event, ignoring.\n"); 1559 pr_warning("Extra cpu map event, ignoring.\n");
@@ -1676,14 +1678,14 @@ static void setup_system_wide(int forks)
1676 if (!forks) 1678 if (!forks)
1677 target.system_wide = true; 1679 target.system_wide = true;
1678 else { 1680 else {
1679 struct perf_evsel *counter; 1681 struct evsel *counter;
1680 1682
1681 evlist__for_each_entry(evsel_list, counter) { 1683 evlist__for_each_entry(evsel_list, counter) {
1682 if (!counter->system_wide) 1684 if (!counter->system_wide)
1683 return; 1685 return;
1684 } 1686 }
1685 1687
1686 if (evsel_list->nr_entries) 1688 if (evsel_list->core.nr_entries)
1687 target.system_wide = true; 1689 target.system_wide = true;
1688 } 1690 }
1689} 1691}
@@ -1702,7 +1704,7 @@ int cmd_stat(int argc, const char **argv)
1702 1704
1703 setlocale(LC_ALL, ""); 1705 setlocale(LC_ALL, "");
1704 1706
1705 evsel_list = perf_evlist__new(); 1707 evsel_list = evlist__new();
1706 if (evsel_list == NULL) 1708 if (evsel_list == NULL)
1707 return -ENOMEM; 1709 return -ENOMEM;
1708 1710
@@ -1889,10 +1891,10 @@ int cmd_stat(int argc, const char **argv)
1889 * so we could print it out on output. 1891 * so we could print it out on output.
1890 */ 1892 */
1891 if (stat_config.aggr_mode == AGGR_THREAD) { 1893 if (stat_config.aggr_mode == AGGR_THREAD) {
1892 thread_map__read_comms(evsel_list->threads); 1894 thread_map__read_comms(evsel_list->core.threads);
1893 if (target.system_wide) { 1895 if (target.system_wide) {
1894 if (runtime_stat_new(&stat_config, 1896 if (runtime_stat_new(&stat_config,
1895 thread_map__nr(evsel_list->threads))) { 1897 perf_thread_map__nr(evsel_list->core.threads))) {
1896 goto out; 1898 goto out;
1897 } 1899 }
1898 } 1900 }
@@ -2003,7 +2005,7 @@ int cmd_stat(int argc, const char **argv)
2003 perf_session__write_header(perf_stat.session, evsel_list, fd, true); 2005 perf_session__write_header(perf_stat.session, evsel_list, fd, true);
2004 } 2006 }
2005 2007
2006 perf_evlist__close(evsel_list); 2008 evlist__close(evsel_list);
2007 perf_session__delete(perf_stat.session); 2009 perf_session__delete(perf_stat.session);
2008 } 2010 }
2009 2011
@@ -2015,7 +2017,7 @@ out:
2015 if (smi_cost && smi_reset) 2017 if (smi_cost && smi_reset)
2016 sysfs__write_int(FREEZE_ON_SMI_PATH, 0); 2018 sysfs__write_int(FREEZE_ON_SMI_PATH, 0);
2017 2019
2018 perf_evlist__delete(evsel_list); 2020 evlist__delete(evsel_list);
2019 2021
2020 runtime_stat_delete(&stat_config); 2022 runtime_stat_delete(&stat_config);
2021 2023