Diffstat (limited to 'tools/perf/builtin-stat.c')
 tools/perf/builtin-stat.c | 31 +++++++++++++++++++++++--------
 1 file changed, 23 insertions(+), 8 deletions(-)
diff --git a/tools/perf/builtin-stat.c b/tools/perf/builtin-stat.c
index 0c16d20d7e32..3c7452b39f57 100644
--- a/tools/perf/builtin-stat.c
+++ b/tools/perf/builtin-stat.c
@@ -331,7 +331,7 @@ static int read_counter(struct perf_evsel *counter)
 	return 0;
 }
 
-static void read_counters(bool close_counters)
+static void read_counters(void)
 {
 	struct perf_evsel *counter;
 
@@ -341,11 +341,6 @@ static void read_counters(bool close_counters)
 
 		if (perf_stat_process_counter(&stat_config, counter))
 			pr_warning("failed to process counter %s\n", counter->name);
-
-		if (close_counters) {
-			perf_evsel__close_fd(counter, perf_evsel__nr_cpus(counter),
-					     thread_map__nr(evsel_list->threads));
-		}
 	}
 }
 
@@ -353,7 +348,7 @@ static void process_interval(void)
 {
 	struct timespec ts, rs;
 
-	read_counters(false);
+	read_counters();
 
 	clock_gettime(CLOCK_MONOTONIC, &ts);
 	diff_timespec(&rs, &ts, &ref_time);
@@ -380,6 +375,17 @@ static void enable_counters(void)
 		perf_evlist__enable(evsel_list);
 }
 
+static void disable_counters(void)
+{
+	/*
+	 * If we don't have a tracee (attaching to a task or cpu), counters
+	 * may still be running. To get accurate group ratios, we must stop
+	 * groups from counting before reading their constituent counters.
+	 */
+	if (!target__none(&target))
+		perf_evlist__disable(evsel_list);
+}
+
 static volatile int workload_exec_errno;
 
 /*
@@ -657,11 +663,20 @@ try_again:
 		}
 	}
 
+	disable_counters();
+
 	t1 = rdclock();
 
 	update_stats(&walltime_nsecs_stats, t1 - t0);
 
-	read_counters(true);
+	/*
+	 * Closing a group leader splits the group, and as we only disable
+	 * group leaders, this results in the remaining events becoming
+	 * enabled. To avoid arbitrary skew, we must read all counters
+	 * before closing any group leaders.
+	 */
+	read_counters();
+	perf_evlist__close(evsel_list);
 
 	return WEXITSTATUS(status);
 }
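
The ordering the last two hunks establish (disable the group leader, read every counter, only then close any file descriptors) can be reproduced in miniature with the raw syscall interface. The sketch below is illustrative only and not part of the patch: it assumes a Linux host where perf_event_paranoid permits task-scoped counting, the hardware events may be unavailable in VMs, and the spin loop stands in for the real tracee.

/* skew_demo.c: standalone sketch of the disable -> read -> close rule,
 * using raw perf_event_open(2). Build with `cc -o skew_demo skew_demo.c`. */
#include <linux/perf_event.h>
#include <sys/ioctl.h>
#include <sys/syscall.h>
#include <sys/types.h>
#include <unistd.h>
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

static int perf_event_open(struct perf_event_attr *attr, pid_t pid,
			   int cpu, int group_fd, unsigned long flags)
{
	return (int)syscall(__NR_perf_event_open, attr, pid, cpu,
			    group_fd, flags);
}

int main(void)
{
	struct perf_event_attr attr;
	volatile unsigned long spin;
	uint64_t cycles, instrs;
	int leader, member;

	memset(&attr, 0, sizeof(attr));
	attr.size     = sizeof(attr);
	attr.type     = PERF_TYPE_HARDWARE;
	attr.config   = PERF_COUNT_HW_CPU_CYCLES;
	attr.disabled = 1;			/* group starts stopped */

	leader = perf_event_open(&attr, 0, -1, -1, 0);
	if (leader < 0) {
		perror("perf_event_open (leader)");
		return 1;
	}

	attr.config   = PERF_COUNT_HW_INSTRUCTIONS;
	attr.disabled = 0;	/* members are scheduled with the leader */
	member = perf_event_open(&attr, 0, -1, leader, 0);
	if (member < 0) {
		perror("perf_event_open (member)");
		return 1;
	}

	ioctl(leader, PERF_EVENT_IOC_ENABLE, 0);	/* start the group */
	for (spin = 0; spin < 10000000; spin++)
		;					/* dummy workload */
	ioctl(leader, PERF_EVENT_IOC_DISABLE, 0);	/* stop the group */

	/* Read every member while the group is stopped and intact ... */
	if (read(leader, &cycles, sizeof(cycles)) != sizeof(cycles) ||
	    read(member, &instrs, sizeof(instrs)) != sizeof(instrs)) {
		perror("read");
		return 1;
	}

	/* ... and only then close fds. Closing the leader first would
	 * split the group and let 'member' run on as a singleton event,
	 * skewing the ratio -- exactly the hazard the patch avoids. */
	close(member);
	close(leader);

	printf("cycles=%" PRIu64 " instructions=%" PRIu64 " IPC=%.2f\n",
	       cycles, instrs, cycles ? (double)instrs / cycles : 0.0);
	return 0;
}

Note the asymmetry the patch's comments describe: only the leader is toggled with PERF_EVENT_IOC_DISABLE, because group members count only while the leader's group is scheduled, so stopping the leader stops the whole group atomically.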