commit 3b3eb044519ac4c422dbc6084303c470b8d2dc61
tree   e8375d48f9fafbe42354e88c6581c065d3457993
parent 106a94a0f8c207ef4113ce7e32f34a00b3b174e7
author:    Jiri Olsa <jolsa@kernel.org>                  2015-06-26 05:29:20 -0400
committer: Arnaldo Carvalho de Melo <acme@redhat.com>    2015-06-26 10:49:03 -0400
perf stat: Separate counters reading and processing
Separate counters reading and processing so that the processing part can
be reused in following patches.

Counters are now read via the plain perf_evsel__read function, because
part of the processing used to live in the read_cb callback.
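
As a rough illustration of the resulting split, here is a minimal standalone
sketch of the two-stage flow (read all values first, then process/aggregate
them). Every type and helper in it (counts_values, NCPUS, the hard-coded read
function) is a made-up stand-in rather than perf's real structures; only the
shape of the flow mirrors builtin-stat.c after this patch:

#include <stdio.h>

#define NCPUS    2
#define NTHREADS 1

/* stand-in for struct perf_counts_values */
struct counts_values {
	unsigned long long val, ena, run;
};

static struct counts_values counts[NCPUS][NTHREADS];
static struct counts_values aggr;

/* stage 1: read raw per-cpu/per-thread values (stand-in for read_counter()) */
static int read_counter(void)
{
	for (int cpu = 0; cpu < NCPUS; cpu++)
		for (int thread = 0; thread < NTHREADS; thread++)
			counts[cpu][thread] = (struct counts_values) {
				.val = 1000ULL * (cpu + 1), .ena = 100, .run = 100,
			};
	return 0;
}

/* stage 2a: process one stored value (stand-in for process_counter_values()) */
static int process_counter_values(int cpu, int thread)
{
	struct counts_values *count = &counts[cpu][thread];

	aggr.val += count->val;
	aggr.ena += count->ena;
	aggr.run += count->run;
	return 0;
}

/* stage 2b: walk the cpu/thread maps (stand-in for process_counter()) */
static int process_counter(void)
{
	aggr = (struct counts_values) { 0 };

	for (int thread = 0; thread < NTHREADS; thread++)
		for (int cpu = 0; cpu < NCPUS; cpu++)
			if (process_counter_values(cpu, thread))
				return -1;
	return 0;
}

int main(void)
{
	/* mirrors the new read_counters() loop: read first, then process */
	if (read_counter())
		fprintf(stderr, "failed to read counter\n");
	if (process_counter())
		fprintf(stderr, "failed to process counter\n");

	printf("aggr: val=%llu ena=%llu run=%llu\n", aggr.val, aggr.ena, aggr.run);
	return 0;
}

Because the processing stage only walks values already stored per cpu/thread,
a later change can feed it counts that were not obtained by a live read, which
appears to be the reuse the changelog refers to.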
Signed-off-by: Jiri Olsa <jolsa@kernel.org>
Cc: Adrian Hunter <adrian.hunter@intel.com>
Cc: Andi Kleen <ak@linux.intel.com>
Cc: David Ahern <dsahern@gmail.com>
Cc: Namhyung Kim <namhyung@kernel.org>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Stephane Eranian <eranian@google.com>
Link: http://lkml.kernel.org/r/1435310967-14570-16-git-send-email-jolsa@kernel.org
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
 tools/perf/builtin-stat.c | 56 +++++++++++++++++++++++++++++++++++++++-----------------
 1 file changed, 39 insertions(+), 17 deletions(-)
diff --git a/tools/perf/builtin-stat.c b/tools/perf/builtin-stat.c
index 158859e622d3..74ac92baa2bd 100644
--- a/tools/perf/builtin-stat.c
+++ b/tools/perf/builtin-stat.c
@@ -217,8 +217,9 @@ static int check_per_pkg(struct perf_evsel *counter, int cpu, bool *skip)
 	return 0;
 }
 
-static int read_cb(struct perf_evsel *evsel, int cpu, int thread,
-		   struct perf_counts_values *count)
+static int
+process_counter_values(struct perf_evsel *evsel, int cpu, int thread,
+		       struct perf_counts_values *count)
 {
 	struct perf_counts_values *aggr = &evsel->counts->aggr;
 	static struct perf_counts_values zero;
@@ -239,7 +240,6 @@ static int read_cb(struct perf_evsel *evsel, int cpu, int thread,
 		if (!evsel->snapshot)
 			perf_evsel__compute_deltas(evsel, cpu, thread, count);
 		perf_counts_values__scale(count, scale, NULL);
-		*perf_counts(evsel->counts, cpu, thread) = *count;
 		if (aggr_mode == AGGR_NONE)
 			perf_stat__update_shadow_stats(evsel, count->values, cpu);
 		break;
@@ -256,23 +256,41 @@ static int read_cb(struct perf_evsel *evsel, int cpu, int thread,
 	return 0;
 }
 
-static int read_counter(struct perf_evsel *counter);
+static int process_counter_maps(struct perf_evsel *counter)
+{
+	int nthreads = thread_map__nr(counter->threads);
+	int ncpus = perf_evsel__nr_cpus(counter);
+	int cpu, thread;
 
-/*
- * Read out the results of a single counter:
- * aggregate counts across CPUs in system-wide mode
- */
-static int read_counter_aggr(struct perf_evsel *counter)
+	if (counter->system_wide)
+		nthreads = 1;
+
+	for (thread = 0; thread < nthreads; thread++) {
+		for (cpu = 0; cpu < ncpus; cpu++) {
+			if (process_counter_values(counter, cpu, thread,
+						   perf_counts(counter->counts, cpu, thread)))
+				return -1;
+		}
+	}
+
+	return 0;
+}
+
+static int process_counter(struct perf_evsel *counter)
 {
 	struct perf_counts_values *aggr = &counter->counts->aggr;
 	struct perf_stat *ps = counter->priv;
 	u64 *count = counter->counts->aggr.values;
-	int i;
+	int i, ret;
 
 	aggr->val = aggr->ena = aggr->run = 0;
 
-	if (read_counter(counter))
-		return -1;
+	ret = process_counter_maps(counter);
+	if (ret)
+		return ret;
+
+	if (aggr_mode != AGGR_GLOBAL)
+		return 0;
 
 	if (!counter->snapshot)
 		perf_evsel__compute_deltas(counter, -1, -1, aggr);
@@ -315,7 +333,10 @@ static int read_counter(struct perf_evsel *counter)
 
 	for (thread = 0; thread < nthreads; thread++) {
 		for (cpu = 0; cpu < ncpus; cpu++) {
-			if (perf_evsel__read_cb(counter, cpu, thread, read_cb))
+			struct perf_counts_values *count;
+
+			count = perf_counts(counter->counts, cpu, thread);
+			if (perf_evsel__read(counter, cpu, thread, count))
 				return -1;
 		}
 	}
@@ -332,10 +353,11 @@ static void read_counters(bool close)
 		ps = counter->priv;
 		memset(ps->res_stats, 0, sizeof(ps->res_stats));
 
-		if (aggr_mode == AGGR_GLOBAL)
-			read_counter_aggr(counter);
-		else
-			read_counter(counter);
+		if (read_counter(counter))
+			pr_warning("failed to read counter %s\n", counter->name);
+
+		if (process_counter(counter))
+			pr_warning("failed to process counter %s\n", counter->name);
 
 		if (close) {
 			perf_evsel__close_fd(counter, perf_evsel__nr_cpus(counter),