author     David Ahern <david.ahern@oracle.com>    2015-03-19 16:06:17 -0400
committer  David S. Miller <davem@davemloft.net>   2015-03-19 21:54:49 -0400
commit     d51291cb8f32bfae6b331e1838651f3ddefa73a5 (patch)
tree       2e5fc657403910a0db77ba24711019a56157e49e /arch/sparc
parent     5b0d4b5514bbcce69b516d0742f2cfc84ebd6db3 (diff)
sparc: perf: Make counting mode actually work
Currently perf-stat (aka, counting mode) does not work:

$ perf stat ls
...
 Performance counter stats for 'ls':

          1.585665      task-clock (msec)         #    0.580 CPUs utilized
                24      context-switches          #    0.015 M/sec
                 0      cpu-migrations            #    0.000 K/sec
                86      page-faults               #    0.054 M/sec
   <not supported>      cycles
   <not supported>      stalled-cycles-frontend
   <not supported>      stalled-cycles-backend
   <not supported>      instructions
   <not supported>      branches
   <not supported>      branch-misses

       0.002735100 seconds time elapsed

The reason is that the event state is never reset (it stays with
PERF_HES_UPTODATE set). Add a call to sparc_pmu_enable_event during the
added_event handling. Clean up the direct PCR encoding there, since
sparc_pmu_start calls sparc_pmu_enable_event, which does the same thing.
Passing PERF_EF_RELOAD to sparc_pmu_start means the call to
sparc_perf_event_set_period can be removed as well.

With this patch:

$ perf stat ls
...
 Performance counter stats for 'ls':

          1.552890      task-clock (msec)         #    0.552 CPUs utilized
                24      context-switches          #    0.015 M/sec
                 0      cpu-migrations            #    0.000 K/sec
                86      page-faults               #    0.055 M/sec
         5,748,997      cycles                    #    3.702 GHz
   <not supported>      stalled-cycles-frontend:HG
   <not supported>      stalled-cycles-backend:HG
         1,684,362      instructions:HG           #    0.29  insns per cycle
           295,133      branches:HG               #  190.054 M/sec
            28,007      branch-misses:HG          #    9.49% of all branches

       0.002815665 seconds time elapsed

Signed-off-by: David Ahern <david.ahern@oracle.com>
Acked-by: Bob Picco <bob.picco@oracle.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
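[Editor's note: for context, below is a rough sketch of the sparc_pmu_start
path in the same file, paraphrased rather than copied verbatim; the
active_event_index/this_cpu_ptr details are assumptions about the surrounding
code, not part of this patch. It illustrates why the explicit
sparc_perf_event_set_period call and PCR encoding in calculate_multiple_pcrs
become redundant: with PERF_EF_RELOAD the start path reprograms the period
itself, clears the PERF_HES_* state bits, and then enables the counter.]

	static void sparc_pmu_start(struct perf_event *event, int flags)
	{
		struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
		int idx = active_event_index(cpuc, event);

		if (flags & PERF_EF_RELOAD) {
			/* Reprogram the counter period before (re)starting. */
			sparc_perf_event_set_period(event, &event->hw, idx);
		}

		/* Drop PERF_HES_STOPPED/PERF_HES_UPTODATE so the event counts. */
		event->hw.state = 0;

		/* Program the PCR so the counter actually runs. */
		sparc_pmu_enable_event(cpuc, &event->hw, idx);
	}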
Diffstat (limited to 'arch/sparc')
-rw-r--r--  arch/sparc/kernel/perf_event.c | 11 +++--------
 1 file changed, 3 insertions(+), 8 deletions(-)
diff --git a/arch/sparc/kernel/perf_event.c b/arch/sparc/kernel/perf_event.c
index 6dc4e793df4c..af53c25da2e7 100644
--- a/arch/sparc/kernel/perf_event.c
+++ b/arch/sparc/kernel/perf_event.c
@@ -960,6 +960,8 @@ out:
 	cpuc->pcr[0] |= cpuc->event[0]->hw.config_base;
 }
 
+static void sparc_pmu_start(struct perf_event *event, int flags);
+
 /* On this PMU each PIC has it's own PCR control register.  */
 static void calculate_multiple_pcrs(struct cpu_hw_events *cpuc)
 {
@@ -972,20 +974,13 @@ static void calculate_multiple_pcrs(struct cpu_hw_events *cpuc)
 		struct perf_event *cp = cpuc->event[i];
 		struct hw_perf_event *hwc = &cp->hw;
 		int idx = hwc->idx;
-		u64 enc;
 
 		if (cpuc->current_idx[i] != PIC_NO_INDEX)
 			continue;
 
-		sparc_perf_event_set_period(cp, hwc, idx);
 		cpuc->current_idx[i] = idx;
 
-		enc = perf_event_get_enc(cpuc->events[i]);
-		cpuc->pcr[idx] &= ~mask_for_index(idx);
-		if (hwc->state & PERF_HES_STOPPED)
-			cpuc->pcr[idx] |= nop_for_index(idx);
-		else
-			cpuc->pcr[idx] |= event_encoding(enc, idx);
+		sparc_pmu_start(cp, PERF_EF_RELOAD);
 	}
 out:
 	for (i = 0; i < cpuc->n_events; i++) {