author     Benjamin Herrenschmidt <benh@kernel.crashing.org>   2012-03-26 16:47:34 -0400
committer  Benjamin Herrenschmidt <benh@kernel.crashing.org>   2012-03-27 20:33:24 -0400
commit     1ce447b90f3e71c81ae59e0062bc305ef267668b (patch)
tree       516f26debf251a7aa1538f72710f956b95a2f05c /arch/powerpc/perf
parent     cb52d8970eee65bf2c47d9a91bd4f58b17f595f4 (diff)
powerpc/perf: Fix instruction address sampling on 970 and Power4
970 and Power4 don't support "continuous sampling", which means that
when we aren't in marked-instruction sampling mode (marked events),
SIAR isn't updated with the last instruction sampled before the
perf interrupt. On those processors we must therefore use the exception
SRR0 value as the sampled instruction pointer.
Those processors also don't support the SIPR and SIHV bits in MMCRA,
which means we need some kind of heuristic to decide whether SIAR values
represent kernel or user addresses.
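The heuristic amounts to an address-range check: with no SIPR/SIHV bits to consult, a sampled SIAR value at or above PAGE_OFFSET is attributed to the kernel, anything below it to user space (see the PPMU_NO_SIPR branch in the core-book3s.c hunk below). A minimal, self-contained sketch of that check follows; the constant values and the flags_from_siar() helper are illustrative stand-ins, not the kernel's definitions.

	/*
	 * Sketch of the SIAR kernel/user heuristic, assuming the usual
	 * 64-bit Book3S layout where the kernel linear mapping starts at
	 * PAGE_OFFSET.  Constants are stand-ins for illustration only.
	 */
	#include <stdio.h>

	#define PAGE_OFFSET	0xc000000000000000UL	/* stand-in: start of kernel addresses */
	#define MISC_KERNEL	1			/* stand-in for PERF_RECORD_MISC_KERNEL */
	#define MISC_USER	2			/* stand-in for PERF_RECORD_MISC_USER */

	/* Without SIPR/SIHV in MMCRA, classify a sampled address by its range. */
	static unsigned int flags_from_siar(unsigned long siar)
	{
		return (siar >= PAGE_OFFSET) ? MISC_KERNEL : MISC_USER;
	}

	int main(void)
	{
		/* Example user-space and kernel text addresses. */
		unsigned long samples[] = { 0x0000000010001234UL, 0xc000000000012345UL };

		for (int i = 0; i < 2; i++)
			printf("%#lx -> %s\n", samples[i],
			       flags_from_siar(samples[i]) == MISC_KERNEL ? "kernel" : "user");
		return 0;
	}

In the patch itself this range check is only used when the PMU sets PPMU_NO_SIPR; on processors that do provide SIPR/SIHV, those MMCRA bits remain authoritative.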
Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Diffstat (limited to 'arch/powerpc/perf')
-rw-r--r--   arch/powerpc/perf/core-book3s.c   46
-rw-r--r--   arch/powerpc/perf/power4-pmu.c     1
-rw-r--r--   arch/powerpc/perf/ppc970-pmu.c     1
3 files changed, 43 insertions, 5 deletions
diff --git a/arch/powerpc/perf/core-book3s.c b/arch/powerpc/perf/core-book3s.c
index c2e27ede07ec..02aee03e713c 100644
--- a/arch/powerpc/perf/core-book3s.c
+++ b/arch/powerpc/perf/core-book3s.c
@@ -116,14 +116,45 @@ static inline void perf_get_data_addr(struct pt_regs *regs, u64 *addrp)
 		*addrp = mfspr(SPRN_SDAR);
 }
 
+static inline u32 perf_flags_from_msr(struct pt_regs *regs)
+{
+	if (regs->msr & MSR_PR)
+		return PERF_RECORD_MISC_USER;
+	if ((regs->msr & MSR_HV) && freeze_events_kernel != MMCR0_FCHV)
+		return PERF_RECORD_MISC_HYPERVISOR;
+	return PERF_RECORD_MISC_KERNEL;
+}
+
 static inline u32 perf_get_misc_flags(struct pt_regs *regs)
 {
 	unsigned long mmcra = regs->dsisr;
 	unsigned long sihv = MMCRA_SIHV;
 	unsigned long sipr = MMCRA_SIPR;
 
+	/* Not a PMU interrupt: Make up flags from regs->msr */
 	if (TRAP(regs) != 0xf00)
-		return 0;			/* not a PMU interrupt */
+		return perf_flags_from_msr(regs);
+
+	/*
+	 * If we don't support continuous sampling and this
+	 * is not a marked event, same deal
+	 */
+	if ((ppmu->flags & PPMU_NO_CONT_SAMPLING) &&
+	    !(mmcra & MMCRA_SAMPLE_ENABLE))
+		return perf_flags_from_msr(regs);
+
+	/*
+	 * If we don't have flags in MMCRA, rather than using
+	 * the MSR, we intuit the flags from the address in
+	 * SIAR which should give slightly more reliable
+	 * results
+	 */
+	if (ppmu->flags & PPMU_NO_SIPR) {
+		unsigned long siar = mfspr(SPRN_SIAR);
+		if (siar >= PAGE_OFFSET)
+			return PERF_RECORD_MISC_KERNEL;
+		return PERF_RECORD_MISC_USER;
+	}
 
 	if (ppmu->flags & PPMU_ALT_SIPR) {
 		sihv = POWER6_MMCRA_SIHV;
@@ -1299,13 +1330,18 @@ unsigned long perf_misc_flags(struct pt_regs *regs)
  */
 unsigned long perf_instruction_pointer(struct pt_regs *regs)
 {
-	unsigned long ip;
+	unsigned long mmcra = regs->dsisr;
 
+	/* Not a PMU interrupt */
 	if (TRAP(regs) != 0xf00)
-		return regs->nip;		/* not a PMU interrupt */
+		return regs->nip;
+
+	/* Processor doesn't support sampling non marked events */
+	if ((ppmu->flags & PPMU_NO_CONT_SAMPLING) &&
+	    !(mmcra & MMCRA_SAMPLE_ENABLE))
+		return regs->nip;
 
-	ip = mfspr(SPRN_SIAR) + perf_ip_adjust(regs);
-	return ip;
+	return mfspr(SPRN_SIAR) + perf_ip_adjust(regs);
 }
 
 static bool pmc_overflow(unsigned long val)
diff --git a/arch/powerpc/perf/power4-pmu.c b/arch/powerpc/perf/power4-pmu.c
index b4f1dda4d089..9103a1de864d 100644
--- a/arch/powerpc/perf/power4-pmu.c
+++ b/arch/powerpc/perf/power4-pmu.c
@@ -607,6 +607,7 @@ static struct power_pmu power4_pmu = {
 	.n_generic = ARRAY_SIZE(p4_generic_events),
 	.generic_events = p4_generic_events,
 	.cache_events = &power4_cache_events,
+	.flags = PPMU_NO_SIPR | PPMU_NO_CONT_SAMPLING,
 };
 
 static int __init init_power4_pmu(void)
diff --git a/arch/powerpc/perf/ppc970-pmu.c b/arch/powerpc/perf/ppc970-pmu.c
index 111eb25bb0b6..20139ceeacf6 100644
--- a/arch/powerpc/perf/ppc970-pmu.c
+++ b/arch/powerpc/perf/ppc970-pmu.c
@@ -487,6 +487,7 @@ static struct power_pmu ppc970_pmu = {
 	.n_generic = ARRAY_SIZE(ppc970_generic_events),
 	.generic_events = ppc970_generic_events,
 	.cache_events = &ppc970_cache_events,
+	.flags = PPMU_NO_SIPR | PPMU_NO_CONT_SAMPLING,
 };
 
 static int __init init_ppc970_pmu(void)