author		sukadev@linux.vnet.ibm.com <sukadev@linux.vnet.ibm.com>	2012-09-18 16:56:11 -0400
committer	Benjamin Herrenschmidt <benh@kernel.crashing.org>	2012-09-26 22:51:05 -0400
commit		e6878835ac4794f25385522d29c634b7bbb7cca9 (patch)
tree		31abcefb32c9388588b176bbca8709d97c5d3559 /arch/powerpc
parent		e8294de53bb788b3a6f7c09d143c7cdc60b65753 (diff)
powerpc/perf: Sample only if SIAR-Valid bit is set in P7+
On POWER7+ two new bits (mmcra[35] and mmcra[36]) indicate whether the
contents of SIAR and SDAR are valid.

For marked instructions on P7+, we must save the contents of the SIAR and
SDAR registers only if these new bits are set.

This check for the SIAR-Valid bit is specific to P7+, so rather than waste
a CPU-feature bit, detect P7+ via the PVR.

Note that Carl Love proposed a similar change for oprofile:

	https://lkml.org/lkml/2012/6/22/309

Signed-off-by: Sukadev Bhattiprolu <sukadev@linux.vnet.ibm.com>
Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
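A note on the constants (not part of the patch): the mmcra[35]/mmcra[36] references
above use the Power ISA's big-endian bit numbering, where bit 0 is the most significant
bit of the 64-bit SPR, so bit N corresponds to the mask 1UL << (63 - N). The small
stand-alone C sketch below, with a purely illustrative IBM_BIT() helper, prints those
masks to show how they line up with the POWER7P_MMCRA_SIAR_VALID and
POWER7P_MMCRA_SDAR_VALID values added to reg.h by this patch.

#include <stdio.h>

/* Illustrative helper: Power ISA bit N of a 64-bit register, MSB = bit 0. */
#define IBM_BIT(n)	(1UL << (63 - (n)))

int main(void)
{
	printf("mmcra[35] (SIAR valid): 0x%08lx\n", IBM_BIT(35));	/* 0x10000000 */
	printf("mmcra[36] (SDAR valid): 0x%08lx\n", IBM_BIT(36));	/* 0x08000000 */
	return 0;
}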
Diffstat (limited to 'arch/powerpc')
-rw-r--r--	arch/powerpc/include/asm/perf_event_server.h	 1
-rw-r--r--	arch/powerpc/include/asm/reg.h			 4
-rw-r--r--	arch/powerpc/perf/core-book3s.c			44
-rw-r--r--	arch/powerpc/perf/power7-pmu.c			 3
4 files changed, 46 insertions(+), 6 deletions(-)
diff --git a/arch/powerpc/include/asm/perf_event_server.h b/arch/powerpc/include/asm/perf_event_server.h
index 078019b5b353..9710be3a2d17 100644
--- a/arch/powerpc/include/asm/perf_event_server.h
+++ b/arch/powerpc/include/asm/perf_event_server.h
@@ -49,6 +49,7 @@ struct power_pmu {
 #define PPMU_ALT_SIPR		2	/* uses alternate posn for SIPR/HV */
 #define PPMU_NO_SIPR		4	/* no SIPR/HV in MMCRA at all */
 #define PPMU_NO_CONT_SAMPLING	8	/* no continuous sampling */
+#define PPMU_SIAR_VALID		16	/* Processor has SIAR Valid bit */
 
 /*
  * Values for flags to get_alternatives()
diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h
index a1096fb62816..d24c14163966 100644
--- a/arch/powerpc/include/asm/reg.h
+++ b/arch/powerpc/include/asm/reg.h
@@ -606,6 +606,10 @@
 #define POWER6_MMCRA_SIPR	0x0000020000000000ULL
 #define POWER6_MMCRA_THRM	0x00000020UL
 #define POWER6_MMCRA_OTHER	0x0000000EUL
+
+#define POWER7P_MMCRA_SIAR_VALID 0x10000000	/* P7+ SIAR contents valid */
+#define POWER7P_MMCRA_SDAR_VALID 0x08000000	/* P7+ SDAR contents valid */
+
 #define SPRN_PMC1	787
 #define SPRN_PMC2	788
 #define SPRN_PMC3	789
diff --git a/arch/powerpc/perf/core-book3s.c b/arch/powerpc/perf/core-book3s.c
index fb55da91aa45..0db88f501f91 100644
--- a/arch/powerpc/perf/core-book3s.c
+++ b/arch/powerpc/perf/core-book3s.c
@@ -82,6 +82,11 @@ static inline int perf_intr_is_nmi(struct pt_regs *regs)
 	return 0;
 }
 
+static inline int siar_valid(struct pt_regs *regs)
+{
+	return 1;
+}
+
 #endif /* CONFIG_PPC32 */
 
 /*
@@ -106,14 +111,20 @@ static inline unsigned long perf_ip_adjust(struct pt_regs *regs)
  * If we're not doing instruction sampling, give them the SDAR
  * (sampled data address).  If we are doing instruction sampling, then
  * only give them the SDAR if it corresponds to the instruction
- * pointed to by SIAR; this is indicated by the [POWER6_]MMCRA_SDSYNC
- * bit in MMCRA.
+ * pointed to by SIAR; this is indicated by the [POWER6_]MMCRA_SDSYNC or
+ * the [POWER7P_]MMCRA_SDAR_VALID bit in MMCRA.
  */
 static inline void perf_get_data_addr(struct pt_regs *regs, u64 *addrp)
 {
 	unsigned long mmcra = regs->dsisr;
-	unsigned long sdsync = (ppmu->flags & PPMU_ALT_SIPR) ?
-		POWER6_MMCRA_SDSYNC : MMCRA_SDSYNC;
+	unsigned long sdsync;
+
+	if (ppmu->flags & PPMU_SIAR_VALID)
+		sdsync = POWER7P_MMCRA_SDAR_VALID;
+	else if (ppmu->flags & PPMU_ALT_SIPR)
+		sdsync = POWER6_MMCRA_SDSYNC;
+	else
+		sdsync = MMCRA_SDSYNC;
 
 	if (!(mmcra & MMCRA_SAMPLE_ENABLE) || (mmcra & sdsync))
 		*addrp = mfspr(SPRN_SDAR);
@@ -230,6 +241,24 @@ static inline int perf_intr_is_nmi(struct pt_regs *regs)
 	return !regs->softe;
 }
 
+/*
+ * On processors like P7+ that have the SIAR-Valid bit, marked instructions
+ * must be sampled only if the SIAR-valid bit is set.
+ *
+ * For unmarked instructions and for processors that don't have the SIAR-Valid
+ * bit, assume that SIAR is valid.
+ */
+static inline int siar_valid(struct pt_regs *regs)
+{
+	unsigned long mmcra = regs->dsisr;
+	int marked = mmcra & MMCRA_SAMPLE_ENABLE;
+
+	if ((ppmu->flags & PPMU_SIAR_VALID) && marked)
+		return mmcra & POWER7P_MMCRA_SIAR_VALID;
+
+	return 1;
+}
+
 #endif /* CONFIG_PPC64 */
 
 static void perf_event_interrupt(struct pt_regs *regs);
@@ -1291,6 +1320,7 @@ struct pmu power_pmu = {
 	.event_idx	= power_pmu_event_idx,
 };
 
+
 /*
  * A counter has overflowed; update its count and record
  * things if requested.  Note that interrupts are hard-disabled
@@ -1324,7 +1354,7 @@ static void record_and_restart(struct perf_event *event, unsigned long val,
 			left += period;
 			if (left <= 0)
 				left = period;
-			record = 1;
+			record = siar_valid(regs);
 			event->hw.last_period = event->hw.sample_period;
 		}
 		if (left < 0x80000000LL)
@@ -1374,8 +1404,10 @@ unsigned long perf_instruction_pointer(struct pt_regs *regs)
 {
 	unsigned long use_siar = regs->result;
 
-	if (use_siar)
+	if (use_siar && siar_valid(regs))
 		return mfspr(SPRN_SIAR) + perf_ip_adjust(regs);
+	else if (use_siar)
+		return 0;		// no valid instruction pointer
 	else
 		return regs->nip;
 }
diff --git a/arch/powerpc/perf/power7-pmu.c b/arch/powerpc/perf/power7-pmu.c
index 1251e4d7e262..441af08edf43 100644
--- a/arch/powerpc/perf/power7-pmu.c
+++ b/arch/powerpc/perf/power7-pmu.c
@@ -373,6 +373,9 @@ static int __init init_power7_pmu(void)
 	    strcmp(cur_cpu_spec->oprofile_cpu_type, "ppc64/power7"))
 		return -ENODEV;
 
+	if (pvr_version_is(PVR_POWER7p))
+		power7_pmu.flags |= PPMU_SIAR_VALID;
+
 	return register_power_pmu(&power7_pmu);
 }
 
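For reference, here is a stand-alone sketch (not kernel code) of the sampling decision
the new siar_valid() helper implements. The kernel's ppmu->flags and the MMCRA value it
stashes in regs->dsisr are replaced by plain parameters; the MMCRA_SAMPLE_ENABLE value
is an illustrative stand-in, while the other two constants are the ones this patch defines.

#include <stdio.h>

#define PPMU_SIAR_VALID			16		/* flag added by this patch */
#define MMCRA_SAMPLE_ENABLE		0x00000001UL	/* illustrative stand-in */
#define POWER7P_MMCRA_SIAR_VALID	0x10000000UL	/* mmcra[35], as in reg.h above */

/* Mirrors the logic of the siar_valid() helper added in core-book3s.c. */
static int siar_valid(unsigned long pmu_flags, unsigned long mmcra)
{
	int marked = mmcra & MMCRA_SAMPLE_ENABLE;

	/* Marked sample on a PMU that advertises SIAR-Valid: trust the hardware bit. */
	if ((pmu_flags & PPMU_SIAR_VALID) && marked)
		return (mmcra & POWER7P_MMCRA_SIAR_VALID) != 0;

	/* Unmarked samples, or PMUs without the bit: assume SIAR is valid. */
	return 1;
}

int main(void)
{
	/* Marked sample, SIAR-Valid clear: not recorded (prints 0). */
	printf("%d\n", siar_valid(PPMU_SIAR_VALID, MMCRA_SAMPLE_ENABLE));
	/* Marked sample, SIAR-Valid set: recorded as usual (prints 1). */
	printf("%d\n", siar_valid(PPMU_SIAR_VALID,
				  MMCRA_SAMPLE_ENABLE | POWER7P_MMCRA_SIAR_VALID));
	/* PMU without the flag (pre-P7+): behaviour unchanged (prints 1). */
	printf("%d\n", siar_valid(0, MMCRA_SAMPLE_ENABLE));
	return 0;
}

The net effect matches the record_and_restart() and perf_instruction_pointer() changes
above: on P7+, a marked sample is only recorded, and SIAR only trusted, when the
hardware has flagged its contents as valid.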