author		Paul Mackerras <paulus@samba.org>	2009-06-17 07:53:10 -0400
committer	Ingo Molnar <mingo@elte.hu>		2009-06-18 05:11:46 -0400
commit		98fb1807b97e3e631b940f67544e265c64b984dc
tree		f420b490e785b7d599a301cfcaa15475f7b6df41
parent		079b3c569c87819e7a19d9b9f51d4746fc47bf9a
perf_counter: powerpc: Make powerpc perf_counter code safe for 32-bit kernels
This abstracts a few things in arch/powerpc/kernel/perf_counter.c that
are specific to 64-bit kernels, and provides definitions for 32-bit
kernels. In particular:

 * Only 64-bit has MMCRA and the bits in it that give information
   about a PMU interrupt (sampled PR, HV, slot number etc.)
 * Only 64-bit has the lppaca and the lppaca->pmcregs_in_use field
 * Use of SDAR is confined to 64-bit for now
 * Only 64-bit has soft/lazy interrupt disable and therefore
   pseudo-NMIs (interrupts that occur while interrupts are
   soft-disabled)
 * Only 64-bit has PMC7 and PMC8
 * Only 64-bit has the MSR_HV bit

This also fixes the types used in a couple of places, where we were
using long types for things that need to be 64-bit.

Signed-off-by: Paul Mackerras <paulus@samba.org>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: linuxppc-dev@ozlabs.org
Cc: benh@kernel.crashing.org
LKML-Reference: <19000.55590.634126.876084@cargo.ozlabs.ibm.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
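The shape of the change, in brief: each 64-bit-only operation becomes a
small static inline helper, and CONFIG_PPC32 builds get a no-op stub
with the same signature, so the common code can call the helpers with
no #ifdefs at the call sites. A condensed sketch of the pattern, folding
the patch's separate PPC32/PPC64 blocks into one #ifdef/#else for
illustration (the helper names are the ones the patch introduces):

#ifdef CONFIG_PPC32
/* 32-bit has no MMCRA and no soft/lazy interrupt disable: stub these out */
static inline void perf_read_regs(struct pt_regs *regs) { }
static inline int perf_intr_is_nmi(struct pt_regs *regs) { return 0; }
#else /* CONFIG_PPC64 */
static inline void perf_read_regs(struct pt_regs *regs)
{
	/* cache MMCRA in regs->dsisr so it is read only once per interrupt */
	regs->dsisr = mfspr(SPRN_MMCRA);
}
static inline int perf_intr_is_nmi(struct pt_regs *regs)
{
	/* a PMU interrupt taken while soft-disabled acts as a pseudo-NMI */
	return !regs->softe;
}
#endif

The interrupt handler then reads as nmi = perf_intr_is_nmi(regs); on
both configurations.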
Diffstat (limited to 'arch')
 arch/powerpc/kernel/perf_counter.c | 193 +++++++++++++++++++++---------
 1 file changed, 133 insertions(+), 60 deletions(-)
diff --git a/arch/powerpc/kernel/perf_counter.c b/arch/powerpc/kernel/perf_counter.c
index 25e656c14945..809fdf94b95f 100644
--- a/arch/powerpc/kernel/perf_counter.c
+++ b/arch/powerpc/kernel/perf_counter.c
@@ -46,6 +46,115 @@ struct power_pmu *ppmu;
  */
 static unsigned int freeze_counters_kernel = MMCR0_FCS;
 
+/*
+ * 32-bit doesn't have MMCRA but does have an MMCR2,
+ * and a few other names are different.
+ */
+#ifdef CONFIG_PPC32
+
+#define MMCR0_FCHV		0
+#define MMCR0_PMCjCE		MMCR0_PMCnCE
+
+#define SPRN_MMCRA		SPRN_MMCR2
+#define MMCRA_SAMPLE_ENABLE	0
+
+static inline unsigned long perf_ip_adjust(struct pt_regs *regs)
+{
+	return 0;
+}
+static inline void perf_set_pmu_inuse(int inuse) { }
+static inline void perf_get_data_addr(struct pt_regs *regs, u64 *addrp) { }
+static inline u32 perf_get_misc_flags(struct pt_regs *regs)
+{
+	return 0;
+}
+static inline void perf_read_regs(struct pt_regs *regs) { }
+static inline int perf_intr_is_nmi(struct pt_regs *regs)
+{
+	return 0;
+}
+
+#endif /* CONFIG_PPC32 */
+
+/*
+ * Things that are specific to 64-bit implementations.
+ */
+#ifdef CONFIG_PPC64
+
+static inline unsigned long perf_ip_adjust(struct pt_regs *regs)
+{
+	unsigned long mmcra = regs->dsisr;
+
+	if ((mmcra & MMCRA_SAMPLE_ENABLE) && !(ppmu->flags & PPMU_ALT_SIPR)) {
+		unsigned long slot = (mmcra & MMCRA_SLOT) >> MMCRA_SLOT_SHIFT;
+		if (slot > 1)
+			return 4 * (slot - 1);
+	}
+	return 0;
+}
+
+static inline void perf_set_pmu_inuse(int inuse)
+{
+	get_lppaca()->pmcregs_in_use = inuse;
+}
+
+/*
+ * The user wants a data address recorded.
+ * If we're not doing instruction sampling, give them the SDAR
+ * (sampled data address).  If we are doing instruction sampling, then
+ * only give them the SDAR if it corresponds to the instruction
+ * pointed to by SIAR; this is indicated by the [POWER6_]MMCRA_SDSYNC
+ * bit in MMCRA.
+ */
+static inline void perf_get_data_addr(struct pt_regs *regs, u64 *addrp)
+{
+	unsigned long mmcra = regs->dsisr;
+	unsigned long sdsync = (ppmu->flags & PPMU_ALT_SIPR) ?
+		POWER6_MMCRA_SDSYNC : MMCRA_SDSYNC;
+
+	if (!(mmcra & MMCRA_SAMPLE_ENABLE) || (mmcra & sdsync))
+		*addrp = mfspr(SPRN_SDAR);
+}
+
+static inline u32 perf_get_misc_flags(struct pt_regs *regs)
+{
+	unsigned long mmcra = regs->dsisr;
+
+	if (TRAP(regs) != 0xf00)
+		return 0;	/* not a PMU interrupt */
+
+	if (ppmu->flags & PPMU_ALT_SIPR) {
+		if (mmcra & POWER6_MMCRA_SIHV)
+			return PERF_EVENT_MISC_HYPERVISOR;
+		return (mmcra & POWER6_MMCRA_SIPR) ?
+			PERF_EVENT_MISC_USER : PERF_EVENT_MISC_KERNEL;
+	}
+	if (mmcra & MMCRA_SIHV)
+		return PERF_EVENT_MISC_HYPERVISOR;
+	return (mmcra & MMCRA_SIPR) ? PERF_EVENT_MISC_USER :
+		PERF_EVENT_MISC_KERNEL;
+}
+
+/*
+ * Overload regs->dsisr to store MMCRA so we only need to read it once
+ * on each interrupt.
+ */
+static inline void perf_read_regs(struct pt_regs *regs)
+{
+	regs->dsisr = mfspr(SPRN_MMCRA);
+}
+
+/*
+ * If interrupts were soft-disabled when a PMU interrupt occurs, treat
+ * it as an NMI.
+ */
+static inline int perf_intr_is_nmi(struct pt_regs *regs)
+{
+	return !regs->softe;
+}
+
+#endif /* CONFIG_PPC64 */
+
 static void perf_counter_interrupt(struct pt_regs *regs);
 
 void perf_counter_print_debug(void)
@@ -78,12 +187,14 @@ static unsigned long read_pmc(int idx)
 	case 6:
 		val = mfspr(SPRN_PMC6);
 		break;
+#ifdef CONFIG_PPC64
 	case 7:
 		val = mfspr(SPRN_PMC7);
 		break;
 	case 8:
 		val = mfspr(SPRN_PMC8);
 		break;
+#endif /* CONFIG_PPC64 */
 	default:
 		printk(KERN_ERR "oops trying to read PMC%d\n", idx);
 		val = 0;
@@ -115,12 +226,14 @@ static void write_pmc(int idx, unsigned long val)
 	case 6:
 		mtspr(SPRN_PMC6, val);
 		break;
+#ifdef CONFIG_PPC64
 	case 7:
 		mtspr(SPRN_PMC7, val);
 		break;
 	case 8:
 		mtspr(SPRN_PMC8, val);
 		break;
+#endif /* CONFIG_PPC64 */
 	default:
 		printk(KERN_ERR "oops trying to write PMC%d\n", idx);
 	}
@@ -283,7 +396,7 @@ static int check_excludes(struct perf_counter **ctrs, unsigned int cflags[],
 
 static void power_pmu_read(struct perf_counter *counter)
 {
-	long val, delta, prev;
+	s64 val, delta, prev;
 
 	if (!counter->hw.idx)
 		return;
@@ -477,7 +590,7 @@ void hw_perf_enable(void)
 		mtspr(SPRN_MMCRA, cpuhw->mmcr[2] & ~MMCRA_SAMPLE_ENABLE);
 		mtspr(SPRN_MMCR1, cpuhw->mmcr[1]);
 		if (cpuhw->n_counters == 0)
-			get_lppaca()->pmcregs_in_use = 0;
+			perf_set_pmu_inuse(0);
 		goto out_enable;
 	}
 
@@ -510,7 +623,7 @@ void hw_perf_enable(void)
 	 * bit set and set the hardware counters to their initial values.
 	 * Then unfreeze the counters.
 	 */
-	get_lppaca()->pmcregs_in_use = 1;
+	perf_set_pmu_inuse(1);
 	mtspr(SPRN_MMCRA, cpuhw->mmcr[2] & ~MMCRA_SAMPLE_ENABLE);
 	mtspr(SPRN_MMCR1, cpuhw->mmcr[1]);
 	mtspr(SPRN_MMCR0, (cpuhw->mmcr[0] & ~(MMCR0_PMC1CE | MMCR0_PMCjCE))
@@ -1007,11 +1120,10 @@ const struct pmu *hw_perf_counter_init(struct perf_counter *counter)
 	 * things if requested.  Note that interrupts are hard-disabled
 	 * here so there is no possibility of being interrupted.
 	 */
-static void record_and_restart(struct perf_counter *counter, long val,
+static void record_and_restart(struct perf_counter *counter, unsigned long val,
 			       struct pt_regs *regs, int nmi)
 {
 	u64 period = counter->hw.sample_period;
-	unsigned long mmcra, sdsync;
 	s64 prev, delta, left;
 	int record = 0;
 
@@ -1033,8 +1145,8 @@ static void record_and_restart(struct perf_counter *counter, long val,
 			left = period;
 			record = 1;
 		}
-		if (left < 0x80000000L)
-			val = 0x80000000L - left;
+		if (left < 0x80000000LL)
+			val = 0x80000000LL - left;
 	}
 
 	/*
@@ -1047,22 +1159,9 @@ static void record_and_restart(struct perf_counter *counter, long val,
 			.period	= counter->hw.last_period,
 		};
 
-		if (counter->attr.sample_type & PERF_SAMPLE_ADDR) {
-			/*
-			 * The user wants a data address recorded.
-			 * If we're not doing instruction sampling,
-			 * give them the SDAR (sampled data address).
-			 * If we are doing instruction sampling, then only
-			 * give them the SDAR if it corresponds to the
-			 * instruction pointed to by SIAR; this is indicated
-			 * by the [POWER6_]MMCRA_SDSYNC bit in MMCRA.
-			 */
-			mmcra = regs->dsisr;
-			sdsync = (ppmu->flags & PPMU_ALT_SIPR) ?
-				POWER6_MMCRA_SDSYNC : MMCRA_SDSYNC;
-			if (!(mmcra & MMCRA_SAMPLE_ENABLE) || (mmcra & sdsync))
-				data.addr = mfspr(SPRN_SDAR);
-		}
+		if (counter->attr.sample_type & PERF_SAMPLE_ADDR)
+			perf_get_data_addr(regs, &data.addr);
+
 		if (perf_counter_overflow(counter, nmi, &data)) {
 			/*
 			 * Interrupts are coming too fast - throttle them
@@ -1088,25 +1187,12 @@ static void record_and_restart(struct perf_counter *counter, long val,
  */
 unsigned long perf_misc_flags(struct pt_regs *regs)
 {
-	unsigned long mmcra;
+	u32 flags = perf_get_misc_flags(regs);
 
-	if (TRAP(regs) != 0xf00) {
-		/* not a PMU interrupt */
-		return user_mode(regs) ? PERF_EVENT_MISC_USER :
-			PERF_EVENT_MISC_KERNEL;
-	}
-
-	mmcra = regs->dsisr;
-	if (ppmu->flags & PPMU_ALT_SIPR) {
-		if (mmcra & POWER6_MMCRA_SIHV)
-			return PERF_EVENT_MISC_HYPERVISOR;
-		return (mmcra & POWER6_MMCRA_SIPR) ? PERF_EVENT_MISC_USER :
-			PERF_EVENT_MISC_KERNEL;
-	}
-	if (mmcra & MMCRA_SIHV)
-		return PERF_EVENT_MISC_HYPERVISOR;
-	return (mmcra & MMCRA_SIPR) ? PERF_EVENT_MISC_USER :
-		PERF_EVENT_MISC_KERNEL;
+	if (flags)
+		return flags;
+	return user_mode(regs) ? PERF_EVENT_MISC_USER :
+		PERF_EVENT_MISC_KERNEL;
 }
 
 /*
@@ -1115,20 +1201,12 @@ unsigned long perf_misc_flags(struct pt_regs *regs)
  */
 unsigned long perf_instruction_pointer(struct pt_regs *regs)
 {
-	unsigned long mmcra;
 	unsigned long ip;
-	unsigned long slot;
 
 	if (TRAP(regs) != 0xf00)
 		return regs->nip;	/* not a PMU interrupt */
 
-	ip = mfspr(SPRN_SIAR);
-	mmcra = regs->dsisr;
-	if ((mmcra & MMCRA_SAMPLE_ENABLE) && !(ppmu->flags & PPMU_ALT_SIPR)) {
-		slot = (mmcra & MMCRA_SLOT) >> MMCRA_SLOT_SHIFT;
-		if (slot > 1)
-			ip += 4 * (slot - 1);
-	}
+	ip = mfspr(SPRN_SIAR) + perf_ip_adjust(regs);
 	return ip;
 }
 
@@ -1140,7 +1218,7 @@ static void perf_counter_interrupt(struct pt_regs *regs)
 	int i;
 	struct cpu_hw_counters *cpuhw = &__get_cpu_var(cpu_hw_counters);
 	struct perf_counter *counter;
-	long val;
+	unsigned long val;
 	int found = 0;
 	int nmi;
 
@@ -1148,16 +1226,9 @@ static void perf_counter_interrupt(struct pt_regs *regs)
 		freeze_limited_counters(cpuhw, mfspr(SPRN_PMC5),
 					mfspr(SPRN_PMC6));
 
-	/*
-	 * Overload regs->dsisr to store MMCRA so we only need to read it once.
-	 */
-	regs->dsisr = mfspr(SPRN_MMCRA);
+	perf_read_regs(regs);
 
-	/*
-	 * If interrupts were soft-disabled when this PMU interrupt
-	 * occurred, treat it as an NMI.
-	 */
-	nmi = !regs->softe;
+	nmi = perf_intr_is_nmi(regs);
 	if (nmi)
 		nmi_enter();
 	else
@@ -1223,11 +1294,13 @@ int register_power_pmu(struct power_pmu *pmu)
 	pr_info("%s performance monitor hardware support registered\n",
 		pmu->name);
 
+#ifdef MSR_HV
 	/*
 	 * Use FCHV to ignore kernel events if MSR.HV is set.
 	 */
 	if (mfmsr() & MSR_HV)
 		freeze_counters_kernel = MMCR0_FCHV;
+#endif /* MSR_HV */
 
 	return 0;
 }
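A worked example of the slot arithmetic in the new perf_ip_adjust()
helper (an illustrative reading of the code above, not documentation
from the patch): when MMCRA_SAMPLE_ENABLE is set and the PMU does not
have the PPMU_ALT_SIPR quirk, a slot value above 1 moves the reported
IP forward by whole 4-byte instructions from the address in SIAR:

	ip = mfspr(SPRN_SIAR) + perf_ip_adjust(regs);
	/* slot field = 3  =>  ip = SIAR + 4 * (3 - 1) = SIAR + 8 */

On 32-bit kernels perf_ip_adjust() is a stub that returns 0, so the IP
is SIAR unchanged.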