author    Eric B Munson <emunson@mgebm.net>  2011-05-23 10:22:40 -0400
committer Robert Richter <robert.richter@amd.com>  2011-05-24 05:26:10 -0400
commit    d819437156fd99da61d4e1402b2dbfc5cc472265 (patch)
tree      f7eeacffee0161139d31ec2b968b787b82bdb626
parent    3d2606f42984613d324ad3047cf503bcddc3880a (diff)
oprofile, powerpc: Handle events that raise an exception without overflowing
Commit 0837e3242c73566fc1c0196b4ec61779c25ffc93 fixes a situation on POWER7
where events can roll back if a speculative event doesn't actually complete.
This can raise a performance monitor exception. We need to catch this to
ensure that we reset the PMC. In all cases the PMC will be less than 256
cycles from overflow. This patch lifts Anton's fix for the problem in perf
and applies it to oprofile as well.

Signed-off-by: Eric B Munson <emunson@mgebm.net>
Cc: <stable@kernel.org> # as far back as it applies cleanly
Tested-by: Maynard Johnson <maynardj@us.ibm.com>
Signed-off-by: Robert Richter <robert.richter@amd.com>
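[Editorial note: the arithmetic behind the fix is that classic PMCs are
32-bit counters read as signed values, so a counter that has crossed
0x80000000 reads negative and signals a normal overflow. A rolled-back
speculative event on POWER7 can instead leave the counter stopped just
below that boundary, which a second test catches. Below is a minimal
standalone sketch of the check as plain userspace C, not kernel code;
the is_power7 flag stands in for the kernel's __is_processor(PV_POWER7)
test.]

    #include <stdbool.h>
    #include <stdio.h>

    /* Stand-in for the kernel's __is_processor(PV_POWER7) check. */
    static bool is_power7 = true;

    static bool pmc_overflow(unsigned long val)
    {
            /* Sign bit set: counter crossed 0x80000000, a normal overflow. */
            if ((int)val < 0)
                    return true;

            /* POWER7 rollback: counter left at most 256 below the boundary. */
            if (is_power7 && ((0x80000000 - val) <= 256))
                    return true;

            return false;
    }

    int main(void)
    {
            printf("%d\n", pmc_overflow(0x80000001UL)); /* 1: past the boundary */
            printf("%d\n", pmc_overflow(0x7fffff80UL)); /* 1: 128 short, rolled back */
            printf("%d\n", pmc_overflow(0x7ffffe00UL)); /* 0: 512 short, no overflow */
            return 0;
    }

[The 256-cycle window is also why the in-code comment below warns about
short sampling periods: a user-chosen reset value within 256 of the
boundary would be indistinguishable from a rollback.]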
-rw-r--r--  arch/powerpc/oprofile/op_model_power4.c  24
1 file changed, 23 insertions(+), 1 deletion(-)
diff --git a/arch/powerpc/oprofile/op_model_power4.c b/arch/powerpc/oprofile/op_model_power4.c
index 8ee51a252cf1..e6bec74be131 100644
--- a/arch/powerpc/oprofile/op_model_power4.c
+++ b/arch/powerpc/oprofile/op_model_power4.c
@@ -261,6 +261,28 @@ static int get_kernel(unsigned long pc, unsigned long mmcra)
 	return is_kernel;
 }
 
+static bool pmc_overflow(unsigned long val)
+{
+	if ((int)val < 0)
+		return true;
+
+	/*
+	 * Events on POWER7 can roll back if a speculative event doesn't
+	 * eventually complete. Unfortunately in some rare cases they will
+	 * raise a performance monitor exception. We need to catch this to
+	 * ensure we reset the PMC. In all cases the PMC will be 256 or less
+	 * cycles from overflow.
+	 *
+	 * We only do this if the first pass fails to find any overflowing
+	 * PMCs because a user might set a period of less than 256 and we
+	 * don't want to mistakenly reset them.
+	 */
+	if (__is_processor(PV_POWER7) && ((0x80000000 - val) <= 256))
+		return true;
+
+	return false;
+}
+
 static void power4_handle_interrupt(struct pt_regs *regs,
 				    struct op_counter_config *ctr)
 {
@@ -281,7 +303,7 @@ static void power4_handle_interrupt(struct pt_regs *regs,
 
 	for (i = 0; i < cur_cpu_spec->num_pmcs; ++i) {
 		val = classic_ctr_read(i);
-		if (val < 0) {
+		if (pmc_overflow(val)) {
 			if (oprofile_running && ctr[i].enabled) {
 				oprofile_add_ext_sample(pc, regs, i, is_kernel);
 				classic_ctr_write(i, reset_value[i]);