aboutsummaryrefslogtreecommitdiffstats
path: root/arch
diff options
context:
space:
mode:
authorRobert Richter <robert.richter@amd.com>2012-04-02 14:19:18 -0400
committerIngo Molnar <mingo@kernel.org>2012-05-09 09:23:17 -0400
commit8b1e13638d465863572c8207a5cfceeef0cf0441 (patch)
tree3e0a5558edd80741a6994df1d8a432d1a3be0628 /arch
parentfc5fb2b5e1874e5894e2ac503bfb744220db89a1 (diff)
perf/x86-ibs: Fix usage of IBS op current count
The value of IbsOpCurCnt rolls over when it reaches IbsOpMaxCnt. Thus, it is reset to zero by hardware. To get the correct count we need to add the max count to it in case we received an ibs sample (valid bit set). Signed-off-by: Robert Richter <robert.richter@amd.com> Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl> Link: http://lkml.kernel.org/r/1333390758-10893-13-git-send-email-robert.richter@amd.com Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'arch')
-rw-r--r--arch/x86/kernel/cpu/perf_event_amd_ibs.c33
1 file changed, 21 insertions, 12 deletions
diff --git a/arch/x86/kernel/cpu/perf_event_amd_ibs.c b/arch/x86/kernel/cpu/perf_event_amd_ibs.c
index 5a9f95b5cc26..da9bcdcd9856 100644
--- a/arch/x86/kernel/cpu/perf_event_amd_ibs.c
+++ b/arch/x86/kernel/cpu/perf_event_amd_ibs.c
@@ -286,7 +286,15 @@ static u64 get_ibs_fetch_count(u64 config)
286 286
287static u64 get_ibs_op_count(u64 config) 287static u64 get_ibs_op_count(u64 config)
288{ 288{
289 return (config & IBS_OP_CUR_CNT) >> 32; 289 u64 count = 0;
290
291 if (config & IBS_OP_VAL)
292 count += (config & IBS_OP_MAX_CNT) << 4; /* cnt rolled over */
293
294 if (ibs_caps & IBS_CAPS_RDWROPCNT)
295 count += (config & IBS_OP_CUR_CNT) >> 32;
296
297 return count;
290} 298}
291 299
292static void 300static void
@@ -295,7 +303,12 @@ perf_ibs_event_update(struct perf_ibs *perf_ibs, struct perf_event *event,
295{ 303{
296 u64 count = perf_ibs->get_count(*config); 304 u64 count = perf_ibs->get_count(*config);
297 305
298 while (!perf_event_try_update(event, count, 20)) { 306 /*
307 * Set width to 64 since we do not overflow on max width but
308 * instead on max count. In perf_ibs_set_period() we clear
309 * prev count manually on overflow.
310 */
311 while (!perf_event_try_update(event, count, 64)) {
299 rdmsrl(event->hw.config_base, *config); 312 rdmsrl(event->hw.config_base, *config);
300 count = perf_ibs->get_count(*config); 313 count = perf_ibs->get_count(*config);
301 } 314 }
@@ -374,6 +387,12 @@ static void perf_ibs_stop(struct perf_event *event, int flags)
374 if (hwc->state & PERF_HES_UPTODATE) 387 if (hwc->state & PERF_HES_UPTODATE)
375 return; 388 return;
376 389
390 /*
391 * Clear valid bit to not count rollovers on update, rollovers
392 * are only updated in the irq handler.
393 */
394 config &= ~perf_ibs->valid_mask;
395
377 perf_ibs_event_update(perf_ibs, event, &config); 396 perf_ibs_event_update(perf_ibs, event, &config);
378 hwc->state |= PERF_HES_UPTODATE; 397 hwc->state |= PERF_HES_UPTODATE;
379} 398}
@@ -488,17 +507,7 @@ static int perf_ibs_handle_irq(struct perf_ibs *perf_ibs, struct pt_regs *iregs)
488 if (!(*buf++ & perf_ibs->valid_mask)) 507 if (!(*buf++ & perf_ibs->valid_mask))
489 return 0; 508 return 0;
490 509
491 /*
492 * Emulate IbsOpCurCnt in MSRC001_1033 (IbsOpCtl), not
493 * supported in all cpus. As this triggered an interrupt, we
494 * set the current count to the max count.
495 */
496 config = &ibs_data.regs[0]; 510 config = &ibs_data.regs[0];
497 if (perf_ibs == &perf_ibs_op && !(ibs_caps & IBS_CAPS_RDWROPCNT)) {
498 *config &= ~IBS_OP_CUR_CNT;
499 *config |= (*config & IBS_OP_MAX_CNT) << 36;
500 }
501
502 perf_ibs_event_update(perf_ibs, event, config); 511 perf_ibs_event_update(perf_ibs, event, config);
503 perf_sample_data_init(&data, 0, hwc->last_period); 512 perf_sample_data_init(&data, 0, hwc->last_period);
504 if (!perf_ibs_set_period(perf_ibs, hwc, &period)) 513 if (!perf_ibs_set_period(perf_ibs, hwc, &period))