| author | Kan Liang <kan.liang@intel.com> | 2018-05-03 14:25:06 -0400 |
|---|---|---|
| committer | Ingo Molnar <mingo@kernel.org> | 2018-05-31 06:36:27 -0400 |
| commit | 2da331465f44f9618abe8837d1a68405d550b66e (patch) | |
| tree | 3e6bc6682372f82db6510dc33f071ea7b4b3ca0d | |
| parent | c52b5c5f96b217854a953689f65234f7448c7f47 (diff) | |
perf/x86/intel/uncore: Introduce customized event_read() for client IMC uncore
There are two free-running counters for the client IMC uncore. The
customized event_init() function hard codes their indices to
'UNCORE_PMC_IDX_FIXED' and 'UNCORE_PMC_IDX_FIXED + 1'.
To support the index 'UNCORE_PMC_IDX_FIXED + 1', the generic
uncore_perf_event_update() is obscurely hacked.
This code-quality issue will cause problems whenever a new counter
index is introduced into the generic code, for example a new index
for free-running counters.
Introduce a customized event_read() function for the client IMC
uncore, copied from the generic uncore_pmu_event_read(). The index
'UNCORE_PMC_IDX_FIXED + 1' is thereby isolated to the client IMC
uncore only.
Signed-off-by: Kan Liang <kan.liang@intel.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Reviewed-by: Thomas Gleixner <tglx@linutronix.de>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: acme@kernel.org
Cc: eranian@google.com
Link: http://lkml.kernel.org/r/1525371913-10597-1-git-send-email-kan.liang@intel.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
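
The width handling in the customized read path may be easier to follow in isolation: the raw counter values are shifted up so the counter's most significant bit lands on bit 63, the subtraction then wraps correctly as plain unsigned arithmetic, and the shift back down restores the real magnitude. Below is a minimal user-space sketch of that computation; the 48-bit width, the helper name, and main() are illustrative assumptions, not the IMC's actual parameters.

```c
#include <stdint.h>
#include <stdio.h>

/* Hypothetical counter width, for illustration only. */
#define CTR_BITS 48

static uint64_t counter_delta(uint64_t prev_count, uint64_t new_count)
{
	int shift = 64 - CTR_BITS;
	uint64_t delta;

	/*
	 * Shift both raw values so the counter's top bit sits at bit 63;
	 * the unsigned subtraction then handles wrap-around, and shifting
	 * back down restores the real magnitude.
	 */
	delta = (new_count << shift) - (prev_count << shift);
	delta >>= shift;

	return delta;
}

int main(void)
{
	/* prev_count is near the top of the 48-bit range, new_count has wrapped. */
	uint64_t prev_count = (1ULL << CTR_BITS) - 100;
	uint64_t new_count = 50;

	/* Prints 150: 100 counts up to the wrap point, then 50 more. */
	printf("delta = %llu\n",
	       (unsigned long long)counter_delta(prev_count, new_count));
	return 0;
}
```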
| -rw-r--r-- | arch/x86/events/intel/uncore_snb.c | 33 |
1 file changed, 31 insertions(+), 2 deletions(-)
diff --git a/arch/x86/events/intel/uncore_snb.c b/arch/x86/events/intel/uncore_snb.c
index aee5e8496be4..df535215d18b 100644
--- a/arch/x86/events/intel/uncore_snb.c
+++ b/arch/x86/events/intel/uncore_snb.c
@@ -450,6 +450,35 @@ static void snb_uncore_imc_event_start(struct perf_event *event, int flags)
 	uncore_pmu_start_hrtimer(box);
 }
 
+static void snb_uncore_imc_event_read(struct perf_event *event)
+{
+	struct intel_uncore_box *box = uncore_event_to_box(event);
+	u64 prev_count, new_count, delta;
+	int shift;
+
+	/*
+	 * There are two free running counters in IMC.
+	 * The index for the second one is hardcoded to
+	 * UNCORE_PMC_IDX_FIXED + 1.
+	 */
+	if (event->hw.idx >= UNCORE_PMC_IDX_FIXED)
+		shift = 64 - uncore_fixed_ctr_bits(box);
+	else
+		shift = 64 - uncore_perf_ctr_bits(box);
+
+	/* the hrtimer might modify the previous event value */
+again:
+	prev_count = local64_read(&event->hw.prev_count);
+	new_count = uncore_read_counter(box, event);
+	if (local64_xchg(&event->hw.prev_count, new_count) != prev_count)
+		goto again;
+
+	delta = (new_count << shift) - (prev_count << shift);
+	delta >>= shift;
+
+	local64_add(delta, &event->count);
+}
+
 static void snb_uncore_imc_event_stop(struct perf_event *event, int flags)
 {
 	struct intel_uncore_box *box = uncore_event_to_box(event);
@@ -472,7 +501,7 @@ static void snb_uncore_imc_event_stop(struct perf_event *event, int flags)
 		 * Drain the remaining delta count out of a event
 		 * that we are disabling:
 		 */
-		uncore_perf_event_update(box, event);
+		snb_uncore_imc_event_read(event);
 		hwc->state |= PERF_HES_UPTODATE;
 	}
 }
@@ -534,7 +563,7 @@ static struct pmu snb_uncore_imc_pmu = {
 	.del		= snb_uncore_imc_event_del,
 	.start		= snb_uncore_imc_event_start,
 	.stop		= snb_uncore_imc_event_stop,
-	.read		= uncore_pmu_event_read,
+	.read		= snb_uncore_imc_event_read,
 };
 
 static struct intel_uncore_ops snb_uncore_imc_ops = {
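
One other detail in the new function is the again: loop: the read path can race with the uncore hrtimer, which also folds deltas into the event, so prev_count is re-read until the value swapped out by the exchange matches the value originally loaded. Below is a rough standalone sketch of that retry pattern using C11 atomics; the names, the fake counter, and main() are assumptions for illustration, and the kernel itself uses local64_* operations rather than stdatomic.

```c
#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

/* Stand-ins for event->hw.prev_count and event->count; names are assumed. */
static _Atomic uint64_t prev_count;
static _Atomic uint64_t total;
static _Atomic uint64_t fake_hw;	/* pretend hardware counter */

/* Assumed stand-in for uncore_read_counter(): a monotonically growing value. */
static uint64_t read_hw_counter(void)
{
	return atomic_fetch_add(&fake_hw, 7) + 7;
}

static void event_read(void)
{
	uint64_t prev, now;

again:
	prev = atomic_load(&prev_count);
	now = read_hw_counter();
	/*
	 * If a concurrent updater (the hrtimer callback in the driver)
	 * replaced prev_count between the load and the exchange, the
	 * delta below would be computed against a stale base: retry.
	 */
	if (atomic_exchange(&prev_count, now) != prev)
		goto again;

	atomic_fetch_add(&total, now - prev);
}

int main(void)
{
	event_read();
	event_read();
	/* Two reads of a counter advancing by 7 each time: prints 14. */
	printf("total = %llu\n", (unsigned long long)atomic_load(&total));
	return 0;
}
```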
