author	Kan Liang <kan.liang@intel.com>	2018-05-03 14:25:13 -0400
committer	Ingo Molnar <mingo@kernel.org>	2018-05-31 06:36:29 -0400
commit	9aae1780e7e81e54edfb70ba33ead5b0b48be009 (patch)
tree	0c45aea11a68c083a06b204a21038f57630bdf2e
parent	5a6c9d94e9ed7410142bc6fcb638a4db1895aa0c (diff)
perf/x86/intel/uncore: Clean up client IMC uncore
The counters in the client IMC uncore are free running counters, not fixed counters, so they should be handled by the new infrastructure for free running counters.

Introduce a new type, SNB_PCI_UNCORE_IMC_DATA, for the client IMC free running counters. Keep the customized event_init() function to stay compatible with the old event encoding, and clean up the other customized event_*() functions.

Signed-off-by: Kan Liang <kan.liang@intel.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Reviewed-by: Thomas Gleixner <tglx@linutronix.de>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: acme@kernel.org
Cc: eranian@google.com
Link: http://lkml.kernel.org/r/1525371913-10597-8-git-send-email-kan.liang@intel.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
-rw-r--r--	arch/x86/events/intel/uncore_snb.c	132
1 file changed, 20 insertions, 112 deletions
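The new snb_uncore_imc_freerunning[] entry added below packs, in order, the MMIO offset of the first counter, the stride between counters, the stride between boxes, the number of counters of this type, and the counter width in bits (32 here). As a rough sketch of how such an entry resolves to a per-counter address, the struct and helper below are illustrative stand-ins, not the kernel's actual definitions from arch/x86/events/intel/uncore.h:

/*
 * Illustrative mirror of the fields used by the
 * { SNB_UNCORE_PCI_IMC_DATA_READS_BASE, 0x4, 0x0, 2, 32 } initializer:
 * base offset, per-counter stride, per-box stride, counter count, width.
 */
struct imc_freerunning_desc {
        unsigned int counter_base;      /* MMIO offset of the first counter */
        unsigned int counter_offset;    /* stride between counters          */
        unsigned int box_offset;        /* stride between boxes             */
        unsigned int num_counters;      /* counters of this type            */
        unsigned int bits;              /* counter width in bits            */
};

/* Hypothetical helper: MMIO offset of free running counter @idx in box @box_id. */
static unsigned int imc_freerunning_addr(const struct imc_freerunning_desc *d,
                                         unsigned int box_id, unsigned int idx)
{
        return d->counter_base + d->counter_offset * idx + d->box_offset * box_id;
}

With the values above, counter 0 sits at SNB_UNCORE_PCI_IMC_DATA_READS_BASE and counter 1 one 0x4 step higher, i.e. the 0x5054 data-writes register visible in the first hunk.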
diff --git a/arch/x86/events/intel/uncore_snb.c b/arch/x86/events/intel/uncore_snb.c
index df535215d18b..8527c3e1038b 100644
--- a/arch/x86/events/intel/uncore_snb.c
+++ b/arch/x86/events/intel/uncore_snb.c
@@ -285,6 +285,15 @@ static struct uncore_event_desc snb_uncore_imc_events[] = {
 #define SNB_UNCORE_PCI_IMC_DATA_WRITES_BASE  0x5054
 #define SNB_UNCORE_PCI_IMC_CTR_BASE          SNB_UNCORE_PCI_IMC_DATA_READS_BASE
 
+enum perf_snb_uncore_imc_freerunning_types {
+        SNB_PCI_UNCORE_IMC_DATA = 0,
+        SNB_PCI_UNCORE_IMC_FREERUNNING_TYPE_MAX,
+};
+
+static struct freerunning_counters snb_uncore_imc_freerunning[] = {
+        [SNB_PCI_UNCORE_IMC_DATA] = { SNB_UNCORE_PCI_IMC_DATA_READS_BASE, 0x4, 0x0, 2, 32 },
+};
+
 static struct attribute *snb_uncore_imc_formats_attr[] = {
         &format_attr_event.attr,
         NULL,
@@ -341,9 +350,8 @@ static u64 snb_uncore_imc_read_counter(struct intel_uncore_box *box, struct perf
 }
 
 /*
- * custom event_init() function because we define our own fixed, free
- * running counters, so we do not want to conflict with generic uncore
- * logic. Also simplifies processing
+ * Keep the custom event_init() function compatible with old event
+ * encoding for free running counters.
  */
 static int snb_uncore_imc_event_init(struct perf_event *event)
 {
@@ -405,11 +413,11 @@ static int snb_uncore_imc_event_init(struct perf_event *event)
         switch (cfg) {
         case SNB_UNCORE_PCI_IMC_DATA_READS:
                 base = SNB_UNCORE_PCI_IMC_DATA_READS_BASE;
-                idx = UNCORE_PMC_IDX_FIXED;
+                idx = UNCORE_PMC_IDX_FREERUNNING;
                 break;
         case SNB_UNCORE_PCI_IMC_DATA_WRITES:
                 base = SNB_UNCORE_PCI_IMC_DATA_WRITES_BASE;
-                idx = UNCORE_PMC_IDX_FIXED + 1;
+                idx = UNCORE_PMC_IDX_FREERUNNING;
                 break;
         default:
                 return -EINVAL;
@@ -430,104 +438,6 @@ static int snb_uncore_imc_hw_config(struct intel_uncore_box *box, struct perf_ev
         return 0;
 }
 
-static void snb_uncore_imc_event_start(struct perf_event *event, int flags)
-{
-        struct intel_uncore_box *box = uncore_event_to_box(event);
-        u64 count;
-
-        if (WARN_ON_ONCE(!(event->hw.state & PERF_HES_STOPPED)))
-                return;
-
-        event->hw.state = 0;
-        box->n_active++;
-
-        list_add_tail(&event->active_entry, &box->active_list);
-
-        count = snb_uncore_imc_read_counter(box, event);
-        local64_set(&event->hw.prev_count, count);
-
-        if (box->n_active == 1)
-                uncore_pmu_start_hrtimer(box);
-}
-
-static void snb_uncore_imc_event_read(struct perf_event *event)
-{
-        struct intel_uncore_box *box = uncore_event_to_box(event);
-        u64 prev_count, new_count, delta;
-        int shift;
-
-        /*
-         * There are two free running counters in IMC.
-         * The index for the second one is hardcoded to
-         * UNCORE_PMC_IDX_FIXED + 1.
-         */
-        if (event->hw.idx >= UNCORE_PMC_IDX_FIXED)
-                shift = 64 - uncore_fixed_ctr_bits(box);
-        else
-                shift = 64 - uncore_perf_ctr_bits(box);
-
-        /* the hrtimer might modify the previous event value */
-again:
-        prev_count = local64_read(&event->hw.prev_count);
-        new_count = uncore_read_counter(box, event);
-        if (local64_xchg(&event->hw.prev_count, new_count) != prev_count)
-                goto again;
-
-        delta = (new_count << shift) - (prev_count << shift);
-        delta >>= shift;
-
-        local64_add(delta, &event->count);
-}
-
-static void snb_uncore_imc_event_stop(struct perf_event *event, int flags)
-{
-        struct intel_uncore_box *box = uncore_event_to_box(event);
-        struct hw_perf_event *hwc = &event->hw;
-
-        if (!(hwc->state & PERF_HES_STOPPED)) {
-                box->n_active--;
-
-                WARN_ON_ONCE(hwc->state & PERF_HES_STOPPED);
-                hwc->state |= PERF_HES_STOPPED;
-
-                list_del(&event->active_entry);
-
-                if (box->n_active == 0)
-                        uncore_pmu_cancel_hrtimer(box);
-        }
-
-        if ((flags & PERF_EF_UPDATE) && !(hwc->state & PERF_HES_UPTODATE)) {
-                /*
-                 * Drain the remaining delta count out of a event
-                 * that we are disabling:
-                 */
-                snb_uncore_imc_event_read(event);
-                hwc->state |= PERF_HES_UPTODATE;
-        }
-}
-
-static int snb_uncore_imc_event_add(struct perf_event *event, int flags)
-{
-        struct intel_uncore_box *box = uncore_event_to_box(event);
-        struct hw_perf_event *hwc = &event->hw;
-
-        if (!box)
-                return -ENODEV;
-
-        hwc->state = PERF_HES_UPTODATE | PERF_HES_STOPPED;
-        if (!(flags & PERF_EF_START))
-                hwc->state |= PERF_HES_ARCH;
-
-        snb_uncore_imc_event_start(event, 0);
-
-        return 0;
-}
-
-static void snb_uncore_imc_event_del(struct perf_event *event, int flags)
-{
-        snb_uncore_imc_event_stop(event, PERF_EF_UPDATE);
-}
-
 int snb_pci2phy_map_init(int devid)
 {
         struct pci_dev *dev = NULL;
@@ -559,11 +469,11 @@ int snb_pci2phy_map_init(int devid)
 static struct pmu snb_uncore_imc_pmu = {
         .task_ctx_nr    = perf_invalid_context,
         .event_init     = snb_uncore_imc_event_init,
-        .add            = snb_uncore_imc_event_add,
-        .del            = snb_uncore_imc_event_del,
-        .start          = snb_uncore_imc_event_start,
-        .stop           = snb_uncore_imc_event_stop,
-        .read           = snb_uncore_imc_event_read,
+        .add            = uncore_pmu_event_add,
+        .del            = uncore_pmu_event_del,
+        .start          = uncore_pmu_event_start,
+        .stop           = uncore_pmu_event_stop,
+        .read           = uncore_pmu_event_read,
 };
 
 static struct intel_uncore_ops snb_uncore_imc_ops = {
@@ -581,12 +491,10 @@ static struct intel_uncore_type snb_uncore_imc = {
         .name           = "imc",
         .num_counters   = 2,
         .num_boxes      = 1,
-        .fixed_ctr_bits = 32,
-        .fixed_ctr      = SNB_UNCORE_PCI_IMC_CTR_BASE,
+        .num_freerunning_types = SNB_PCI_UNCORE_IMC_FREERUNNING_TYPE_MAX,
+        .freerunning    = snb_uncore_imc_freerunning,
         .event_descs    = snb_uncore_imc_events,
         .format_group   = &snb_uncore_imc_format_group,
-        .perf_ctr       = SNB_UNCORE_PCI_IMC_DATA_READS_BASE,
-        .event_mask     = SNB_UNCORE_PCI_IMC_EVENT_MASK,
         .ops            = &snb_uncore_imc_ops,
         .pmu            = &snb_uncore_imc_pmu,
 };
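
The removed snb_uncore_imc_event_read() above made the counter delta wrap-safe by shifting both samples into the top of a 64-bit word before subtracting. The generic free running read path that replaces it needs the same wrap handling for these 32-bit counters; the following standalone sketch restates that delta trick with plain types instead of the kernel's local64_t machinery:

#include <stdint.h>

/*
 * Wrap-safe delta for a free running counter that is only 'bits' wide
 * (32 for the client IMC counters).  Shifting both samples into the top
 * bits of a 64-bit word makes the subtraction wrap modulo 2^bits, so a
 * counter rollover still yields the correct small delta.
 */
static uint64_t freerunning_delta(uint64_t prev, uint64_t now, unsigned int bits)
{
        unsigned int shift = 64 - bits;

        return ((now << shift) - (prev << shift)) >> shift;
}

For example, freerunning_delta(0xfffffffe, 0x1, 32) returns 3, counting correctly across the 32-bit wrap.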