author		Kan Liang <kan.liang@intel.com>		2018-05-03 14:25:10 -0400
committer	Ingo Molnar <mingo@kernel.org>		2018-05-31 06:36:28 -0400
commit		0e0162dfcd1fbe4c711ee86f24f966c318999603
tree		9bf956967d27593bd8b302fb2446dbe3156ec0b9
parent		927b2deb067b8b4753fc09c7a42092f43fc0c1f6
perf/x86/intel/uncore: Add infrastructure for free running counters
A number of free running counters have been introduced for uncore; they
provide highly valuable information to a wide range of customers.
However, the generic uncore code doesn't support them yet.

The free running counters will be specially handled based on their
unique attributes:

- They are read-only. They cannot be enabled or disabled.
- The event and the counter are always 1:1 mapped. The event doesn't
  need to be assigned a counter or tracked in event_list.
- They are always active, so there is no need to check their
  availability.
- Their bit width differs from that of the regular counters.

Also, introduce inline helpers to replace the open-coded checks for
fixed and free running counters.
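
The helpers themselves live in the companion uncore.h change from
earlier in this series and are not part of this diff. A minimal sketch
of their likely shape, assuming the UNCORE_PMC_IDX_FIXED and
UNCORE_PMC_IDX_FREERUNNING index values that series introduces:

	static inline bool uncore_pmc_fixed(int idx)
	{
		return idx == UNCORE_PMC_IDX_FIXED;
	}

	static inline bool uncore_pmc_freerunning(int idx)
	{
		return idx == UNCORE_PMC_IDX_FREERUNNING;
	}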
Signed-off-by: Kan Liang <kan.liang@intel.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Reviewed-by: Thomas Gleixner <tglx@linutronix.de>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: acme@kernel.org
Cc: eranian@google.com
Link: http://lkml.kernel.org/r/1525371913-10597-5-git-send-email-kan.liang@intel.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
 arch/x86/events/intel/uncore.c | 68 +++++++++++++++++++++++++++++++++++++---
 1 file changed, 64 insertions(+), 4 deletions(-)
diff --git a/arch/x86/events/intel/uncore.c b/arch/x86/events/intel/uncore.c
index 3b0f93eb3cc0..0a6f6973690b 100644
--- a/arch/x86/events/intel/uncore.c
+++ b/arch/x86/events/intel/uncore.c
@@ -203,7 +203,7 @@ static void uncore_assign_hw_event(struct intel_uncore_box *box,
 	hwc->idx = idx;
 	hwc->last_tag = ++box->tags[idx];
 
-	if (hwc->idx == UNCORE_PMC_IDX_FIXED) {
+	if (uncore_pmc_fixed(hwc->idx)) {
 		hwc->event_base = uncore_fixed_ctr(box);
 		hwc->config_base = uncore_fixed_ctl(box);
 		return;
@@ -218,7 +218,9 @@ void uncore_perf_event_update(struct intel_uncore_box *box, struct perf_event *event)
 	u64 prev_count, new_count, delta;
 	int shift;
 
-	if (event->hw.idx == UNCORE_PMC_IDX_FIXED)
+	if (uncore_pmc_freerunning(event->hw.idx))
+		shift = 64 - uncore_freerunning_bits(box, event);
+	else if (uncore_pmc_fixed(event->hw.idx))
 		shift = 64 - uncore_fixed_ctr_bits(box);
 	else
 		shift = 64 - uncore_perf_ctr_bits(box);
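
The shift above is how uncore_perf_event_update() copes with counters
of differing widths: shifting both raw reads left by 64 - width
discards the unimplemented high bits, so the subtraction wraps
correctly, and shifting back down re-aligns the delta. A standalone
sketch of that arithmetic (counter_delta() is a hypothetical name for
illustration, not a kernel function):

	/* Delta of a 'bits'-wide counter that may have wrapped. */
	static u64 counter_delta(u64 prev, u64 now, int bits)
	{
		int shift = 64 - bits;

		/* Drop unimplemented high bits, subtract, re-align. */
		return ((now << shift) - (prev << shift)) >> shift;
	}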
@@ -454,10 +456,25 @@ static void uncore_pmu_event_start(struct perf_event *event, int flags)
 	struct intel_uncore_box *box = uncore_event_to_box(event);
 	int idx = event->hw.idx;
 
-	if (WARN_ON_ONCE(!(event->hw.state & PERF_HES_STOPPED)))
+	if (WARN_ON_ONCE(idx == -1 || idx >= UNCORE_PMC_IDX_MAX))
 		return;
 
-	if (WARN_ON_ONCE(idx == -1 || idx >= UNCORE_PMC_IDX_MAX))
+	/*
+	 * Free running counter is read-only and always active.
+	 * Use the current counter value as start point.
+	 * There is no overflow interrupt for free running counter.
+	 * Use hrtimer to periodically poll the counter to avoid overflow.
+	 */
+	if (uncore_pmc_freerunning(event->hw.idx)) {
+		list_add_tail(&event->active_entry, &box->active_list);
+		local64_set(&event->hw.prev_count,
+			    uncore_read_counter(box, event));
+		if (box->n_active++ == 0)
+			uncore_pmu_start_hrtimer(box);
+		return;
+	}
+
+	if (WARN_ON_ONCE(!(event->hw.state & PERF_HES_STOPPED)))
 		return;
 
 	event->hw.state = 0;
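
The polling interval only has to beat the counter's wrap period, which
depends on its width and increment rate. As an illustrative
back-of-envelope check (the 48-bit width and 4 GHz rate are assumed
here, not taken from this patch): such a counter wraps after
2^48 / (4 * 10^9) ~= 70,000 seconds, about 19.5 hours, so an hrtimer
firing on the order of once a minute samples it thousands of times per
wrap.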
@@ -479,6 +496,15 @@ static void uncore_pmu_event_stop(struct perf_event *event, int flags)
 	struct intel_uncore_box *box = uncore_event_to_box(event);
 	struct hw_perf_event *hwc = &event->hw;
 
+	/* Cannot disable free running counter which is read-only */
+	if (uncore_pmc_freerunning(hwc->idx)) {
+		list_del(&event->active_entry);
+		if (--box->n_active == 0)
+			uncore_pmu_cancel_hrtimer(box);
+		uncore_perf_event_update(box, event);
+		return;
+	}
+
 	if (__test_and_clear_bit(hwc->idx, box->active_mask)) {
 		uncore_disable_event(box, event);
 		box->n_active--;
@@ -512,6 +538,17 @@ static int uncore_pmu_event_add(struct perf_event *event, int flags)
 	if (!box)
 		return -ENODEV;
 
+	/*
+	 * The free running counter is assigned in event_init().
+	 * The free running counter event and free running counter
+	 * are 1:1 mapped. It doesn't need to be tracked in event_list.
+	 */
+	if (uncore_pmc_freerunning(hwc->idx)) {
+		if (flags & PERF_EF_START)
+			uncore_pmu_event_start(event, 0);
+		return 0;
+	}
+
 	ret = n = uncore_collect_events(box, event, false);
 	if (ret < 0)
 		return ret;
@@ -570,6 +607,14 @@ static void uncore_pmu_event_del(struct perf_event *event, int flags)
 
 	uncore_pmu_event_stop(event, PERF_EF_UPDATE);
 
+	/*
+	 * The event for a free running counter is not tracked by event_list.
+	 * Because the event and the free running counter are 1:1 mapped,
+	 * there is no need to force event->hw.idx = -1 to reassign it.
+	 */
+	if (uncore_pmc_freerunning(event->hw.idx))
+		return;
+
 	for (i = 0; i < box->n_events; i++) {
 		if (event == box->event_list[i]) {
 			uncore_put_event_constraint(box, event);
@@ -603,6 +648,10 @@ static int uncore_validate_group(struct intel_uncore_pmu *pmu,
 	struct intel_uncore_box *fake_box;
 	int ret = -EINVAL, n;
 
+	/* The free running counter is always active. */
+	if (uncore_pmc_freerunning(event->hw.idx))
+		return 0;
+
 	fake_box = uncore_alloc_box(pmu->type, NUMA_NO_NODE);
 	if (!fake_box)
 		return -ENOMEM;
@@ -690,6 +739,17 @@ static int uncore_pmu_event_init(struct perf_event *event)
 
 		/* fixed counters have event field hardcoded to zero */
 		hwc->config = 0ULL;
+	} else if (is_freerunning_event(event)) {
+		if (!check_valid_freerunning_event(box, event))
+			return -EINVAL;
+		event->hw.idx = UNCORE_PMC_IDX_FREERUNNING;
+		/*
+		 * The free running counter event and free running counter
+		 * are always 1:1 mapped.
+		 * The free running counter is always active.
+		 * Assign the free running counter here.
+		 */
+		event->hw.event_base = uncore_freerunning_counter(box, event);
 	} else {
 		hwc->config = event->attr.config &
 			(pmu->type->event_mask | ((u64)pmu->type->event_mask_ext << 32));
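
check_valid_freerunning_event() and uncore_freerunning_counter() also
come from the uncore.h change earlier in this series rather than this
diff. A hedged sketch of plausible implementations, assuming the
per-type free running counter descriptors (counter_base,
counter_offset, num_counters) that change introduces; the real helpers
may additionally fold in a per-box MSR offset:

	static inline bool
	check_valid_freerunning_event(struct intel_uncore_box *box,
				      struct perf_event *event)
	{
		unsigned int type = uncore_freerunning_type(event->hw.config);
		unsigned int idx = uncore_freerunning_idx(event->hw.config);

		/* Both the type and the index must exist on this box. */
		return type < uncore_num_freerunning_types(box, event) &&
		       idx < uncore_num_freerunning(box, event);
	}

	static inline unsigned int
	uncore_freerunning_counter(struct intel_uncore_box *box,
				   struct perf_event *event)
	{
		unsigned int type = uncore_freerunning_type(event->hw.config);
		unsigned int idx = uncore_freerunning_idx(event->hw.config);

		/* Base MSR for the type, plus a fixed stride per counter. */
		return box->pmu->type->freerunning[type].counter_base +
		       box->pmu->type->freerunning[type].counter_offset * idx;
	}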