author		Kan Liang <kan.liang@linux.intel.com>	2019-10-25 10:43:13 -0400
committer	Ingo Molnar <mingo@kernel.org>	2019-10-28 06:02:01 -0400
commit		75be6f703a141b048590d659a3954c4fedd30bba (patch)
tree		9f8950ab3c394e1847781bff60054ff9aff63d05 /arch/x86
parent		e431e79b60603079d269e0c2a5177943b95fa4b6 (diff)
perf/x86/uncore: Fix event group support
The events in the same group don't start or stop simultaneously.
Here is the ftrace output when enabling an event group for uncore_iio_0:

  # perf stat -e "{uncore_iio_0/event=0x1/,uncore_iio_0/event=0xe/}"

  <idle>-0 [000] d.h. 8959.064832: read_msr:  a41, value b2b0b030
      //Read counter reg of IIO unit0 counter0
  <idle>-0 [000] d.h. 8959.064835: write_msr: a48, value 400001
      //Write Ctrl reg of IIO unit0 counter0 to enable counter0.
      <------ Although counter0 is enabled, Unit Ctrl is still frozen.
      Nothing will count. We are still good here.
  <idle>-0 [000] d.h. 8959.064836: read_msr:  a40, value 30100
      //Read Unit Ctrl reg of IIO unit0
  <idle>-0 [000] d.h. 8959.064838: write_msr: a40, value 30000
      //Write Unit Ctrl reg of IIO unit0 to enable all counters in the
      unit by clearing the Freeze bit
      <------ Unit0 is un-frozen. Counter0 has been enabled and now
      starts counting. But counter1 has not been enabled yet. The issue
      starts here.
  <idle>-0 [000] d.h. 8959.064846: read_msr:  a42, value 0
      //Read counter reg of IIO unit0 counter1
  <idle>-0 [000] d.h. 8959.064847: write_msr: a49, value 40000e
      //Write Ctrl reg of IIO unit0 counter1 to enable counter1.
      <------ Now counter1 just starts to count, while counter0 has
      already been running for a while.

The current code un-freezes the Unit Ctrl right after the first counter
is enabled, so the subsequent events in a group always lose some counter
values.

Implement pmu_enable and pmu_disable support for uncore, which helps to
batch the hardware accesses.

No one uses uncore_enable_box and uncore_disable_box. Remove them.

Signed-off-by: Kan Liang <kan.liang@linux.intel.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Alexander Shishkin <alexander.shishkin@linux.intel.com>
Cc: Arnaldo Carvalho de Melo <acme@redhat.com>
Cc: Jiri Olsa <jolsa@redhat.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Mark Rutland <mark.rutland@arm.com>
Cc: Namhyung Kim <namhyung@kernel.org>
Cc: Stephane Eranian <eranian@google.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Vince Weaver <vincent.weaver@maine.edu>
Cc: linux-drivers-review@eclists.intel.com
Cc: linux-perf@eclists.intel.com
Fixes: 087bfbb03269 ("perf/x86: Add generic Intel uncore PMU support")
Link: https://lkml.kernel.org/r/1572014593-31591-1-git-send-email-kan.liang@linux.intel.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
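Editor's note: the perf core invokes a PMU's ->pmu_disable() and ->pmu_enable()
callbacks around scheduling events in and out, so moving the box freeze/un-freeze
into those callbacks lets every sibling counter of a group be programmed while
the box is still frozen. The stand-alone C sketch below is only a model of that
ordering, not kernel code; the names model_box, tick and skew are hypothetical.

/*
 * Stand-alone model, not kernel code: "frozen" stands in for the IIO
 * Unit Ctrl Freeze bit, "ctr_enabled" for the per-counter enable bits,
 * and a "tick" for any interval in which the un-frozen box is counting.
 */
#include <stdbool.h>
#include <stdio.h>

struct model_box {
	bool frozen;
	bool ctr_enabled[2];
};

static int skew; /* ticks seen by counter 0 while counter 1 was still off */

static void tick(struct model_box *box)
{
	if (!box->frozen && box->ctr_enabled[0] && !box->ctr_enabled[1])
		skew++;
}

int main(void)
{
	struct model_box box = { .frozen = true };

	/* Old flow: un-freeze as soon as the first counter is enabled. */
	box.ctr_enabled[0] = true;
	box.frozen = false;            /* box starts counting here */
	tick(&box);                    /* sibling's MSR writes still pending */
	tick(&box);
	box.ctr_enabled[1] = true;
	printf("old flow skew: %d ticks\n", skew);   /* 2 */

	/* New flow: program the whole group under freeze, un-freeze once. */
	skew = 0;
	box = (struct model_box){ .frozen = true };
	box.ctr_enabled[0] = true;
	box.ctr_enabled[1] = true;     /* both siblings programmed while frozen */
	box.frozen = false;            /* pmu_enable(): counters start together */
	tick(&box);
	printf("new flow skew: %d ticks\n", skew);   /* 0 */
	return 0;
}

With the old ordering the first counter accumulates ticks before its sibling is
enabled; with the batched ordering both counters start on the same un-freeze,
which is what the uncore_pmu_enable()/uncore_pmu_disable() hunks below implement.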
Diffstat (limited to 'arch/x86')
-rw-r--r--	arch/x86/events/intel/uncore.c	44
-rw-r--r--	arch/x86/events/intel/uncore.h	12
2 files changed, 38 insertions(+), 18 deletions(-)
diff --git a/arch/x86/events/intel/uncore.c b/arch/x86/events/intel/uncore.c
index 6fc2e06ab4c6..86467f85c383 100644
--- a/arch/x86/events/intel/uncore.c
+++ b/arch/x86/events/intel/uncore.c
@@ -502,10 +502,8 @@ void uncore_pmu_event_start(struct perf_event *event, int flags)
 	local64_set(&event->hw.prev_count, uncore_read_counter(box, event));
 	uncore_enable_event(box, event);
 
-	if (box->n_active == 1) {
-		uncore_enable_box(box);
+	if (box->n_active == 1)
 		uncore_pmu_start_hrtimer(box);
-	}
 }
 
 void uncore_pmu_event_stop(struct perf_event *event, int flags)
@@ -529,10 +527,8 @@ void uncore_pmu_event_stop(struct perf_event *event, int flags)
 		WARN_ON_ONCE(hwc->state & PERF_HES_STOPPED);
 		hwc->state |= PERF_HES_STOPPED;
 
-		if (box->n_active == 0) {
-			uncore_disable_box(box);
+		if (box->n_active == 0)
 			uncore_pmu_cancel_hrtimer(box);
-		}
 	}
 
 	if ((flags & PERF_EF_UPDATE) && !(hwc->state & PERF_HES_UPTODATE)) {
@@ -778,6 +774,40 @@ static int uncore_pmu_event_init(struct perf_event *event)
 	return ret;
 }
 
+static void uncore_pmu_enable(struct pmu *pmu)
+{
+	struct intel_uncore_pmu *uncore_pmu;
+	struct intel_uncore_box *box;
+
+	uncore_pmu = container_of(pmu, struct intel_uncore_pmu, pmu);
+	if (!uncore_pmu)
+		return;
+
+	box = uncore_pmu_to_box(uncore_pmu, smp_processor_id());
+	if (!box)
+		return;
+
+	if (uncore_pmu->type->ops->enable_box)
+		uncore_pmu->type->ops->enable_box(box);
+}
+
+static void uncore_pmu_disable(struct pmu *pmu)
+{
+	struct intel_uncore_pmu *uncore_pmu;
+	struct intel_uncore_box *box;
+
+	uncore_pmu = container_of(pmu, struct intel_uncore_pmu, pmu);
+	if (!uncore_pmu)
+		return;
+
+	box = uncore_pmu_to_box(uncore_pmu, smp_processor_id());
+	if (!box)
+		return;
+
+	if (uncore_pmu->type->ops->disable_box)
+		uncore_pmu->type->ops->disable_box(box);
+}
+
 static ssize_t uncore_get_attr_cpumask(struct device *dev,
 				struct device_attribute *attr, char *buf)
 {
@@ -803,6 +833,8 @@ static int uncore_pmu_register(struct intel_uncore_pmu *pmu)
 	pmu->pmu = (struct pmu) {
 		.attr_groups	= pmu->type->attr_groups,
 		.task_ctx_nr	= perf_invalid_context,
+		.pmu_enable	= uncore_pmu_enable,
+		.pmu_disable	= uncore_pmu_disable,
 		.event_init	= uncore_pmu_event_init,
 		.add		= uncore_pmu_event_add,
 		.del		= uncore_pmu_event_del,
diff --git a/arch/x86/events/intel/uncore.h b/arch/x86/events/intel/uncore.h
index f36f7bebbc1b..bbfdaa720b45 100644
--- a/arch/x86/events/intel/uncore.h
+++ b/arch/x86/events/intel/uncore.h
@@ -441,18 +441,6 @@ static inline int uncore_freerunning_hw_config(struct intel_uncore_box *box,
 	return -EINVAL;
 }
 
-static inline void uncore_disable_box(struct intel_uncore_box *box)
-{
-	if (box->pmu->type->ops->disable_box)
-		box->pmu->type->ops->disable_box(box);
-}
-
-static inline void uncore_enable_box(struct intel_uncore_box *box)
-{
-	if (box->pmu->type->ops->enable_box)
-		box->pmu->type->ops->enable_box(box);
-}
-
 static inline void uncore_disable_event(struct intel_uncore_box *box,
 					struct perf_event *event)
 {