-rw-r--r--  arch/arm64/kernel/perf_event.c | 43
1 file changed, 36 insertions, 7 deletions
diff --git a/arch/arm64/kernel/perf_event.c b/arch/arm64/kernel/perf_event.c
index cccf4fc86d67..6bb28aaf5aea 100644
--- a/arch/arm64/kernel/perf_event.c
+++ b/arch/arm64/kernel/perf_event.c
@@ -26,6 +26,7 @@
 
 #include <linux/acpi.h>
 #include <linux/clocksource.h>
+#include <linux/kvm_host.h>
 #include <linux/of.h>
 #include <linux/perf/arm_pmu.h>
 #include <linux/platform_device.h>
@@ -528,11 +529,21 @@ static inline int armv8pmu_enable_counter(int idx)
 
 static inline void armv8pmu_enable_event_counter(struct perf_event *event)
 {
+	struct perf_event_attr *attr = &event->attr;
 	int idx = event->hw.idx;
+	u32 counter_bits = BIT(ARMV8_IDX_TO_COUNTER(idx));
 
-	armv8pmu_enable_counter(idx);
 	if (armv8pmu_event_is_chained(event))
-		armv8pmu_enable_counter(idx - 1);
+		counter_bits |= BIT(ARMV8_IDX_TO_COUNTER(idx - 1));
+
+	kvm_set_pmu_events(counter_bits, attr);
+
+	/* We rely on the hypervisor switch code to enable guest counters */
+	if (!kvm_pmu_counter_deferred(attr)) {
+		armv8pmu_enable_counter(idx);
+		if (armv8pmu_event_is_chained(event))
+			armv8pmu_enable_counter(idx - 1);
+	}
 }
 
 static inline int armv8pmu_disable_counter(int idx)
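For reference, the kvm_set_pmu_events() call recorded above only tells KVM which counter bits it must flip at guest entry/exit; the driver still programs the hardware itself unless kvm_pmu_counter_deferred() says the event is left to the hypervisor switch code. A minimal sketch of what that per-CPU bookkeeping could look like follows. The structure, the function names, and the exact deferral condition are assumptions for illustration only and are not taken from this patch.

#include <linux/percpu.h>
#include <linux/perf_event.h>
#include <asm/virt.h>		/* has_vhe() */

/* Illustrative only: per-CPU masks of counters that must run in the
 * host and in the guest respectively. */
struct sketch_pmu_events {
	u32 events_host;
	u32 events_guest;
};

static DEFINE_PER_CPU(struct sketch_pmu_events, sketch_pmu_events);

/* Assumed condition: on a !VHE system EL1 filtering alone cannot
 * separate host from guest, so host-excluded events are deferred to
 * the world-switch code. */
static bool sketch_pmu_counter_deferred(struct perf_event_attr *attr)
{
	return !has_vhe() && attr->exclude_host;
}

static void sketch_set_pmu_events(u32 set, struct perf_event_attr *attr)
{
	struct sketch_pmu_events *ev = this_cpu_ptr(&sketch_pmu_events);

	if (!attr->exclude_host)
		ev->events_host |= set;
	if (!attr->exclude_guest)
		ev->events_guest |= set;
}

static void sketch_clr_pmu_events(u32 clr)
{
	struct sketch_pmu_events *ev = this_cpu_ptr(&sketch_pmu_events);

	ev->events_host &= ~clr;
	ev->events_guest &= ~clr;
}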
@@ -545,11 +556,21 @@ static inline int armv8pmu_disable_counter(int idx)
 static inline void armv8pmu_disable_event_counter(struct perf_event *event)
 {
 	struct hw_perf_event *hwc = &event->hw;
+	struct perf_event_attr *attr = &event->attr;
 	int idx = hwc->idx;
+	u32 counter_bits = BIT(ARMV8_IDX_TO_COUNTER(idx));
 
 	if (armv8pmu_event_is_chained(event))
-		armv8pmu_disable_counter(idx - 1);
-	armv8pmu_disable_counter(idx);
+		counter_bits |= BIT(ARMV8_IDX_TO_COUNTER(idx - 1));
+
+	kvm_clr_pmu_events(counter_bits);
+
+	/* We rely on the hypervisor switch code to disable guest counters */
+	if (!kvm_pmu_counter_deferred(attr)) {
+		if (armv8pmu_event_is_chained(event))
+			armv8pmu_disable_counter(idx - 1);
+		armv8pmu_disable_counter(idx);
+	}
 }
 
 static inline int armv8pmu_enable_intens(int idx)
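The counterpart is the hypervisor world-switch path, which applies the recorded masks when entering and leaving a guest, typically by writing PMCNTENSET_EL0/PMCNTENCLR_EL0. A hedged sketch reusing the illustrative structure above; the function names are invented and do not come from this patch.

#include <asm/sysreg.h>

static void sketch_pmu_switch_to_guest(struct sketch_pmu_events *ev)
{
	/* Stop host-only counters, start guest-only counters. */
	if (ev->events_host)
		write_sysreg(ev->events_host, pmcntenclr_el0);
	if (ev->events_guest)
		write_sysreg(ev->events_guest, pmcntenset_el0);
}

static void sketch_pmu_switch_to_host(struct sketch_pmu_events *ev)
{
	/* Stop guest-only counters, restart host-only counters. */
	if (ev->events_guest)
		write_sysreg(ev->events_guest, pmcntenclr_el0);
	if (ev->events_host)
		write_sysreg(ev->events_host, pmcntenset_el0);
}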
@@ -829,11 +850,16 @@ static int armv8pmu_set_event_filter(struct hw_perf_event *event,
 		if (!attr->exclude_kernel)
 			config_base |= ARMV8_PMU_INCLUDE_EL2;
 	} else {
-		if (attr->exclude_kernel)
-			config_base |= ARMV8_PMU_EXCLUDE_EL1;
-		if (!attr->exclude_hv)
+		if (!attr->exclude_hv && !attr->exclude_host)
 			config_base |= ARMV8_PMU_INCLUDE_EL2;
 	}
+
+	/*
+	 * Filter out !VHE kernels and guest kernels
+	 */
+	if (attr->exclude_kernel)
+		config_base |= ARMV8_PMU_EXCLUDE_EL1;
+
 	if (attr->exclude_user)
 		config_base |= ARMV8_PMU_EXCLUDE_EL0;
 
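With the filter change above, guest-only or host-only counting is requested through the standard exclude_host/exclude_guest bits in struct perf_event_attr (the perf tool's :G and :H event modifiers map onto these). A small userspace sketch, assuming a Linux build environment with the perf_event_open() syscall; the helper name is invented for illustration.

#include <linux/perf_event.h>
#include <string.h>
#include <sys/syscall.h>
#include <sys/types.h>
#include <unistd.h>

/* Open a cycle counter that counts only while a guest is running
 * (roughly what "cycles:G" asks for). Returns the perf fd or -1. */
static int open_guest_only_cycles(pid_t pid, int cpu)
{
	struct perf_event_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = PERF_TYPE_HARDWARE;
	attr.config = PERF_COUNT_HW_CPU_CYCLES;
	attr.exclude_host = 1;	/* do not count while the host itself runs */

	return syscall(__NR_perf_event_open, &attr, pid, cpu, -1, 0);
}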
@@ -863,6 +889,9 @@ static void armv8pmu_reset(void *info)
 		armv8pmu_disable_intens(idx);
 	}
 
+	/* Clear the counters we flip at guest entry/exit */
+	kvm_clr_pmu_events(U32_MAX);
+
 	/*
 	 * Initialize & Reset PMNC. Request overflow interrupt for
 	 * 64 bit cycle counter but cheat in armv8pmu_write_counter().
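Since U32_MAX has every bit set, the call added above clears both the host and the guest masks, so a freshly reset CPU starts with no counters flagged for flipping at guest entry/exit.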