aboutsummaryrefslogtreecommitdiffstats
path: root/include/kvm
diff options
context:
space:
mode:
authorShannon Zhao <shannon.zhao@linaro.org>2016-02-26 06:29:19 -0500
committerMarc Zyngier <marc.zyngier@arm.com>2016-02-29 13:34:21 -0500
commitb02386eb7dac7555a208d81aef2a0e5c6f0f8085 (patch)
treea3a06d842810134ac014da93fe78a161918cf2d1 /include/kvm
parentd692b8ad6ec4814ddd9a37ce5c9c9d971e741088 (diff)
arm64: KVM: Add PMU overflow interrupt routing
When calling perf_event_create_kernel_counter to create a perf_event, assign an overflow handler. Then when the perf event overflows, set the corresponding bit of the guest PMOVSSET register. If this counter is enabled and its interrupt is enabled as well, kick the vcpu to sync the interrupt. On VM entry, if a counter has overflowed and the interrupt level has changed, inject the interrupt with the corresponding level. On VM exit, sync the interrupt level as well if it has been changed. Signed-off-by: Shannon Zhao <shannon.zhao@linaro.org> Reviewed-by: Marc Zyngier <marc.zyngier@arm.com> Reviewed-by: Andrew Jones <drjones@redhat.com> Reviewed-by: Christoffer Dall <christoffer.dall@linaro.org> Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
Diffstat (limited to 'include/kvm')
-rw-r--r--include/kvm/arm_pmu.h5
1 file changed, 5 insertions, 0 deletions
diff --git a/include/kvm/arm_pmu.h b/include/kvm/arm_pmu.h
index 8bc92d119713..9c184edb8e07 100644
--- a/include/kvm/arm_pmu.h
+++ b/include/kvm/arm_pmu.h
@@ -35,6 +35,7 @@ struct kvm_pmu {
35 int irq_num; 35 int irq_num;
36 struct kvm_pmc pmc[ARMV8_PMU_MAX_COUNTERS]; 36 struct kvm_pmc pmc[ARMV8_PMU_MAX_COUNTERS];
37 bool ready; 37 bool ready;
38 bool irq_level;
38}; 39};
39 40
40#define kvm_arm_pmu_v3_ready(v) ((v)->arch.pmu.ready) 41#define kvm_arm_pmu_v3_ready(v) ((v)->arch.pmu.ready)
@@ -44,6 +45,8 @@ u64 kvm_pmu_valid_counter_mask(struct kvm_vcpu *vcpu);
44void kvm_pmu_disable_counter(struct kvm_vcpu *vcpu, u64 val); 45void kvm_pmu_disable_counter(struct kvm_vcpu *vcpu, u64 val);
45void kvm_pmu_enable_counter(struct kvm_vcpu *vcpu, u64 val); 46void kvm_pmu_enable_counter(struct kvm_vcpu *vcpu, u64 val);
46void kvm_pmu_overflow_set(struct kvm_vcpu *vcpu, u64 val); 47void kvm_pmu_overflow_set(struct kvm_vcpu *vcpu, u64 val);
48void kvm_pmu_flush_hwstate(struct kvm_vcpu *vcpu);
49void kvm_pmu_sync_hwstate(struct kvm_vcpu *vcpu);
47void kvm_pmu_software_increment(struct kvm_vcpu *vcpu, u64 val); 50void kvm_pmu_software_increment(struct kvm_vcpu *vcpu, u64 val);
48void kvm_pmu_handle_pmcr(struct kvm_vcpu *vcpu, u64 val); 51void kvm_pmu_handle_pmcr(struct kvm_vcpu *vcpu, u64 val);
49void kvm_pmu_set_counter_event_type(struct kvm_vcpu *vcpu, u64 data, 52void kvm_pmu_set_counter_event_type(struct kvm_vcpu *vcpu, u64 data,
@@ -67,6 +70,8 @@ static inline u64 kvm_pmu_valid_counter_mask(struct kvm_vcpu *vcpu)
67static inline void kvm_pmu_disable_counter(struct kvm_vcpu *vcpu, u64 val) {} 70static inline void kvm_pmu_disable_counter(struct kvm_vcpu *vcpu, u64 val) {}
68static inline void kvm_pmu_enable_counter(struct kvm_vcpu *vcpu, u64 val) {} 71static inline void kvm_pmu_enable_counter(struct kvm_vcpu *vcpu, u64 val) {}
69static inline void kvm_pmu_overflow_set(struct kvm_vcpu *vcpu, u64 val) {} 72static inline void kvm_pmu_overflow_set(struct kvm_vcpu *vcpu, u64 val) {}
73static inline void kvm_pmu_flush_hwstate(struct kvm_vcpu *vcpu) {}
74static inline void kvm_pmu_sync_hwstate(struct kvm_vcpu *vcpu) {}
70static inline void kvm_pmu_software_increment(struct kvm_vcpu *vcpu, u64 val) {} 75static inline void kvm_pmu_software_increment(struct kvm_vcpu *vcpu, u64 val) {}
71static inline void kvm_pmu_handle_pmcr(struct kvm_vcpu *vcpu, u64 val) {} 76static inline void kvm_pmu_handle_pmcr(struct kvm_vcpu *vcpu, u64 val) {}
72static inline void kvm_pmu_set_counter_event_type(struct kvm_vcpu *vcpu, 77static inline void kvm_pmu_set_counter_event_type(struct kvm_vcpu *vcpu,