author     Andrew Jones <drjones@redhat.com>      2017-07-01 12:26:54 -0400
committer  Marc Zyngier <marc.zyngier@arm.com>    2017-07-25 09:18:01 -0400
commit     d9f89b4e9290e46cd9b273e9ad0bff0f93e86fae
tree       94f2f88610beefb87a9995c91a9452a867990d3a
parent     79962a5c8ba5b33f49d88a058e2124bf2ff3c034
KVM: arm/arm64: PMU: Fix overflow interrupt injection
kvm_pmu_overflow_set() is called from perf's interrupt handler,
making the call to kvm_vgic_inject_irq() that was introduced there
by "KVM: arm/arm64: PMU: remove request-less vcpu kick" a really
bad idea: it's quite easy to try to retake a lock that the
interrupted context is already holding. The fix is to use a vcpu
kick, leaving the interrupt injection to kvm_pmu_sync_hwstate(),
as was done before the refactoring. We don't just revert, though,
because before the refactoring the kick was request-less, leaving
the vcpu exposed to the request-less vcpu kick race, and also
because the kick was used unnecessarily from the register access
handlers.

Reviewed-by: Christoffer Dall <cdall@linaro.org>
Signed-off-by: Andrew Jones <drjones@redhat.com>
Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
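In short, the perf overflow handler now only records the overflow and
defers the actual injection to vcpu context. A condensed sketch of the
resulting control flow, abridged from the diff below (error handling
and the userspace-irqchip path are elided):

	/* perf IRQ context: record the overflow, then kick the vcpu */
	static void kvm_pmu_perf_overflow(...)
	{
		vcpu_sys_reg(vcpu, PMOVSSET_EL0) |= BIT(idx);

		if (kvm_pmu_overflow_status(vcpu)) {
			kvm_make_request(KVM_REQ_IRQ_PENDING, vcpu);
			kvm_vcpu_kick(vcpu);
		}
	}

	/* vcpu context: the kicked vcpu re-evaluates the overflow state
	 * and performs the vgic injection where taking locks is safe */
	static void kvm_pmu_update_state(struct kvm_vcpu *vcpu)
	{
		...
		kvm_vgic_inject_irq(vcpu->kvm, vcpu->vcpu_id,
				    pmu->irq_num, overflow, pmu);
	}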
-rw-r--r--  arch/arm64/kvm/sys_regs.c |  2
-rw-r--r--  include/kvm/arm_pmu.h     |  2
-rw-r--r--  virt/kvm/arm/pmu.c        | 43
3 files changed, 16 insertions(+), 31 deletions(-)
diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c
index 77862881ae86..2e070d3baf9f 100644
--- a/arch/arm64/kvm/sys_regs.c
+++ b/arch/arm64/kvm/sys_regs.c
@@ -764,7 +764,7 @@ static bool access_pmovs(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
 	if (p->is_write) {
 		if (r->CRm & 0x2)
 			/* accessing PMOVSSET_EL0 */
-			kvm_pmu_overflow_set(vcpu, p->regval & mask);
+			vcpu_sys_reg(vcpu, PMOVSSET_EL0) |= (p->regval & mask);
 		else
 			/* accessing PMOVSCLR_EL0 */
 			vcpu_sys_reg(vcpu, PMOVSSET_EL0) &= ~(p->regval & mask);
diff --git a/include/kvm/arm_pmu.h b/include/kvm/arm_pmu.h
index f6e030617467..f87fe20fcb05 100644
--- a/include/kvm/arm_pmu.h
+++ b/include/kvm/arm_pmu.h
@@ -48,7 +48,6 @@ void kvm_pmu_vcpu_reset(struct kvm_vcpu *vcpu);
 void kvm_pmu_vcpu_destroy(struct kvm_vcpu *vcpu);
 void kvm_pmu_disable_counter(struct kvm_vcpu *vcpu, u64 val);
 void kvm_pmu_enable_counter(struct kvm_vcpu *vcpu, u64 val);
-void kvm_pmu_overflow_set(struct kvm_vcpu *vcpu, u64 val);
 void kvm_pmu_flush_hwstate(struct kvm_vcpu *vcpu);
 void kvm_pmu_sync_hwstate(struct kvm_vcpu *vcpu);
 bool kvm_pmu_should_notify_user(struct kvm_vcpu *vcpu);
@@ -86,7 +85,6 @@ static inline void kvm_pmu_vcpu_reset(struct kvm_vcpu *vcpu) {}
 static inline void kvm_pmu_vcpu_destroy(struct kvm_vcpu *vcpu) {}
 static inline void kvm_pmu_disable_counter(struct kvm_vcpu *vcpu, u64 val) {}
 static inline void kvm_pmu_enable_counter(struct kvm_vcpu *vcpu, u64 val) {}
-static inline void kvm_pmu_overflow_set(struct kvm_vcpu *vcpu, u64 val) {}
 static inline void kvm_pmu_flush_hwstate(struct kvm_vcpu *vcpu) {}
 static inline void kvm_pmu_sync_hwstate(struct kvm_vcpu *vcpu) {}
 static inline bool kvm_pmu_should_notify_user(struct kvm_vcpu *vcpu)
diff --git a/virt/kvm/arm/pmu.c b/virt/kvm/arm/pmu.c
index fc8a723ff387..8a9c42366db7 100644
--- a/virt/kvm/arm/pmu.c
+++ b/virt/kvm/arm/pmu.c
@@ -203,11 +203,15 @@ static u64 kvm_pmu_overflow_status(struct kvm_vcpu *vcpu)
 	return reg;
 }
 
-static void kvm_pmu_check_overflow(struct kvm_vcpu *vcpu)
+static void kvm_pmu_update_state(struct kvm_vcpu *vcpu)
 {
 	struct kvm_pmu *pmu = &vcpu->arch.pmu;
-	bool overflow = !!kvm_pmu_overflow_status(vcpu);
+	bool overflow;
+
+	if (!kvm_arm_pmu_v3_ready(vcpu))
+		return;
 
+	overflow = !!kvm_pmu_overflow_status(vcpu);
 	if (pmu->irq_level == overflow)
 		return;
 
@@ -215,33 +219,11 @@ static void kvm_pmu_check_overflow(struct kvm_vcpu *vcpu)
 
 	if (likely(irqchip_in_kernel(vcpu->kvm))) {
 		int ret = kvm_vgic_inject_irq(vcpu->kvm, vcpu->vcpu_id,
-					      pmu->irq_num, overflow,
-					      &vcpu->arch.pmu);
+					      pmu->irq_num, overflow, pmu);
 		WARN_ON(ret);
 	}
 }
 
-/**
- * kvm_pmu_overflow_set - set PMU overflow interrupt
- * @vcpu: The vcpu pointer
- * @val: the value guest writes to PMOVSSET register
- */
-void kvm_pmu_overflow_set(struct kvm_vcpu *vcpu, u64 val)
-{
-	if (val == 0)
-		return;
-
-	vcpu_sys_reg(vcpu, PMOVSSET_EL0) |= val;
-	kvm_pmu_check_overflow(vcpu);
-}
-
-static void kvm_pmu_update_state(struct kvm_vcpu *vcpu)
-{
-	if (!kvm_arm_pmu_v3_ready(vcpu))
-		return;
-	kvm_pmu_check_overflow(vcpu);
-}
-
 bool kvm_pmu_should_notify_user(struct kvm_vcpu *vcpu)
 {
 	struct kvm_pmu *pmu = &vcpu->arch.pmu;
@@ -303,7 +285,7 @@ static inline struct kvm_vcpu *kvm_pmc_to_vcpu(struct kvm_pmc *pmc)
 }
 
 /**
- * When perf event overflows, call kvm_pmu_overflow_set to set overflow status.
+ * When the perf event overflows, set the overflow status and inform the vcpu.
  */
 static void kvm_pmu_perf_overflow(struct perf_event *perf_event,
 				  struct perf_sample_data *data,
@@ -313,7 +295,12 @@ static void kvm_pmu_perf_overflow(struct perf_event *perf_event,
 	struct kvm_vcpu *vcpu = kvm_pmc_to_vcpu(pmc);
 	int idx = pmc->idx;
 
-	kvm_pmu_overflow_set(vcpu, BIT(idx));
+	vcpu_sys_reg(vcpu, PMOVSSET_EL0) |= BIT(idx);
+
+	if (kvm_pmu_overflow_status(vcpu)) {
+		kvm_make_request(KVM_REQ_IRQ_PENDING, vcpu);
+		kvm_vcpu_kick(vcpu);
+	}
 }
 
 /**
@@ -341,7 +328,7 @@ void kvm_pmu_software_increment(struct kvm_vcpu *vcpu, u64 val)
 			reg = lower_32_bits(reg);
 			vcpu_sys_reg(vcpu, PMEVCNTR0_EL0 + i) = reg;
 			if (!reg)
-				kvm_pmu_overflow_set(vcpu, BIT(i));
+				vcpu_sys_reg(vcpu, PMOVSSET_EL0) |= BIT(i);
 		}
 	}
 }
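For context, the injection deferred by this patch happens the next time
the vcpu passes through its entry/exit path: kvm_pmu_flush_hwstate() and
kvm_pmu_sync_hwstate() end up in kvm_pmu_update_state(), which after this
patch is the only place the PMU calls kvm_vgic_inject_irq(). A sketch of
how those wrappers look in virt/kvm/arm/pmu.c around this commit (they
are not part of this diff):

	/* called on guest entry: reconcile the PMU irq line */
	void kvm_pmu_flush_hwstate(struct kvm_vcpu *vcpu)
	{
		kvm_pmu_update_state(vcpu);
	}

	/* called on guest exit: same reconciliation */
	void kvm_pmu_sync_hwstate(struct kvm_vcpu *vcpu)
	{
		kvm_pmu_update_state(vcpu);
	}

The KVM_REQ_IRQ_PENDING request is what closes the request-less kick
race mentioned in the commit message: if the perf interrupt fires while
the vcpu is on its way into the guest, the pending request guarantees
the vcpu notices the kick before entering, so the overflow interrupt
cannot be lost.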