path: root/virt/kvm
author    Marc Zyngier <maz@kernel.org>  2019-10-06 05:28:50 -0400
committer Marc Zyngier <maz@kernel.org>  2019-10-20 05:47:07 -0400
commit    8c3252c06516eac22c4f8e2506122171abedcc09 (patch)
tree      7ca547d4e4d0c04a8e4bbcf905d4ff8a44ae3788 /virt/kvm
parent    725ce66979fb6da5c1aec5b064d0871bedc23bf7 (diff)
KVM: arm64: pmu: Reset sample period on overflow handling
The PMU emulation code uses the perf event sample period to trigger the overflow detection. This works fine for the *first* overflow handling, but results in a huge number of interrupts on the host, unrelated to the number of interrupts handled in the guest (a x20 factor is pretty common for the cycle counter). On a slow system (such as a SW model), this can result in the guest only making forward progress at a glacial pace.

It turns out that the clue is in the name. The sample period is exactly that: a period. And once an overflow has occurred, the following period should be the full width of the associated counter, instead of whatever the guest had initially programmed.

Reset the sample period to the architected value in the overflow handler, which now results in a number of host interrupts that is much closer to the number of interrupts in the guest.

Fixes: b02386eb7dac ("arm64: KVM: Add PMU overflow interrupt routing")
Reviewed-by: Andrew Murray <andrew.murray@arm.com>
Signed-off-by: Marc Zyngier <maz@kernel.org>
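The diff below derives the new period with two's-complement arithmetic: for an N-bit counter currently holding `count`, the distance to the next wrap is 2^N - count, which in unsigned math is simply -count truncated to N bits. A minimal standalone sketch of that calculation, for illustration only (next_sample_period() is a hypothetical helper, not a kernel function):

    #include <stdint.h>
    #include <stdio.h>

    /*
     * Distance from `count` to the counter's next wrap-around,
     * i.e. the value the fix programs as the new sample period.
     */
    static uint64_t next_sample_period(uint64_t count, int is_64bit)
    {
    	uint64_t period = -count;		/* 2^64 - count, modulo 2^64 */

    	if (!is_64bit)
    		period &= 0xffffffffULL;	/* truncate to the 32-bit width */

    	return period;
    }

    int main(void)
    {
    	/* 32-bit counter at 0xfffffc00: 0x400 (1024) increments to overflow */
    	printf("%#llx\n",
    	       (unsigned long long)next_sample_period(0xfffffc00, 0));
    	return 0;
    }

This mirrors the `period = -(local64_read(&perf_event->count))` and `GENMASK(31, 0)` masking in the hunk below.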
Diffstat (limited to 'virt/kvm')
-rw-r--r--  virt/kvm/arm/pmu.c  |  20 ++++++++++++++++++++
1 file changed, 20 insertions(+), 0 deletions(-)
diff --git a/virt/kvm/arm/pmu.c b/virt/kvm/arm/pmu.c
index f291d4ac3519..8731dfeced8b 100644
--- a/virt/kvm/arm/pmu.c
+++ b/virt/kvm/arm/pmu.c
@@ -8,6 +8,7 @@
 #include <linux/kvm.h>
 #include <linux/kvm_host.h>
 #include <linux/perf_event.h>
+#include <linux/perf/arm_pmu.h>
 #include <linux/uaccess.h>
 #include <asm/kvm_emulate.h>
 #include <kvm/arm_pmu.h>
@@ -442,8 +443,25 @@ static void kvm_pmu_perf_overflow(struct perf_event *perf_event,
 				  struct pt_regs *regs)
 {
 	struct kvm_pmc *pmc = perf_event->overflow_handler_context;
+	struct arm_pmu *cpu_pmu = to_arm_pmu(perf_event->pmu);
 	struct kvm_vcpu *vcpu = kvm_pmc_to_vcpu(pmc);
 	int idx = pmc->idx;
+	u64 period;
+
+	cpu_pmu->pmu.stop(perf_event, PERF_EF_UPDATE);
+
+	/*
+	 * Reset the sample period to the architectural limit,
+	 * i.e. the point where the counter overflows.
+	 */
+	period = -(local64_read(&perf_event->count));
+
+	if (!kvm_pmu_idx_is_64bit(vcpu, pmc->idx))
+		period &= GENMASK(31, 0);
+
+	local64_set(&perf_event->hw.period_left, 0);
+	perf_event->attr.sample_period = period;
+	perf_event->hw.sample_period = period;
 
 	__vcpu_sys_reg(vcpu, PMOVSSET_EL0) |= BIT(idx);
 
@@ -451,6 +469,8 @@ static void kvm_pmu_perf_overflow(struct perf_event *perf_event,
 		kvm_make_request(KVM_REQ_IRQ_PENDING, vcpu);
 		kvm_vcpu_kick(vcpu);
 	}
+
+	cpu_pmu->pmu.start(perf_event, PERF_EF_RELOAD);
 }
 
 /**