author    Paolo Bonzini <pbonzini@redhat.com>  2019-05-20 11:20:40 -0400
committer Paolo Bonzini <pbonzini@redhat.com>  2019-05-24 15:27:13 -0400
commit    0e6f467ee28ec97f68c7b74e35ec1601bb1368a7 (patch)
tree      f951940ed259c245b68129f997115ff50301bccd
parent    a80c4ec10ed9632c44c829452dc40a0443ff4e85 (diff)
KVM: x86/pmu: mask the result of rdpmc according to the width of the counters
This patch will simplify the changes in the next one, by enforcing the
masking of the counters for both RDPMC and RDMSR.

Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
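As background, a minimal standalone sketch of the behaviour being enforced
(illustration only, not KVM code; the 48-bit width and all names below are
assumptions): RDPMC's fast mode (ECX bit 31) yields only the low 32 bits of
a counter, and even a full-width read is truncated to the architectural
counter width:

	#include <stdbool.h>
	#include <stdint.h>
	#include <stdio.h>

	#define COUNTER_WIDTH 48	/* assumed width; real CPUs report it via CPUID */

	static uint64_t read_pmc(uint64_t raw_counter, uint32_t idx)
	{
		bool fast_mode = idx & (1u << 31);
		uint64_t mask = fast_mode ? ~0u : ~0ull;	/* 32-bit vs. full mask */

		mask &= (1ull << COUNTER_WIDTH) - 1;	/* narrow to counter width */
		return raw_counter & mask;
	}

	int main(void)
	{
		uint64_t raw = 0x123456789abcdefull;

		printf("full: %#llx\n", (unsigned long long)read_pmc(raw, 0));
		printf("fast: %#llx\n", (unsigned long long)read_pmc(raw, 1u << 31));
		return 0;
	}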
-rw-r--r--  arch/x86/kvm/pmu.c           | 10
-rw-r--r--  arch/x86/kvm/pmu.h           |  3
-rw-r--r--  arch/x86/kvm/pmu_amd.c       |  2
-rw-r--r--  arch/x86/kvm/vmx/pmu_intel.c | 13
4 files changed, 15 insertions(+), 13 deletions(-)
diff --git a/arch/x86/kvm/pmu.c b/arch/x86/kvm/pmu.c
index e39741997893..dd745b58ffd8 100644
--- a/arch/x86/kvm/pmu.c
+++ b/arch/x86/kvm/pmu.c
@@ -283,7 +283,7 @@ int kvm_pmu_rdpmc(struct kvm_vcpu *vcpu, unsigned idx, u64 *data)
 	bool fast_mode = idx & (1u << 31);
 	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
 	struct kvm_pmc *pmc;
-	u64 ctr_val;
+	u64 mask = fast_mode ? ~0u : ~0ull;
 
 	if (!pmu->version)
 		return 1;
@@ -291,15 +291,11 @@ int kvm_pmu_rdpmc(struct kvm_vcpu *vcpu, unsigned idx, u64 *data)
 	if (is_vmware_backdoor_pmc(idx))
 		return kvm_pmu_rdpmc_vmware(vcpu, idx, data);
 
-	pmc = kvm_x86_ops->pmu_ops->msr_idx_to_pmc(vcpu, idx);
+	pmc = kvm_x86_ops->pmu_ops->msr_idx_to_pmc(vcpu, idx, &mask);
 	if (!pmc)
 		return 1;
 
-	ctr_val = pmc_read_counter(pmc);
-	if (fast_mode)
-		ctr_val = (u32)ctr_val;
-
-	*data = ctr_val;
+	*data = pmc_read_counter(pmc) & mask;
 	return 0;
 }
 
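The change above establishes an out-parameter contract: kvm_pmu_rdpmc()
seeds the mask from fast mode, the backend's msr_idx_to_pmc() may only
narrow it, and the caller applies it once. A hedged, self-contained sketch
of that contract (lookup_counter() and the 48-bit width are hypothetical
stand-ins, not KVM names):

	#include <stdint.h>
	#include <stdio.h>

	/* The lookup narrows the mask it is handed, so a backend that fails
	 * the lookup (or does not know the width) leaves the caller's
	 * default intact. */
	static uint64_t *lookup_counter(uint64_t *counters, unsigned idx,
					unsigned nr, unsigned width,
					uint64_t *mask)
	{
		if (idx >= nr)
			return NULL;
		*mask &= (1ull << width) - 1;	/* narrow to counter width */
		return &counters[idx];
	}

	int main(void)
	{
		uint64_t counters[2] = { 0xffffabcd12345678ull, 0 };
		int fast_mode = 0;
		uint64_t mask = fast_mode ? ~0u : ~0ull;	/* caller's default */
		uint64_t *pmc = lookup_counter(counters, 0, 2, 48, &mask);

		if (pmc)
			printf("%#llx\n", (unsigned long long)(*pmc & mask));
		return 0;
	}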
diff --git a/arch/x86/kvm/pmu.h b/arch/x86/kvm/pmu.h
index ba8898e1a854..22dff661145a 100644
--- a/arch/x86/kvm/pmu.h
+++ b/arch/x86/kvm/pmu.h
@@ -25,7 +25,8 @@ struct kvm_pmu_ops {
 	unsigned (*find_fixed_event)(int idx);
 	bool (*pmc_is_enabled)(struct kvm_pmc *pmc);
 	struct kvm_pmc *(*pmc_idx_to_pmc)(struct kvm_pmu *pmu, int pmc_idx);
-	struct kvm_pmc *(*msr_idx_to_pmc)(struct kvm_vcpu *vcpu, unsigned idx);
+	struct kvm_pmc *(*msr_idx_to_pmc)(struct kvm_vcpu *vcpu, unsigned idx,
+					  u64 *mask);
 	int (*is_valid_msr_idx)(struct kvm_vcpu *vcpu, unsigned idx);
 	bool (*is_valid_msr)(struct kvm_vcpu *vcpu, u32 msr);
 	int (*get_msr)(struct kvm_vcpu *vcpu, u32 msr, u64 *data);
diff --git a/arch/x86/kvm/pmu_amd.c b/arch/x86/kvm/pmu_amd.c
index 50fa9450fcf1..d3118088f1cd 100644
--- a/arch/x86/kvm/pmu_amd.c
+++ b/arch/x86/kvm/pmu_amd.c
@@ -186,7 +186,7 @@ static int amd_is_valid_msr_idx(struct kvm_vcpu *vcpu, unsigned idx)
 }
 
 /* idx is the ECX register of RDPMC instruction */
-static struct kvm_pmc *amd_msr_idx_to_pmc(struct kvm_vcpu *vcpu, unsigned idx)
+static struct kvm_pmc *amd_msr_idx_to_pmc(struct kvm_vcpu *vcpu, unsigned idx, u64 *mask)
 {
 	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
 	struct kvm_pmc *counters;
diff --git a/arch/x86/kvm/vmx/pmu_intel.c b/arch/x86/kvm/vmx/pmu_intel.c
index f8502c376b37..b6f5157445fe 100644
--- a/arch/x86/kvm/vmx/pmu_intel.c
+++ b/arch/x86/kvm/vmx/pmu_intel.c
@@ -126,7 +126,7 @@ static int intel_is_valid_msr_idx(struct kvm_vcpu *vcpu, unsigned idx)
 }
 
 static struct kvm_pmc *intel_msr_idx_to_pmc(struct kvm_vcpu *vcpu,
-					    unsigned idx)
+					    unsigned idx, u64 *mask)
 {
 	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
 	bool fixed = idx & (1u << 30);
@@ -138,6 +138,7 @@ static struct kvm_pmc *intel_msr_idx_to_pmc(struct kvm_vcpu *vcpu,
 	if (fixed && idx >= pmu->nr_arch_fixed_counters)
 		return NULL;
 	counters = fixed ? pmu->fixed_counters : pmu->gp_counters;
+	*mask &= pmu->counter_bitmask[fixed ? KVM_PMC_FIXED : KVM_PMC_GP];
 
 	return &counters[idx];
 }
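counter_bitmask[] holds a value of the form (1 << width) - 1 per counter
class, so the *mask &= above narrows the caller's default to the width the
vCPU advertises. A hedged sketch of deriving such bitmasks from the widths
CPUID leaf 0xA reports (the struct and function names are illustrative,
not KVM's):

	#include <stdint.h>

	struct pmu_caps {
		unsigned gp_bit_width;		/* e.g. CPUID.0AH:EAX[23:16] */
		unsigned fixed_bit_width;	/* e.g. CPUID.0AH:EDX[12:5]  */
	};

	/* index 0 ~ KVM_PMC_GP, index 1 ~ KVM_PMC_FIXED in this sketch */
	static void fill_bitmasks(const struct pmu_caps *caps,
				  uint64_t bitmask[2])
	{
		bitmask[0] = caps->gp_bit_width >= 64 ? ~0ull :
			     ((uint64_t)1 << caps->gp_bit_width) - 1;
		bitmask[1] = caps->fixed_bit_width >= 64 ? ~0ull :
			     ((uint64_t)1 << caps->fixed_bit_width) - 1;
	}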
@@ -183,9 +184,13 @@ static int intel_pmu_get_msr(struct kvm_vcpu *vcpu, u32 msr, u64 *data)
 		*data = pmu->global_ovf_ctrl;
 		return 0;
 	default:
-		if ((pmc = get_gp_pmc(pmu, msr, MSR_IA32_PERFCTR0)) ||
-		    (pmc = get_fixed_pmc(pmu, msr))) {
-			*data = pmc_read_counter(pmc);
+		if ((pmc = get_gp_pmc(pmu, msr, MSR_IA32_PERFCTR0))) {
+			u64 val = pmc_read_counter(pmc);
+			*data = val & pmu->counter_bitmask[KVM_PMC_GP];
+			return 0;
+		} else if ((pmc = get_fixed_pmc(pmu, msr))) {
+			u64 val = pmc_read_counter(pmc);
+			*data = val & pmu->counter_bitmask[KVM_PMC_FIXED];
 			return 0;
 		} else if ((pmc = get_gp_pmc(pmu, msr, MSR_P6_EVNTSEL0))) {
 			*data = pmc->eventsel;