 arch/x86/include/asm/kvm_host.h |  1 +
 arch/x86/kvm/pmu.c              | 25 ++++++++++++++++++++-----
 2 files changed, 21 insertions(+), 5 deletions(-)
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index f87f7fcefa0a..531f47cbf1f8 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -323,6 +323,7 @@ struct kvm_pmu {
 	u64 global_ovf_ctrl;
 	u64 counter_bitmask[2];
 	u64 global_ctrl_mask;
+	u64 reserved_bits;
 	u8 version;
 	struct kvm_pmc gp_counters[INTEL_PMC_MAX_GENERIC];
 	struct kvm_pmc fixed_counters[INTEL_PMC_MAX_FIXED];
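
Note: the new field turns the reserved-bit mask for the event-select MSRs from a hard-coded literal (see the pmu.c hunks below) into per-guest state, mirroring the role global_ctrl_mask already plays for IA32_PERF_GLOBAL_CTRL. The underlying pattern is a masked MSR write; a minimal sketch with illustrative names, not KVM's:

    #include <stdint.h>

    /* Reject a guest MSR write if it sets any reserved bit; the
     * caller turns the nonzero return into a guest #GP. */
    static int masked_msr_write(uint64_t *reg, uint64_t data,
                                uint64_t reserved)
    {
            if (data & reserved)
                    return 1;
            *reg = data;
            return 0;
    }
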
diff --git a/arch/x86/kvm/pmu.c b/arch/x86/kvm/pmu.c
index c53e797e7369..5c4f63151b4d 100644
--- a/arch/x86/kvm/pmu.c
+++ b/arch/x86/kvm/pmu.c
@@ -160,7 +160,7 @@ static void stop_counter(struct kvm_pmc *pmc)
 
 static void reprogram_counter(struct kvm_pmc *pmc, u32 type,
 		unsigned config, bool exclude_user, bool exclude_kernel,
-		bool intr)
+		bool intr, bool in_tx, bool in_tx_cp)
 {
 	struct perf_event *event;
 	struct perf_event_attr attr = {
@@ -173,6 +173,10 @@ static void reprogram_counter(struct kvm_pmc *pmc, u32 type,
 		.exclude_kernel = exclude_kernel,
 		.config = config,
 	};
+	if (in_tx)
+		attr.config |= HSW_IN_TX;
+	if (in_tx_cp)
+		attr.config |= HSW_IN_TX_CHECKPOINTED;
 
 	attr.sample_period = (-pmc->counter) & pmc_bitmask(pmc);
 
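
Note: the two modifiers sit in the otherwise-reserved upper half of the event select; the host kernel defines them in arch/x86/include/asm/perf_event.h as bits 32 and 33, so OR-ing them into attr.config forwards the guest's request straight to host perf. A self-contained sketch of the decoration step (the macro values match the kernel's; the helper is illustrative):

    #include <stdbool.h>
    #include <stdint.h>

    #define HSW_IN_TX               (1ULL << 32) /* count only inside a TSX transaction */
    #define HSW_IN_TX_CHECKPOINTED  (1ULL << 33) /* exclude events from aborted transactions */

    /* Mirrors what reprogram_counter() now does to the perf config. */
    static uint64_t decorate_config(uint64_t config, bool in_tx, bool in_tx_cp)
    {
            if (in_tx)
                    config |= HSW_IN_TX;
            if (in_tx_cp)
                    config |= HSW_IN_TX_CHECKPOINTED;
            return config;
    }
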
@@ -226,7 +230,9 @@ static void reprogram_gp_counter(struct kvm_pmc *pmc, u64 eventsel)
 
 	if (!(eventsel & (ARCH_PERFMON_EVENTSEL_EDGE |
 				ARCH_PERFMON_EVENTSEL_INV |
-				ARCH_PERFMON_EVENTSEL_CMASK))) {
+				ARCH_PERFMON_EVENTSEL_CMASK |
+				HSW_IN_TX |
+				HSW_IN_TX_CHECKPOINTED))) {
 		config = find_arch_event(&pmc->vcpu->arch.pmu, event_select,
 				unit_mask);
 		if (config != PERF_COUNT_HW_MAX)
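
Note: this filter decides whether an eventsel is plain enough to be mapped onto one of the generic events in the arch_events table; adding the two TX bits makes any transactional request fall through to PERF_TYPE_RAW instead, which keeps the modifiers inside the raw config where the host PMU driver expects them. A compact restatement of the predicate, with the kernel's mask values spelled out as literals for reference:

    #include <stdbool.h>
    #include <stdint.h>

    #define EVTSEL_EDGE   (1ULL << 18)    /* ARCH_PERFMON_EVENTSEL_EDGE */
    #define EVTSEL_INV    (1ULL << 23)    /* ARCH_PERFMON_EVENTSEL_INV */
    #define EVTSEL_CMASK  (0xFFULL << 24) /* ARCH_PERFMON_EVENTSEL_CMASK */
    #define EVTSEL_IN_TX  (3ULL << 32)    /* HSW_IN_TX | HSW_IN_TX_CHECKPOINTED */

    /* True when the eventsel can match a generic arch event; false
     * forces PERF_TYPE_RAW so the TX bits survive into attr.config. */
    static bool can_use_generic_event(uint64_t eventsel)
    {
            return !(eventsel & (EVTSEL_EDGE | EVTSEL_INV |
                                 EVTSEL_CMASK | EVTSEL_IN_TX));
    }
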
@@ -239,7 +245,9 @@ static void reprogram_gp_counter(struct kvm_pmc *pmc, u64 eventsel)
 	reprogram_counter(pmc, type, config,
 			!(eventsel & ARCH_PERFMON_EVENTSEL_USR),
 			!(eventsel & ARCH_PERFMON_EVENTSEL_OS),
-			eventsel & ARCH_PERFMON_EVENTSEL_INT);
+			eventsel & ARCH_PERFMON_EVENTSEL_INT,
+			(eventsel & HSW_IN_TX),
+			(eventsel & HSW_IN_TX_CHECKPOINTED));
 }
 
 static void reprogram_fixed_counter(struct kvm_pmc *pmc, u8 en_pmi, int idx)
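
Note: a subtlety at this call site is that eventsel & HSW_IN_TX is a u64 with only bit 32 set, but the parameters are bool, and conversion to C's _Bool tests for non-zero rather than truncating, so the high bits are preserved. Had the parameters been int, both modifiers would silently have read as 0 on the usual ABIs. A standalone demonstration:

    #include <assert.h>
    #include <stdbool.h>
    #include <stdint.h>

    int main(void)
    {
            uint64_t eventsel = 1ULL << 32;              /* HSW_IN_TX */

            bool as_bool = eventsel & (1ULL << 32);      /* != 0 test: true */
            int as_int = (int)(eventsel & (1ULL << 32)); /* truncates: 0 */

            assert(as_bool && as_int == 0);
            return 0;
    }
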
@@ -256,7 +264,7 @@ static void reprogram_fixed_counter(struct kvm_pmc *pmc, u8 en_pmi, int idx)
 			arch_events[fixed_pmc_events[idx]].event_type,
 			!(en & 0x2), /* exclude user */
 			!(en & 0x1), /* exclude kernel */
-			pmi);
+			pmi, false, false);
 }
 
 static inline u8 fixed_en_pmi(u64 ctrl, int idx)
@@ -408,7 +416,7 @@ int kvm_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 	} else if ((pmc = get_gp_pmc(pmu, index, MSR_P6_EVNTSEL0))) {
 		if (data == pmc->eventsel)
 			return 0;
-		if (!(data & 0xffffffff00200000ull)) {
+		if (!(data & pmu->reserved_bits)) {
 			reprogram_gp_counter(pmc, data);
 			return 0;
 		}
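
Note: with the literal replaced by pmu->reserved_bits, a guest WRMSR that sets a TX modifier is accepted exactly when CPUID advertised TSX to the guest; on other vCPUs the write keeps failing (nonzero return, injected #GP). A hypothetical guest-side encoding that now gets through on a TSX-visible vCPU, using the architectural UnHalted Core Cycles event:

    #include <stdint.h>

    /* Illustrative IA32_PERFEVTSELx value: core cycles, ring 3 only,
     * counted only inside TSX transactions. */
    static uint64_t tx_cycles_eventsel(void)
    {
            return 0x3c             /* event: UnHalted Core Cycles */
                    | (1ULL << 16)  /* USR */
                    | (1ULL << 22)  /* EN */
                    | (1ULL << 32); /* HSW_IN_TX */
    }

Written to MSR_P6_EVNTSEL0, this value passes the !(data & pmu->reserved_bits) check only when the TX bits were cleared from the mask during CPUID update (last hunk below).
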
@@ -450,6 +458,7 @@ void kvm_pmu_cpuid_update(struct kvm_vcpu *vcpu)
 	pmu->counter_bitmask[KVM_PMC_GP] = 0;
 	pmu->counter_bitmask[KVM_PMC_FIXED] = 0;
 	pmu->version = 0;
+	pmu->reserved_bits = 0xffffffff00200000ull;
 
 	entry = kvm_find_cpuid_entry(vcpu, 0xa, 0);
 	if (!entry)
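
Note: the default mask decodes as "bit 21 plus everything above bit 31": bits 63:32 of IA32_PERFEVTSELx are reserved on pre-Haswell parts, and bit 21 (AnyThread) is presumably kept reserved because KVM does not emulate cross-thread counting. A quick check of the arithmetic:

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
            uint64_t high = ~0ULL << 32; /* eventsel bits 63:32 */
            uint64_t any  = 1ULL << 21;  /* AnyThread */

            assert((high | any) == 0xffffffff00200000ULL);
            return 0;
    }
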
@@ -478,6 +487,12 @@ void kvm_pmu_cpuid_update(struct kvm_vcpu *vcpu)
 	pmu->global_ctrl = ((1 << pmu->nr_arch_gp_counters) - 1) |
 		(((1ull << pmu->nr_arch_fixed_counters) - 1) << INTEL_PMC_IDX_FIXED);
 	pmu->global_ctrl_mask = ~pmu->global_ctrl;
+
+	entry = kvm_find_cpuid_entry(vcpu, 7, 0);
+	if (entry &&
+	    (boot_cpu_has(X86_FEATURE_HLE) || boot_cpu_has(X86_FEATURE_RTM)) &&
+	    (entry->ebx & (X86_FEATURE_HLE|X86_FEATURE_RTM)))
+		pmu->reserved_bits ^= HSW_IN_TX|HSW_IN_TX_CHECKPOINTED;
 }
 
 void kvm_pmu_init(struct kvm_vcpu *vcpu)
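
Note: the XOR clears HSW_IN_TX and HSW_IN_TX_CHECKPOINTED out of the reserved mask when both the host and the guest's CPUID leaf 7 advertise HLE or RTM. One caveat worth flagging: in the raw CPUID.(EAX=7,ECX=0):EBX word, HLE is bit 4 and RTM is bit 11, while the kernel's X86_FEATURE_HLE/X86_FEATURE_RTM constants are word/bit indices intended for cpu_has()-style lookups rather than single-bit masks, so the entry->ebx test above is best read as the intent sketched here:

    #include <stdbool.h>
    #include <stdint.h>

    /* Raw CPUID.(EAX=7,ECX=0):EBX feature bits (names are ours). */
    #define CPUID7_EBX_HLE  (1U << 4)  /* Hardware Lock Elision */
    #define CPUID7_EBX_RTM  (1U << 11) /* Restricted Transactional Memory */

    /* Illustrative form of the guest-CPUID half of the gate. */
    static bool guest_has_tsx(uint32_t ebx)
    {
            return ebx & (CPUID7_EBX_HLE | CPUID7_EBX_RTM);
    }
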