author     Wei Huang <wehuang@redhat.com>          2015-06-19 09:45:05 -0400
committer  Paolo Bonzini <pbonzini@redhat.com>     2015-06-23 08:12:14 -0400
commit     25462f7f5295e2d3e9c2b31761ac95f0b3c8562f (patch)
tree       ed5d7df52d131213a898c34d4596d60e997c485c
parent     41aac14a8dee66a720894e5979c2372c0d5afd34 (diff)
KVM: x86/vPMU: Define kvm_pmu_ops to support vPMU function dispatch
This patch defines a new function pointer struct (kvm_pmu_ops) to support vPMU for both Intel and AMD. The function pointers defined in this new struct will be linked with Intel and AMD functions later. In the meantime, the struct that maps from event_sel bits to PERF_TYPE_HARDWARE events is renamed and moved from Intel-specific code to kvm_host.h as a common struct.

Reviewed-by: Joerg Roedel <jroedel@suse.de>
Tested-by: Joerg Roedel <jroedel@suse.de>
Signed-off-by: Wei Huang <wei@redhat.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
-rw-r--r--  arch/x86/include/asm/kvm_host.h |   4
-rw-r--r--  arch/x86/kvm/Makefile           |   4
-rw-r--r--  arch/x86/kvm/pmu.c              | 383
-rw-r--r--  arch/x86/kvm/pmu.h              |  92
-rw-r--r--  arch/x86/kvm/pmu_amd.c          |  97
-rw-r--r--  arch/x86/kvm/pmu_intel.c        | 358
-rw-r--r--  arch/x86/kvm/svm.c              |   3
-rw-r--r--  arch/x86/kvm/vmx.c              |   3
8 files changed, 606 insertions, 338 deletions
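For readers unfamiliar with the ops-table pattern this patch introduces, the sketch below is a minimal, self-contained user-space model (not kernel code) of the same idea: vendor-neutral PMU code calls through a struct of function pointers, and each vendor module supplies its own table which the common code reaches via a single pointer. The names kvm_pmu_ops, intel_pmu_ops, amd_pmu_ops and pmu_ops mirror the patch; struct kvm_vcpu, the MSR value and the stub bodies here are simplified stand-ins.

/* Minimal user-space model of the kvm_pmu_ops dispatch added by this patch;
 * the types and stub bodies are simplified stand-ins, not real KVM code. */
#include <stdbool.h>
#include <stdio.h>

struct kvm_vcpu { int id; };            /* placeholder vcpu */

struct kvm_pmu_ops {
	bool (*is_valid_msr)(struct kvm_vcpu *vcpu, unsigned int msr);
	void (*refresh)(struct kvm_vcpu *vcpu);
};

/* Vendor implementations (toy versions). */
static bool intel_is_valid_msr(struct kvm_vcpu *vcpu, unsigned int msr)
{
	return msr == 0x38f;            /* pretend only one MSR (0x38f, MSR_CORE_PERF_GLOBAL_CTRL) exists */
}
static void intel_refresh(struct kvm_vcpu *vcpu) { printf("intel refresh\n"); }

static bool amd_is_valid_msr(struct kvm_vcpu *vcpu, unsigned int msr)
{
	return false;                   /* AMD side is still a stub, as in the patch */
}
static void amd_refresh(struct kvm_vcpu *vcpu) { printf("amd refresh\n"); }

static const struct kvm_pmu_ops intel_pmu_ops = {
	.is_valid_msr = intel_is_valid_msr,
	.refresh      = intel_refresh,
};
static const struct kvm_pmu_ops amd_pmu_ops = {
	.is_valid_msr = amd_is_valid_msr,
	.refresh      = amd_refresh,
};

/* Stands in for kvm_x86_ops->pmu_ops: set once by the vendor module. */
static const struct kvm_pmu_ops *pmu_ops;

/* Vendor-neutral wrapper, analogous to kvm_pmu_is_valid_msr() in pmu.c. */
static bool kvm_pmu_is_valid_msr(struct kvm_vcpu *vcpu, unsigned int msr)
{
	return pmu_ops->is_valid_msr(vcpu, msr);
}

int main(void)
{
	struct kvm_vcpu vcpu = { .id = 0 };

	pmu_ops = &intel_pmu_ops;       /* vmx.c sets .pmu_ops = &intel_pmu_ops */
	pmu_ops->refresh(&vcpu);
	printf("0x38f valid: %d\n", kvm_pmu_is_valid_msr(&vcpu, 0x38f));

	pmu_ops = &amd_pmu_ops;         /* svm.c sets .pmu_ops = &amd_pmu_ops */
	pmu_ops->refresh(&vcpu);
	printf("0x38f valid: %d\n", kvm_pmu_is_valid_msr(&vcpu, 0x38f));
	return 0;
}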
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 534dfa324e35..5a2b4508be44 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -336,6 +336,8 @@ struct kvm_pmu {
336 u64 reprogram_pmi; 336 u64 reprogram_pmi;
337}; 337};
338 338
339struct kvm_pmu_ops;
340
339enum { 341enum {
340 KVM_DEBUGREG_BP_ENABLED = 1, 342 KVM_DEBUGREG_BP_ENABLED = 1,
341 KVM_DEBUGREG_WONT_EXIT = 2, 343 KVM_DEBUGREG_WONT_EXIT = 2,
@@ -854,6 +856,8 @@ struct kvm_x86_ops {
854 void (*enable_log_dirty_pt_masked)(struct kvm *kvm, 856 void (*enable_log_dirty_pt_masked)(struct kvm *kvm,
855 struct kvm_memory_slot *slot, 857 struct kvm_memory_slot *slot,
856 gfn_t offset, unsigned long mask); 858 gfn_t offset, unsigned long mask);
859 /* pmu operations of sub-arch */
860 const struct kvm_pmu_ops *pmu_ops;
857}; 861};
858 862
859struct kvm_arch_async_pf { 863struct kvm_arch_async_pf {
diff --git a/arch/x86/kvm/Makefile b/arch/x86/kvm/Makefile
index 470dc6c9d409..67d215cb8953 100644
--- a/arch/x86/kvm/Makefile
+++ b/arch/x86/kvm/Makefile
@@ -14,8 +14,8 @@ kvm-$(CONFIG_KVM_ASYNC_PF) += $(KVM)/async_pf.o
14kvm-y += x86.o mmu.o emulate.o i8259.o irq.o lapic.o \ 14kvm-y += x86.o mmu.o emulate.o i8259.o irq.o lapic.o \
15 i8254.o ioapic.o irq_comm.o cpuid.o pmu.o mtrr.o 15 i8254.o ioapic.o irq_comm.o cpuid.o pmu.o mtrr.o
16kvm-$(CONFIG_KVM_DEVICE_ASSIGNMENT) += assigned-dev.o iommu.o 16kvm-$(CONFIG_KVM_DEVICE_ASSIGNMENT) += assigned-dev.o iommu.o
17kvm-intel-y += vmx.o 17kvm-intel-y += vmx.o pmu_intel.o
18kvm-amd-y += svm.o 18kvm-amd-y += svm.o pmu_amd.o
19 19
20obj-$(CONFIG_KVM) += kvm.o 20obj-$(CONFIG_KVM) += kvm.o
21obj-$(CONFIG_KVM_INTEL) += kvm-intel.o 21obj-$(CONFIG_KVM_INTEL) += kvm-intel.o
diff --git a/arch/x86/kvm/pmu.c b/arch/x86/kvm/pmu.c
index bd5dbd9ce0e3..31aa2c85dc97 100644
--- a/arch/x86/kvm/pmu.c
+++ b/arch/x86/kvm/pmu.c
@@ -1,11 +1,12 @@
1/* 1/*
2 * Kernel-based Virtual Machine -- Performance Monitoring Unit support 2 * Kernel-based Virtual Machine -- Performance Monitoring Unit support
3 * 3 *
4 * Copyright 2011 Red Hat, Inc. and/or its affiliates. 4 * Copyright 2015 Red Hat, Inc. and/or its affiliates.
5 * 5 *
6 * Authors: 6 * Authors:
7 * Avi Kivity <avi@redhat.com> 7 * Avi Kivity <avi@redhat.com>
8 * Gleb Natapov <gleb@redhat.com> 8 * Gleb Natapov <gleb@redhat.com>
9 * Wei Huang <wei@redhat.com>
9 * 10 *
10 * This work is licensed under the terms of the GNU GPL, version 2. See 11 * This work is licensed under the terms of the GNU GPL, version 2. See
11 * the COPYING file in the top-level directory. 12 * the COPYING file in the top-level directory.
@@ -21,67 +22,30 @@
21#include "lapic.h" 22#include "lapic.h"
22#include "pmu.h" 23#include "pmu.h"
23 24
24static struct kvm_event_hw_type_mapping arch_events[] = { 25/* NOTE:
25 /* Index must match CPUID 0x0A.EBX bit vector */ 26 * - Each perf counter is defined as "struct kvm_pmc";
26 [0] = { 0x3c, 0x00, PERF_COUNT_HW_CPU_CYCLES }, 27 * - There are two types of perf counters: general purpose (gp) and fixed.
27 [1] = { 0xc0, 0x00, PERF_COUNT_HW_INSTRUCTIONS }, 28 * gp counters are stored in gp_counters[] and fixed counters are stored
28 [2] = { 0x3c, 0x01, PERF_COUNT_HW_BUS_CYCLES }, 29 * in fixed_counters[] respectively. Both of them are part of "struct
29 [3] = { 0x2e, 0x4f, PERF_COUNT_HW_CACHE_REFERENCES }, 30 * kvm_pmu";
30 [4] = { 0x2e, 0x41, PERF_COUNT_HW_CACHE_MISSES }, 31 * - pmu.c understands the difference between gp counters and fixed counters.
31 [5] = { 0xc4, 0x00, PERF_COUNT_HW_BRANCH_INSTRUCTIONS }, 32 * However AMD doesn't support fixed-counters;
32 [6] = { 0xc5, 0x00, PERF_COUNT_HW_BRANCH_MISSES }, 33 * - There are three types of index to access perf counters (PMC):
33 [7] = { 0x00, 0x30, PERF_COUNT_HW_REF_CPU_CYCLES }, 34 * 1. MSR (named msr): For example Intel has MSR_IA32_PERFCTRn and AMD
34}; 35 * has MSR_K7_PERFCTRn.
35 36 * 2. MSR Index (named idx): This normally is used by RDPMC instruction.
36/* mapping between fixed pmc index and arch_events array */ 37 * For instance AMD RDPMC instruction uses 0000_0003h in ECX to access
37static int fixed_pmc_events[] = {1, 0, 7}; 38 * C001_0007h (MSR_K7_PERCTR3). Intel has a similar mechanism, except
38 39 * that it also supports fixed counters. idx can be used to as index to
39static bool pmc_is_gp(struct kvm_pmc *pmc) 40 * gp and fixed counters.
40{ 41 * 3. Global PMC Index (named pmc): pmc is an index specific to PMU
41 return pmc->type == KVM_PMC_GP; 42 * code. Each pmc, stored in kvm_pmc.idx field, is unique across
42} 43 * all perf counters (both gp and fixed). The mapping relationship
43 44 * between pmc and perf counters is as the following:
44static inline u64 pmc_bitmask(struct kvm_pmc *pmc) 45 * * Intel: [0 .. INTEL_PMC_MAX_GENERIC-1] <=> gp counters
45{ 46 * [INTEL_PMC_IDX_FIXED .. INTEL_PMC_IDX_FIXED + 2] <=> fixed
46 struct kvm_pmu *pmu = pmc_to_pmu(pmc); 47 * * AMD: [0 .. AMD64_NUM_COUNTERS-1] <=> gp counters
47 48 */
48 return pmu->counter_bitmask[pmc->type];
49}
50
51static inline bool pmc_is_enabled(struct kvm_pmc *pmc)
52{
53 struct kvm_pmu *pmu = pmc_to_pmu(pmc);
54 return test_bit(pmc->idx, (unsigned long *)&pmu->global_ctrl);
55}
56
57static inline struct kvm_pmc *get_gp_pmc(struct kvm_pmu *pmu, u32 msr,
58 u32 base)
59{
60 if (msr >= base && msr < base + pmu->nr_arch_gp_counters)
61 return &pmu->gp_counters[msr - base];
62 return NULL;
63}
64
65static inline struct kvm_pmc *get_fixed_pmc(struct kvm_pmu *pmu, u32 msr)
66{
67 int base = MSR_CORE_PERF_FIXED_CTR0;
68 if (msr >= base && msr < base + pmu->nr_arch_fixed_counters)
69 return &pmu->fixed_counters[msr - base];
70 return NULL;
71}
72
73static inline struct kvm_pmc *get_fixed_pmc_idx(struct kvm_pmu *pmu, int idx)
74{
75 return get_fixed_pmc(pmu, MSR_CORE_PERF_FIXED_CTR0 + idx);
76}
77
78static struct kvm_pmc *global_idx_to_pmc(struct kvm_pmu *pmu, int idx)
79{
80 if (idx < INTEL_PMC_IDX_FIXED)
81 return get_gp_pmc(pmu, MSR_P6_EVNTSEL0 + idx, MSR_P6_EVNTSEL0);
82 else
83 return get_fixed_pmc_idx(pmu, idx - INTEL_PMC_IDX_FIXED);
84}
85 49
86static void kvm_pmi_trigger_fn(struct irq_work *irq_work) 50static void kvm_pmi_trigger_fn(struct irq_work *irq_work)
87{ 51{
@@ -132,30 +96,6 @@ static void kvm_perf_overflow_intr(struct perf_event *perf_event,
132 } 96 }
133} 97}
134 98
135static u64 pmc_read_counter(struct kvm_pmc *pmc)
136{
137 u64 counter, enabled, running;
138
139 counter = pmc->counter;
140
141 if (pmc->perf_event)
142 counter += perf_event_read_value(pmc->perf_event,
143 &enabled, &running);
144
145 /* FIXME: Scaling needed? */
146
147 return counter & pmc_bitmask(pmc);
148}
149
150static void pmc_stop_counter(struct kvm_pmc *pmc)
151{
152 if (pmc->perf_event) {
153 pmc->counter = pmc_read_counter(pmc);
154 perf_event_release_kernel(pmc->perf_event);
155 pmc->perf_event = NULL;
156 }
157}
158
159static void pmc_reprogram_counter(struct kvm_pmc *pmc, u32 type, 99static void pmc_reprogram_counter(struct kvm_pmc *pmc, u32 type,
160 unsigned config, bool exclude_user, 100 unsigned config, bool exclude_user,
161 bool exclude_kernel, bool intr, 101 bool exclude_kernel, bool intr,
@@ -193,24 +133,7 @@ static void pmc_reprogram_counter(struct kvm_pmc *pmc, u32 type,
193 clear_bit(pmc->idx, (unsigned long*)&pmc_to_pmu(pmc)->reprogram_pmi); 133 clear_bit(pmc->idx, (unsigned long*)&pmc_to_pmu(pmc)->reprogram_pmi);
194} 134}
195 135
196static unsigned find_arch_event(struct kvm_pmu *pmu, u8 event_select, 136void reprogram_gp_counter(struct kvm_pmc *pmc, u64 eventsel)
197 u8 unit_mask)
198{
199 int i;
200
201 for (i = 0; i < ARRAY_SIZE(arch_events); i++)
202 if (arch_events[i].eventsel == event_select
203 && arch_events[i].unit_mask == unit_mask
204 && (pmu->available_event_types & (1 << i)))
205 break;
206
207 if (i == ARRAY_SIZE(arch_events))
208 return PERF_COUNT_HW_MAX;
209
210 return arch_events[i].event_type;
211}
212
213static void reprogram_gp_counter(struct kvm_pmc *pmc, u64 eventsel)
214{ 137{
215 unsigned config, type = PERF_TYPE_RAW; 138 unsigned config, type = PERF_TYPE_RAW;
216 u8 event_select, unit_mask; 139 u8 event_select, unit_mask;
@@ -233,8 +156,9 @@ static void reprogram_gp_counter(struct kvm_pmc *pmc, u64 eventsel)
233 ARCH_PERFMON_EVENTSEL_CMASK | 156 ARCH_PERFMON_EVENTSEL_CMASK |
234 HSW_IN_TX | 157 HSW_IN_TX |
235 HSW_IN_TX_CHECKPOINTED))) { 158 HSW_IN_TX_CHECKPOINTED))) {
236 config = find_arch_event(pmc_to_pmu(pmc), event_select, 159 config = kvm_x86_ops->pmu_ops->find_arch_event(pmc_to_pmu(pmc),
237 unit_mask); 160 event_select,
161 unit_mask);
238 if (config != PERF_COUNT_HW_MAX) 162 if (config != PERF_COUNT_HW_MAX)
239 type = PERF_TYPE_HARDWARE; 163 type = PERF_TYPE_HARDWARE;
240 } 164 }
@@ -249,8 +173,9 @@ static void reprogram_gp_counter(struct kvm_pmc *pmc, u64 eventsel)
249 (eventsel & HSW_IN_TX), 173 (eventsel & HSW_IN_TX),
250 (eventsel & HSW_IN_TX_CHECKPOINTED)); 174 (eventsel & HSW_IN_TX_CHECKPOINTED));
251} 175}
176EXPORT_SYMBOL_GPL(reprogram_gp_counter);
252 177
253static void reprogram_fixed_counter(struct kvm_pmc *pmc, u8 ctrl, int idx) 178void reprogram_fixed_counter(struct kvm_pmc *pmc, u8 ctrl, int idx)
254{ 179{
255 unsigned en_field = ctrl & 0x3; 180 unsigned en_field = ctrl & 0x3;
256 bool pmi = ctrl & 0x8; 181 bool pmi = ctrl & 0x8;
@@ -261,38 +186,16 @@ static void reprogram_fixed_counter(struct kvm_pmc *pmc, u8 ctrl, int idx)
261 return; 186 return;
262 187
263 pmc_reprogram_counter(pmc, PERF_TYPE_HARDWARE, 188 pmc_reprogram_counter(pmc, PERF_TYPE_HARDWARE,
264 arch_events[fixed_pmc_events[idx]].event_type, 189 kvm_x86_ops->pmu_ops->find_fixed_event(idx),
265 !(en_field & 0x2), /* exclude user */ 190 !(en_field & 0x2), /* exclude user */
266 !(en_field & 0x1), /* exclude kernel */ 191 !(en_field & 0x1), /* exclude kernel */
267 pmi, false, false); 192 pmi, false, false);
268} 193}
194EXPORT_SYMBOL_GPL(reprogram_fixed_counter);
269 195
270static inline u8 fixed_ctrl_field(u64 ctrl, int idx) 196void reprogram_counter(struct kvm_pmu *pmu, int pmc_idx)
271{
272 return (ctrl >> (idx * 4)) & 0xf;
273}
274
275static void reprogram_fixed_counters(struct kvm_pmu *pmu, u64 data)
276{ 197{
277 int i; 198 struct kvm_pmc *pmc = kvm_x86_ops->pmu_ops->pmc_idx_to_pmc(pmu, pmc_idx);
278
279 for (i = 0; i < pmu->nr_arch_fixed_counters; i++) {
280 u8 old_ctrl = fixed_ctrl_field(pmu->fixed_ctr_ctrl, i);
281 u8 new_ctrl = fixed_ctrl_field(data, i);
282 struct kvm_pmc *pmc = get_fixed_pmc_idx(pmu, i);
283
284 if (old_ctrl == new_ctrl)
285 continue;
286
287 reprogram_fixed_counter(pmc, new_ctrl, i);
288 }
289
290 pmu->fixed_ctr_ctrl = data;
291}
292
293static void reprogram_counter(struct kvm_pmu *pmu, int pmc_idx)
294{
295 struct kvm_pmc *pmc = global_idx_to_pmc(pmu, pmc_idx);
296 199
297 if (!pmc) 200 if (!pmc)
298 return; 201 return;
@@ -306,17 +209,7 @@ static void reprogram_counter(struct kvm_pmu *pmu, int pmc_idx)
306 reprogram_fixed_counter(pmc, ctrl, idx); 209 reprogram_fixed_counter(pmc, ctrl, idx);
307 } 210 }
308} 211}
309 212EXPORT_SYMBOL_GPL(reprogram_counter);
310static void global_ctrl_changed(struct kvm_pmu *pmu, u64 data)
311{
312 int bit;
313 u64 diff = pmu->global_ctrl ^ data;
314
315 pmu->global_ctrl = data;
316
317 for_each_set_bit(bit, (unsigned long *)&diff, X86_PMC_IDX_MAX)
318 reprogram_counter(pmu, bit);
319}
320 213
321void kvm_pmu_handle_event(struct kvm_vcpu *vcpu) 214void kvm_pmu_handle_event(struct kvm_vcpu *vcpu)
322{ 215{
@@ -327,7 +220,7 @@ void kvm_pmu_handle_event(struct kvm_vcpu *vcpu)
327 bitmask = pmu->reprogram_pmi; 220 bitmask = pmu->reprogram_pmi;
328 221
329 for_each_set_bit(bit, (unsigned long *)&bitmask, X86_PMC_IDX_MAX) { 222 for_each_set_bit(bit, (unsigned long *)&bitmask, X86_PMC_IDX_MAX) {
330 struct kvm_pmc *pmc = global_idx_to_pmc(pmu, bit); 223 struct kvm_pmc *pmc = kvm_x86_ops->pmu_ops->pmc_idx_to_pmc(pmu, bit);
331 224
332 if (unlikely(!pmc || !pmc->perf_event)) { 225 if (unlikely(!pmc || !pmc->perf_event)) {
333 clear_bit(bit, (unsigned long *)&pmu->reprogram_pmi); 226 clear_bit(bit, (unsigned long *)&pmu->reprogram_pmi);
@@ -341,28 +234,7 @@ void kvm_pmu_handle_event(struct kvm_vcpu *vcpu)
341/* check if idx is a valid index to access PMU */ 234/* check if idx is a valid index to access PMU */
342int kvm_pmu_is_valid_msr_idx(struct kvm_vcpu *vcpu, unsigned idx) 235int kvm_pmu_is_valid_msr_idx(struct kvm_vcpu *vcpu, unsigned idx)
343{ 236{
344 struct kvm_pmu *pmu = vcpu_to_pmu(vcpu); 237 return kvm_x86_ops->pmu_ops->is_valid_msr_idx(vcpu, idx);
345 bool fixed = idx & (1u << 30);
346 idx &= ~(3u << 30);
347 return (!fixed && idx >= pmu->nr_arch_gp_counters) ||
348 (fixed && idx >= pmu->nr_arch_fixed_counters);
349}
350
351static struct kvm_pmc *kvm_pmu_msr_idx_to_pmc(struct kvm_vcpu *vcpu,
352 unsigned idx)
353{
354 struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
355 bool fixed = idx & (1u << 30);
356 struct kvm_pmc *counters;
357
358 idx &= ~(3u << 30);
359 if (!fixed && idx >= pmu->nr_arch_gp_counters)
360 return NULL;
361 if (fixed && idx >= pmu->nr_arch_fixed_counters)
362 return NULL;
363 counters = fixed ? pmu->fixed_counters : pmu->gp_counters;
364
365 return &counters[idx];
366} 238}
367 239
368int kvm_pmu_rdpmc(struct kvm_vcpu *vcpu, unsigned idx, u64 *data) 240int kvm_pmu_rdpmc(struct kvm_vcpu *vcpu, unsigned idx, u64 *data)
@@ -371,7 +243,7 @@ int kvm_pmu_rdpmc(struct kvm_vcpu *vcpu, unsigned idx, u64 *data)
371 struct kvm_pmc *pmc; 243 struct kvm_pmc *pmc;
372 u64 ctr_val; 244 u64 ctr_val;
373 245
374 pmc = kvm_pmu_msr_idx_to_pmc(vcpu, idx); 246 pmc = kvm_x86_ops->pmu_ops->msr_idx_to_pmc(vcpu, idx);
375 if (!pmc) 247 if (!pmc)
376 return 1; 248 return 1;
377 249
@@ -391,111 +263,17 @@ void kvm_pmu_deliver_pmi(struct kvm_vcpu *vcpu)
391 263
392bool kvm_pmu_is_valid_msr(struct kvm_vcpu *vcpu, u32 msr) 264bool kvm_pmu_is_valid_msr(struct kvm_vcpu *vcpu, u32 msr)
393{ 265{
394 struct kvm_pmu *pmu = vcpu_to_pmu(vcpu); 266 return kvm_x86_ops->pmu_ops->is_valid_msr(vcpu, msr);
395 int ret;
396
397 switch (msr) {
398 case MSR_CORE_PERF_FIXED_CTR_CTRL:
399 case MSR_CORE_PERF_GLOBAL_STATUS:
400 case MSR_CORE_PERF_GLOBAL_CTRL:
401 case MSR_CORE_PERF_GLOBAL_OVF_CTRL:
402 ret = pmu->version > 1;
403 break;
404 default:
405 ret = get_gp_pmc(pmu, msr, MSR_IA32_PERFCTR0)
406 || get_gp_pmc(pmu, msr, MSR_P6_EVNTSEL0)
407 || get_fixed_pmc(pmu, msr);
408 break;
409 }
410 return ret;
411} 267}
412 268
413int kvm_pmu_get_msr(struct kvm_vcpu *vcpu, u32 index, u64 *data) 269int kvm_pmu_get_msr(struct kvm_vcpu *vcpu, u32 msr, u64 *data)
414{ 270{
415 struct kvm_pmu *pmu = vcpu_to_pmu(vcpu); 271 return kvm_x86_ops->pmu_ops->get_msr(vcpu, msr, data);
416 struct kvm_pmc *pmc;
417
418 switch (index) {
419 case MSR_CORE_PERF_FIXED_CTR_CTRL:
420 *data = pmu->fixed_ctr_ctrl;
421 return 0;
422 case MSR_CORE_PERF_GLOBAL_STATUS:
423 *data = pmu->global_status;
424 return 0;
425 case MSR_CORE_PERF_GLOBAL_CTRL:
426 *data = pmu->global_ctrl;
427 return 0;
428 case MSR_CORE_PERF_GLOBAL_OVF_CTRL:
429 *data = pmu->global_ovf_ctrl;
430 return 0;
431 default:
432 if ((pmc = get_gp_pmc(pmu, index, MSR_IA32_PERFCTR0)) ||
433 (pmc = get_fixed_pmc(pmu, index))) {
434 *data = pmc_read_counter(pmc);
435 return 0;
436 } else if ((pmc = get_gp_pmc(pmu, index, MSR_P6_EVNTSEL0))) {
437 *data = pmc->eventsel;
438 return 0;
439 }
440 }
441 return 1;
442} 272}
443 273
444int kvm_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info) 274int kvm_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
445{ 275{
446 struct kvm_pmu *pmu = vcpu_to_pmu(vcpu); 276 return kvm_x86_ops->pmu_ops->set_msr(vcpu, msr_info);
447 struct kvm_pmc *pmc;
448 u32 index = msr_info->index;
449 u64 data = msr_info->data;
450
451 switch (index) {
452 case MSR_CORE_PERF_FIXED_CTR_CTRL:
453 if (pmu->fixed_ctr_ctrl == data)
454 return 0;
455 if (!(data & 0xfffffffffffff444ull)) {
456 reprogram_fixed_counters(pmu, data);
457 return 0;
458 }
459 break;
460 case MSR_CORE_PERF_GLOBAL_STATUS:
461 if (msr_info->host_initiated) {
462 pmu->global_status = data;
463 return 0;
464 }
465 break; /* RO MSR */
466 case MSR_CORE_PERF_GLOBAL_CTRL:
467 if (pmu->global_ctrl == data)
468 return 0;
469 if (!(data & pmu->global_ctrl_mask)) {
470 global_ctrl_changed(pmu, data);
471 return 0;
472 }
473 break;
474 case MSR_CORE_PERF_GLOBAL_OVF_CTRL:
475 if (!(data & (pmu->global_ctrl_mask & ~(3ull<<62)))) {
476 if (!msr_info->host_initiated)
477 pmu->global_status &= ~data;
478 pmu->global_ovf_ctrl = data;
479 return 0;
480 }
481 break;
482 default:
483 if ((pmc = get_gp_pmc(pmu, index, MSR_IA32_PERFCTR0)) ||
484 (pmc = get_fixed_pmc(pmu, index))) {
485 if (!msr_info->host_initiated)
486 data = (s64)(s32)data;
487 pmc->counter += data - pmc_read_counter(pmc);
488 return 0;
489 } else if ((pmc = get_gp_pmc(pmu, index, MSR_P6_EVNTSEL0))) {
490 if (data == pmc->eventsel)
491 return 0;
492 if (!(data & pmu->reserved_bits)) {
493 reprogram_gp_counter(pmc, data);
494 return 0;
495 }
496 }
497 }
498 return 1;
499} 277}
500 278
501/* refresh PMU settings. This function generally is called when underlying 279/* refresh PMU settings. This function generally is called when underlying
@@ -504,90 +282,23 @@ int kvm_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
504 */ 282 */
505void kvm_pmu_refresh(struct kvm_vcpu *vcpu) 283void kvm_pmu_refresh(struct kvm_vcpu *vcpu)
506{ 284{
507 struct kvm_pmu *pmu = vcpu_to_pmu(vcpu); 285 kvm_x86_ops->pmu_ops->refresh(vcpu);
508 struct kvm_cpuid_entry2 *entry;
509 union cpuid10_eax eax;
510 union cpuid10_edx edx;
511
512 pmu->nr_arch_gp_counters = 0;
513 pmu->nr_arch_fixed_counters = 0;
514 pmu->counter_bitmask[KVM_PMC_GP] = 0;
515 pmu->counter_bitmask[KVM_PMC_FIXED] = 0;
516 pmu->version = 0;
517 pmu->reserved_bits = 0xffffffff00200000ull;
518
519 entry = kvm_find_cpuid_entry(vcpu, 0xa, 0);
520 if (!entry)
521 return;
522 eax.full = entry->eax;
523 edx.full = entry->edx;
524
525 pmu->version = eax.split.version_id;
526 if (!pmu->version)
527 return;
528
529 pmu->nr_arch_gp_counters = min_t(int, eax.split.num_counters,
530 INTEL_PMC_MAX_GENERIC);
531 pmu->counter_bitmask[KVM_PMC_GP] = ((u64)1 << eax.split.bit_width) - 1;
532 pmu->available_event_types = ~entry->ebx &
533 ((1ull << eax.split.mask_length) - 1);
534
535 if (pmu->version == 1) {
536 pmu->nr_arch_fixed_counters = 0;
537 } else {
538 pmu->nr_arch_fixed_counters =
539 min_t(int, edx.split.num_counters_fixed,
540 INTEL_PMC_MAX_FIXED);
541 pmu->counter_bitmask[KVM_PMC_FIXED] =
542 ((u64)1 << edx.split.bit_width_fixed) - 1;
543 }
544
545 pmu->global_ctrl = ((1 << pmu->nr_arch_gp_counters) - 1) |
546 (((1ull << pmu->nr_arch_fixed_counters) - 1) << INTEL_PMC_IDX_FIXED);
547 pmu->global_ctrl_mask = ~pmu->global_ctrl;
548
549 entry = kvm_find_cpuid_entry(vcpu, 7, 0);
550 if (entry &&
551 (boot_cpu_has(X86_FEATURE_HLE) || boot_cpu_has(X86_FEATURE_RTM)) &&
552 (entry->ebx & (X86_FEATURE_HLE|X86_FEATURE_RTM)))
553 pmu->reserved_bits ^= HSW_IN_TX|HSW_IN_TX_CHECKPOINTED;
554} 286}
555 287
556void kvm_pmu_reset(struct kvm_vcpu *vcpu) 288void kvm_pmu_reset(struct kvm_vcpu *vcpu)
557{ 289{
558 struct kvm_pmu *pmu = vcpu_to_pmu(vcpu); 290 struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
559 int i;
560 291
561 irq_work_sync(&pmu->irq_work); 292 irq_work_sync(&pmu->irq_work);
562 for (i = 0; i < INTEL_PMC_MAX_GENERIC; i++) { 293 kvm_x86_ops->pmu_ops->reset(vcpu);
563 struct kvm_pmc *pmc = &pmu->gp_counters[i];
564 pmc_stop_counter(pmc);
565 pmc->counter = pmc->eventsel = 0;
566 }
567
568 for (i = 0; i < INTEL_PMC_MAX_FIXED; i++)
569 pmc_stop_counter(&pmu->fixed_counters[i]);
570
571 pmu->fixed_ctr_ctrl = pmu->global_ctrl = pmu->global_status =
572 pmu->global_ovf_ctrl = 0;
573} 294}
574 295
575void kvm_pmu_init(struct kvm_vcpu *vcpu) 296void kvm_pmu_init(struct kvm_vcpu *vcpu)
576{ 297{
577 int i;
578 struct kvm_pmu *pmu = vcpu_to_pmu(vcpu); 298 struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
579 299
580 memset(pmu, 0, sizeof(*pmu)); 300 memset(pmu, 0, sizeof(*pmu));
581 for (i = 0; i < INTEL_PMC_MAX_GENERIC; i++) { 301 kvm_x86_ops->pmu_ops->init(vcpu);
582 pmu->gp_counters[i].type = KVM_PMC_GP;
583 pmu->gp_counters[i].vcpu = vcpu;
584 pmu->gp_counters[i].idx = i;
585 }
586 for (i = 0; i < INTEL_PMC_MAX_FIXED; i++) {
587 pmu->fixed_counters[i].type = KVM_PMC_FIXED;
588 pmu->fixed_counters[i].vcpu = vcpu;
589 pmu->fixed_counters[i].idx = i + INTEL_PMC_IDX_FIXED;
590 }
591 init_irq_work(&pmu->irq_work, kvm_pmi_trigger_fn); 302 init_irq_work(&pmu->irq_work, kvm_pmi_trigger_fn);
592 kvm_pmu_refresh(vcpu); 303 kvm_pmu_refresh(vcpu);
593} 304}
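The comment block added at the top of pmu.c distinguishes an RDPMC-style MSR index (where, on Intel, bit 30 selects the fixed-counter bank) from the global PMC index stored in kvm_pmc.idx. Below is a rough stand-alone sketch of that decoding, modeled on the intel_is_valid_msr_idx()/intel_msr_idx_to_pmc() logic later in this patch; INTEL_PMC_IDX_FIXED is assumed to be 32, matching the kernel headers, and the struct name here is made up for the example.

/* Stand-alone model of RDPMC index decoding as used by the Intel vPMU code
 * in this patch: bit 30 = fixed-counter bank, low bits = counter number. */
#include <stdbool.h>
#include <stdio.h>

#define INTEL_PMC_IDX_FIXED 32   /* assumed value, matches the kernel headers */

struct decoded_idx {
	bool fixed;          /* true if the fixed-counter bank was selected */
	unsigned int num;    /* counter number within that bank */
	int global_idx;      /* kvm_pmc.idx-style global index */
};

static struct decoded_idx decode_rdpmc_ecx(unsigned int ecx)
{
	struct decoded_idx d;

	d.fixed = ecx & (1u << 30);      /* as in intel_msr_idx_to_pmc() */
	d.num   = ecx & ~(3u << 30);     /* strip the type bits */
	d.global_idx = d.fixed ? INTEL_PMC_IDX_FIXED + d.num : d.num;
	return d;
}

int main(void)
{
	/* ECX = 0x40000001 selects fixed counter 1 (IA32_FIXED_CTR1). */
	struct decoded_idx d = decode_rdpmc_ecx(0x40000001u);

	printf("fixed=%d num=%u global_idx=%d\n", d.fixed, d.num, d.global_idx);
	return 0;
}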
diff --git a/arch/x86/kvm/pmu.h b/arch/x86/kvm/pmu.h
index 19bf0172f93b..f96e1f962587 100644
--- a/arch/x86/kvm/pmu.h
+++ b/arch/x86/kvm/pmu.h
@@ -5,12 +5,102 @@
5#define pmu_to_vcpu(pmu) (container_of((pmu), struct kvm_vcpu, arch.pmu)) 5#define pmu_to_vcpu(pmu) (container_of((pmu), struct kvm_vcpu, arch.pmu))
6#define pmc_to_pmu(pmc) (&(pmc)->vcpu->arch.pmu) 6#define pmc_to_pmu(pmc) (&(pmc)->vcpu->arch.pmu)
7 7
8/* retrieve the 4 bits for EN and PMI out of IA32_FIXED_CTR_CTRL */
9#define fixed_ctrl_field(ctrl_reg, idx) (((ctrl_reg) >> ((idx)*4)) & 0xf)
10
8struct kvm_event_hw_type_mapping { 11struct kvm_event_hw_type_mapping {
9 u8 eventsel; 12 u8 eventsel;
10 u8 unit_mask; 13 u8 unit_mask;
11 unsigned event_type; 14 unsigned event_type;
12}; 15};
13 16
17struct kvm_pmu_ops {
18 unsigned (*find_arch_event)(struct kvm_pmu *pmu, u8 event_select,
19 u8 unit_mask);
20 unsigned (*find_fixed_event)(int idx);
21 bool (*pmc_is_enabled)(struct kvm_pmc *pmc);
22 struct kvm_pmc *(*pmc_idx_to_pmc)(struct kvm_pmu *pmu, int pmc_idx);
23 struct kvm_pmc *(*msr_idx_to_pmc)(struct kvm_vcpu *vcpu, unsigned idx);
24 int (*is_valid_msr_idx)(struct kvm_vcpu *vcpu, unsigned idx);
25 bool (*is_valid_msr)(struct kvm_vcpu *vcpu, u32 msr);
26 int (*get_msr)(struct kvm_vcpu *vcpu, u32 msr, u64 *data);
27 int (*set_msr)(struct kvm_vcpu *vcpu, struct msr_data *msr_info);
28 void (*refresh)(struct kvm_vcpu *vcpu);
29 void (*init)(struct kvm_vcpu *vcpu);
30 void (*reset)(struct kvm_vcpu *vcpu);
31};
32
33static inline u64 pmc_bitmask(struct kvm_pmc *pmc)
34{
35 struct kvm_pmu *pmu = pmc_to_pmu(pmc);
36
37 return pmu->counter_bitmask[pmc->type];
38}
39
40static inline u64 pmc_read_counter(struct kvm_pmc *pmc)
41{
42 u64 counter, enabled, running;
43
44 counter = pmc->counter;
45 if (pmc->perf_event)
46 counter += perf_event_read_value(pmc->perf_event,
47 &enabled, &running);
48 /* FIXME: Scaling needed? */
49 return counter & pmc_bitmask(pmc);
50}
51
52static inline void pmc_stop_counter(struct kvm_pmc *pmc)
53{
54 if (pmc->perf_event) {
55 pmc->counter = pmc_read_counter(pmc);
56 perf_event_release_kernel(pmc->perf_event);
57 pmc->perf_event = NULL;
58 }
59}
60
61static inline bool pmc_is_gp(struct kvm_pmc *pmc)
62{
63 return pmc->type == KVM_PMC_GP;
64}
65
66static inline bool pmc_is_fixed(struct kvm_pmc *pmc)
67{
68 return pmc->type == KVM_PMC_FIXED;
69}
70
71static inline bool pmc_is_enabled(struct kvm_pmc *pmc)
72{
73 return kvm_x86_ops->pmu_ops->pmc_is_enabled(pmc);
74}
75
76/* returns general purpose PMC with the specified MSR. Note that it can be
77 * used for both PERFCTRn and EVNTSELn; that is why it accepts base as a
78 * paramenter to tell them apart.
79 */
80static inline struct kvm_pmc *get_gp_pmc(struct kvm_pmu *pmu, u32 msr,
81 u32 base)
82{
83 if (msr >= base && msr < base + pmu->nr_arch_gp_counters)
84 return &pmu->gp_counters[msr - base];
85
86 return NULL;
87}
88
89/* returns fixed PMC with the specified MSR */
90static inline struct kvm_pmc *get_fixed_pmc(struct kvm_pmu *pmu, u32 msr)
91{
92 int base = MSR_CORE_PERF_FIXED_CTR0;
93
94 if (msr >= base && msr < base + pmu->nr_arch_fixed_counters)
95 return &pmu->fixed_counters[msr - base];
96
97 return NULL;
98}
99
100void reprogram_gp_counter(struct kvm_pmc *pmc, u64 eventsel);
101void reprogram_fixed_counter(struct kvm_pmc *pmc, u8 ctrl, int fixed_idx);
102void reprogram_counter(struct kvm_pmu *pmu, int pmc_idx);
103
14void kvm_pmu_deliver_pmi(struct kvm_vcpu *vcpu); 104void kvm_pmu_deliver_pmi(struct kvm_vcpu *vcpu);
15void kvm_pmu_handle_event(struct kvm_vcpu *vcpu); 105void kvm_pmu_handle_event(struct kvm_vcpu *vcpu);
16int kvm_pmu_rdpmc(struct kvm_vcpu *vcpu, unsigned pmc, u64 *data); 106int kvm_pmu_rdpmc(struct kvm_vcpu *vcpu, unsigned pmc, u64 *data);
@@ -23,4 +113,6 @@ void kvm_pmu_reset(struct kvm_vcpu *vcpu);
23void kvm_pmu_init(struct kvm_vcpu *vcpu); 113void kvm_pmu_init(struct kvm_vcpu *vcpu);
24void kvm_pmu_destroy(struct kvm_vcpu *vcpu); 114void kvm_pmu_destroy(struct kvm_vcpu *vcpu);
25 115
116extern struct kvm_pmu_ops intel_pmu_ops;
117extern struct kvm_pmu_ops amd_pmu_ops;
26#endif /* __KVM_X86_PMU_H */ 118#endif /* __KVM_X86_PMU_H */
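Two of the helpers now centralized in pmu.h are easy to miss: fixed_ctrl_field() pulls the 4-bit enable/PMI field for one fixed counter out of IA32_FIXED_CTR_CTRL, and pmc_bitmask() wraps counter values to the architected width. The stand-alone sketch below illustrates both, using the bit meanings visible in the patch (bits 0-1 of each field select ring levels, bit 3 requests a PMI) and a hypothetical 48-bit counter width chosen only for the example.

/* Stand-alone illustration of fixed_ctrl_field() and counter-width masking
 * as used by the vPMU code in this patch. */
#include <stdio.h>
#include <stdint.h>

/* Same definition as the macro added to pmu.h. */
#define fixed_ctrl_field(ctrl_reg, idx) (((ctrl_reg) >> ((idx) * 4)) & 0xf)

int main(void)
{
	/* Hypothetical IA32_FIXED_CTR_CTRL value: counter 0 counts ring 0+3
	 * with PMI (0xb), counter 1 is disabled, counter 2 counts ring 3 (0x2). */
	uint64_t fixed_ctr_ctrl = 0x20b;
	int idx;

	for (idx = 0; idx < 3; idx++) {
		uint8_t ctrl = fixed_ctrl_field(fixed_ctr_ctrl, idx);
		unsigned en_field = ctrl & 0x3;   /* as in reprogram_fixed_counter() */
		int pmi = !!(ctrl & 0x8);

		printf("fixed ctr %d: en=%u pmi=%d\n", idx, en_field, pmi);
	}

	/* Counter wrapping: pmc_bitmask() is ((u64)1 << bit_width) - 1.
	 * Assume a 48-bit wide counter for the example. */
	uint64_t bitmask = ((uint64_t)1 << 48) - 1;
	uint64_t counter = 0x1234567890abcdefULL;

	printf("masked counter: %#llx\n",
	       (unsigned long long)(counter & bitmask));
	return 0;
}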
diff --git a/arch/x86/kvm/pmu_amd.c b/arch/x86/kvm/pmu_amd.c
new file mode 100644
index 000000000000..48786407fee1
--- /dev/null
+++ b/arch/x86/kvm/pmu_amd.c
@@ -0,0 +1,97 @@
1/*
2 * KVM PMU support for AMD
3 *
4 * Copyright 2015, Red Hat, Inc. and/or its affiliates.
5 *
6 * Author:
7 * Wei Huang <wei@redhat.com>
8 *
9 * This work is licensed under the terms of the GNU GPL, version 2. See
10 * the COPYING file in the top-level directory.
11 *
12 * Implementation is based on pmu_intel.c file
13 */
14#include <linux/types.h>
15#include <linux/kvm_host.h>
16#include <linux/perf_event.h>
17#include "x86.h"
18#include "cpuid.h"
19#include "lapic.h"
20#include "pmu.h"
21
22static unsigned amd_find_arch_event(struct kvm_pmu *pmu,
23 u8 event_select,
24 u8 unit_mask)
25{
26 return PERF_COUNT_HW_MAX;
27}
28
29/* return PERF_COUNT_HW_MAX as AMD doesn't have fixed events */
30static unsigned amd_find_fixed_event(int idx)
31{
32 return PERF_COUNT_HW_MAX;
33}
34
35static bool amd_pmc_is_enabled(struct kvm_pmc *pmc)
36{
37 return false;
38}
39
40static struct kvm_pmc *amd_pmc_idx_to_pmc(struct kvm_pmu *pmu, int pmc_idx)
41{
42 return NULL;
43}
44
45/* returns 0 if idx's corresponding MSR exists; otherwise returns 1. */
46static int amd_is_valid_msr_idx(struct kvm_vcpu *vcpu, unsigned idx)
47{
48 return 1;
49}
50
51/* idx is the ECX register of RDPMC instruction */
52static struct kvm_pmc *amd_msr_idx_to_pmc(struct kvm_vcpu *vcpu, unsigned idx)
53{
54 return NULL;
55}
56
57static bool amd_is_valid_msr(struct kvm_vcpu *vcpu, u32 msr)
58{
59 return false;
60}
61
62static int amd_pmu_get_msr(struct kvm_vcpu *vcpu, u32 msr, u64 *data)
63{
64 return 1;
65}
66
67static int amd_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
68{
69 return 1;
70}
71
72static void amd_pmu_refresh(struct kvm_vcpu *vcpu)
73{
74}
75
76static void amd_pmu_init(struct kvm_vcpu *vcpu)
77{
78}
79
80static void amd_pmu_reset(struct kvm_vcpu *vcpu)
81{
82}
83
84struct kvm_pmu_ops amd_pmu_ops = {
85 .find_arch_event = amd_find_arch_event,
86 .find_fixed_event = amd_find_fixed_event,
87 .pmc_is_enabled = amd_pmc_is_enabled,
88 .pmc_idx_to_pmc = amd_pmc_idx_to_pmc,
89 .msr_idx_to_pmc = amd_msr_idx_to_pmc,
90 .is_valid_msr_idx = amd_is_valid_msr_idx,
91 .is_valid_msr = amd_is_valid_msr,
92 .get_msr = amd_pmu_get_msr,
93 .set_msr = amd_pmu_set_msr,
94 .refresh = amd_pmu_refresh,
95 .init = amd_pmu_init,
96 .reset = amd_pmu_reset,
97};
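amd_find_arch_event() above unconditionally returns PERF_COUNT_HW_MAX, so reprogram_gp_counter() keeps PERF_TYPE_RAW for AMD guests; the Intel implementation in the next file does a real table lookup against the architectural events advertised via CPUID 0xA.EBX. Below is a rough stand-alone sketch of that lookup, reproducing only the first few table entries; the numeric event_type values stand in for perf's enum perf_hw_id and PERF_COUNT_HW_MAX is a made-up sentinel for the example.

/* Stand-alone sketch of the event_select/unit_mask -> perf hardware event
 * lookup done by intel_find_arch_event() in the next file. */
#include <stdio.h>

#define PERF_COUNT_HW_MAX 10   /* stand-in sentinel for the perf enum */

struct kvm_event_hw_type_mapping {
	unsigned char eventsel;
	unsigned char unit_mask;
	unsigned event_type;
};

static const struct kvm_event_hw_type_mapping intel_arch_events[] = {
	{ 0x3c, 0x00, 0 /* PERF_COUNT_HW_CPU_CYCLES */ },
	{ 0xc0, 0x00, 1 /* PERF_COUNT_HW_INSTRUCTIONS */ },
	{ 0x3c, 0x01, 6 /* PERF_COUNT_HW_BUS_CYCLES */ },
};

static unsigned find_arch_event(unsigned available_event_types,
				unsigned char event_select, unsigned char unit_mask)
{
	unsigned i;

	for (i = 0; i < sizeof(intel_arch_events) / sizeof(intel_arch_events[0]); i++)
		if (intel_arch_events[i].eventsel == event_select &&
		    intel_arch_events[i].unit_mask == unit_mask &&
		    (available_event_types & (1u << i)))   /* CPUID 0xA.EBX filter */
			return intel_arch_events[i].event_type;

	return PERF_COUNT_HW_MAX;   /* caller then falls back to PERF_TYPE_RAW */
}

int main(void)
{
	/* Guest programs event 0xc0/0x00 (instructions retired) and the
	 * event is not masked off in available_event_types. */
	unsigned type = find_arch_event(~0u, 0xc0, 0x00);

	printf("event_type = %u (PERF_COUNT_HW_MAX = %u)\n", type, PERF_COUNT_HW_MAX);
	return 0;
}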
diff --git a/arch/x86/kvm/pmu_intel.c b/arch/x86/kvm/pmu_intel.c
new file mode 100644
index 000000000000..ab38af4f4947
--- /dev/null
+++ b/arch/x86/kvm/pmu_intel.c
@@ -0,0 +1,358 @@
1/*
2 * KVM PMU support for Intel CPUs
3 *
4 * Copyright 2011 Red Hat, Inc. and/or its affiliates.
5 *
6 * Authors:
7 * Avi Kivity <avi@redhat.com>
8 * Gleb Natapov <gleb@redhat.com>
9 *
10 * This work is licensed under the terms of the GNU GPL, version 2. See
11 * the COPYING file in the top-level directory.
12 *
13 */
14#include <linux/types.h>
15#include <linux/kvm_host.h>
16#include <linux/perf_event.h>
17#include <asm/perf_event.h>
18#include "x86.h"
19#include "cpuid.h"
20#include "lapic.h"
21#include "pmu.h"
22
23static struct kvm_event_hw_type_mapping intel_arch_events[] = {
24 /* Index must match CPUID 0x0A.EBX bit vector */
25 [0] = { 0x3c, 0x00, PERF_COUNT_HW_CPU_CYCLES },
26 [1] = { 0xc0, 0x00, PERF_COUNT_HW_INSTRUCTIONS },
27 [2] = { 0x3c, 0x01, PERF_COUNT_HW_BUS_CYCLES },
28 [3] = { 0x2e, 0x4f, PERF_COUNT_HW_CACHE_REFERENCES },
29 [4] = { 0x2e, 0x41, PERF_COUNT_HW_CACHE_MISSES },
30 [5] = { 0xc4, 0x00, PERF_COUNT_HW_BRANCH_INSTRUCTIONS },
31 [6] = { 0xc5, 0x00, PERF_COUNT_HW_BRANCH_MISSES },
32 [7] = { 0x00, 0x30, PERF_COUNT_HW_REF_CPU_CYCLES },
33};
34
35/* mapping between fixed pmc index and intel_arch_events array */
36static int fixed_pmc_events[] = {1, 0, 7};
37
38static void reprogram_fixed_counters(struct kvm_pmu *pmu, u64 data)
39{
40 int i;
41
42 for (i = 0; i < pmu->nr_arch_fixed_counters; i++) {
43 u8 new_ctrl = fixed_ctrl_field(data, i);
44 u8 old_ctrl = fixed_ctrl_field(pmu->fixed_ctr_ctrl, i);
45 struct kvm_pmc *pmc;
46
47 pmc = get_fixed_pmc(pmu, MSR_CORE_PERF_FIXED_CTR0 + i);
48
49 if (old_ctrl == new_ctrl)
50 continue;
51
52 reprogram_fixed_counter(pmc, new_ctrl, i);
53 }
54
55 pmu->fixed_ctr_ctrl = data;
56}
57
58/* function is called when global control register has been updated. */
59static void global_ctrl_changed(struct kvm_pmu *pmu, u64 data)
60{
61 int bit;
62 u64 diff = pmu->global_ctrl ^ data;
63
64 pmu->global_ctrl = data;
65
66 for_each_set_bit(bit, (unsigned long *)&diff, X86_PMC_IDX_MAX)
67 reprogram_counter(pmu, bit);
68}
69
70static unsigned intel_find_arch_event(struct kvm_pmu *pmu,
71 u8 event_select,
72 u8 unit_mask)
73{
74 int i;
75
76 for (i = 0; i < ARRAY_SIZE(intel_arch_events); i++)
77 if (intel_arch_events[i].eventsel == event_select
78 && intel_arch_events[i].unit_mask == unit_mask
79 && (pmu->available_event_types & (1 << i)))
80 break;
81
82 if (i == ARRAY_SIZE(intel_arch_events))
83 return PERF_COUNT_HW_MAX;
84
85 return intel_arch_events[i].event_type;
86}
87
88static unsigned intel_find_fixed_event(int idx)
89{
90 if (idx >= ARRAY_SIZE(fixed_pmc_events))
91 return PERF_COUNT_HW_MAX;
92
93 return intel_arch_events[fixed_pmc_events[idx]].event_type;
94}
95
96/* check if a PMC is enabled by comparising it with globl_ctrl bits. */
97static bool intel_pmc_is_enabled(struct kvm_pmc *pmc)
98{
99 struct kvm_pmu *pmu = pmc_to_pmu(pmc);
100
101 return test_bit(pmc->idx, (unsigned long *)&pmu->global_ctrl);
102}
103
104static struct kvm_pmc *intel_pmc_idx_to_pmc(struct kvm_pmu *pmu, int pmc_idx)
105{
106 if (pmc_idx < INTEL_PMC_IDX_FIXED)
107 return get_gp_pmc(pmu, MSR_P6_EVNTSEL0 + pmc_idx,
108 MSR_P6_EVNTSEL0);
109 else {
110 u32 idx = pmc_idx - INTEL_PMC_IDX_FIXED;
111
112 return get_fixed_pmc(pmu, idx + MSR_CORE_PERF_FIXED_CTR0);
113 }
114}
115
116/* returns 0 if idx's corresponding MSR exists; otherwise returns 1. */
117static int intel_is_valid_msr_idx(struct kvm_vcpu *vcpu, unsigned idx)
118{
119 struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
120 bool fixed = idx & (1u << 30);
121
122 idx &= ~(3u << 30);
123
124 return (!fixed && idx >= pmu->nr_arch_gp_counters) ||
125 (fixed && idx >= pmu->nr_arch_fixed_counters);
126}
127
128static struct kvm_pmc *intel_msr_idx_to_pmc(struct kvm_vcpu *vcpu,
129 unsigned idx)
130{
131 struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
132 bool fixed = idx & (1u << 30);
133 struct kvm_pmc *counters;
134
135 idx &= ~(3u << 30);
136 if (!fixed && idx >= pmu->nr_arch_gp_counters)
137 return NULL;
138 if (fixed && idx >= pmu->nr_arch_fixed_counters)
139 return NULL;
140 counters = fixed ? pmu->fixed_counters : pmu->gp_counters;
141
142 return &counters[idx];
143}
144
145static bool intel_is_valid_msr(struct kvm_vcpu *vcpu, u32 msr)
146{
147 struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
148 int ret;
149
150 switch (msr) {
151 case MSR_CORE_PERF_FIXED_CTR_CTRL:
152 case MSR_CORE_PERF_GLOBAL_STATUS:
153 case MSR_CORE_PERF_GLOBAL_CTRL:
154 case MSR_CORE_PERF_GLOBAL_OVF_CTRL:
155 ret = pmu->version > 1;
156 break;
157 default:
158 ret = get_gp_pmc(pmu, msr, MSR_IA32_PERFCTR0) ||
159 get_gp_pmc(pmu, msr, MSR_P6_EVNTSEL0) ||
160 get_fixed_pmc(pmu, msr);
161 break;
162 }
163
164 return ret;
165}
166
167static int intel_pmu_get_msr(struct kvm_vcpu *vcpu, u32 msr, u64 *data)
168{
169 struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
170 struct kvm_pmc *pmc;
171
172 switch (msr) {
173 case MSR_CORE_PERF_FIXED_CTR_CTRL:
174 *data = pmu->fixed_ctr_ctrl;
175 return 0;
176 case MSR_CORE_PERF_GLOBAL_STATUS:
177 *data = pmu->global_status;
178 return 0;
179 case MSR_CORE_PERF_GLOBAL_CTRL:
180 *data = pmu->global_ctrl;
181 return 0;
182 case MSR_CORE_PERF_GLOBAL_OVF_CTRL:
183 *data = pmu->global_ovf_ctrl;
184 return 0;
185 default:
186 if ((pmc = get_gp_pmc(pmu, msr, MSR_IA32_PERFCTR0)) ||
187 (pmc = get_fixed_pmc(pmu, msr))) {
188 *data = pmc_read_counter(pmc);
189 return 0;
190 } else if ((pmc = get_gp_pmc(pmu, msr, MSR_P6_EVNTSEL0))) {
191 *data = pmc->eventsel;
192 return 0;
193 }
194 }
195
196 return 1;
197}
198
199static int intel_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
200{
201 struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
202 struct kvm_pmc *pmc;
203 u32 msr = msr_info->index;
204 u64 data = msr_info->data;
205
206 switch (msr) {
207 case MSR_CORE_PERF_FIXED_CTR_CTRL:
208 if (pmu->fixed_ctr_ctrl == data)
209 return 0;
210 if (!(data & 0xfffffffffffff444ull)) {
211 reprogram_fixed_counters(pmu, data);
212 return 0;
213 }
214 break;
215 case MSR_CORE_PERF_GLOBAL_STATUS:
216 if (msr_info->host_initiated) {
217 pmu->global_status = data;
218 return 0;
219 }
220 break; /* RO MSR */
221 case MSR_CORE_PERF_GLOBAL_CTRL:
222 if (pmu->global_ctrl == data)
223 return 0;
224 if (!(data & pmu->global_ctrl_mask)) {
225 global_ctrl_changed(pmu, data);
226 return 0;
227 }
228 break;
229 case MSR_CORE_PERF_GLOBAL_OVF_CTRL:
230 if (!(data & (pmu->global_ctrl_mask & ~(3ull<<62)))) {
231 if (!msr_info->host_initiated)
232 pmu->global_status &= ~data;
233 pmu->global_ovf_ctrl = data;
234 return 0;
235 }
236 break;
237 default:
238 if ((pmc = get_gp_pmc(pmu, msr, MSR_IA32_PERFCTR0)) ||
239 (pmc = get_fixed_pmc(pmu, msr))) {
240 if (!msr_info->host_initiated)
241 data = (s64)(s32)data;
242 pmc->counter += data - pmc_read_counter(pmc);
243 return 0;
244 } else if ((pmc = get_gp_pmc(pmu, msr, MSR_P6_EVNTSEL0))) {
245 if (data == pmc->eventsel)
246 return 0;
247 if (!(data & pmu->reserved_bits)) {
248 reprogram_gp_counter(pmc, data);
249 return 0;
250 }
251 }
252 }
253
254 return 1;
255}
256
257static void intel_pmu_refresh(struct kvm_vcpu *vcpu)
258{
259 struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
260 struct kvm_cpuid_entry2 *entry;
261 union cpuid10_eax eax;
262 union cpuid10_edx edx;
263
264 pmu->nr_arch_gp_counters = 0;
265 pmu->nr_arch_fixed_counters = 0;
266 pmu->counter_bitmask[KVM_PMC_GP] = 0;
267 pmu->counter_bitmask[KVM_PMC_FIXED] = 0;
268 pmu->version = 0;
269 pmu->reserved_bits = 0xffffffff00200000ull;
270
271 entry = kvm_find_cpuid_entry(vcpu, 0xa, 0);
272 if (!entry)
273 return;
274 eax.full = entry->eax;
275 edx.full = entry->edx;
276
277 pmu->version = eax.split.version_id;
278 if (!pmu->version)
279 return;
280
281 pmu->nr_arch_gp_counters = min_t(int, eax.split.num_counters,
282 INTEL_PMC_MAX_GENERIC);
283 pmu->counter_bitmask[KVM_PMC_GP] = ((u64)1 << eax.split.bit_width) - 1;
284 pmu->available_event_types = ~entry->ebx &
285 ((1ull << eax.split.mask_length) - 1);
286
287 if (pmu->version == 1) {
288 pmu->nr_arch_fixed_counters = 0;
289 } else {
290 pmu->nr_arch_fixed_counters =
291 min_t(int, edx.split.num_counters_fixed,
292 INTEL_PMC_MAX_FIXED);
293 pmu->counter_bitmask[KVM_PMC_FIXED] =
294 ((u64)1 << edx.split.bit_width_fixed) - 1;
295 }
296
297 pmu->global_ctrl = ((1 << pmu->nr_arch_gp_counters) - 1) |
298 (((1ull << pmu->nr_arch_fixed_counters) - 1) << INTEL_PMC_IDX_FIXED);
299 pmu->global_ctrl_mask = ~pmu->global_ctrl;
300
301 entry = kvm_find_cpuid_entry(vcpu, 7, 0);
302 if (entry &&
303 (boot_cpu_has(X86_FEATURE_HLE) || boot_cpu_has(X86_FEATURE_RTM)) &&
304 (entry->ebx & (X86_FEATURE_HLE|X86_FEATURE_RTM)))
305 pmu->reserved_bits ^= HSW_IN_TX|HSW_IN_TX_CHECKPOINTED;
306}
307
308static void intel_pmu_init(struct kvm_vcpu *vcpu)
309{
310 int i;
311 struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
312
313 for (i = 0; i < INTEL_PMC_MAX_GENERIC; i++) {
314 pmu->gp_counters[i].type = KVM_PMC_GP;
315 pmu->gp_counters[i].vcpu = vcpu;
316 pmu->gp_counters[i].idx = i;
317 }
318
319 for (i = 0; i < INTEL_PMC_MAX_FIXED; i++) {
320 pmu->fixed_counters[i].type = KVM_PMC_FIXED;
321 pmu->fixed_counters[i].vcpu = vcpu;
322 pmu->fixed_counters[i].idx = i + INTEL_PMC_IDX_FIXED;
323 }
324}
325
326static void intel_pmu_reset(struct kvm_vcpu *vcpu)
327{
328 struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
329 int i;
330
331 for (i = 0; i < INTEL_PMC_MAX_GENERIC; i++) {
332 struct kvm_pmc *pmc = &pmu->gp_counters[i];
333
334 pmc_stop_counter(pmc);
335 pmc->counter = pmc->eventsel = 0;
336 }
337
338 for (i = 0; i < INTEL_PMC_MAX_FIXED; i++)
339 pmc_stop_counter(&pmu->fixed_counters[i]);
340
341 pmu->fixed_ctr_ctrl = pmu->global_ctrl = pmu->global_status =
342 pmu->global_ovf_ctrl = 0;
343}
344
345struct kvm_pmu_ops intel_pmu_ops = {
346 .find_arch_event = intel_find_arch_event,
347 .find_fixed_event = intel_find_fixed_event,
348 .pmc_is_enabled = intel_pmc_is_enabled,
349 .pmc_idx_to_pmc = intel_pmc_idx_to_pmc,
350 .msr_idx_to_pmc = intel_msr_idx_to_pmc,
351 .is_valid_msr_idx = intel_is_valid_msr_idx,
352 .is_valid_msr = intel_is_valid_msr,
353 .get_msr = intel_pmu_get_msr,
354 .set_msr = intel_pmu_set_msr,
355 .refresh = intel_pmu_refresh,
356 .init = intel_pmu_init,
357 .reset = intel_pmu_reset,
358};
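intel_pmu_refresh() above sizes the vPMU from the guest's CPUID leaf 0xA via the cpuid10_eax/cpuid10_edx bitfield unions. The stand-alone sketch below does roughly the same decoding with plain shifts, assuming the usual field layout (EAX: version 7:0, number of GP counters 15:8, GP counter width 23:16, event-mask length 31:24; EDX: number of fixed counters 4:0, fixed counter width 12:5); the struct, function and example leaf values are invented for illustration.

/* Stand-alone model of the CPUID.0xA decoding done by intel_pmu_refresh();
 * field positions follow the layout assumed in the text above. */
#include <stdio.h>
#include <stdint.h>

struct vpmu_caps {
	unsigned version;
	unsigned nr_gp_counters;
	uint64_t gp_counter_mask;
	unsigned available_event_types;
	unsigned nr_fixed_counters;
	uint64_t fixed_counter_mask;
};

static struct vpmu_caps decode_cpuid_0xa(uint32_t eax, uint32_t ebx, uint32_t edx)
{
	struct vpmu_caps c = { 0 };
	unsigned bit_width, mask_length, bit_width_fixed;

	c.version = eax & 0xff;
	if (!c.version)
		return c;                       /* no architectural PMU */

	c.nr_gp_counters = (eax >> 8) & 0xff;
	bit_width        = (eax >> 16) & 0xff;
	mask_length      = (eax >> 24) & 0xff;
	c.gp_counter_mask = ((uint64_t)1 << bit_width) - 1;

	/* EBX is a bitmap of *unavailable* events, hence the inversion. */
	c.available_event_types = ~ebx & ((1u << mask_length) - 1);

	if (c.version > 1) {                    /* fixed counters need version 2+ */
		c.nr_fixed_counters = edx & 0x1f;
		bit_width_fixed     = (edx >> 5) & 0xff;
		c.fixed_counter_mask = ((uint64_t)1 << bit_width_fixed) - 1;
	}
	return c;
}

int main(void)
{
	/* Example leaf values: version 2, 4 GP counters of 48 bits, 7-bit
	 * event-mask length, 3 fixed counters of 48 bits. */
	struct vpmu_caps c = decode_cpuid_0xa(0x07300402, 0x0, 0x00000603);

	printf("v%u: %u gp counters, %u fixed counters, events 0x%x\n",
	       c.version, c.nr_gp_counters, c.nr_fixed_counters,
	       c.available_event_types);
	return 0;
}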
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index e7685af399e4..851a9a1c6dfc 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -21,6 +21,7 @@
21#include "kvm_cache_regs.h" 21#include "kvm_cache_regs.h"
22#include "x86.h" 22#include "x86.h"
23#include "cpuid.h" 23#include "cpuid.h"
24#include "pmu.h"
24 25
25#include <linux/module.h> 26#include <linux/module.h>
26#include <linux/mod_devicetable.h> 27#include <linux/mod_devicetable.h>
@@ -4457,6 +4458,8 @@ static struct kvm_x86_ops svm_x86_ops = {
4457 .handle_external_intr = svm_handle_external_intr, 4458 .handle_external_intr = svm_handle_external_intr,
4458 4459
4459 .sched_in = svm_sched_in, 4460 .sched_in = svm_sched_in,
4461
4462 .pmu_ops = &amd_pmu_ops,
4460}; 4463};
4461 4464
4462static int __init svm_init(void) 4465static int __init svm_init(void)
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 44eafdb440c9..e5a379f82672 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -48,6 +48,7 @@
48#include <asm/apic.h> 48#include <asm/apic.h>
49 49
50#include "trace.h" 50#include "trace.h"
51#include "pmu.h"
51 52
52#define __ex(x) __kvm_handle_fault_on_reboot(x) 53#define __ex(x) __kvm_handle_fault_on_reboot(x)
53#define __ex_clear(x, reg) \ 54#define __ex_clear(x, reg) \
@@ -10419,6 +10420,8 @@ static struct kvm_x86_ops vmx_x86_ops = {
10419 .slot_disable_log_dirty = vmx_slot_disable_log_dirty, 10420 .slot_disable_log_dirty = vmx_slot_disable_log_dirty,
10420 .flush_log_dirty = vmx_flush_log_dirty, 10421 .flush_log_dirty = vmx_flush_log_dirty,
10421 .enable_log_dirty_pt_masked = vmx_enable_log_dirty_pt_masked, 10422 .enable_log_dirty_pt_masked = vmx_enable_log_dirty_pt_masked,
10423
10424 .pmu_ops = &intel_pmu_ops,
10422}; 10425};
10423 10426
10424static int __init vmx_init(void) 10427static int __init vmx_init(void)