commit f5132b01386b5a67f1ff673bb2b96a507a3f7e41
tree   1891d74d54aadda63c0fc6ce77a62025404f4757
parent 893420822192f717af6fde927c9e78c9b82f8327
author    Gleb Natapov <gleb@redhat.com>  2011-11-10 07:57:22 -0500
committer Avi Kivity <avi@redhat.com>     2011-12-27 04:24:29 -0500

KVM: Expose a version 2 architectural PMU to a guest

Use perf_events to emulate an architectural PMU, version 2. Based on the
PMU version 1 emulation by Avi Kivity.

[avi: adjust for cpuid.c]
[jan: fix anonymous field initialization for older gcc]

Signed-off-by: Gleb Natapov <gleb@redhat.com>
Signed-off-by: Jan Kiszka <jan.kiszka@siemens.com>
Signed-off-by: Avi Kivity <avi@redhat.com>
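[Editor's note: for context, a minimal sketch of the guest-visible programming model this patch emulates. This is hypothetical freestanding guest code, not part of the patch; the wrmsr/rdpmc helpers and count_cycles_once() are made up for illustration, while the MSR indices and event encoding are the architectural ones from the Intel SDM.]

/*
 * Hypothetical guest-side example: program architectural GP counter 0
 * to count unhalted core cycles, then read it back with rdpmc.
 */
#include <stdint.h>

#define IA32_PMC0               0x0c1   /* GP counter 0 */
#define IA32_PERFEVTSEL0        0x186   /* GP event selector 0 */
#define IA32_PERF_GLOBAL_CTRL   0x38f   /* v2 global enable */

static inline void wrmsr(uint32_t msr, uint64_t val)
{
        asm volatile("wrmsr" : : "c"(msr),
                     "a"((uint32_t)val), "d"((uint32_t)(val >> 32)));
}

static inline uint64_t rdpmc(uint32_t idx)
{
        uint32_t lo, hi;

        asm volatile("rdpmc" : "=a"(lo), "=d"(hi) : "c"(idx));
        return ((uint64_t)hi << 32) | lo;
}

uint64_t count_cycles_once(void)
{
        /* event 0x3c, umask 0x00: unhalted core cycles (arch_events[0]
         * in the patch below); USR (bit 16) | OS (bit 17) | EN (bit 22)
         * in the event selector. */
        wrmsr(IA32_PMC0, 0);
        wrmsr(IA32_PERFEVTSEL0, 0x3c | (1u << 16) | (1u << 17) | (1u << 22));
        wrmsr(IA32_PERF_GLOBAL_CTRL, 1);        /* enable GP counter 0 */

        /* ... run the workload to be measured ... */

        return rdpmc(0);                        /* read GP counter 0 */
}

On the host side, the wrmsr to IA32_PERFEVTSEL0 is intercepted and lands in kvm_pmu_set_msr() below, which calls reprogram_gp_counter() and in turn perf_event_create_kernel_counter(), so the guest counter is serviced by an ordinary host perf event.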
-rw-r--r--  arch/x86/include/asm/kvm_host.h |  48
-rw-r--r--  arch/x86/kvm/Kconfig            |   1
-rw-r--r--  arch/x86/kvm/Makefile           |   2
-rw-r--r--  arch/x86/kvm/cpuid.c            |   2
-rw-r--r--  arch/x86/kvm/pmu.c              | 533
-rw-r--r--  arch/x86/kvm/x86.c              |  22
-rw-r--r--  include/linux/kvm_host.h        |   2
7 files changed, 600 insertions(+), 10 deletions(-)
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 020413afb28..fb60ffdb4e4 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -16,10 +16,12 @@
 #include <linux/mmu_notifier.h>
 #include <linux/tracepoint.h>
 #include <linux/cpumask.h>
+#include <linux/irq_work.h>
 
 #include <linux/kvm.h>
 #include <linux/kvm_para.h>
 #include <linux/kvm_types.h>
+#include <linux/perf_event.h>
 
 #include <asm/pvclock-abi.h>
 #include <asm/desc.h>
@@ -291,6 +293,37 @@ struct kvm_mmu {
         u64 pdptrs[4]; /* pae */
 };
 
+enum pmc_type {
+        KVM_PMC_GP = 0,
+        KVM_PMC_FIXED,
+};
+
+struct kvm_pmc {
+        enum pmc_type type;
+        u8 idx;
+        u64 counter;
+        u64 eventsel;
+        struct perf_event *perf_event;
+        struct kvm_vcpu *vcpu;
+};
+
+struct kvm_pmu {
+        unsigned nr_arch_gp_counters;
+        unsigned nr_arch_fixed_counters;
+        unsigned available_event_types;
+        u64 fixed_ctr_ctrl;
+        u64 global_ctrl;
+        u64 global_status;
+        u64 global_ovf_ctrl;
+        u64 counter_bitmask[2];
+        u64 global_ctrl_mask;
+        u8 version;
+        struct kvm_pmc gp_counters[X86_PMC_MAX_GENERIC];
+        struct kvm_pmc fixed_counters[X86_PMC_MAX_FIXED];
+        struct irq_work irq_work;
+        u64 reprogram_pmi;
+};
+
 struct kvm_vcpu_arch {
         /*
          * rip and regs accesses must go through
@@ -424,6 +457,8 @@ struct kvm_vcpu_arch {
         unsigned access;
         gfn_t mmio_gfn;
 
+        struct kvm_pmu pmu;
+
         /* used for guest single stepping over the given code position */
         unsigned long singlestep_rip;
 
@@ -891,4 +926,17 @@ extern bool kvm_find_async_pf_gfn(struct kvm_vcpu *vcpu, gfn_t gfn);
 
 void kvm_complete_insn_gp(struct kvm_vcpu *vcpu, int err);
 
+int kvm_is_in_guest(void);
+
+void kvm_pmu_init(struct kvm_vcpu *vcpu);
+void kvm_pmu_destroy(struct kvm_vcpu *vcpu);
+void kvm_pmu_reset(struct kvm_vcpu *vcpu);
+void kvm_pmu_cpuid_update(struct kvm_vcpu *vcpu);
+bool kvm_pmu_msr(struct kvm_vcpu *vcpu, u32 msr);
+int kvm_pmu_get_msr(struct kvm_vcpu *vcpu, u32 msr, u64 *data);
+int kvm_pmu_set_msr(struct kvm_vcpu *vcpu, u32 msr, u64 data);
+int kvm_pmu_read_pmc(struct kvm_vcpu *vcpu, unsigned pmc, u64 *data);
+void kvm_handle_pmu_event(struct kvm_vcpu *vcpu);
+void kvm_deliver_pmi(struct kvm_vcpu *vcpu);
+
 #endif /* _ASM_X86_KVM_HOST_H */
diff --git a/arch/x86/kvm/Kconfig b/arch/x86/kvm/Kconfig
index ca4d49ed9a6..1a7fe868f37 100644
--- a/arch/x86/kvm/Kconfig
+++ b/arch/x86/kvm/Kconfig
@@ -35,6 +35,7 @@ config KVM
         select KVM_MMIO
         select TASKSTATS
         select TASK_DELAY_ACCT
+        select PERF_EVENTS
         ---help---
           Support hosting fully virtualized guest machines using hardware
           virtualization extensions.  You will need a fairly recent
diff --git a/arch/x86/kvm/Makefile b/arch/x86/kvm/Makefile
index 161b76ae87c..4f579e8dcac 100644
--- a/arch/x86/kvm/Makefile
+++ b/arch/x86/kvm/Makefile
@@ -12,7 +12,7 @@ kvm-$(CONFIG_IOMMU_API) += $(addprefix ../../../virt/kvm/, iommu.o)
 kvm-$(CONFIG_KVM_ASYNC_PF)      += $(addprefix ../../../virt/kvm/, async_pf.o)
 
 kvm-y                   += x86.o mmu.o emulate.o i8259.o irq.o lapic.o \
-                           i8254.o timer.o cpuid.o
+                           i8254.o timer.o cpuid.o pmu.o
 kvm-intel-y             += vmx.o
 kvm-amd-y               += svm.o
 
diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c
index ca63032bf03..e70be46f50f 100644
--- a/arch/x86/kvm/cpuid.c
+++ b/arch/x86/kvm/cpuid.c
@@ -45,6 +45,8 @@ void kvm_update_cpuid(struct kvm_vcpu *vcpu)
                 else
                         apic->lapic_timer.timer_mode_mask = 1 << 17;
         }
+
+        kvm_pmu_cpuid_update(vcpu);
 }
 
 static int is_efer_nx(void)
diff --git a/arch/x86/kvm/pmu.c b/arch/x86/kvm/pmu.c
new file mode 100644
index 00000000000..7aad5446f39
--- /dev/null
+++ b/arch/x86/kvm/pmu.c
@@ -0,0 +1,533 @@
+/*
+ * Kernel-based Virtual Machine -- Performance Monitoring Unit support
+ *
+ * Copyright 2011 Red Hat, Inc. and/or its affiliates.
+ *
+ * Authors:
+ *   Avi Kivity   <avi@redhat.com>
+ *   Gleb Natapov <gleb@redhat.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2.  See
+ * the COPYING file in the top-level directory.
+ *
+ */
+
+#include <linux/types.h>
+#include <linux/kvm_host.h>
+#include <linux/perf_event.h>
+#include "x86.h"
+#include "cpuid.h"
+#include "lapic.h"
+
+static struct kvm_arch_event_perf_mapping {
+        u8 eventsel;
+        u8 unit_mask;
+        unsigned event_type;
+        bool inexact;
+} arch_events[] = {
+        /* Index must match CPUID 0x0A.EBX bit vector */
+        [0] = { 0x3c, 0x00, PERF_COUNT_HW_CPU_CYCLES },
+        [1] = { 0xc0, 0x00, PERF_COUNT_HW_INSTRUCTIONS },
+        [2] = { 0x3c, 0x01, PERF_COUNT_HW_BUS_CYCLES },
+        [3] = { 0x2e, 0x4f, PERF_COUNT_HW_CACHE_REFERENCES },
+        [4] = { 0x2e, 0x41, PERF_COUNT_HW_CACHE_MISSES },
+        [5] = { 0xc4, 0x00, PERF_COUNT_HW_BRANCH_INSTRUCTIONS },
+        [6] = { 0xc5, 0x00, PERF_COUNT_HW_BRANCH_MISSES },
+};
+
+/* mapping between fixed pmc index and arch_events array */
+int fixed_pmc_events[] = {1, 0, 2};
+
+static bool pmc_is_gp(struct kvm_pmc *pmc)
+{
+        return pmc->type == KVM_PMC_GP;
+}
+
+static inline u64 pmc_bitmask(struct kvm_pmc *pmc)
+{
+        struct kvm_pmu *pmu = &pmc->vcpu->arch.pmu;
+
+        return pmu->counter_bitmask[pmc->type];
+}
+
+static inline bool pmc_enabled(struct kvm_pmc *pmc)
+{
+        struct kvm_pmu *pmu = &pmc->vcpu->arch.pmu;
+        return test_bit(pmc->idx, (unsigned long *)&pmu->global_ctrl);
+}
+
+static inline struct kvm_pmc *get_gp_pmc(struct kvm_pmu *pmu, u32 msr,
+                                         u32 base)
+{
+        if (msr >= base && msr < base + pmu->nr_arch_gp_counters)
+                return &pmu->gp_counters[msr - base];
+        return NULL;
+}
+
+static inline struct kvm_pmc *get_fixed_pmc(struct kvm_pmu *pmu, u32 msr)
+{
+        int base = MSR_CORE_PERF_FIXED_CTR0;
+        if (msr >= base && msr < base + pmu->nr_arch_fixed_counters)
+                return &pmu->fixed_counters[msr - base];
+        return NULL;
+}
+
+static inline struct kvm_pmc *get_fixed_pmc_idx(struct kvm_pmu *pmu, int idx)
+{
+        return get_fixed_pmc(pmu, MSR_CORE_PERF_FIXED_CTR0 + idx);
+}
+
+static struct kvm_pmc *global_idx_to_pmc(struct kvm_pmu *pmu, int idx)
+{
+        if (idx < X86_PMC_IDX_FIXED)
+                return get_gp_pmc(pmu, MSR_P6_EVNTSEL0 + idx, MSR_P6_EVNTSEL0);
+        else
+                return get_fixed_pmc_idx(pmu, idx - X86_PMC_IDX_FIXED);
+}
+
+void kvm_deliver_pmi(struct kvm_vcpu *vcpu)
+{
+        if (vcpu->arch.apic)
+                kvm_apic_local_deliver(vcpu->arch.apic, APIC_LVTPC);
+}
+
+static void trigger_pmi(struct irq_work *irq_work)
+{
+        struct kvm_pmu *pmu = container_of(irq_work, struct kvm_pmu,
+                        irq_work);
+        struct kvm_vcpu *vcpu = container_of(pmu, struct kvm_vcpu,
+                        arch.pmu);
+
+        kvm_deliver_pmi(vcpu);
+}
+
+static void kvm_perf_overflow(struct perf_event *perf_event,
+                              struct perf_sample_data *data,
+                              struct pt_regs *regs)
+{
+        struct kvm_pmc *pmc = perf_event->overflow_handler_context;
+        struct kvm_pmu *pmu = &pmc->vcpu->arch.pmu;
+        __set_bit(pmc->idx, (unsigned long *)&pmu->global_status);
+}
+
+static void kvm_perf_overflow_intr(struct perf_event *perf_event,
+                struct perf_sample_data *data, struct pt_regs *regs)
+{
+        struct kvm_pmc *pmc = perf_event->overflow_handler_context;
+        struct kvm_pmu *pmu = &pmc->vcpu->arch.pmu;
+        if (!test_and_set_bit(pmc->idx, (unsigned long *)&pmu->reprogram_pmi)) {
+                kvm_perf_overflow(perf_event, data, regs);
+                kvm_make_request(KVM_REQ_PMU, pmc->vcpu);
+                /*
+                 * Inject PMI. If the vcpu was in guest mode when the NMI
+                 * arrived, the PMI can be injected on guest re-entry.
+                 * Otherwise we cannot be sure that the vcpu was not executing
+                 * a hlt instruction at the time of the vmexit, so it may not
+                 * re-enter guest mode until woken up. We should wake it, but
+                 * that is impossible from NMI context; do it from irq work
+                 * instead.
+                 */
+                if (!kvm_is_in_guest())
+                        irq_work_queue(&pmc->vcpu->arch.pmu.irq_work);
+                else
+                        kvm_make_request(KVM_REQ_PMI, pmc->vcpu);
+        }
+}
+
+static u64 read_pmc(struct kvm_pmc *pmc)
+{
+        u64 counter, enabled, running;
+
+        counter = pmc->counter;
+
+        if (pmc->perf_event)
+                counter += perf_event_read_value(pmc->perf_event,
+                                                 &enabled, &running);
+
+        /* FIXME: Scaling needed? */
+
+        return counter & pmc_bitmask(pmc);
+}
+
+static void stop_counter(struct kvm_pmc *pmc)
+{
+        if (pmc->perf_event) {
+                pmc->counter = read_pmc(pmc);
+                perf_event_release_kernel(pmc->perf_event);
+                pmc->perf_event = NULL;
+        }
+}
+
+static void reprogram_counter(struct kvm_pmc *pmc, u32 type,
+                unsigned config, bool exclude_user, bool exclude_kernel,
+                bool intr)
+{
+        struct perf_event *event;
+        struct perf_event_attr attr = {
+                .type = type,
+                .size = sizeof(attr),
+                .pinned = true,
+                .exclude_idle = true,
+                .exclude_host = 1,
+                .exclude_user = exclude_user,
+                .exclude_kernel = exclude_kernel,
+                .config = config,
+        };
+
+        attr.sample_period = (-pmc->counter) & pmc_bitmask(pmc);
+
+        event = perf_event_create_kernel_counter(&attr, -1, current,
+                                                 intr ? kvm_perf_overflow_intr :
+                                                 kvm_perf_overflow, pmc);
+        if (IS_ERR(event)) {
+                printk_once("kvm: pmu event creation failed %ld\n",
+                            PTR_ERR(event));
+                return;
+        }
+
+        pmc->perf_event = event;
+        clear_bit(pmc->idx, (unsigned long *)&pmc->vcpu->arch.pmu.reprogram_pmi);
+}
+
+static unsigned find_arch_event(struct kvm_pmu *pmu, u8 event_select,
+                u8 unit_mask)
+{
+        int i;
+
+        for (i = 0; i < ARRAY_SIZE(arch_events); i++)
+                if (arch_events[i].eventsel == event_select
+                    && arch_events[i].unit_mask == unit_mask
+                    && (pmu->available_event_types & (1 << i)))
+                        break;
+
+        if (i == ARRAY_SIZE(arch_events))
+                return PERF_COUNT_HW_MAX;
+
+        return arch_events[i].event_type;
+}
+
+static void reprogram_gp_counter(struct kvm_pmc *pmc, u64 eventsel)
+{
+        unsigned config, type = PERF_TYPE_RAW;
+        u8 event_select, unit_mask;
+
+        pmc->eventsel = eventsel;
+
+        stop_counter(pmc);
+
+        if (!(eventsel & ARCH_PERFMON_EVENTSEL_ENABLE) || !pmc_enabled(pmc))
+                return;
+
+        event_select = eventsel & ARCH_PERFMON_EVENTSEL_EVENT;
+        unit_mask = (eventsel & ARCH_PERFMON_EVENTSEL_UMASK) >> 8;
+
+        /* check the full eventsel: event_select holds only the low 8 bits,
+         * so testing it against EDGE/INV/CMASK would always succeed */
+        if (!(eventsel & (ARCH_PERFMON_EVENTSEL_EDGE |
+                          ARCH_PERFMON_EVENTSEL_INV |
+                          ARCH_PERFMON_EVENTSEL_CMASK))) {
+                config = find_arch_event(&pmc->vcpu->arch.pmu, event_select,
+                                unit_mask);
+                if (config != PERF_COUNT_HW_MAX)
+                        type = PERF_TYPE_HARDWARE;
+        }
+
+        if (type == PERF_TYPE_RAW)
+                config = eventsel & X86_RAW_EVENT_MASK;
+
+        reprogram_counter(pmc, type, config,
+                        !(eventsel & ARCH_PERFMON_EVENTSEL_USR),
+                        !(eventsel & ARCH_PERFMON_EVENTSEL_OS),
+                        eventsel & ARCH_PERFMON_EVENTSEL_INT);
+}
+
+static void reprogram_fixed_counter(struct kvm_pmc *pmc, u8 en_pmi, int idx)
+{
+        unsigned en = en_pmi & 0x3;
+        bool pmi = en_pmi & 0x8;
+
+        stop_counter(pmc);
+
+        if (!en || !pmc_enabled(pmc))
+                return;
+
+        reprogram_counter(pmc, PERF_TYPE_HARDWARE,
+                        arch_events[fixed_pmc_events[idx]].event_type,
+                        !(en & 0x2), /* exclude user */
+                        !(en & 0x1), /* exclude kernel */
+                        pmi);
+}
+
+static inline u8 fixed_en_pmi(u64 ctrl, int idx)
+{
+        return (ctrl >> (idx * 4)) & 0xf;
+}
+
+static void reprogram_fixed_counters(struct kvm_pmu *pmu, u64 data)
+{
+        int i;
+
+        for (i = 0; i < pmu->nr_arch_fixed_counters; i++) {
+                u8 en_pmi = fixed_en_pmi(data, i);
+                struct kvm_pmc *pmc = get_fixed_pmc_idx(pmu, i);
+
+                if (fixed_en_pmi(pmu->fixed_ctr_ctrl, i) == en_pmi)
+                        continue;
+
+                reprogram_fixed_counter(pmc, en_pmi, i);
+        }
+
+        pmu->fixed_ctr_ctrl = data;
+}
+
+static void reprogram_idx(struct kvm_pmu *pmu, int idx)
+{
+        struct kvm_pmc *pmc = global_idx_to_pmc(pmu, idx);
+
+        if (!pmc)
+                return;
+
+        if (pmc_is_gp(pmc))
+                reprogram_gp_counter(pmc, pmc->eventsel);
+        else {
+                int fidx = idx - X86_PMC_IDX_FIXED;
+                reprogram_fixed_counter(pmc,
+                                fixed_en_pmi(pmu->fixed_ctr_ctrl, fidx), fidx);
+        }
+}
+
+static void global_ctrl_changed(struct kvm_pmu *pmu, u64 data)
+{
+        int bit;
+        u64 diff = pmu->global_ctrl ^ data;
+
+        pmu->global_ctrl = data;
+
+        for_each_set_bit(bit, (unsigned long *)&diff, X86_PMC_IDX_MAX)
+                reprogram_idx(pmu, bit);
+}
+
+bool kvm_pmu_msr(struct kvm_vcpu *vcpu, u32 msr)
+{
+        struct kvm_pmu *pmu = &vcpu->arch.pmu;
+        int ret;
+
+        switch (msr) {
+        case MSR_CORE_PERF_FIXED_CTR_CTRL:
+        case MSR_CORE_PERF_GLOBAL_STATUS:
+        case MSR_CORE_PERF_GLOBAL_CTRL:
+        case MSR_CORE_PERF_GLOBAL_OVF_CTRL:
+                ret = pmu->version > 1;
+                break;
+        default:
+                ret = get_gp_pmc(pmu, msr, MSR_IA32_PERFCTR0)
+                        || get_gp_pmc(pmu, msr, MSR_P6_EVNTSEL0)
+                        || get_fixed_pmc(pmu, msr);
+                break;
+        }
+        return ret;
+}
+
+int kvm_pmu_get_msr(struct kvm_vcpu *vcpu, u32 index, u64 *data)
+{
+        struct kvm_pmu *pmu = &vcpu->arch.pmu;
+        struct kvm_pmc *pmc;
+
+        switch (index) {
+        case MSR_CORE_PERF_FIXED_CTR_CTRL:
+                *data = pmu->fixed_ctr_ctrl;
+                return 0;
+        case MSR_CORE_PERF_GLOBAL_STATUS:
+                *data = pmu->global_status;
+                return 0;
+        case MSR_CORE_PERF_GLOBAL_CTRL:
+                *data = pmu->global_ctrl;
+                return 0;
+        case MSR_CORE_PERF_GLOBAL_OVF_CTRL:
+                *data = pmu->global_ovf_ctrl;
+                return 0;
+        default:
+                if ((pmc = get_gp_pmc(pmu, index, MSR_IA32_PERFCTR0)) ||
+                    (pmc = get_fixed_pmc(pmu, index))) {
+                        *data = read_pmc(pmc);
+                        return 0;
+                } else if ((pmc = get_gp_pmc(pmu, index, MSR_P6_EVNTSEL0))) {
+                        *data = pmc->eventsel;
+                        return 0;
+                }
+        }
+        return 1;
+}
+
+int kvm_pmu_set_msr(struct kvm_vcpu *vcpu, u32 index, u64 data)
+{
+        struct kvm_pmu *pmu = &vcpu->arch.pmu;
+        struct kvm_pmc *pmc;
+
+        switch (index) {
+        case MSR_CORE_PERF_FIXED_CTR_CTRL:
+                if (pmu->fixed_ctr_ctrl == data)
+                        return 0;
+                if (!(data & 0xfffffffffffff444)) {
+                        reprogram_fixed_counters(pmu, data);
+                        return 0;
+                }
+                break;
+        case MSR_CORE_PERF_GLOBAL_STATUS:
+                break; /* RO MSR */
+        case MSR_CORE_PERF_GLOBAL_CTRL:
+                if (pmu->global_ctrl == data)
+                        return 0;
+                if (!(data & pmu->global_ctrl_mask)) {
+                        global_ctrl_changed(pmu, data);
+                        return 0;
+                }
+                break;
+        case MSR_CORE_PERF_GLOBAL_OVF_CTRL:
+                if (!(data & (pmu->global_ctrl_mask & ~(3ull<<62)))) {
+                        pmu->global_status &= ~data;
+                        pmu->global_ovf_ctrl = data;
+                        return 0;
+                }
+                break;
+        default:
+                if ((pmc = get_gp_pmc(pmu, index, MSR_IA32_PERFCTR0)) ||
+                    (pmc = get_fixed_pmc(pmu, index))) {
+                        data = (s64)(s32)data;
+                        pmc->counter += data - read_pmc(pmc);
+                        return 0;
+                } else if ((pmc = get_gp_pmc(pmu, index, MSR_P6_EVNTSEL0))) {
+                        if (data == pmc->eventsel)
+                                return 0;
+                        if (!(data & 0xffffffff00200000ull)) {
+                                reprogram_gp_counter(pmc, data);
+                                return 0;
+                        }
+                }
+        }
+        return 1;
+}
+
+int kvm_pmu_read_pmc(struct kvm_vcpu *vcpu, unsigned pmc, u64 *data)
+{
+        struct kvm_pmu *pmu = &vcpu->arch.pmu;
+        bool fast_mode = pmc & (1u << 31);
+        bool fixed = pmc & (1u << 30);
+        struct kvm_pmc *counters;
+        u64 ctr;
+
+        pmc &= (3u << 30) - 1;
+        if (!fixed && pmc >= pmu->nr_arch_gp_counters)
+                return 1;
+        if (fixed && pmc >= pmu->nr_arch_fixed_counters)
+                return 1;
+        counters = fixed ? pmu->fixed_counters : pmu->gp_counters;
+        ctr = read_pmc(&counters[pmc]);
+        if (fast_mode)
+                ctr = (u32)ctr;
+        *data = ctr;
+
+        return 0;
+}
+
+void kvm_pmu_cpuid_update(struct kvm_vcpu *vcpu)
+{
+        struct kvm_pmu *pmu = &vcpu->arch.pmu;
+        struct kvm_cpuid_entry2 *entry;
+        unsigned bitmap_len;
+
+        pmu->nr_arch_gp_counters = 0;
+        pmu->nr_arch_fixed_counters = 0;
+        pmu->counter_bitmask[KVM_PMC_GP] = 0;
+        pmu->counter_bitmask[KVM_PMC_FIXED] = 0;
+        pmu->version = 0;
+
+        entry = kvm_find_cpuid_entry(vcpu, 0xa, 0);
+        if (!entry)
+                return;
+
+        pmu->version = entry->eax & 0xff;
+        if (!pmu->version)
+                return;
+
+        pmu->nr_arch_gp_counters = min((int)(entry->eax >> 8) & 0xff,
+                        X86_PMC_MAX_GENERIC);
+        pmu->counter_bitmask[KVM_PMC_GP] =
+                ((u64)1 << ((entry->eax >> 16) & 0xff)) - 1;
+        bitmap_len = (entry->eax >> 24) & 0xff;
+        pmu->available_event_types = ~entry->ebx & ((1ull << bitmap_len) - 1);
+
+        if (pmu->version == 1) {
+                pmu->global_ctrl = (1 << pmu->nr_arch_gp_counters) - 1;
+                return;
+        }
+
+        pmu->nr_arch_fixed_counters = min((int)(entry->edx & 0x1f),
+                        X86_PMC_MAX_FIXED);
+        pmu->counter_bitmask[KVM_PMC_FIXED] =
+                ((u64)1 << ((entry->edx >> 5) & 0xff)) - 1;
+        pmu->global_ctrl_mask = ~(((1 << pmu->nr_arch_gp_counters) - 1)
+                        | (((1ull << pmu->nr_arch_fixed_counters) - 1)
+                                << X86_PMC_IDX_FIXED));
+}
+
+void kvm_pmu_init(struct kvm_vcpu *vcpu)
+{
+        int i;
+        struct kvm_pmu *pmu = &vcpu->arch.pmu;
+
+        memset(pmu, 0, sizeof(*pmu));
+        for (i = 0; i < X86_PMC_MAX_GENERIC; i++) {
+                pmu->gp_counters[i].type = KVM_PMC_GP;
+                pmu->gp_counters[i].vcpu = vcpu;
+                pmu->gp_counters[i].idx = i;
+        }
+        for (i = 0; i < X86_PMC_MAX_FIXED; i++) {
+                pmu->fixed_counters[i].type = KVM_PMC_FIXED;
+                pmu->fixed_counters[i].vcpu = vcpu;
+                pmu->fixed_counters[i].idx = i + X86_PMC_IDX_FIXED;
+        }
+        init_irq_work(&pmu->irq_work, trigger_pmi);
+        kvm_pmu_cpuid_update(vcpu);
+}
+
+void kvm_pmu_reset(struct kvm_vcpu *vcpu)
+{
+        struct kvm_pmu *pmu = &vcpu->arch.pmu;
+        int i;
+
+        irq_work_sync(&pmu->irq_work);
+        for (i = 0; i < X86_PMC_MAX_GENERIC; i++) {
+                struct kvm_pmc *pmc = &pmu->gp_counters[i];
+                stop_counter(pmc);
+                pmc->counter = pmc->eventsel = 0;
+        }
+
+        for (i = 0; i < X86_PMC_MAX_FIXED; i++)
+                stop_counter(&pmu->fixed_counters[i]);
+
+        pmu->fixed_ctr_ctrl = pmu->global_ctrl = pmu->global_status =
+                pmu->global_ovf_ctrl = 0;
+}
+
+void kvm_pmu_destroy(struct kvm_vcpu *vcpu)
+{
+        kvm_pmu_reset(vcpu);
+}
+
+void kvm_handle_pmu_event(struct kvm_vcpu *vcpu)
+{
+        struct kvm_pmu *pmu = &vcpu->arch.pmu;
+        u64 bitmask;
+        int bit;
+
+        bitmask = pmu->reprogram_pmi;
+
+        for_each_set_bit(bit, (unsigned long *)&bitmask, X86_PMC_IDX_MAX) {
+                struct kvm_pmc *pmc = global_idx_to_pmc(pmu, bit);
+
+                if (unlikely(!pmc || !pmc->perf_event)) {
+                        clear_bit(bit, (unsigned long *)&pmu->reprogram_pmi);
+                        continue;
+                }
+
+                reprogram_idx(pmu, bit);
+        }
+}
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 0a646e2b57c..08ae951ecc5 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -1602,8 +1602,6 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data)
          * which we perfectly emulate ;-). Any other value should be at least
          * reported, some guests depend on them.
          */
-        case MSR_P6_EVNTSEL0:
-        case MSR_P6_EVNTSEL1:
         case MSR_K7_EVNTSEL0:
         case MSR_K7_EVNTSEL1:
         case MSR_K7_EVNTSEL2:
@@ -1615,8 +1613,6 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data)
         /* at least RHEL 4 unconditionally writes to the perfctr registers,
          * so we ignore writes to make it happy.
          */
-        case MSR_P6_PERFCTR0:
-        case MSR_P6_PERFCTR1:
         case MSR_K7_PERFCTR0:
         case MSR_K7_PERFCTR1:
         case MSR_K7_PERFCTR2:
@@ -1653,6 +1649,8 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data)
         default:
                 if (msr && (msr == vcpu->kvm->arch.xen_hvm_config.msr))
                         return xen_hvm_config(vcpu, data);
+                if (kvm_pmu_msr(vcpu, msr))
+                        return kvm_pmu_set_msr(vcpu, msr, data);
                 if (!ignore_msrs) {
                         pr_unimpl(vcpu, "unhandled wrmsr: 0x%x data %llx\n",
                                 msr, data);
@@ -1815,10 +1813,6 @@ int kvm_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
         case MSR_K8_SYSCFG:
         case MSR_K7_HWCR:
         case MSR_VM_HSAVE_PA:
-        case MSR_P6_PERFCTR0:
-        case MSR_P6_PERFCTR1:
-        case MSR_P6_EVNTSEL0:
-        case MSR_P6_EVNTSEL1:
         case MSR_K7_EVNTSEL0:
         case MSR_K7_PERFCTR0:
         case MSR_K8_INT_PENDING_MSG:
@@ -1929,6 +1923,8 @@ int kvm_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
                 data = 0xbe702111;
                 break;
         default:
+                if (kvm_pmu_msr(vcpu, msr))
+                        return kvm_pmu_get_msr(vcpu, msr, pdata);
                 if (!ignore_msrs) {
                         pr_unimpl(vcpu, "unhandled rdmsr: 0x%x\n", msr);
                         return 1;
@@ -4650,7 +4646,7 @@ static void kvm_timer_init(void)
 
 static DEFINE_PER_CPU(struct kvm_vcpu *, current_vcpu);
 
-static int kvm_is_in_guest(void)
+int kvm_is_in_guest(void)
 {
         return __this_cpu_read(current_vcpu) != NULL;
 }
@@ -5114,6 +5110,10 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
                 process_nmi(vcpu);
                 req_immediate_exit =
                         kvm_check_request(KVM_REQ_IMMEDIATE_EXIT, vcpu);
+                if (kvm_check_request(KVM_REQ_PMU, vcpu))
+                        kvm_handle_pmu_event(vcpu);
+                if (kvm_check_request(KVM_REQ_PMI, vcpu))
+                        kvm_deliver_pmi(vcpu);
         }
 
         r = kvm_mmu_reload(vcpu);
@@ -5850,6 +5850,8 @@ int kvm_arch_vcpu_reset(struct kvm_vcpu *vcpu)
         kvm_async_pf_hash_reset(vcpu);
         vcpu->arch.apf.halted = false;
 
+        kvm_pmu_reset(vcpu);
+
         return kvm_x86_ops->vcpu_reset(vcpu);
 }
 
@@ -5934,6 +5936,7 @@ int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
                 goto fail_free_mce_banks;
 
         kvm_async_pf_hash_reset(vcpu);
+        kvm_pmu_init(vcpu);
 
         return 0;
 fail_free_mce_banks:
@@ -5952,6 +5955,7 @@ void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
 {
         int idx;
 
+        kvm_pmu_destroy(vcpu);
         kfree(vcpu->arch.mce_banks);
         kvm_free_lapic(vcpu);
         idx = srcu_read_lock(&vcpu->kvm->srcu);
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index 7a080383935..900c76337e8 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -52,6 +52,8 @@
 #define KVM_REQ_STEAL_UPDATE      13
 #define KVM_REQ_NMI               14
 #define KVM_REQ_IMMEDIATE_EXIT    15
+#define KVM_REQ_PMU               16
+#define KVM_REQ_PMI               17
 
 #define KVM_USERSPACE_IRQ_SOURCE_ID     0
 