author	Boris Ostrovsky <boris.ostrovsky@oracle.com>	2015-08-10 16:34:34 -0400
committer	David Vrabel <david.vrabel@citrix.com>	2015-08-20 07:25:20 -0400
commit	65d0cf0be79feebeb19e7626fd3ed41ae73f642d (patch)
tree	d8ade5462b3332084ca9fef267638d30a0795fd7 /arch/x86/include
parent	5f141548824cebbff2e838ff401c34e667797467 (diff)
xen/PMU: Initialization code for Xen PMU
Map the shared data structure that will hold CPU registers, the VPMU context, and the V/PCPU IDs of the CPU interrupted by the PMU interrupt. The hypervisor fills in this information in its handler and passes it to the guest for further processing.

Set up the PMU VIRQ.

Now that the perf infrastructure assumes a PMU is available on a PV guest, we need to be careful and make sure that accesses via the RDPMC instruction don't cause fatal traps from the hypervisor. Provide a nop RDPMC handler.

For the same reason, avoid issuing a warning on a write to the APIC's LVTPC register.

Both of these will be made functional in later patches.

Signed-off-by: Boris Ostrovsky <boris.ostrovsky@oracle.com>
Reviewed-by: David Vrabel <david.vrabel@citrix.com>
Signed-off-by: David Vrabel <david.vrabel@citrix.com>
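As an illustration of the nop RDPMC handler mentioned above, a minimal sketch is shown below. The function name is hypothetical and this is not the code added by the patch; it only shows the shape of a handler that swallows RDPMC so a PV guest does not take a fatal trap before the real PMU support lands in later patches.

static unsigned long long xen_read_pmc_nop(int counter)
{
	/* Pretend the counter reads as zero instead of trapping fatally. */
	return 0;
}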
Diffstat (limited to 'arch/x86/include')
-rw-r--r--	arch/x86/include/asm/xen/interface.h | 123
1 file changed, 123 insertions(+), 0 deletions(-)
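The context structures added in the diff below locate their per-vendor MSR banks through byte offsets rather than embedded arrays. A hedged sketch of how a guest might resolve the AMD counter bank from the shared page, assuming the definitions in the header below (the helper name is hypothetical; per the header comments, the offsets are relative to xen_pmu_arch.c.amd):

static uint64_t *amd_counter_regs(struct xen_pmu_arch *pmu)
{
	/* 'counters' is a byte offset from the start of the AMD context. */
	return (uint64_t *)((uintptr_t)&pmu->c.amd + pmu->c.amd.counters);
}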
diff --git a/arch/x86/include/asm/xen/interface.h b/arch/x86/include/asm/xen/interface.h
index 3b88eeacdbda..62ca03ef5c65 100644
--- a/arch/x86/include/asm/xen/interface.h
+++ b/arch/x86/include/asm/xen/interface.h
@@ -250,6 +250,129 @@ struct vcpu_guest_context {
 #endif
 };
 DEFINE_GUEST_HANDLE_STRUCT(vcpu_guest_context);
+
+/* AMD PMU registers and structures */
+struct xen_pmu_amd_ctxt {
+	/*
+	 * Offsets to counter and control MSRs (relative to xen_pmu_arch.c.amd).
+	 * For PV(H) guests these fields are RO.
+	 */
+	uint32_t counters;
+	uint32_t ctrls;
+
+	/* Counter MSRs */
+#if defined(__STDC_VERSION__) && __STDC_VERSION__ >= 199901L
+	uint64_t regs[];
+#elif defined(__GNUC__)
+	uint64_t regs[0];
+#endif
+};
+
+/* Intel PMU registers and structures */
+struct xen_pmu_cntr_pair {
+	uint64_t counter;
+	uint64_t control;
+};
+
+struct xen_pmu_intel_ctxt {
+	/*
+	 * Offsets to fixed and architectural counter MSRs (relative to
+	 * xen_pmu_arch.c.intel).
+	 * For PV(H) guests these fields are RO.
+	 */
+	uint32_t fixed_counters;
+	uint32_t arch_counters;
+
+	/* PMU registers */
+	uint64_t global_ctrl;
+	uint64_t global_ovf_ctrl;
+	uint64_t global_status;
+	uint64_t fixed_ctrl;
+	uint64_t ds_area;
+	uint64_t pebs_enable;
+	uint64_t debugctl;
+
+	/* Fixed and architectural counter MSRs */
+#if defined(__STDC_VERSION__) && __STDC_VERSION__ >= 199901L
+	uint64_t regs[];
+#elif defined(__GNUC__)
+	uint64_t regs[0];
+#endif
+};
+
+/* Sampled domain's registers */
+struct xen_pmu_regs {
+	uint64_t ip;
+	uint64_t sp;
+	uint64_t flags;
+	uint16_t cs;
+	uint16_t ss;
+	uint8_t cpl;
+	uint8_t pad[3];
+};
+
+/* PMU flags */
+#define PMU_CACHED	   (1<<0) /* PMU MSRs are cached in the context */
+#define PMU_SAMPLE_USER	   (1<<1) /* Sample is from user or kernel mode */
+#define PMU_SAMPLE_REAL	   (1<<2) /* Sample is from realmode */
+#define PMU_SAMPLE_PV	   (1<<3) /* Sample from a PV guest */
+
+/*
+ * Architecture-specific information describing state of the processor at
+ * the time of PMU interrupt.
+ * Fields of this structure marked as RW for guest should only be written by
+ * the guest when PMU_CACHED bit in pmu_flags is set (which is done by the
+ * hypervisor during PMU interrupt). Hypervisor will read updated data in
+ * XENPMU_flush hypercall and clear PMU_CACHED bit.
+ */
+struct xen_pmu_arch {
+	union {
+		/*
+		 * Processor's registers at the time of interrupt.
+		 * WO for hypervisor, RO for guests.
+		 */
+		struct xen_pmu_regs regs;
+		/*
+		 * Padding for adding new registers to xen_pmu_regs in
+		 * the future
+		 */
+#define XENPMU_REGS_PAD_SZ  64
+		uint8_t pad[XENPMU_REGS_PAD_SZ];
+	} r;
+
+	/* WO for hypervisor, RO for guest */
+	uint64_t pmu_flags;
+
+	/*
+	 * APIC LVTPC register.
+	 * RW for both hypervisor and guest.
+	 * Only APIC_LVT_MASKED bit is loaded by the hypervisor into hardware
+	 * during XENPMU_flush or XENPMU_lvtpc_set.
+	 */
+	union {
+		uint32_t lapic_lvtpc;
+		uint64_t pad;
+	} l;
+
+	/*
+	 * Vendor-specific PMU registers.
+	 * RW for both hypervisor and guest (see exceptions above).
+	 * Guest's updates to this field are verified and then loaded by the
+	 * hypervisor into hardware during XENPMU_flush
+	 */
+	union {
+		struct xen_pmu_amd_ctxt amd;
+		struct xen_pmu_intel_ctxt intel;
+
+		/*
+		 * Padding for contexts (fixed parts only, does not include
+		 * MSR banks that are specified by offsets)
+		 */
+#define XENPMU_CTXT_PAD_SZ  128
+		uint8_t pad[XENPMU_CTXT_PAD_SZ];
+	} c;
+};
+
 #endif /* !__ASSEMBLY__ */
 
 /*