summaryrefslogtreecommitdiffstats
path: root/arch
diff options
context:
space:
mode:
authorBoris Ostrovsky <boris.ostrovsky@oracle.com>2015-08-10 16:34:34 -0400
committerDavid Vrabel <david.vrabel@citrix.com>2015-08-20 07:25:20 -0400
commit65d0cf0be79feebeb19e7626fd3ed41ae73f642d (patch)
treed8ade5462b3332084ca9fef267638d30a0795fd7 /arch
parent5f141548824cebbff2e838ff401c34e667797467 (diff)
xen/PMU: Initialization code for Xen PMU
Map the shared data structure that will hold CPU registers, the VPMU context, and the V/PCPU IDs of the CPU interrupted by a PMU interrupt. The hypervisor fills in this information in its handler and passes it to the guest for further processing. Set up the PMU VIRQ. Now that the perf infrastructure will assume that a PMU is available on a PV guest, we need to be careful and make sure that accesses via the RDPMC instruction don't cause fatal traps by the hypervisor. Provide a nop RDPMC handler. For the same reason, avoid issuing a warning on a write to the APIC's LVTPC. Both of these will be made functional in later patches. Signed-off-by: Boris Ostrovsky <boris.ostrovsky@oracle.com> Reviewed-by: David Vrabel <david.vrabel@citrix.com> Signed-off-by: David Vrabel <david.vrabel@citrix.com>
Diffstat (limited to 'arch')
-rw-r--r--arch/x86/include/asm/xen/interface.h123
-rw-r--r--arch/x86/xen/Makefile2
-rw-r--r--arch/x86/xen/apic.c3
-rw-r--r--arch/x86/xen/enlighten.c12
-rw-r--r--arch/x86/xen/pmu.c170
-rw-r--r--arch/x86/xen/pmu.h11
-rw-r--r--arch/x86/xen/smp.c29
-rw-r--r--arch/x86/xen/suspend.c23
8 files changed, 364 insertions, 9 deletions
diff --git a/arch/x86/include/asm/xen/interface.h b/arch/x86/include/asm/xen/interface.h
index 3b88eeacdbda..62ca03ef5c65 100644
--- a/arch/x86/include/asm/xen/interface.h
+++ b/arch/x86/include/asm/xen/interface.h
@@ -250,6 +250,129 @@ struct vcpu_guest_context {
250#endif 250#endif
251}; 251};
252DEFINE_GUEST_HANDLE_STRUCT(vcpu_guest_context); 252DEFINE_GUEST_HANDLE_STRUCT(vcpu_guest_context);
253
254/* AMD PMU registers and structures */
255struct xen_pmu_amd_ctxt {
256 /*
257 * Offsets to counter and control MSRs (relative to xen_pmu_arch.c.amd).
258 * For PV(H) guests these fields are RO.
259 */
260 uint32_t counters;
261 uint32_t ctrls;
262
263 /* Counter MSRs */
264#if defined(__STDC_VERSION__) && __STDC_VERSION__ >= 199901L
265 uint64_t regs[];
266#elif defined(__GNUC__)
267 uint64_t regs[0];
268#endif
269};
270
271/* Intel PMU registers and structures */
272struct xen_pmu_cntr_pair {
273 uint64_t counter;
274 uint64_t control;
275};
276
277struct xen_pmu_intel_ctxt {
278 /*
279 * Offsets to fixed and architectural counter MSRs (relative to
280 * xen_pmu_arch.c.intel).
281 * For PV(H) guests these fields are RO.
282 */
283 uint32_t fixed_counters;
284 uint32_t arch_counters;
285
286 /* PMU registers */
287 uint64_t global_ctrl;
288 uint64_t global_ovf_ctrl;
289 uint64_t global_status;
290 uint64_t fixed_ctrl;
291 uint64_t ds_area;
292 uint64_t pebs_enable;
293 uint64_t debugctl;
294
295 /* Fixed and architectural counter MSRs */
296#if defined(__STDC_VERSION__) && __STDC_VERSION__ >= 199901L
297 uint64_t regs[];
298#elif defined(__GNUC__)
299 uint64_t regs[0];
300#endif
301};
302
303/* Sampled domain's registers */
304struct xen_pmu_regs {
305 uint64_t ip;
306 uint64_t sp;
307 uint64_t flags;
308 uint16_t cs;
309 uint16_t ss;
310 uint8_t cpl;
311 uint8_t pad[3];
312};
313
314/* PMU flags */
315#define PMU_CACHED (1<<0) /* PMU MSRs are cached in the context */
316#define PMU_SAMPLE_USER (1<<1) /* Sample is from user or kernel mode */
317#define PMU_SAMPLE_REAL (1<<2) /* Sample is from realmode */
318#define PMU_SAMPLE_PV (1<<3) /* Sample from a PV guest */
319
320/*
321 * Architecture-specific information describing state of the processor at
322 * the time of PMU interrupt.
323 * Fields of this structure marked as RW for guest should only be written by
324 * the guest when PMU_CACHED bit in pmu_flags is set (which is done by the
325 * hypervisor during PMU interrupt). Hypervisor will read updated data in
326 * XENPMU_flush hypercall and clear PMU_CACHED bit.
327 */
328struct xen_pmu_arch {
329 union {
330 /*
331 * Processor's registers at the time of interrupt.
332 * WO for hypervisor, RO for guests.
333 */
334 struct xen_pmu_regs regs;
335 /*
336 * Padding for adding new registers to xen_pmu_regs in
337 * the future
338 */
339#define XENPMU_REGS_PAD_SZ 64
340 uint8_t pad[XENPMU_REGS_PAD_SZ];
341 } r;
342
343 /* WO for hypervisor, RO for guest */
344 uint64_t pmu_flags;
345
346 /*
347 * APIC LVTPC register.
348 * RW for both hypervisor and guest.
349 * Only APIC_LVT_MASKED bit is loaded by the hypervisor into hardware
350 * during XENPMU_flush or XENPMU_lvtpc_set.
351 */
352 union {
353 uint32_t lapic_lvtpc;
354 uint64_t pad;
355 } l;
356
357 /*
358 * Vendor-specific PMU registers.
359 * RW for both hypervisor and guest (see exceptions above).
360 * Guest's updates to this field are verified and then loaded by the
361 * hypervisor into hardware during XENPMU_flush
362 */
363 union {
364 struct xen_pmu_amd_ctxt amd;
365 struct xen_pmu_intel_ctxt intel;
366
367 /*
368 * Padding for contexts (fixed parts only, does not include
369 * MSR banks that are specified by offsets)
370 */
371#define XENPMU_CTXT_PAD_SZ 128
372 uint8_t pad[XENPMU_CTXT_PAD_SZ];
373 } c;
374};
375
253#endif /* !__ASSEMBLY__ */ 376#endif /* !__ASSEMBLY__ */
254 377
255/* 378/*
diff --git a/arch/x86/xen/Makefile b/arch/x86/xen/Makefile
index 4b6e29ac0968..e47e52787d32 100644
--- a/arch/x86/xen/Makefile
+++ b/arch/x86/xen/Makefile
@@ -13,7 +13,7 @@ CFLAGS_mmu.o := $(nostackp)
13obj-y := enlighten.o setup.o multicalls.o mmu.o irq.o \ 13obj-y := enlighten.o setup.o multicalls.o mmu.o irq.o \
14 time.o xen-asm.o xen-asm_$(BITS).o \ 14 time.o xen-asm.o xen-asm_$(BITS).o \
15 grant-table.o suspend.o platform-pci-unplug.o \ 15 grant-table.o suspend.o platform-pci-unplug.o \
16 p2m.o apic.o 16 p2m.o apic.o pmu.o
17 17
18obj-$(CONFIG_EVENT_TRACING) += trace.o 18obj-$(CONFIG_EVENT_TRACING) += trace.o
19 19
diff --git a/arch/x86/xen/apic.c b/arch/x86/xen/apic.c
index 70e060ad879a..d03ebfa89b9f 100644
--- a/arch/x86/xen/apic.c
+++ b/arch/x86/xen/apic.c
@@ -72,6 +72,9 @@ static u32 xen_apic_read(u32 reg)
72 72
73static void xen_apic_write(u32 reg, u32 val) 73static void xen_apic_write(u32 reg, u32 val)
74{ 74{
75 if (reg == APIC_LVTPC)
76 return;
77
75 /* Warn to see if there's any stray references */ 78 /* Warn to see if there's any stray references */
76 WARN(1,"register: %x, value: %x\n", reg, val); 79 WARN(1,"register: %x, value: %x\n", reg, val);
77} 80}
diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
index 373dbc9810d1..19072f91a8e2 100644
--- a/arch/x86/xen/enlighten.c
+++ b/arch/x86/xen/enlighten.c
@@ -84,6 +84,7 @@
84#include "mmu.h" 84#include "mmu.h"
85#include "smp.h" 85#include "smp.h"
86#include "multicalls.h" 86#include "multicalls.h"
87#include "pmu.h"
87 88
88EXPORT_SYMBOL_GPL(hypercall_page); 89EXPORT_SYMBOL_GPL(hypercall_page);
89 90
@@ -1082,6 +1083,11 @@ static int xen_write_msr_safe(unsigned int msr, unsigned low, unsigned high)
1082 return ret; 1083 return ret;
1083} 1084}
1084 1085
1086unsigned long long xen_read_pmc(int counter)
1087{
1088 return 0;
1089}
1090
1085void xen_setup_shared_info(void) 1091void xen_setup_shared_info(void)
1086{ 1092{
1087 if (!xen_feature(XENFEAT_auto_translated_physmap)) { 1093 if (!xen_feature(XENFEAT_auto_translated_physmap)) {
@@ -1216,7 +1222,7 @@ static const struct pv_cpu_ops xen_cpu_ops __initconst = {
1216 .write_msr = xen_write_msr_safe, 1222 .write_msr = xen_write_msr_safe,
1217 1223
1218 .read_tsc = native_read_tsc, 1224 .read_tsc = native_read_tsc,
1219 .read_pmc = native_read_pmc, 1225 .read_pmc = xen_read_pmc,
1220 1226
1221 .read_tscp = native_read_tscp, 1227 .read_tscp = native_read_tscp,
1222 1228
@@ -1267,6 +1273,10 @@ static const struct pv_apic_ops xen_apic_ops __initconst = {
1267static void xen_reboot(int reason) 1273static void xen_reboot(int reason)
1268{ 1274{
1269 struct sched_shutdown r = { .reason = reason }; 1275 struct sched_shutdown r = { .reason = reason };
1276 int cpu;
1277
1278 for_each_online_cpu(cpu)
1279 xen_pmu_finish(cpu);
1270 1280
1271 if (HYPERVISOR_sched_op(SCHEDOP_shutdown, &r)) 1281 if (HYPERVISOR_sched_op(SCHEDOP_shutdown, &r))
1272 BUG(); 1282 BUG();
diff --git a/arch/x86/xen/pmu.c b/arch/x86/xen/pmu.c
new file mode 100644
index 000000000000..1d1ae1b874ea
--- /dev/null
+++ b/arch/x86/xen/pmu.c
@@ -0,0 +1,170 @@
1#include <linux/types.h>
2#include <linux/interrupt.h>
3
4#include <asm/xen/hypercall.h>
5#include <xen/page.h>
6#include <xen/interface/xen.h>
7#include <xen/interface/vcpu.h>
8#include <xen/interface/xenpmu.h>
9
10#include "xen-ops.h"
11#include "pmu.h"
12
13/* x86_pmu.handle_irq definition */
14#include "../kernel/cpu/perf_event.h"
15
16
17/* Shared page between hypervisor and domain */
18static DEFINE_PER_CPU(struct xen_pmu_data *, xenpmu_shared);
19#define get_xenpmu_data() per_cpu(xenpmu_shared, smp_processor_id())
20
21/* perf callbacks */
22static int xen_is_in_guest(void)
23{
24 const struct xen_pmu_data *xenpmu_data = get_xenpmu_data();
25
26 if (!xenpmu_data) {
27 pr_warn_once("%s: pmudata not initialized\n", __func__);
28 return 0;
29 }
30
31 if (!xen_initial_domain() || (xenpmu_data->domain_id >= DOMID_SELF))
32 return 0;
33
34 return 1;
35}
36
37static int xen_is_user_mode(void)
38{
39 const struct xen_pmu_data *xenpmu_data = get_xenpmu_data();
40
41 if (!xenpmu_data) {
42 pr_warn_once("%s: pmudata not initialized\n", __func__);
43 return 0;
44 }
45
46 if (xenpmu_data->pmu.pmu_flags & PMU_SAMPLE_PV)
47 return (xenpmu_data->pmu.pmu_flags & PMU_SAMPLE_USER);
48 else
49 return !!(xenpmu_data->pmu.r.regs.cpl & 3);
50}
51
52static unsigned long xen_get_guest_ip(void)
53{
54 const struct xen_pmu_data *xenpmu_data = get_xenpmu_data();
55
56 if (!xenpmu_data) {
57 pr_warn_once("%s: pmudata not initialized\n", __func__);
58 return 0;
59 }
60
61 return xenpmu_data->pmu.r.regs.ip;
62}
63
64static struct perf_guest_info_callbacks xen_guest_cbs = {
65 .is_in_guest = xen_is_in_guest,
66 .is_user_mode = xen_is_user_mode,
67 .get_guest_ip = xen_get_guest_ip,
68};
69
70/* Convert registers from Xen's format to Linux' */
71static void xen_convert_regs(const struct xen_pmu_regs *xen_regs,
72 struct pt_regs *regs, uint64_t pmu_flags)
73{
74 regs->ip = xen_regs->ip;
75 regs->cs = xen_regs->cs;
76 regs->sp = xen_regs->sp;
77
78 if (pmu_flags & PMU_SAMPLE_PV) {
79 if (pmu_flags & PMU_SAMPLE_USER)
80 regs->cs |= 3;
81 else
82 regs->cs &= ~3;
83 } else {
84 if (xen_regs->cpl)
85 regs->cs |= 3;
86 else
87 regs->cs &= ~3;
88 }
89}
90
91irqreturn_t xen_pmu_irq_handler(int irq, void *dev_id)
92{
93 int ret = IRQ_NONE;
94 struct pt_regs regs;
95 const struct xen_pmu_data *xenpmu_data = get_xenpmu_data();
96
97 if (!xenpmu_data) {
98 pr_warn_once("%s: pmudata not initialized\n", __func__);
99 return ret;
100 }
101
102 xen_convert_regs(&xenpmu_data->pmu.r.regs, &regs,
103 xenpmu_data->pmu.pmu_flags);
104 if (x86_pmu.handle_irq(&regs))
105 ret = IRQ_HANDLED;
106
107 return ret;
108}
109
110bool is_xen_pmu(int cpu)
111{
112 return (per_cpu(xenpmu_shared, cpu) != NULL);
113}
114
115void xen_pmu_init(int cpu)
116{
117 int err;
118 struct xen_pmu_params xp;
119 unsigned long pfn;
120 struct xen_pmu_data *xenpmu_data;
121
122 BUILD_BUG_ON(sizeof(struct xen_pmu_data) > PAGE_SIZE);
123
124 if (xen_hvm_domain())
125 return;
126
127 xenpmu_data = (struct xen_pmu_data *)get_zeroed_page(GFP_KERNEL);
128 if (!xenpmu_data) {
129 pr_err("VPMU init: No memory\n");
130 return;
131 }
132 pfn = virt_to_pfn(xenpmu_data);
133
134 xp.val = pfn_to_mfn(pfn);
135 xp.vcpu = cpu;
136 xp.version.maj = XENPMU_VER_MAJ;
137 xp.version.min = XENPMU_VER_MIN;
138 err = HYPERVISOR_xenpmu_op(XENPMU_init, &xp);
139 if (err)
140 goto fail;
141
142 per_cpu(xenpmu_shared, cpu) = xenpmu_data;
143
144 if (cpu == 0)
145 perf_register_guest_info_callbacks(&xen_guest_cbs);
146
147 return;
148
149fail:
150 pr_warn_once("Could not initialize VPMU for cpu %d, error %d\n",
151 cpu, err);
152 free_pages((unsigned long)xenpmu_data, 0);
153}
154
155void xen_pmu_finish(int cpu)
156{
157 struct xen_pmu_params xp;
158
159 if (xen_hvm_domain())
160 return;
161
162 xp.vcpu = cpu;
163 xp.version.maj = XENPMU_VER_MAJ;
164 xp.version.min = XENPMU_VER_MIN;
165
166 (void)HYPERVISOR_xenpmu_op(XENPMU_finish, &xp);
167
168 free_pages((unsigned long)per_cpu(xenpmu_shared, cpu), 0);
169 per_cpu(xenpmu_shared, cpu) = NULL;
170}
diff --git a/arch/x86/xen/pmu.h b/arch/x86/xen/pmu.h
new file mode 100644
index 000000000000..a76d2cf83581
--- /dev/null
+++ b/arch/x86/xen/pmu.h
@@ -0,0 +1,11 @@
1#ifndef __XEN_PMU_H
2#define __XEN_PMU_H
3
4#include <xen/interface/xenpmu.h>
5
6irqreturn_t xen_pmu_irq_handler(int irq, void *dev_id);
7void xen_pmu_init(int cpu);
8void xen_pmu_finish(int cpu);
9bool is_xen_pmu(int cpu);
10
11#endif /* __XEN_PMU_H */
diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c
index 86484384492e..2a9ff7342791 100644
--- a/arch/x86/xen/smp.c
+++ b/arch/x86/xen/smp.c
@@ -26,6 +26,7 @@
26 26
27#include <xen/interface/xen.h> 27#include <xen/interface/xen.h>
28#include <xen/interface/vcpu.h> 28#include <xen/interface/vcpu.h>
29#include <xen/interface/xenpmu.h>
29 30
30#include <asm/xen/interface.h> 31#include <asm/xen/interface.h>
31#include <asm/xen/hypercall.h> 32#include <asm/xen/hypercall.h>
@@ -38,6 +39,7 @@
38#include "xen-ops.h" 39#include "xen-ops.h"
39#include "mmu.h" 40#include "mmu.h"
40#include "smp.h" 41#include "smp.h"
42#include "pmu.h"
41 43
42cpumask_var_t xen_cpu_initialized_map; 44cpumask_var_t xen_cpu_initialized_map;
43 45
@@ -50,6 +52,7 @@ static DEFINE_PER_CPU(struct xen_common_irq, xen_callfunc_irq) = { .irq = -1 };
50static DEFINE_PER_CPU(struct xen_common_irq, xen_callfuncsingle_irq) = { .irq = -1 }; 52static DEFINE_PER_CPU(struct xen_common_irq, xen_callfuncsingle_irq) = { .irq = -1 };
51static DEFINE_PER_CPU(struct xen_common_irq, xen_irq_work) = { .irq = -1 }; 53static DEFINE_PER_CPU(struct xen_common_irq, xen_irq_work) = { .irq = -1 };
52static DEFINE_PER_CPU(struct xen_common_irq, xen_debug_irq) = { .irq = -1 }; 54static DEFINE_PER_CPU(struct xen_common_irq, xen_debug_irq) = { .irq = -1 };
55static DEFINE_PER_CPU(struct xen_common_irq, xen_pmu_irq) = { .irq = -1 };
53 56
54static irqreturn_t xen_call_function_interrupt(int irq, void *dev_id); 57static irqreturn_t xen_call_function_interrupt(int irq, void *dev_id);
55static irqreturn_t xen_call_function_single_interrupt(int irq, void *dev_id); 58static irqreturn_t xen_call_function_single_interrupt(int irq, void *dev_id);
@@ -148,11 +151,18 @@ static void xen_smp_intr_free(unsigned int cpu)
148 kfree(per_cpu(xen_irq_work, cpu).name); 151 kfree(per_cpu(xen_irq_work, cpu).name);
149 per_cpu(xen_irq_work, cpu).name = NULL; 152 per_cpu(xen_irq_work, cpu).name = NULL;
150 } 153 }
154
155 if (per_cpu(xen_pmu_irq, cpu).irq >= 0) {
156 unbind_from_irqhandler(per_cpu(xen_pmu_irq, cpu).irq, NULL);
157 per_cpu(xen_pmu_irq, cpu).irq = -1;
158 kfree(per_cpu(xen_pmu_irq, cpu).name);
159 per_cpu(xen_pmu_irq, cpu).name = NULL;
160 }
151}; 161};
152static int xen_smp_intr_init(unsigned int cpu) 162static int xen_smp_intr_init(unsigned int cpu)
153{ 163{
154 int rc; 164 int rc;
155 char *resched_name, *callfunc_name, *debug_name; 165 char *resched_name, *callfunc_name, *debug_name, *pmu_name;
156 166
157 resched_name = kasprintf(GFP_KERNEL, "resched%d", cpu); 167 resched_name = kasprintf(GFP_KERNEL, "resched%d", cpu);
158 rc = bind_ipi_to_irqhandler(XEN_RESCHEDULE_VECTOR, 168 rc = bind_ipi_to_irqhandler(XEN_RESCHEDULE_VECTOR,
@@ -218,6 +228,18 @@ static int xen_smp_intr_init(unsigned int cpu)
218 per_cpu(xen_irq_work, cpu).irq = rc; 228 per_cpu(xen_irq_work, cpu).irq = rc;
219 per_cpu(xen_irq_work, cpu).name = callfunc_name; 229 per_cpu(xen_irq_work, cpu).name = callfunc_name;
220 230
231 if (is_xen_pmu(cpu)) {
232 pmu_name = kasprintf(GFP_KERNEL, "pmu%d", cpu);
233 rc = bind_virq_to_irqhandler(VIRQ_XENPMU, cpu,
234 xen_pmu_irq_handler,
235 IRQF_PERCPU|IRQF_NOBALANCING,
236 pmu_name, NULL);
237 if (rc < 0)
238 goto fail;
239 per_cpu(xen_pmu_irq, cpu).irq = rc;
240 per_cpu(xen_pmu_irq, cpu).name = pmu_name;
241 }
242
221 return 0; 243 return 0;
222 244
223 fail: 245 fail:
@@ -335,6 +357,8 @@ static void __init xen_smp_prepare_cpus(unsigned int max_cpus)
335 } 357 }
336 set_cpu_sibling_map(0); 358 set_cpu_sibling_map(0);
337 359
360 xen_pmu_init(0);
361
338 if (xen_smp_intr_init(0)) 362 if (xen_smp_intr_init(0))
339 BUG(); 363 BUG();
340 364
@@ -462,6 +486,8 @@ static int xen_cpu_up(unsigned int cpu, struct task_struct *idle)
462 if (rc) 486 if (rc)
463 return rc; 487 return rc;
464 488
489 xen_pmu_init(cpu);
490
465 rc = xen_smp_intr_init(cpu); 491 rc = xen_smp_intr_init(cpu);
466 if (rc) 492 if (rc)
467 return rc; 493 return rc;
@@ -503,6 +529,7 @@ static void xen_cpu_die(unsigned int cpu)
503 xen_smp_intr_free(cpu); 529 xen_smp_intr_free(cpu);
504 xen_uninit_lock_cpu(cpu); 530 xen_uninit_lock_cpu(cpu);
505 xen_teardown_timer(cpu); 531 xen_teardown_timer(cpu);
532 xen_pmu_finish(cpu);
506 } 533 }
507} 534}
508 535
diff --git a/arch/x86/xen/suspend.c b/arch/x86/xen/suspend.c
index 53b4c0811f4f..feddabdab448 100644
--- a/arch/x86/xen/suspend.c
+++ b/arch/x86/xen/suspend.c
@@ -11,6 +11,7 @@
11 11
12#include "xen-ops.h" 12#include "xen-ops.h"
13#include "mmu.h" 13#include "mmu.h"
14#include "pmu.h"
14 15
15static void xen_pv_pre_suspend(void) 16static void xen_pv_pre_suspend(void)
16{ 17{
@@ -67,16 +68,26 @@ static void xen_pv_post_suspend(int suspend_cancelled)
67 68
68void xen_arch_pre_suspend(void) 69void xen_arch_pre_suspend(void)
69{ 70{
70 if (xen_pv_domain()) 71 int cpu;
71 xen_pv_pre_suspend(); 72
73 for_each_online_cpu(cpu)
74 xen_pmu_finish(cpu);
75
76 if (xen_pv_domain())
77 xen_pv_pre_suspend();
72} 78}
73 79
74void xen_arch_post_suspend(int cancelled) 80void xen_arch_post_suspend(int cancelled)
75{ 81{
76 if (xen_pv_domain()) 82 int cpu;
77 xen_pv_post_suspend(cancelled); 83
78 else 84 if (xen_pv_domain())
79 xen_hvm_post_suspend(cancelled); 85 xen_pv_post_suspend(cancelled);
86 else
87 xen_hvm_post_suspend(cancelled);
88
89 for_each_online_cpu(cpu)
90 xen_pmu_init(cpu);
80} 91}
81 92
82static void xen_vcpu_notify_restore(void *data) 93static void xen_vcpu_notify_restore(void *data)