author		Ingo Molnar <mingo@elte.hu>	2009-02-09 06:16:59 -0500
committer	Ingo Molnar <mingo@elte.hu>	2009-02-09 06:16:59 -0500
commit		eca217b36e5d7d4377493d5cedd89105e66a5a72 (patch)
tree		71f0ecd5225c3033d509b77a23ad7bc576cf0ab6 /arch/x86/kernel
parent		54a353a0f845c1dad5fc8183872e750d667838ac (diff)
parent		e4d0407185cdbdcfd99fc23bde2e5454bbc46329 (diff)
Merge branch 'x86/paravirt' into x86/apic
Conflicts:
	arch/x86/mach-voyager/voyager_smp.c
Diffstat (limited to 'arch/x86/kernel')
-rw-r--r--	arch/x86/kernel/cpu/common.c		25
-rw-r--r--	arch/x86/kernel/entry_64.S		 2
-rw-r--r--	arch/x86/kernel/paravirt.c		54
-rw-r--r--	arch/x86/kernel/paravirt_patch_32.c	12
-rw-r--r--	arch/x86/kernel/paravirt_patch_64.c	15
-rw-r--r--	arch/x86/kernel/setup_percpu.c		 2
-rw-r--r--	arch/x86/kernel/smpboot.c		 2
-rw-r--r--	arch/x86/kernel/tlb_uv.c		 2
-rw-r--r--	arch/x86/kernel/vmi_32.c		 9
-rw-r--r--	arch/x86/kernel/vmlinux_64.lds.S	 5
-rw-r--r--	arch/x86/kernel/vsmp_64.c		12
11 files changed, 103 insertions(+), 37 deletions(-)
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index c4bdc7f00207..cbcdb796d47f 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -296,23 +296,28 @@ static char __cpuinit *table_lookup_model(struct cpuinfo_x86 *c)
 
 __u32 cleared_cpu_caps[NCAPINTS] __cpuinitdata;
 
+void load_percpu_segment(int cpu)
+{
+#ifdef CONFIG_X86_32
+	loadsegment(fs, __KERNEL_PERCPU);
+#else
+	loadsegment(gs, 0);
+	wrmsrl(MSR_GS_BASE, (unsigned long)per_cpu(irq_stack_union.gs_base, cpu));
+#endif
+}
+
 /* Current gdt points %fs at the "master" per-cpu area: after this,
  * it's on the real one. */
-void switch_to_new_gdt(void)
+void switch_to_new_gdt(int cpu)
 {
 	struct desc_ptr gdt_descr;
-	int cpu = smp_processor_id();
 
 	gdt_descr.address = (long)get_cpu_gdt_table(cpu);
 	gdt_descr.size = GDT_SIZE - 1;
 	load_gdt(&gdt_descr);
 	/* Reload the per-cpu base */
-#ifdef CONFIG_X86_32
-	loadsegment(fs, __KERNEL_PERCPU);
-#else
-	loadsegment(gs, 0);
-	wrmsrl(MSR_GS_BASE, (unsigned long)per_cpu(irq_stack_union.gs_base, cpu));
-#endif
+
+	load_percpu_segment(cpu);
 }
 
 static struct cpu_dev *cpu_devs[X86_VENDOR_NUM] = {};
@@ -1029,7 +1034,7 @@ void __cpuinit cpu_init(void)
 	 * and set up the GDT descriptor:
 	 */
 
-	switch_to_new_gdt();
+	switch_to_new_gdt(cpu);
 	loadsegment(fs, 0);
 
 	load_idt((const struct desc_ptr *)&idt_descr);
@@ -1131,7 +1136,7 @@ void __cpuinit cpu_init(void)
 	clear_in_cr4(X86_CR4_VME|X86_CR4_PVI|X86_CR4_TSD|X86_CR4_DE);
 
 	load_idt(&idt_descr);
-	switch_to_new_gdt();
+	switch_to_new_gdt(cpu);
 
 	/*
 	 * Set up and load the per-CPU TSS and LDT
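
Annotation: the common.c change above does two things. The duplicated #ifdef block that reloads the per-cpu segment base is hoisted into load_percpu_segment(), and switch_to_new_gdt() takes the cpu as an explicit argument instead of calling smp_processor_id() itself, presumably because smp_processor_id() can depend on per-cpu state that this very function is in the middle of setting up. A minimal user-space sketch of the same refactor pattern (names hypothetical, not kernel code):

#include <stdio.h>

/* the reload step lives in its own helper so other code can reuse it,
 * and the cpu id is passed in rather than queried internally */
static void load_percpu_segment_demo(int cpu)
{
	printf("reload per-cpu segment base for cpu %d\n", cpu);
}

static void switch_to_new_gdt_demo(int cpu)
{
	printf("load per-cpu GDT for cpu %d\n", cpu);
	load_percpu_segment_demo(cpu);
}

int main(void)
{
	switch_to_new_gdt_demo(0);	/* caller supplies the cpu, e.g. the boot cpu */
	return 0;
}
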
diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S
index 1f7d697b5c00..fbcf96b295ff 100644
--- a/arch/x86/kernel/entry_64.S
+++ b/arch/x86/kernel/entry_64.S
@@ -1143,7 +1143,7 @@ ENTRY(native_load_gs_index)
 	CFI_STARTPROC
 	pushf
 	CFI_ADJUST_CFA_OFFSET 8
-	DISABLE_INTERRUPTS(CLBR_ANY | ~(CLBR_RDI))
+	DISABLE_INTERRUPTS(CLBR_ANY & ~CLBR_RDI)
 	SWAPGS
 gs_change:
 	movl %edi,%gs
diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c
index 202514be5923..cea11c8e3049 100644
--- a/arch/x86/kernel/paravirt.c
+++ b/arch/x86/kernel/paravirt.c
@@ -44,6 +44,17 @@ void _paravirt_nop(void)
 {
 }
 
+/* identity function, which can be inlined */
+u32 _paravirt_ident_32(u32 x)
+{
+	return x;
+}
+
+u64 _paravirt_ident_64(u64 x)
+{
+	return x;
+}
+
 static void __init default_banner(void)
 {
 	printk(KERN_INFO "Booting paravirtualized kernel on %s\n",
@@ -138,9 +149,16 @@ unsigned paravirt_patch_default(u8 type, u16 clobbers, void *insnbuf,
 	if (opfunc == NULL)
 		/* If there's no function, patch it with a ud2a (BUG) */
 		ret = paravirt_patch_insns(insnbuf, len, ud2a, ud2a+sizeof(ud2a));
-	else if (opfunc == paravirt_nop)
+	else if (opfunc == _paravirt_nop)
 		/* If the operation is a nop, then nop the callsite */
 		ret = paravirt_patch_nop();
+
+	/* identity functions just return their single argument */
+	else if (opfunc == _paravirt_ident_32)
+		ret = paravirt_patch_ident_32(insnbuf, len);
+	else if (opfunc == _paravirt_ident_64)
+		ret = paravirt_patch_ident_64(insnbuf, len);
+
 	else if (type == PARAVIRT_PATCH(pv_cpu_ops.iret) ||
 		 type == PARAVIRT_PATCH(pv_cpu_ops.irq_enable_sysexit) ||
 		 type == PARAVIRT_PATCH(pv_cpu_ops.usergs_sysret32) ||
@@ -292,10 +310,10 @@ struct pv_time_ops pv_time_ops = {
 
 struct pv_irq_ops pv_irq_ops = {
 	.init_IRQ = native_init_IRQ,
-	.save_fl = native_save_fl,
-	.restore_fl = native_restore_fl,
-	.irq_disable = native_irq_disable,
-	.irq_enable = native_irq_enable,
+	.save_fl = __PV_IS_CALLEE_SAVE(native_save_fl),
+	.restore_fl = __PV_IS_CALLEE_SAVE(native_restore_fl),
+	.irq_disable = __PV_IS_CALLEE_SAVE(native_irq_disable),
+	.irq_enable = __PV_IS_CALLEE_SAVE(native_irq_enable),
 	.safe_halt = native_safe_halt,
 	.halt = native_halt,
 #ifdef CONFIG_X86_64
@@ -373,6 +391,14 @@ struct pv_apic_ops pv_apic_ops = {
 #endif
 };
 
+#if defined(CONFIG_X86_32) && !defined(CONFIG_X86_PAE)
+/* 32-bit pagetable entries */
+#define PTE_IDENT	__PV_IS_CALLEE_SAVE(_paravirt_ident_32)
+#else
+/* 64-bit pagetable entries */
+#define PTE_IDENT	__PV_IS_CALLEE_SAVE(_paravirt_ident_64)
+#endif
+
 struct pv_mmu_ops pv_mmu_ops = {
 #ifndef CONFIG_X86_64
 	.pagetable_setup_start = native_pagetable_setup_start,
@@ -424,21 +450,23 @@ struct pv_mmu_ops pv_mmu_ops = {
 	.pmd_clear = native_pmd_clear,
 #endif
 	.set_pud = native_set_pud,
-	.pmd_val = native_pmd_val,
-	.make_pmd = native_make_pmd,
+
+	.pmd_val = PTE_IDENT,
+	.make_pmd = PTE_IDENT,
 
 #if PAGETABLE_LEVELS == 4
-	.pud_val = native_pud_val,
-	.make_pud = native_make_pud,
+	.pud_val = PTE_IDENT,
+	.make_pud = PTE_IDENT,
+
 	.set_pgd = native_set_pgd,
 #endif
 #endif /* PAGETABLE_LEVELS >= 3 */
 
-	.pte_val = native_pte_val,
-	.pgd_val = native_pgd_val,
+	.pte_val = PTE_IDENT,
+	.pgd_val = PTE_IDENT,
 
-	.make_pte = native_make_pte,
-	.make_pgd = native_make_pgd,
+	.make_pte = PTE_IDENT,
+	.make_pgd = PTE_IDENT,
 
 	.dup_mmap = paravirt_nop,
 	.exit_mmap = paravirt_nop,
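
Annotation: the paravirt_patch_default() hunk above recognizes default ops by address, not by behavior: if the stored pointer is the canonical _paravirt_nop or _paravirt_ident_* implementation, the call site gets patched away entirely. A minimal user-space sketch of that pointer-comparison dispatch (hypothetical names, not the kernel's code):

#include <stdio.h>
#include <stdint.h>

/* stand-ins for _paravirt_nop and _paravirt_ident_64 */
static void demo_nop(void) { }
static uint64_t demo_ident_64(uint64_t x) { return x; }

/* hypothetical dispatcher: reports what would be patched in */
static const char *patch_decision(void *opfunc)
{
	if (opfunc == NULL)
		return "ud2a (BUG)";
	if (opfunc == (void *)demo_nop)
		return "nop out the call site";
	if (opfunc == (void *)demo_ident_64)
		return "inline mov %rdi, %rax";
	return "keep the indirect call";
}

int main(void)
{
	printf("%s\n", patch_decision((void *)demo_ident_64));
	printf("%s\n", patch_decision(NULL));
	return 0;
}
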
diff --git a/arch/x86/kernel/paravirt_patch_32.c b/arch/x86/kernel/paravirt_patch_32.c
index 9fe644f4861d..d9f32e6d6ab6 100644
--- a/arch/x86/kernel/paravirt_patch_32.c
+++ b/arch/x86/kernel/paravirt_patch_32.c
@@ -12,6 +12,18 @@ DEF_NATIVE(pv_mmu_ops, read_cr3, "mov %cr3, %eax");
 DEF_NATIVE(pv_cpu_ops, clts, "clts");
 DEF_NATIVE(pv_cpu_ops, read_tsc, "rdtsc");
 
+unsigned paravirt_patch_ident_32(void *insnbuf, unsigned len)
+{
+	/* arg in %eax, return in %eax */
+	return 0;
+}
+
+unsigned paravirt_patch_ident_64(void *insnbuf, unsigned len)
+{
+	/* arg in %edx:%eax, return in %edx:%eax */
+	return 0;
+}
+
 unsigned native_patch(u8 type, u16 clobbers, void *ibuf,
 		      unsigned long addr, unsigned len)
 {
diff --git a/arch/x86/kernel/paravirt_patch_64.c b/arch/x86/kernel/paravirt_patch_64.c
index 061d01df9ae6..3f08f34f93eb 100644
--- a/arch/x86/kernel/paravirt_patch_64.c
+++ b/arch/x86/kernel/paravirt_patch_64.c
@@ -19,6 +19,21 @@ DEF_NATIVE(pv_cpu_ops, usergs_sysret64, "swapgs; sysretq");
 DEF_NATIVE(pv_cpu_ops, usergs_sysret32, "swapgs; sysretl");
 DEF_NATIVE(pv_cpu_ops, swapgs, "swapgs");
 
+DEF_NATIVE(, mov32, "mov %edi, %eax");
+DEF_NATIVE(, mov64, "mov %rdi, %rax");
+
+unsigned paravirt_patch_ident_32(void *insnbuf, unsigned len)
+{
+	return paravirt_patch_insns(insnbuf, len,
+				    start__mov32, end__mov32);
+}
+
+unsigned paravirt_patch_ident_64(void *insnbuf, unsigned len)
+{
+	return paravirt_patch_insns(insnbuf, len,
+				    start__mov64, end__mov64);
+}
+
 unsigned native_patch(u8 type, u16 clobbers, void *ibuf,
 		      unsigned long addr, unsigned len)
 {
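
Annotation: DEF_NATIVE emits the template bytes between start__/end__ labels, and paravirt_patch_insns() copies that template over the call site when it fits. A rough user-space sketch of that copy-if-it-fits contract (simplified and assumed from context, not the kernel source):

#include <stdio.h>
#include <string.h>

/* sketch of the paravirt_patch_insns() idea: copy the native
 * instruction template into the call-site buffer if it fits,
 * and report how many bytes were handled */
static unsigned patch_insns(void *insnbuf, unsigned len,
			    const unsigned char *start,
			    const unsigned char *end)
{
	unsigned insn_len = (unsigned)(end - start);

	if (start == NULL || insn_len > len)
		return len;	/* template missing or too big: leave the site alone */
	memcpy(insnbuf, start, insn_len);
	return insn_len;
}

int main(void)
{
	/* "mov %rdi, %rax" encodes as 48 89 f8: the whole 64-bit
	 * identity op collapses to three inlined bytes */
	static const unsigned char mov64[] = { 0x48, 0x89, 0xf8 };
	unsigned char site[8];
	unsigned n = patch_insns(site, sizeof(site), mov64, mov64 + 3);

	printf("patched %u byte(s)\n", n);
	return 0;
}
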
diff --git a/arch/x86/kernel/setup_percpu.c b/arch/x86/kernel/setup_percpu.c
index 0d1e7ac439f4..ef91747bbed5 100644
--- a/arch/x86/kernel/setup_percpu.c
+++ b/arch/x86/kernel/setup_percpu.c
@@ -122,7 +122,7 @@ void __init setup_per_cpu_areas(void)
 		 * area. Reload any changed state for the boot CPU.
 		 */
 		if (cpu == boot_cpu_id)
-			switch_to_new_gdt();
+			switch_to_new_gdt(cpu);
 
 		DBG("PERCPU: cpu %4d %p\n", cpu, ptr);
 	}
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index 96f7d304f5c9..af57f88186e7 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -1188,7 +1188,7 @@ out:
 void __init native_smp_prepare_boot_cpu(void)
 {
 	int me = smp_processor_id();
-	switch_to_new_gdt();
+	switch_to_new_gdt(me);
 	/* already set me in cpu_online_mask in boot_cpu_init() */
 	cpumask_set_cpu(me, cpu_callout_mask);
 	per_cpu(cpu_state, me) = CPU_ONLINE;
diff --git a/arch/x86/kernel/tlb_uv.c b/arch/x86/kernel/tlb_uv.c
index 755ede02b13f..f396e61bcb34 100644
--- a/arch/x86/kernel/tlb_uv.c
+++ b/arch/x86/kernel/tlb_uv.c
@@ -259,7 +259,7 @@ const struct cpumask *uv_flush_send_and_wait(int cpu, int this_blade,
 	 * the cpu's, all of which are still in the mask.
 	 */
 		__get_cpu_var(ptcstats).ptc_i++;
-		return 0;
+		return flush_mask;
 	}
 
 	/*
diff --git a/arch/x86/kernel/vmi_32.c b/arch/x86/kernel/vmi_32.c
index 1d3302cc2ddf..eb9e7347928e 100644
--- a/arch/x86/kernel/vmi_32.c
+++ b/arch/x86/kernel/vmi_32.c
@@ -670,10 +670,11 @@ static inline int __init activate_vmi(void)
 	para_fill(pv_mmu_ops.write_cr2, SetCR2);
 	para_fill(pv_mmu_ops.write_cr3, SetCR3);
 	para_fill(pv_cpu_ops.write_cr4, SetCR4);
-	para_fill(pv_irq_ops.save_fl, GetInterruptMask);
-	para_fill(pv_irq_ops.restore_fl, SetInterruptMask);
-	para_fill(pv_irq_ops.irq_disable, DisableInterrupts);
-	para_fill(pv_irq_ops.irq_enable, EnableInterrupts);
+
+	para_fill(pv_irq_ops.save_fl.func, GetInterruptMask);
+	para_fill(pv_irq_ops.restore_fl.func, SetInterruptMask);
+	para_fill(pv_irq_ops.irq_disable.func, DisableInterrupts);
+	para_fill(pv_irq_ops.irq_enable.func, EnableInterrupts);
 
 	para_fill(pv_cpu_ops.wbinvd, WBINVD);
 	para_fill(pv_cpu_ops.read_tsc, RDTSC);
diff --git a/arch/x86/kernel/vmlinux_64.lds.S b/arch/x86/kernel/vmlinux_64.lds.S
index c9740996430a..07f62d287ff0 100644
--- a/arch/x86/kernel/vmlinux_64.lds.S
+++ b/arch/x86/kernel/vmlinux_64.lds.S
@@ -22,6 +22,7 @@ PHDRS {
 #ifdef CONFIG_SMP
 	percpu PT_LOAD FLAGS(7);	/* RWE */
 #endif
+	data.init2 PT_LOAD FLAGS(7);	/* RWE */
 	note PT_NOTE FLAGS(0);	/* ___ */
 }
 SECTIONS
@@ -215,7 +216,7 @@ SECTIONS
 	/*
 	 * percpu offsets are zero-based on SMP. PERCPU_VADDR() changes the
 	 * output PHDR, so the next output section - __data_nosave - should
-	 * switch it back to data.init. Also, pda should be at the head of
+	 * start another section data.init2. Also, pda should be at the head of
 	 * percpu area. Preallocate it and define the percpu offset symbol
 	 * so that it can be accessed as a percpu variable.
 	 */
@@ -232,7 +233,7 @@ SECTIONS
 	__nosave_begin = .;
 	.data_nosave : AT(ADDR(.data_nosave) - LOAD_OFFSET) {
 		*(.data.nosave)
-	} :data.init	/* switch back to data.init, see PERCPU_VADDR() above */
+	} :data.init2	/* use another section data.init2, see PERCPU_VADDR() above */
 	. = ALIGN(PAGE_SIZE);
 	__nosave_end = .;
 
diff --git a/arch/x86/kernel/vsmp_64.c b/arch/x86/kernel/vsmp_64.c
index a688f3bfaec2..c609205df594 100644
--- a/arch/x86/kernel/vsmp_64.c
+++ b/arch/x86/kernel/vsmp_64.c
@@ -37,6 +37,7 @@ static unsigned long vsmp_save_fl(void)
 	flags &= ~X86_EFLAGS_IF;
 	return flags;
 }
+PV_CALLEE_SAVE_REGS_THUNK(vsmp_save_fl);
 
 static void vsmp_restore_fl(unsigned long flags)
 {
@@ -46,6 +47,7 @@ static void vsmp_restore_fl(unsigned long flags)
 		flags |= X86_EFLAGS_AC;
 	native_restore_fl(flags);
 }
+PV_CALLEE_SAVE_REGS_THUNK(vsmp_restore_fl);
 
 static void vsmp_irq_disable(void)
 {
@@ -53,6 +55,7 @@ static void vsmp_irq_disable(void)
 
 	native_restore_fl((flags & ~X86_EFLAGS_IF) | X86_EFLAGS_AC);
 }
+PV_CALLEE_SAVE_REGS_THUNK(vsmp_irq_disable);
 
 static void vsmp_irq_enable(void)
 {
@@ -60,6 +63,7 @@ static void vsmp_irq_enable(void)
 
 	native_restore_fl((flags | X86_EFLAGS_IF) & (~X86_EFLAGS_AC));
 }
+PV_CALLEE_SAVE_REGS_THUNK(vsmp_irq_enable);
 
 static unsigned __init_or_module vsmp_patch(u8 type, u16 clobbers, void *ibuf,
 					    unsigned long addr, unsigned len)
@@ -90,10 +94,10 @@ static void __init set_vsmp_pv_ops(void)
 	       cap, ctl);
 	if (cap & ctl & (1 << 4)) {
 		/* Setup irq ops and turn on vSMP IRQ fastpath handling */
-		pv_irq_ops.irq_disable = vsmp_irq_disable;
-		pv_irq_ops.irq_enable = vsmp_irq_enable;
-		pv_irq_ops.save_fl = vsmp_save_fl;
-		pv_irq_ops.restore_fl = vsmp_restore_fl;
+		pv_irq_ops.irq_disable = PV_CALLEE_SAVE(vsmp_irq_disable);
+		pv_irq_ops.irq_enable = PV_CALLEE_SAVE(vsmp_irq_enable);
+		pv_irq_ops.save_fl = PV_CALLEE_SAVE(vsmp_save_fl);
+		pv_irq_ops.restore_fl = PV_CALLEE_SAVE(vsmp_restore_fl);
 		pv_init_ops.patch = vsmp_patch;
 
 		ctl &= ~(1 << 4);
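
Annotation: all four vsmp irq ops now go through the callee-save machinery. PV_CALLEE_SAVE_REGS_THUNK() generates an assembly thunk that preserves the caller-clobbered registers around the C function, and PV_CALLEE_SAVE() wraps the result in the struct the op slots now expect, which is also why vmi_32.c above fills pv_irq_ops.save_fl.func rather than pv_irq_ops.save_fl. A toy sketch of the wrapper shape (simplified; the real thunk is generated in asm, and these names are demo stand-ins):

#include <stdio.h>

/* toy model of the callee-save wrapper: the op slot holds a wrapped
 * pointer (ideally to the register-preserving thunk) rather than a
 * bare function pointer */
struct callee_save {
	unsigned long (*func)(void);
};

#define DEMO_CALLEE_SAVE(f) ((struct callee_save){ (f) })

static unsigned long vsmp_save_fl_demo(void)
{
	return 0x200;	/* pretend EFLAGS.IF is set */
}

int main(void)
{
	struct callee_save save_fl = DEMO_CALLEE_SAVE(vsmp_save_fl_demo);

	printf("flags = %#lx\n", save_fl.func());
	return 0;
}
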