about summary refs log tree commit diff stats
path: root/arch/x86
diff options
context:
space:
mode:
authorJeremy Fitzhardinge <jeremy@xensource.com>2007-10-16 14:51:29 -0400
committerJeremy Fitzhardinge <jeremy@goop.org>2007-10-16 14:51:29 -0400
commit93b1eab3d29e7ea32ee583de3362da84db06ded8 (patch)
tree8dc7eb61d4c65a48f9ce21a49e392f4967185cfd /arch/x86
parentab9c232286c2b77be78441c2d8396500b045777e (diff)
paravirt: refactor struct paravirt_ops into smaller pv_*_ops
This patch refactors the paravirt_ops structure into groups of functionally related ops: pv_info - random info, rather than function entrypoints pv_init_ops - functions used at boot time (some for module_init too) pv_misc_ops - lazy mode, which didn't fit well anywhere else pv_time_ops - time-related functions pv_cpu_ops - various privileged instruction ops pv_irq_ops - operations for managing interrupt state pv_apic_ops - APIC operations pv_mmu_ops - operations for managing pagetables There are several motivations for this: 1. Some of these ops will be general to all x86, and some will be i386/x86-64 specific. This makes it easier to share common stuff while allowing separate implementations where needed. 2. At the moment we must export all of paravirt_ops, but modules only need selected parts of it. This allows us to export on a case by case basis (and also choose which export license we want to apply). 3. Functional groupings make things a bit more readable. Struct paravirt_ops is now only used as a template to generate patch-site identifiers, and to extract function pointers for inserting into jmp/calls when patching. It is only instantiated when needed. Signed-off-by: Jeremy Fitzhardinge <jeremy@xensource.com> Signed-off-by: Rusty Russell <rusty@rustcorp.com.au> Cc: Andi Kleen <ak@suse.de> Cc: Zach Amsden <zach@vmware.com> Cc: Avi Kivity <avi@qumranet.com> Cc: Anthony Liguori <aliguori@us.ibm.com> Cc: "Glauber de Oliveira Costa" <glommer@gmail.com> Cc: Jun Nakajima <jun.nakajima@intel.com>
Diffstat (limited to 'arch/x86')
-rw-r--r--arch/x86/kernel/alternative.c4
-rw-r--r--arch/x86/kernel/asm-offsets_32.c14
-rw-r--r--arch/x86/kernel/entry_32.S2
-rw-r--r--arch/x86/kernel/paravirt_32.c174
-rw-r--r--arch/x86/kernel/vmi_32.c164
-rw-r--r--arch/x86/xen/enlighten.c98
6 files changed, 257 insertions, 199 deletions
diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c
index bd72d94e713e..63c55148dd05 100644
--- a/arch/x86/kernel/alternative.c
+++ b/arch/x86/kernel/alternative.c
@@ -368,8 +368,8 @@ void apply_paravirt(struct paravirt_patch_site *start,
368 BUG_ON(p->len > MAX_PATCH_LEN); 368 BUG_ON(p->len > MAX_PATCH_LEN);
369 /* prep the buffer with the original instructions */ 369 /* prep the buffer with the original instructions */
370 memcpy(insnbuf, p->instr, p->len); 370 memcpy(insnbuf, p->instr, p->len);
371 used = paravirt_ops.patch(p->instrtype, p->clobbers, insnbuf, 371 used = pv_init_ops.patch(p->instrtype, p->clobbers, insnbuf,
372 (unsigned long)p->instr, p->len); 372 (unsigned long)p->instr, p->len);
373 373
374 BUG_ON(used > p->len); 374 BUG_ON(used > p->len);
375 375
diff --git a/arch/x86/kernel/asm-offsets_32.c b/arch/x86/kernel/asm-offsets_32.c
index 8029742c0fc1..f1b7cdda82b3 100644
--- a/arch/x86/kernel/asm-offsets_32.c
+++ b/arch/x86/kernel/asm-offsets_32.c
@@ -116,12 +116,14 @@ void foo(void)
116 116
117#ifdef CONFIG_PARAVIRT 117#ifdef CONFIG_PARAVIRT
118 BLANK(); 118 BLANK();
119 OFFSET(PARAVIRT_enabled, paravirt_ops, paravirt_enabled); 119 OFFSET(PARAVIRT_enabled, pv_info, paravirt_enabled);
120 OFFSET(PARAVIRT_irq_disable, paravirt_ops, irq_disable); 120 OFFSET(PARAVIRT_PATCH_pv_cpu_ops, paravirt_patch_template, pv_cpu_ops);
121 OFFSET(PARAVIRT_irq_enable, paravirt_ops, irq_enable); 121 OFFSET(PARAVIRT_PATCH_pv_irq_ops, paravirt_patch_template, pv_irq_ops);
122 OFFSET(PARAVIRT_irq_enable_sysexit, paravirt_ops, irq_enable_sysexit); 122 OFFSET(PV_IRQ_irq_disable, pv_irq_ops, irq_disable);
123 OFFSET(PARAVIRT_iret, paravirt_ops, iret); 123 OFFSET(PV_IRQ_irq_enable, pv_irq_ops, irq_enable);
124 OFFSET(PARAVIRT_read_cr0, paravirt_ops, read_cr0); 124 OFFSET(PV_CPU_iret, pv_cpu_ops, iret);
125 OFFSET(PV_CPU_irq_enable_sysexit, pv_cpu_ops, irq_enable_sysexit);
126 OFFSET(PV_CPU_read_cr0, pv_cpu_ops, read_cr0);
125#endif 127#endif
126 128
127#ifdef CONFIG_XEN 129#ifdef CONFIG_XEN
diff --git a/arch/x86/kernel/entry_32.S b/arch/x86/kernel/entry_32.S
index 290b7bc82da3..1f2062e94d82 100644
--- a/arch/x86/kernel/entry_32.S
+++ b/arch/x86/kernel/entry_32.S
@@ -434,7 +434,7 @@ ldt_ss:
434 * is still available to implement the setting of the high 434 * is still available to implement the setting of the high
435 * 16-bits in the INTERRUPT_RETURN paravirt-op. 435 * 16-bits in the INTERRUPT_RETURN paravirt-op.
436 */ 436 */
437 cmpl $0, paravirt_ops+PARAVIRT_enabled 437 cmpl $0, pv_info+PARAVIRT_enabled
438 jne restore_nocheck 438 jne restore_nocheck
439#endif 439#endif
440 440
diff --git a/arch/x86/kernel/paravirt_32.c b/arch/x86/kernel/paravirt_32.c
index 739cfb207dd7..fa412515af79 100644
--- a/arch/x86/kernel/paravirt_32.c
+++ b/arch/x86/kernel/paravirt_32.c
@@ -42,32 +42,33 @@ void _paravirt_nop(void)
42static void __init default_banner(void) 42static void __init default_banner(void)
43{ 43{
44 printk(KERN_INFO "Booting paravirtualized kernel on %s\n", 44 printk(KERN_INFO "Booting paravirtualized kernel on %s\n",
45 paravirt_ops.name); 45 pv_info.name);
46} 46}
47 47
48char *memory_setup(void) 48char *memory_setup(void)
49{ 49{
50 return paravirt_ops.memory_setup(); 50 return pv_init_ops.memory_setup();
51} 51}
52 52
53/* Simple instruction patching code. */ 53/* Simple instruction patching code. */
54#define DEF_NATIVE(name, code) \ 54#define DEF_NATIVE(ops, name, code) \
55 extern const char start_##name[], end_##name[]; \ 55 extern const char start_##ops##_##name[], end_##ops##_##name[]; \
56 asm("start_" #name ": " code "; end_" #name ":") 56 asm("start_" #ops "_" #name ": " code "; end_" #ops "_" #name ":")
57 57
58DEF_NATIVE(irq_disable, "cli"); 58DEF_NATIVE(pv_irq_ops, irq_disable, "cli");
59DEF_NATIVE(irq_enable, "sti"); 59DEF_NATIVE(pv_irq_ops, irq_enable, "sti");
60DEF_NATIVE(restore_fl, "push %eax; popf"); 60DEF_NATIVE(pv_irq_ops, restore_fl, "push %eax; popf");
61DEF_NATIVE(save_fl, "pushf; pop %eax"); 61DEF_NATIVE(pv_irq_ops, save_fl, "pushf; pop %eax");
62DEF_NATIVE(iret, "iret"); 62DEF_NATIVE(pv_cpu_ops, iret, "iret");
63DEF_NATIVE(irq_enable_sysexit, "sti; sysexit"); 63DEF_NATIVE(pv_cpu_ops, irq_enable_sysexit, "sti; sysexit");
64DEF_NATIVE(read_cr2, "mov %cr2, %eax"); 64DEF_NATIVE(pv_mmu_ops, read_cr2, "mov %cr2, %eax");
65DEF_NATIVE(write_cr3, "mov %eax, %cr3"); 65DEF_NATIVE(pv_mmu_ops, write_cr3, "mov %eax, %cr3");
66DEF_NATIVE(read_cr3, "mov %cr3, %eax"); 66DEF_NATIVE(pv_mmu_ops, read_cr3, "mov %cr3, %eax");
67DEF_NATIVE(clts, "clts"); 67DEF_NATIVE(pv_cpu_ops, clts, "clts");
68DEF_NATIVE(read_tsc, "rdtsc"); 68DEF_NATIVE(pv_cpu_ops, read_tsc, "rdtsc");
69 69
70DEF_NATIVE(ud2a, "ud2a"); 70/* Undefined instruction for dealing with missing ops pointers. */
71static const unsigned char ud2a[] = { 0x0f, 0x0b };
71 72
72static unsigned native_patch(u8 type, u16 clobbers, void *ibuf, 73static unsigned native_patch(u8 type, u16 clobbers, void *ibuf,
73 unsigned long addr, unsigned len) 74 unsigned long addr, unsigned len)
@@ -76,37 +77,29 @@ static unsigned native_patch(u8 type, u16 clobbers, void *ibuf,
76 unsigned ret; 77 unsigned ret;
77 78
78 switch(type) { 79 switch(type) {
79#define SITE(x) case PARAVIRT_PATCH(x): start = start_##x; end = end_##x; goto patch_site 80#define SITE(ops, x) \
80 SITE(irq_disable); 81 case PARAVIRT_PATCH(ops.x): \
81 SITE(irq_enable); 82 start = start_##ops##_##x; \
82 SITE(restore_fl); 83 end = end_##ops##_##x; \
83 SITE(save_fl); 84 goto patch_site
84 SITE(iret); 85
85 SITE(irq_enable_sysexit); 86 SITE(pv_irq_ops, irq_disable);
86 SITE(read_cr2); 87 SITE(pv_irq_ops, irq_enable);
87 SITE(read_cr3); 88 SITE(pv_irq_ops, restore_fl);
88 SITE(write_cr3); 89 SITE(pv_irq_ops, save_fl);
89 SITE(clts); 90 SITE(pv_cpu_ops, iret);
90 SITE(read_tsc); 91 SITE(pv_cpu_ops, irq_enable_sysexit);
92 SITE(pv_mmu_ops, read_cr2);
93 SITE(pv_mmu_ops, read_cr3);
94 SITE(pv_mmu_ops, write_cr3);
95 SITE(pv_cpu_ops, clts);
96 SITE(pv_cpu_ops, read_tsc);
91#undef SITE 97#undef SITE
92 98
93 patch_site: 99 patch_site:
94 ret = paravirt_patch_insns(ibuf, len, start, end); 100 ret = paravirt_patch_insns(ibuf, len, start, end);
95 break; 101 break;
96 102
97 case PARAVIRT_PATCH(make_pgd):
98 case PARAVIRT_PATCH(make_pte):
99 case PARAVIRT_PATCH(pgd_val):
100 case PARAVIRT_PATCH(pte_val):
101#ifdef CONFIG_X86_PAE
102 case PARAVIRT_PATCH(make_pmd):
103 case PARAVIRT_PATCH(pmd_val):
104#endif
105 /* These functions end up returning exactly what
106 they're passed, in the same registers. */
107 ret = paravirt_patch_nop();
108 break;
109
110 default: 103 default:
111 ret = paravirt_patch_default(type, clobbers, ibuf, addr, len); 104 ret = paravirt_patch_default(type, clobbers, ibuf, addr, len);
112 break; 105 break;
@@ -150,7 +143,7 @@ unsigned paravirt_patch_call(void *insnbuf,
150 return 5; 143 return 5;
151} 144}
152 145
153unsigned paravirt_patch_jmp(const void *target, void *insnbuf, 146unsigned paravirt_patch_jmp(void *insnbuf, const void *target,
154 unsigned long addr, unsigned len) 147 unsigned long addr, unsigned len)
155{ 148{
156 struct branch *b = insnbuf; 149 struct branch *b = insnbuf;
@@ -165,22 +158,38 @@ unsigned paravirt_patch_jmp(const void *target, void *insnbuf,
165 return 5; 158 return 5;
166} 159}
167 160
161/* Neat trick to map patch type back to the call within the
162 * corresponding structure. */
163static void *get_call_destination(u8 type)
164{
165 struct paravirt_patch_template tmpl = {
166 .pv_init_ops = pv_init_ops,
167 .pv_misc_ops = pv_misc_ops,
168 .pv_time_ops = pv_time_ops,
169 .pv_cpu_ops = pv_cpu_ops,
170 .pv_irq_ops = pv_irq_ops,
171 .pv_apic_ops = pv_apic_ops,
172 .pv_mmu_ops = pv_mmu_ops,
173 };
174 return *((void **)&tmpl + type);
175}
176
168unsigned paravirt_patch_default(u8 type, u16 clobbers, void *insnbuf, 177unsigned paravirt_patch_default(u8 type, u16 clobbers, void *insnbuf,
169 unsigned long addr, unsigned len) 178 unsigned long addr, unsigned len)
170{ 179{
171 void *opfunc = *((void **)&paravirt_ops + type); 180 void *opfunc = get_call_destination(type);
172 unsigned ret; 181 unsigned ret;
173 182
174 if (opfunc == NULL) 183 if (opfunc == NULL)
175 /* If there's no function, patch it with a ud2a (BUG) */ 184 /* If there's no function, patch it with a ud2a (BUG) */
176 ret = paravirt_patch_insns(insnbuf, len, start_ud2a, end_ud2a); 185 ret = paravirt_patch_insns(insnbuf, len, ud2a, ud2a+sizeof(ud2a));
177 else if (opfunc == paravirt_nop) 186 else if (opfunc == paravirt_nop)
178 /* If the operation is a nop, then nop the callsite */ 187 /* If the operation is a nop, then nop the callsite */
179 ret = paravirt_patch_nop(); 188 ret = paravirt_patch_nop();
180 else if (type == PARAVIRT_PATCH(iret) || 189 else if (type == PARAVIRT_PATCH(pv_cpu_ops.iret) ||
181 type == PARAVIRT_PATCH(irq_enable_sysexit)) 190 type == PARAVIRT_PATCH(pv_cpu_ops.irq_enable_sysexit))
182 /* If operation requires a jmp, then jmp */ 191 /* If operation requires a jmp, then jmp */
183 ret = paravirt_patch_jmp(opfunc, insnbuf, addr, len); 192 ret = paravirt_patch_jmp(insnbuf, opfunc, addr, len);
184 else 193 else
185 /* Otherwise call the function; assume target could 194 /* Otherwise call the function; assume target could
186 clobber any caller-save reg */ 195 clobber any caller-save reg */
@@ -205,7 +214,7 @@ unsigned paravirt_patch_insns(void *insnbuf, unsigned len,
205 214
206void init_IRQ(void) 215void init_IRQ(void)
207{ 216{
208 paravirt_ops.init_IRQ(); 217 pv_irq_ops.init_IRQ();
209} 218}
210 219
211static void native_flush_tlb(void) 220static void native_flush_tlb(void)
@@ -233,7 +242,7 @@ extern void native_irq_enable_sysexit(void);
233 242
234static int __init print_banner(void) 243static int __init print_banner(void)
235{ 244{
236 paravirt_ops.banner(); 245 pv_init_ops.banner();
237 return 0; 246 return 0;
238} 247}
239core_initcall(print_banner); 248core_initcall(print_banner);
@@ -273,47 +282,53 @@ int paravirt_disable_iospace(void)
273 return ret; 282 return ret;
274} 283}
275 284
276struct paravirt_ops paravirt_ops = { 285struct pv_info pv_info = {
277 .name = "bare hardware", 286 .name = "bare hardware",
278 .paravirt_enabled = 0, 287 .paravirt_enabled = 0,
279 .kernel_rpl = 0, 288 .kernel_rpl = 0,
280 .shared_kernel_pmd = 1, /* Only used when CONFIG_X86_PAE is set */ 289 .shared_kernel_pmd = 1, /* Only used when CONFIG_X86_PAE is set */
290};
281 291
282 .patch = native_patch, 292struct pv_init_ops pv_init_ops = {
293 .patch = native_patch,
283 .banner = default_banner, 294 .banner = default_banner,
284 .arch_setup = paravirt_nop, 295 .arch_setup = paravirt_nop,
285 .memory_setup = machine_specific_memory_setup, 296 .memory_setup = machine_specific_memory_setup,
297};
298
299struct pv_time_ops pv_time_ops = {
300 .time_init = hpet_time_init,
286 .get_wallclock = native_get_wallclock, 301 .get_wallclock = native_get_wallclock,
287 .set_wallclock = native_set_wallclock, 302 .set_wallclock = native_set_wallclock,
288 .time_init = hpet_time_init, 303 .sched_clock = native_sched_clock,
304 .get_cpu_khz = native_calculate_cpu_khz,
305};
306
307struct pv_irq_ops pv_irq_ops = {
289 .init_IRQ = native_init_IRQ, 308 .init_IRQ = native_init_IRQ,
309 .save_fl = native_save_fl,
310 .restore_fl = native_restore_fl,
311 .irq_disable = native_irq_disable,
312 .irq_enable = native_irq_enable,
313 .safe_halt = native_safe_halt,
314 .halt = native_halt,
315};
290 316
317struct pv_cpu_ops pv_cpu_ops = {
291 .cpuid = native_cpuid, 318 .cpuid = native_cpuid,
292 .get_debugreg = native_get_debugreg, 319 .get_debugreg = native_get_debugreg,
293 .set_debugreg = native_set_debugreg, 320 .set_debugreg = native_set_debugreg,
294 .clts = native_clts, 321 .clts = native_clts,
295 .read_cr0 = native_read_cr0, 322 .read_cr0 = native_read_cr0,
296 .write_cr0 = native_write_cr0, 323 .write_cr0 = native_write_cr0,
297 .read_cr2 = native_read_cr2,
298 .write_cr2 = native_write_cr2,
299 .read_cr3 = native_read_cr3,
300 .write_cr3 = native_write_cr3,
301 .read_cr4 = native_read_cr4, 324 .read_cr4 = native_read_cr4,
302 .read_cr4_safe = native_read_cr4_safe, 325 .read_cr4_safe = native_read_cr4_safe,
303 .write_cr4 = native_write_cr4, 326 .write_cr4 = native_write_cr4,
304 .save_fl = native_save_fl,
305 .restore_fl = native_restore_fl,
306 .irq_disable = native_irq_disable,
307 .irq_enable = native_irq_enable,
308 .safe_halt = native_safe_halt,
309 .halt = native_halt,
310 .wbinvd = native_wbinvd, 327 .wbinvd = native_wbinvd,
311 .read_msr = native_read_msr_safe, 328 .read_msr = native_read_msr_safe,
312 .write_msr = native_write_msr_safe, 329 .write_msr = native_write_msr_safe,
313 .read_tsc = native_read_tsc, 330 .read_tsc = native_read_tsc,
314 .read_pmc = native_read_pmc, 331 .read_pmc = native_read_pmc,
315 .sched_clock = native_sched_clock,
316 .get_cpu_khz = native_calculate_cpu_khz,
317 .load_tr_desc = native_load_tr_desc, 332 .load_tr_desc = native_load_tr_desc,
318 .set_ldt = native_set_ldt, 333 .set_ldt = native_set_ldt,
319 .load_gdt = native_load_gdt, 334 .load_gdt = native_load_gdt,
@@ -327,9 +342,14 @@ struct paravirt_ops paravirt_ops = {
327 .write_idt_entry = write_dt_entry, 342 .write_idt_entry = write_dt_entry,
328 .load_esp0 = native_load_esp0, 343 .load_esp0 = native_load_esp0,
329 344
345 .irq_enable_sysexit = native_irq_enable_sysexit,
346 .iret = native_iret,
347
330 .set_iopl_mask = native_set_iopl_mask, 348 .set_iopl_mask = native_set_iopl_mask,
331 .io_delay = native_io_delay, 349 .io_delay = native_io_delay,
350};
332 351
352struct pv_apic_ops pv_apic_ops = {
333#ifdef CONFIG_X86_LOCAL_APIC 353#ifdef CONFIG_X86_LOCAL_APIC
334 .apic_write = native_apic_write, 354 .apic_write = native_apic_write,
335 .apic_write_atomic = native_apic_write_atomic, 355 .apic_write_atomic = native_apic_write_atomic,
@@ -338,11 +358,21 @@ struct paravirt_ops paravirt_ops = {
338 .setup_secondary_clock = setup_secondary_APIC_clock, 358 .setup_secondary_clock = setup_secondary_APIC_clock,
339 .startup_ipi_hook = paravirt_nop, 359 .startup_ipi_hook = paravirt_nop,
340#endif 360#endif
361};
362
363struct pv_misc_ops pv_misc_ops = {
341 .set_lazy_mode = paravirt_nop, 364 .set_lazy_mode = paravirt_nop,
365};
342 366
367struct pv_mmu_ops pv_mmu_ops = {
343 .pagetable_setup_start = native_pagetable_setup_start, 368 .pagetable_setup_start = native_pagetable_setup_start,
344 .pagetable_setup_done = native_pagetable_setup_done, 369 .pagetable_setup_done = native_pagetable_setup_done,
345 370
371 .read_cr2 = native_read_cr2,
372 .write_cr2 = native_write_cr2,
373 .read_cr3 = native_read_cr3,
374 .write_cr3 = native_write_cr3,
375
346 .flush_tlb_user = native_flush_tlb, 376 .flush_tlb_user = native_flush_tlb,
347 .flush_tlb_kernel = native_flush_tlb_global, 377 .flush_tlb_kernel = native_flush_tlb_global,
348 .flush_tlb_single = native_flush_tlb_single, 378 .flush_tlb_single = native_flush_tlb_single,
@@ -381,12 +411,14 @@ struct paravirt_ops paravirt_ops = {
381 .make_pte = native_make_pte, 411 .make_pte = native_make_pte,
382 .make_pgd = native_make_pgd, 412 .make_pgd = native_make_pgd,
383 413
384 .irq_enable_sysexit = native_irq_enable_sysexit,
385 .iret = native_iret,
386
387 .dup_mmap = paravirt_nop, 414 .dup_mmap = paravirt_nop,
388 .exit_mmap = paravirt_nop, 415 .exit_mmap = paravirt_nop,
389 .activate_mm = paravirt_nop, 416 .activate_mm = paravirt_nop,
390}; 417};
391 418
392EXPORT_SYMBOL(paravirt_ops); 419EXPORT_SYMBOL_GPL(pv_time_ops);
420EXPORT_SYMBOL_GPL(pv_cpu_ops);
421EXPORT_SYMBOL_GPL(pv_mmu_ops);
422EXPORT_SYMBOL_GPL(pv_apic_ops);
423EXPORT_SYMBOL_GPL(pv_info);
424EXPORT_SYMBOL (pv_irq_ops);
diff --git a/arch/x86/kernel/vmi_32.c b/arch/x86/kernel/vmi_32.c
index 18673e0f193b..67cea5c2e3e0 100644
--- a/arch/x86/kernel/vmi_32.c
+++ b/arch/x86/kernel/vmi_32.c
@@ -134,21 +134,21 @@ static unsigned vmi_patch(u8 type, u16 clobbers, void *insns,
134 unsigned long eip, unsigned len) 134 unsigned long eip, unsigned len)
135{ 135{
136 switch (type) { 136 switch (type) {
137 case PARAVIRT_PATCH(irq_disable): 137 case PARAVIRT_PATCH(pv_irq_ops.irq_disable):
138 return patch_internal(VMI_CALL_DisableInterrupts, len, 138 return patch_internal(VMI_CALL_DisableInterrupts, len,
139 insns, eip); 139 insns, eip);
140 case PARAVIRT_PATCH(irq_enable): 140 case PARAVIRT_PATCH(pv_irq_ops.irq_enable):
141 return patch_internal(VMI_CALL_EnableInterrupts, len, 141 return patch_internal(VMI_CALL_EnableInterrupts, len,
142 insns, eip); 142 insns, eip);
143 case PARAVIRT_PATCH(restore_fl): 143 case PARAVIRT_PATCH(pv_irq_ops.restore_fl):
144 return patch_internal(VMI_CALL_SetInterruptMask, len, 144 return patch_internal(VMI_CALL_SetInterruptMask, len,
145 insns, eip); 145 insns, eip);
146 case PARAVIRT_PATCH(save_fl): 146 case PARAVIRT_PATCH(pv_irq_ops.save_fl):
147 return patch_internal(VMI_CALL_GetInterruptMask, len, 147 return patch_internal(VMI_CALL_GetInterruptMask, len,
148 insns, eip); 148 insns, eip);
149 case PARAVIRT_PATCH(iret): 149 case PARAVIRT_PATCH(pv_cpu_ops.iret):
150 return patch_internal(VMI_CALL_IRET, len, insns, eip); 150 return patch_internal(VMI_CALL_IRET, len, insns, eip);
151 case PARAVIRT_PATCH(irq_enable_sysexit): 151 case PARAVIRT_PATCH(pv_cpu_ops.irq_enable_sysexit):
152 return patch_internal(VMI_CALL_SYSEXIT, len, insns, eip); 152 return patch_internal(VMI_CALL_SYSEXIT, len, insns, eip);
153 default: 153 default:
154 break; 154 break;
@@ -690,9 +690,9 @@ do { \
690 reloc = call_vrom_long_func(vmi_rom, get_reloc, \ 690 reloc = call_vrom_long_func(vmi_rom, get_reloc, \
691 VMI_CALL_##vmicall); \ 691 VMI_CALL_##vmicall); \
692 if (rel->type == VMI_RELOCATION_CALL_REL) \ 692 if (rel->type == VMI_RELOCATION_CALL_REL) \
693 paravirt_ops.opname = (void *)rel->eip; \ 693 opname = (void *)rel->eip; \
694 else if (rel->type == VMI_RELOCATION_NOP) \ 694 else if (rel->type == VMI_RELOCATION_NOP) \
695 paravirt_ops.opname = (void *)vmi_nop; \ 695 opname = (void *)vmi_nop; \
696 else if (rel->type != VMI_RELOCATION_NONE) \ 696 else if (rel->type != VMI_RELOCATION_NONE) \
697 printk(KERN_WARNING "VMI: Unknown relocation " \ 697 printk(KERN_WARNING "VMI: Unknown relocation " \
698 "type %d for " #vmicall"\n",\ 698 "type %d for " #vmicall"\n",\
@@ -712,7 +712,7 @@ do { \
712 VMI_CALL_##vmicall); \ 712 VMI_CALL_##vmicall); \
713 BUG_ON(rel->type == VMI_RELOCATION_JUMP_REL); \ 713 BUG_ON(rel->type == VMI_RELOCATION_JUMP_REL); \
714 if (rel->type == VMI_RELOCATION_CALL_REL) { \ 714 if (rel->type == VMI_RELOCATION_CALL_REL) { \
715 paravirt_ops.opname = wrapper; \ 715 opname = wrapper; \
716 vmi_ops.cache = (void *)rel->eip; \ 716 vmi_ops.cache = (void *)rel->eip; \
717 } \ 717 } \
718} while (0) 718} while (0)
@@ -732,11 +732,11 @@ static inline int __init activate_vmi(void)
732 } 732 }
733 savesegment(cs, kernel_cs); 733 savesegment(cs, kernel_cs);
734 734
735 paravirt_ops.paravirt_enabled = 1; 735 pv_info.paravirt_enabled = 1;
736 paravirt_ops.kernel_rpl = kernel_cs & SEGMENT_RPL_MASK; 736 pv_info.kernel_rpl = kernel_cs & SEGMENT_RPL_MASK;
737 pv_info.name = "vmi";
737 738
738 paravirt_ops.patch = vmi_patch; 739 pv_init_ops.patch = vmi_patch;
739 paravirt_ops.name = "vmi";
740 740
741 /* 741 /*
742 * Many of these operations are ABI compatible with VMI. 742 * Many of these operations are ABI compatible with VMI.
@@ -754,26 +754,26 @@ static inline int __init activate_vmi(void)
754 */ 754 */
755 755
756 /* CPUID is special, so very special it gets wrapped like a present */ 756 /* CPUID is special, so very special it gets wrapped like a present */
757 para_wrap(cpuid, vmi_cpuid, cpuid, CPUID); 757 para_wrap(pv_cpu_ops.cpuid, vmi_cpuid, cpuid, CPUID);
758 758
759 para_fill(clts, CLTS); 759 para_fill(pv_cpu_ops.clts, CLTS);
760 para_fill(get_debugreg, GetDR); 760 para_fill(pv_cpu_ops.get_debugreg, GetDR);
761 para_fill(set_debugreg, SetDR); 761 para_fill(pv_cpu_ops.set_debugreg, SetDR);
762 para_fill(read_cr0, GetCR0); 762 para_fill(pv_cpu_ops.read_cr0, GetCR0);
763 para_fill(read_cr2, GetCR2); 763 para_fill(pv_mmu_ops.read_cr2, GetCR2);
764 para_fill(read_cr3, GetCR3); 764 para_fill(pv_mmu_ops.read_cr3, GetCR3);
765 para_fill(read_cr4, GetCR4); 765 para_fill(pv_cpu_ops.read_cr4, GetCR4);
766 para_fill(write_cr0, SetCR0); 766 para_fill(pv_cpu_ops.write_cr0, SetCR0);
767 para_fill(write_cr2, SetCR2); 767 para_fill(pv_mmu_ops.write_cr2, SetCR2);
768 para_fill(write_cr3, SetCR3); 768 para_fill(pv_mmu_ops.write_cr3, SetCR3);
769 para_fill(write_cr4, SetCR4); 769 para_fill(pv_cpu_ops.write_cr4, SetCR4);
770 para_fill(save_fl, GetInterruptMask); 770 para_fill(pv_irq_ops.save_fl, GetInterruptMask);
771 para_fill(restore_fl, SetInterruptMask); 771 para_fill(pv_irq_ops.restore_fl, SetInterruptMask);
772 para_fill(irq_disable, DisableInterrupts); 772 para_fill(pv_irq_ops.irq_disable, DisableInterrupts);
773 para_fill(irq_enable, EnableInterrupts); 773 para_fill(pv_irq_ops.irq_enable, EnableInterrupts);
774 774
775 para_fill(wbinvd, WBINVD); 775 para_fill(pv_cpu_ops.wbinvd, WBINVD);
776 para_fill(read_tsc, RDTSC); 776 para_fill(pv_cpu_ops.read_tsc, RDTSC);
777 777
778 /* The following we emulate with trap and emulate for now */ 778 /* The following we emulate with trap and emulate for now */
779 /* paravirt_ops.read_msr = vmi_rdmsr */ 779 /* paravirt_ops.read_msr = vmi_rdmsr */
@@ -781,29 +781,29 @@ static inline int __init activate_vmi(void)
781 /* paravirt_ops.rdpmc = vmi_rdpmc */ 781 /* paravirt_ops.rdpmc = vmi_rdpmc */
782 782
783 /* TR interface doesn't pass TR value, wrap */ 783 /* TR interface doesn't pass TR value, wrap */
784 para_wrap(load_tr_desc, vmi_set_tr, set_tr, SetTR); 784 para_wrap(pv_cpu_ops.load_tr_desc, vmi_set_tr, set_tr, SetTR);
785 785
786 /* LDT is special, too */ 786 /* LDT is special, too */
787 para_wrap(set_ldt, vmi_set_ldt, _set_ldt, SetLDT); 787 para_wrap(pv_cpu_ops.set_ldt, vmi_set_ldt, _set_ldt, SetLDT);
788 788
789 para_fill(load_gdt, SetGDT); 789 para_fill(pv_cpu_ops.load_gdt, SetGDT);
790 para_fill(load_idt, SetIDT); 790 para_fill(pv_cpu_ops.load_idt, SetIDT);
791 para_fill(store_gdt, GetGDT); 791 para_fill(pv_cpu_ops.store_gdt, GetGDT);
792 para_fill(store_idt, GetIDT); 792 para_fill(pv_cpu_ops.store_idt, GetIDT);
793 para_fill(store_tr, GetTR); 793 para_fill(pv_cpu_ops.store_tr, GetTR);
794 paravirt_ops.load_tls = vmi_load_tls; 794 pv_cpu_ops.load_tls = vmi_load_tls;
795 para_fill(write_ldt_entry, WriteLDTEntry); 795 para_fill(pv_cpu_ops.write_ldt_entry, WriteLDTEntry);
796 para_fill(write_gdt_entry, WriteGDTEntry); 796 para_fill(pv_cpu_ops.write_gdt_entry, WriteGDTEntry);
797 para_fill(write_idt_entry, WriteIDTEntry); 797 para_fill(pv_cpu_ops.write_idt_entry, WriteIDTEntry);
798 para_wrap(load_esp0, vmi_load_esp0, set_kernel_stack, UpdateKernelStack); 798 para_wrap(pv_cpu_ops.load_esp0, vmi_load_esp0, set_kernel_stack, UpdateKernelStack);
799 para_fill(set_iopl_mask, SetIOPLMask); 799 para_fill(pv_cpu_ops.set_iopl_mask, SetIOPLMask);
800 para_fill(io_delay, IODelay); 800 para_fill(pv_cpu_ops.io_delay, IODelay);
801 para_wrap(set_lazy_mode, vmi_set_lazy_mode, set_lazy_mode, SetLazyMode); 801 para_wrap(pv_misc_ops.set_lazy_mode, vmi_set_lazy_mode, set_lazy_mode, SetLazyMode);
802 802
803 /* user and kernel flush are just handled with different flags to FlushTLB */ 803 /* user and kernel flush are just handled with different flags to FlushTLB */
804 para_wrap(flush_tlb_user, vmi_flush_tlb_user, _flush_tlb, FlushTLB); 804 para_wrap(pv_mmu_ops.flush_tlb_user, vmi_flush_tlb_user, _flush_tlb, FlushTLB);
805 para_wrap(flush_tlb_kernel, vmi_flush_tlb_kernel, _flush_tlb, FlushTLB); 805 para_wrap(pv_mmu_ops.flush_tlb_kernel, vmi_flush_tlb_kernel, _flush_tlb, FlushTLB);
806 para_fill(flush_tlb_single, InvalPage); 806 para_fill(pv_mmu_ops.flush_tlb_single, InvalPage);
807 807
808 /* 808 /*
809 * Until a standard flag format can be agreed on, we need to 809 * Until a standard flag format can be agreed on, we need to
@@ -819,41 +819,41 @@ static inline int __init activate_vmi(void)
819#endif 819#endif
820 820
821 if (vmi_ops.set_pte) { 821 if (vmi_ops.set_pte) {
822 paravirt_ops.set_pte = vmi_set_pte; 822 pv_mmu_ops.set_pte = vmi_set_pte;
823 paravirt_ops.set_pte_at = vmi_set_pte_at; 823 pv_mmu_ops.set_pte_at = vmi_set_pte_at;
824 paravirt_ops.set_pmd = vmi_set_pmd; 824 pv_mmu_ops.set_pmd = vmi_set_pmd;
825#ifdef CONFIG_X86_PAE 825#ifdef CONFIG_X86_PAE
826 paravirt_ops.set_pte_atomic = vmi_set_pte_atomic; 826 pv_mmu_ops.set_pte_atomic = vmi_set_pte_atomic;
827 paravirt_ops.set_pte_present = vmi_set_pte_present; 827 pv_mmu_ops.set_pte_present = vmi_set_pte_present;
828 paravirt_ops.set_pud = vmi_set_pud; 828 pv_mmu_ops.set_pud = vmi_set_pud;
829 paravirt_ops.pte_clear = vmi_pte_clear; 829 pv_mmu_ops.pte_clear = vmi_pte_clear;
830 paravirt_ops.pmd_clear = vmi_pmd_clear; 830 pv_mmu_ops.pmd_clear = vmi_pmd_clear;
831#endif 831#endif
832 } 832 }
833 833
834 if (vmi_ops.update_pte) { 834 if (vmi_ops.update_pte) {
835 paravirt_ops.pte_update = vmi_update_pte; 835 pv_mmu_ops.pte_update = vmi_update_pte;
836 paravirt_ops.pte_update_defer = vmi_update_pte_defer; 836 pv_mmu_ops.pte_update_defer = vmi_update_pte_defer;
837 } 837 }
838 838
839 vmi_ops.allocate_page = vmi_get_function(VMI_CALL_AllocatePage); 839 vmi_ops.allocate_page = vmi_get_function(VMI_CALL_AllocatePage);
840 if (vmi_ops.allocate_page) { 840 if (vmi_ops.allocate_page) {
841 paravirt_ops.alloc_pt = vmi_allocate_pt; 841 pv_mmu_ops.alloc_pt = vmi_allocate_pt;
842 paravirt_ops.alloc_pd = vmi_allocate_pd; 842 pv_mmu_ops.alloc_pd = vmi_allocate_pd;
843 paravirt_ops.alloc_pd_clone = vmi_allocate_pd_clone; 843 pv_mmu_ops.alloc_pd_clone = vmi_allocate_pd_clone;
844 } 844 }
845 845
846 vmi_ops.release_page = vmi_get_function(VMI_CALL_ReleasePage); 846 vmi_ops.release_page = vmi_get_function(VMI_CALL_ReleasePage);
847 if (vmi_ops.release_page) { 847 if (vmi_ops.release_page) {
848 paravirt_ops.release_pt = vmi_release_pt; 848 pv_mmu_ops.release_pt = vmi_release_pt;
849 paravirt_ops.release_pd = vmi_release_pd; 849 pv_mmu_ops.release_pd = vmi_release_pd;
850 } 850 }
851 851
852 /* Set linear is needed in all cases */ 852 /* Set linear is needed in all cases */
853 vmi_ops.set_linear_mapping = vmi_get_function(VMI_CALL_SetLinearMapping); 853 vmi_ops.set_linear_mapping = vmi_get_function(VMI_CALL_SetLinearMapping);
854#ifdef CONFIG_HIGHPTE 854#ifdef CONFIG_HIGHPTE
855 if (vmi_ops.set_linear_mapping) 855 if (vmi_ops.set_linear_mapping)
856 paravirt_ops.kmap_atomic_pte = vmi_kmap_atomic_pte; 856 pv_mmu_ops.kmap_atomic_pte = vmi_kmap_atomic_pte;
857#endif 857#endif
858 858
859 /* 859 /*
@@ -863,17 +863,17 @@ static inline int __init activate_vmi(void)
863 * the backend. They are performance critical anyway, so requiring 863 * the backend. They are performance critical anyway, so requiring
864 * a patch is not a big problem. 864 * a patch is not a big problem.
865 */ 865 */
866 paravirt_ops.irq_enable_sysexit = (void *)0xfeedbab0; 866 pv_cpu_ops.irq_enable_sysexit = (void *)0xfeedbab0;
867 paravirt_ops.iret = (void *)0xbadbab0; 867 pv_cpu_ops.iret = (void *)0xbadbab0;
868 868
869#ifdef CONFIG_SMP 869#ifdef CONFIG_SMP
870 para_wrap(startup_ipi_hook, vmi_startup_ipi_hook, set_initial_ap_state, SetInitialAPState); 870 para_wrap(pv_apic_ops.startup_ipi_hook, vmi_startup_ipi_hook, set_initial_ap_state, SetInitialAPState);
871#endif 871#endif
872 872
873#ifdef CONFIG_X86_LOCAL_APIC 873#ifdef CONFIG_X86_LOCAL_APIC
874 para_fill(apic_read, APICRead); 874 para_fill(pv_apic_ops.apic_read, APICRead);
875 para_fill(apic_write, APICWrite); 875 para_fill(pv_apic_ops.apic_write, APICWrite);
876 para_fill(apic_write_atomic, APICWrite); 876 para_fill(pv_apic_ops.apic_write_atomic, APICWrite);
877#endif 877#endif
878 878
879 /* 879 /*
@@ -891,15 +891,15 @@ static inline int __init activate_vmi(void)
891 vmi_timer_ops.set_alarm = vmi_get_function(VMI_CALL_SetAlarm); 891 vmi_timer_ops.set_alarm = vmi_get_function(VMI_CALL_SetAlarm);
892 vmi_timer_ops.cancel_alarm = 892 vmi_timer_ops.cancel_alarm =
893 vmi_get_function(VMI_CALL_CancelAlarm); 893 vmi_get_function(VMI_CALL_CancelAlarm);
894 paravirt_ops.time_init = vmi_time_init; 894 pv_time_ops.time_init = vmi_time_init;
895 paravirt_ops.get_wallclock = vmi_get_wallclock; 895 pv_time_ops.get_wallclock = vmi_get_wallclock;
896 paravirt_ops.set_wallclock = vmi_set_wallclock; 896 pv_time_ops.set_wallclock = vmi_set_wallclock;
897#ifdef CONFIG_X86_LOCAL_APIC 897#ifdef CONFIG_X86_LOCAL_APIC
898 paravirt_ops.setup_boot_clock = vmi_time_bsp_init; 898 pv_apic_ops.setup_boot_clock = vmi_time_bsp_init;
899 paravirt_ops.setup_secondary_clock = vmi_time_ap_init; 899 pv_apic_ops.setup_secondary_clock = vmi_time_ap_init;
900#endif 900#endif
901 paravirt_ops.sched_clock = vmi_sched_clock; 901 pv_time_ops.sched_clock = vmi_sched_clock;
902 paravirt_ops.get_cpu_khz = vmi_cpu_khz; 902 pv_time_ops.get_cpu_khz = vmi_cpu_khz;
903 903
904 /* We have true wallclock functions; disable CMOS clock sync */ 904 /* We have true wallclock functions; disable CMOS clock sync */
905 no_sync_cmos_clock = 1; 905 no_sync_cmos_clock = 1;
@@ -908,7 +908,7 @@ static inline int __init activate_vmi(void)
908 disable_vmi_timer = 1; 908 disable_vmi_timer = 1;
909 } 909 }
910 910
911 para_fill(safe_halt, Halt); 911 para_fill(pv_irq_ops.safe_halt, Halt);
912 912
913 /* 913 /*
914 * Alternative instruction rewriting doesn't happen soon enough 914 * Alternative instruction rewriting doesn't happen soon enough
diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
index f01bfcd4bdee..3d3bf05dec7f 100644
--- a/arch/x86/xen/enlighten.c
+++ b/arch/x86/xen/enlighten.c
@@ -124,7 +124,7 @@ static void __init xen_vcpu_setup(int cpu)
124static void __init xen_banner(void) 124static void __init xen_banner(void)
125{ 125{
126 printk(KERN_INFO "Booting paravirtualized kernel on %s\n", 126 printk(KERN_INFO "Booting paravirtualized kernel on %s\n",
127 paravirt_ops.name); 127 pv_info.name);
128 printk(KERN_INFO "Hypervisor signature: %s\n", xen_start_info->magic); 128 printk(KERN_INFO "Hypervisor signature: %s\n", xen_start_info->magic);
129} 129}
130 130
@@ -738,7 +738,7 @@ static __init void xen_pagetable_setup_start(pgd_t *base)
738 pgd_t *xen_pgd = (pgd_t *)xen_start_info->pt_base; 738 pgd_t *xen_pgd = (pgd_t *)xen_start_info->pt_base;
739 739
740 /* special set_pte for pagetable initialization */ 740 /* special set_pte for pagetable initialization */
741 paravirt_ops.set_pte = xen_set_pte_init; 741 pv_mmu_ops.set_pte = xen_set_pte_init;
742 742
743 init_mm.pgd = base; 743 init_mm.pgd = base;
744 /* 744 /*
@@ -785,8 +785,8 @@ static __init void xen_pagetable_setup_done(pgd_t *base)
785{ 785{
786 /* This will work as long as patching hasn't happened yet 786 /* This will work as long as patching hasn't happened yet
787 (which it hasn't) */ 787 (which it hasn't) */
788 paravirt_ops.alloc_pt = xen_alloc_pt; 788 pv_mmu_ops.alloc_pt = xen_alloc_pt;
789 paravirt_ops.set_pte = xen_set_pte; 789 pv_mmu_ops.set_pte = xen_set_pte;
790 790
791 if (!xen_feature(XENFEAT_auto_translated_physmap)) { 791 if (!xen_feature(XENFEAT_auto_translated_physmap)) {
792 /* 792 /*
@@ -833,12 +833,12 @@ void __init xen_setup_vcpu_info_placement(void)
833 if (have_vcpu_info_placement) { 833 if (have_vcpu_info_placement) {
834 printk(KERN_INFO "Xen: using vcpu_info placement\n"); 834 printk(KERN_INFO "Xen: using vcpu_info placement\n");
835 835
836 paravirt_ops.save_fl = xen_save_fl_direct; 836 pv_irq_ops.save_fl = xen_save_fl_direct;
837 paravirt_ops.restore_fl = xen_restore_fl_direct; 837 pv_irq_ops.restore_fl = xen_restore_fl_direct;
838 paravirt_ops.irq_disable = xen_irq_disable_direct; 838 pv_irq_ops.irq_disable = xen_irq_disable_direct;
839 paravirt_ops.irq_enable = xen_irq_enable_direct; 839 pv_irq_ops.irq_enable = xen_irq_enable_direct;
840 paravirt_ops.read_cr2 = xen_read_cr2_direct; 840 pv_mmu_ops.read_cr2 = xen_read_cr2_direct;
841 paravirt_ops.iret = xen_iret_direct; 841 pv_cpu_ops.iret = xen_iret_direct;
842 } 842 }
843} 843}
844 844
@@ -850,8 +850,8 @@ static unsigned xen_patch(u8 type, u16 clobbers, void *insnbuf,
850 850
851 start = end = reloc = NULL; 851 start = end = reloc = NULL;
852 852
853#define SITE(x) \ 853#define SITE(op, x) \
854 case PARAVIRT_PATCH(x): \ 854 case PARAVIRT_PATCH(op.x): \
855 if (have_vcpu_info_placement) { \ 855 if (have_vcpu_info_placement) { \
856 start = (char *)xen_##x##_direct; \ 856 start = (char *)xen_##x##_direct; \
857 end = xen_##x##_direct_end; \ 857 end = xen_##x##_direct_end; \
@@ -860,10 +860,10 @@ static unsigned xen_patch(u8 type, u16 clobbers, void *insnbuf,
860 goto patch_site 860 goto patch_site
861 861
862 switch (type) { 862 switch (type) {
863 SITE(irq_enable); 863 SITE(pv_irq_ops, irq_enable);
864 SITE(irq_disable); 864 SITE(pv_irq_ops, irq_disable);
865 SITE(save_fl); 865 SITE(pv_irq_ops, save_fl);
866 SITE(restore_fl); 866 SITE(pv_irq_ops, restore_fl);
867#undef SITE 867#undef SITE
868 868
869 patch_site: 869 patch_site:
@@ -895,26 +895,32 @@ static unsigned xen_patch(u8 type, u16 clobbers, void *insnbuf,
895 return ret; 895 return ret;
896} 896}
897 897
898static const struct paravirt_ops xen_paravirt_ops __initdata = { 898static const struct pv_info xen_info __initdata = {
899 .paravirt_enabled = 1, 899 .paravirt_enabled = 1,
900 .shared_kernel_pmd = 0, 900 .shared_kernel_pmd = 0,
901 901
902 .name = "Xen", 902 .name = "Xen",
903 .banner = xen_banner, 903};
904 904
905static const struct pv_init_ops xen_init_ops __initdata = {
905 .patch = xen_patch, 906 .patch = xen_patch,
906 907
908 .banner = xen_banner,
907 .memory_setup = xen_memory_setup, 909 .memory_setup = xen_memory_setup,
908 .arch_setup = xen_arch_setup, 910 .arch_setup = xen_arch_setup,
909 .init_IRQ = xen_init_IRQ,
910 .post_allocator_init = xen_mark_init_mm_pinned, 911 .post_allocator_init = xen_mark_init_mm_pinned,
912};
911 913
914static const struct pv_time_ops xen_time_ops __initdata = {
912 .time_init = xen_time_init, 915 .time_init = xen_time_init,
916
913 .set_wallclock = xen_set_wallclock, 917 .set_wallclock = xen_set_wallclock,
914 .get_wallclock = xen_get_wallclock, 918 .get_wallclock = xen_get_wallclock,
915 .get_cpu_khz = xen_cpu_khz, 919 .get_cpu_khz = xen_cpu_khz,
916 .sched_clock = xen_sched_clock, 920 .sched_clock = xen_sched_clock,
921};
917 922
923static const struct pv_cpu_ops xen_cpu_ops __initdata = {
918 .cpuid = xen_cpuid, 924 .cpuid = xen_cpuid,
919 925
920 .set_debugreg = xen_set_debugreg, 926 .set_debugreg = xen_set_debugreg,
@@ -925,22 +931,10 @@ static const struct paravirt_ops xen_paravirt_ops __initdata = {
925 .read_cr0 = native_read_cr0, 931 .read_cr0 = native_read_cr0,
926 .write_cr0 = native_write_cr0, 932 .write_cr0 = native_write_cr0,
927 933
928 .read_cr2 = xen_read_cr2,
929 .write_cr2 = xen_write_cr2,
930
931 .read_cr3 = xen_read_cr3,
932 .write_cr3 = xen_write_cr3,
933
934 .read_cr4 = native_read_cr4, 934 .read_cr4 = native_read_cr4,
935 .read_cr4_safe = native_read_cr4_safe, 935 .read_cr4_safe = native_read_cr4_safe,
936 .write_cr4 = xen_write_cr4, 936 .write_cr4 = xen_write_cr4,
937 937
938 .save_fl = xen_save_fl,
939 .restore_fl = xen_restore_fl,
940 .irq_disable = xen_irq_disable,
941 .irq_enable = xen_irq_enable,
942 .safe_halt = xen_safe_halt,
943 .halt = xen_halt,
944 .wbinvd = native_wbinvd, 938 .wbinvd = native_wbinvd,
945 939
946 .read_msr = native_read_msr_safe, 940 .read_msr = native_read_msr_safe,
@@ -968,7 +962,19 @@ static const struct paravirt_ops xen_paravirt_ops __initdata = {
968 962
969 .set_iopl_mask = xen_set_iopl_mask, 963 .set_iopl_mask = xen_set_iopl_mask,
970 .io_delay = xen_io_delay, 964 .io_delay = xen_io_delay,
965};
966
967static const struct pv_irq_ops xen_irq_ops __initdata = {
968 .init_IRQ = xen_init_IRQ,
969 .save_fl = xen_save_fl,
970 .restore_fl = xen_restore_fl,
971 .irq_disable = xen_irq_disable,
972 .irq_enable = xen_irq_enable,
973 .safe_halt = xen_safe_halt,
974 .halt = xen_halt,
975};
971 976
977static const struct pv_apic_ops xen_apic_ops __initdata = {
972#ifdef CONFIG_X86_LOCAL_APIC 978#ifdef CONFIG_X86_LOCAL_APIC
973 .apic_write = xen_apic_write, 979 .apic_write = xen_apic_write,
974 .apic_write_atomic = xen_apic_write, 980 .apic_write_atomic = xen_apic_write,
@@ -977,6 +983,17 @@ static const struct paravirt_ops xen_paravirt_ops __initdata = {
977 .setup_secondary_clock = paravirt_nop, 983 .setup_secondary_clock = paravirt_nop,
978 .startup_ipi_hook = paravirt_nop, 984 .startup_ipi_hook = paravirt_nop,
979#endif 985#endif
986};
987
988static const struct pv_mmu_ops xen_mmu_ops __initdata = {
989 .pagetable_setup_start = xen_pagetable_setup_start,
990 .pagetable_setup_done = xen_pagetable_setup_done,
991
992 .read_cr2 = xen_read_cr2,
993 .write_cr2 = xen_write_cr2,
994
995 .read_cr3 = xen_read_cr3,
996 .write_cr3 = xen_write_cr3,
980 997
981 .flush_tlb_user = xen_flush_tlb, 998 .flush_tlb_user = xen_flush_tlb,
982 .flush_tlb_kernel = xen_flush_tlb, 999 .flush_tlb_kernel = xen_flush_tlb,
@@ -986,9 +1003,6 @@ static const struct paravirt_ops xen_paravirt_ops __initdata = {
986 .pte_update = paravirt_nop, 1003 .pte_update = paravirt_nop,
987 .pte_update_defer = paravirt_nop, 1004 .pte_update_defer = paravirt_nop,
988 1005
989 .pagetable_setup_start = xen_pagetable_setup_start,
990 .pagetable_setup_done = xen_pagetable_setup_done,
991
992 .alloc_pt = xen_alloc_pt_init, 1006 .alloc_pt = xen_alloc_pt_init,
993 .release_pt = xen_release_pt, 1007 .release_pt = xen_release_pt,
994 .alloc_pd = paravirt_nop, 1008 .alloc_pd = paravirt_nop,
@@ -1023,7 +1037,9 @@ static const struct paravirt_ops xen_paravirt_ops __initdata = {
1023 .activate_mm = xen_activate_mm, 1037 .activate_mm = xen_activate_mm,
1024 .dup_mmap = xen_dup_mmap, 1038 .dup_mmap = xen_dup_mmap,
1025 .exit_mmap = xen_exit_mmap, 1039 .exit_mmap = xen_exit_mmap,
1040};
1026 1041
1042static const struct pv_misc_ops xen_misc_ops __initdata = {
1027 .set_lazy_mode = xen_set_lazy_mode, 1043 .set_lazy_mode = xen_set_lazy_mode,
1028}; 1044};
1029 1045
@@ -1091,7 +1107,15 @@ asmlinkage void __init xen_start_kernel(void)
1091 BUG_ON(memcmp(xen_start_info->magic, "xen-3.0", 7) != 0); 1107 BUG_ON(memcmp(xen_start_info->magic, "xen-3.0", 7) != 0);
1092 1108
1093 /* Install Xen paravirt ops */ 1109 /* Install Xen paravirt ops */
1094 paravirt_ops = xen_paravirt_ops; 1110 pv_info = xen_info;
1111 pv_init_ops = xen_init_ops;
1112 pv_time_ops = xen_time_ops;
1113 pv_cpu_ops = xen_cpu_ops;
1114 pv_irq_ops = xen_irq_ops;
1115 pv_apic_ops = xen_apic_ops;
1116 pv_mmu_ops = xen_mmu_ops;
1117 pv_misc_ops = xen_misc_ops;
1118
1095 machine_ops = xen_machine_ops; 1119 machine_ops = xen_machine_ops;
1096 1120
1097#ifdef CONFIG_SMP 1121#ifdef CONFIG_SMP
@@ -1124,9 +1148,9 @@ asmlinkage void __init xen_start_kernel(void)
1124 xen_setup_vcpu_info_placement(); 1148 xen_setup_vcpu_info_placement();
1125#endif 1149#endif
1126 1150
1127 paravirt_ops.kernel_rpl = 1; 1151 pv_info.kernel_rpl = 1;
1128 if (xen_feature(XENFEAT_supervisor_mode_kernel)) 1152 if (xen_feature(XENFEAT_supervisor_mode_kernel))
1129 paravirt_ops.kernel_rpl = 0; 1153 pv_info.kernel_rpl = 0;
1130 1154
1131 /* set the limit of our address space */ 1155 /* set the limit of our address space */
1132 reserve_top_address(-HYPERVISOR_VIRT_START + 2 * PAGE_SIZE); 1156 reserve_top_address(-HYPERVISOR_VIRT_START + 2 * PAGE_SIZE);