author	Jeremy Fitzhardinge <jeremy@xensource.com>	2007-10-16 14:51:29 -0400
committer	Jeremy Fitzhardinge <jeremy@goop.org>	2007-10-16 14:51:29 -0400
commit	93b1eab3d29e7ea32ee583de3362da84db06ded8 (patch)
tree	8dc7eb61d4c65a48f9ce21a49e392f4967185cfd
parent	ab9c232286c2b77be78441c2d8396500b045777e (diff)
paravirt: refactor struct paravirt_ops into smaller pv_*_ops
This patch refactors the paravirt_ops structure into groups of
functionally related ops (sketched below):
pv_info - random info, rather than function entrypoints
pv_init_ops - functions used at boot time (some for module_init too)
pv_misc_ops - lazy mode, which didn't fit well anywhere else
pv_time_ops - time-related functions
pv_cpu_ops - various privileged instruction ops
pv_irq_ops - operations for managing interrupt state
pv_apic_ops - APIC operations
pv_mmu_ops - operations for managing pagetables
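
The groups are tied back together by a single template structure (added to
include/asm-x86/paravirt.h by this patch; its layout is what the
get_call_destination() hunk below relies on). A rough sketch, with member
order significant because the patch-type numbering is derived from it:

	struct paravirt_patch_template
	{
		struct pv_init_ops pv_init_ops;
		struct pv_misc_ops pv_misc_ops;
		struct pv_time_ops pv_time_ops;
		struct pv_cpu_ops pv_cpu_ops;
		struct pv_irq_ops pv_irq_ops;
		struct pv_apic_ops pv_apic_ops;
		struct pv_mmu_ops pv_mmu_ops;
	};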
There are several motivations for this:
1. Some of these ops will be general to all x86, and some will be
i386/x86-64 specific. This makes it easier to share common stuff
while allowing separate implementations where needed.
2. At the moment we must export all of paravirt_ops, but modules only
need selected parts of it. This allows us to export on a case by case
basis (and also choose which export license we want to apply).
3. Functional groupings make things a bit more readable.
Struct paravirt_ops is now only used as a template to generate
patch-site identifiers, and to extract function pointers for inserting
into jmp/calls when patching. It is only instantiated when needed.
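
As an illustration (a sketch based on the PARAVIRT_PATCH() usage visible
throughout this diff; the real macro lives in include/asm-x86/paravirt.h),
a patch-site identifier is just the word offset of a function pointer
within that template:

	/* Identify a patch site by the word offset of its pointer within
	 * the template, e.g. PARAVIRT_PATCH(pv_cpu_ops.iret). */
	#define PARAVIRT_PATCH(x)					\
		(offsetof(struct paravirt_patch_template, x) / sizeof(void *))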
Signed-off-by: Jeremy Fitzhardinge <jeremy@xensource.com>
Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>
Cc: Andi Kleen <ak@suse.de>
Cc: Zach Amsden <zach@vmware.com>
Cc: Avi Kivity <avi@qumranet.com>
Cc: Anthony Liguori <aliguori@us.ibm.com>
Cc: "Glauber de Oliveira Costa" <glommer@gmail.com>
Cc: Jun Nakajima <jun.nakajima@intel.com>
 arch/x86/kernel/alternative.c         |   4
 arch/x86/kernel/asm-offsets_32.c      |  14
 arch/x86/kernel/entry_32.S            |   2
 arch/x86/kernel/paravirt_32.c         | 174
 arch/x86/kernel/vmi_32.c              | 164
 arch/x86/xen/enlighten.c              |  98
 drivers/char/hvc_lguest.c             |   2
 drivers/lguest/core.c                 |   6
 drivers/lguest/lguest.c               | 124
 drivers/lguest/lguest_bus.c           |   2
 include/asm-x86/paravirt.h            | 457
 include/asm-x86/pgtable-3level-defs.h |   2
 12 files changed, 587 insertions(+), 462 deletions(-)
diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c
index bd72d94e713e..63c55148dd05 100644
--- a/arch/x86/kernel/alternative.c
+++ b/arch/x86/kernel/alternative.c
@@ -368,8 +368,8 @@ void apply_paravirt(struct paravirt_patch_site *start,
 		BUG_ON(p->len > MAX_PATCH_LEN);
 		/* prep the buffer with the original instructions */
 		memcpy(insnbuf, p->instr, p->len);
-		used = paravirt_ops.patch(p->instrtype, p->clobbers, insnbuf,
+		used = pv_init_ops.patch(p->instrtype, p->clobbers, insnbuf,
					  (unsigned long)p->instr, p->len);
 
 		BUG_ON(used > p->len);
 
diff --git a/arch/x86/kernel/asm-offsets_32.c b/arch/x86/kernel/asm-offsets_32.c
index 8029742c0fc1..f1b7cdda82b3 100644
--- a/arch/x86/kernel/asm-offsets_32.c
+++ b/arch/x86/kernel/asm-offsets_32.c
@@ -116,12 +116,14 @@ void foo(void)
 
 #ifdef CONFIG_PARAVIRT
 	BLANK();
-	OFFSET(PARAVIRT_enabled, paravirt_ops, paravirt_enabled);
-	OFFSET(PARAVIRT_irq_disable, paravirt_ops, irq_disable);
-	OFFSET(PARAVIRT_irq_enable, paravirt_ops, irq_enable);
-	OFFSET(PARAVIRT_irq_enable_sysexit, paravirt_ops, irq_enable_sysexit);
-	OFFSET(PARAVIRT_iret, paravirt_ops, iret);
-	OFFSET(PARAVIRT_read_cr0, paravirt_ops, read_cr0);
+	OFFSET(PARAVIRT_enabled, pv_info, paravirt_enabled);
+	OFFSET(PARAVIRT_PATCH_pv_cpu_ops, paravirt_patch_template, pv_cpu_ops);
+	OFFSET(PARAVIRT_PATCH_pv_irq_ops, paravirt_patch_template, pv_irq_ops);
+	OFFSET(PV_IRQ_irq_disable, pv_irq_ops, irq_disable);
+	OFFSET(PV_IRQ_irq_enable, pv_irq_ops, irq_enable);
+	OFFSET(PV_CPU_iret, pv_cpu_ops, iret);
+	OFFSET(PV_CPU_irq_enable_sysexit, pv_cpu_ops, irq_enable_sysexit);
+	OFFSET(PV_CPU_read_cr0, pv_cpu_ops, read_cr0);
 #endif
 
 #ifdef CONFIG_XEN
diff --git a/arch/x86/kernel/entry_32.S b/arch/x86/kernel/entry_32.S
index 290b7bc82da3..1f2062e94d82 100644
--- a/arch/x86/kernel/entry_32.S
+++ b/arch/x86/kernel/entry_32.S
@@ -434,7 +434,7 @@ ldt_ss:
 	 * is still available to implement the setting of the high
 	 * 16-bits in the INTERRUPT_RETURN paravirt-op.
	 */
-	cmpl $0, paravirt_ops+PARAVIRT_enabled
+	cmpl $0, pv_info+PARAVIRT_enabled
 	jne restore_nocheck
 #endif
 
diff --git a/arch/x86/kernel/paravirt_32.c b/arch/x86/kernel/paravirt_32.c
index 739cfb207dd7..fa412515af79 100644
--- a/arch/x86/kernel/paravirt_32.c
+++ b/arch/x86/kernel/paravirt_32.c
@@ -42,32 +42,33 @@ void _paravirt_nop(void)
 static void __init default_banner(void)
 {
 	printk(KERN_INFO "Booting paravirtualized kernel on %s\n",
-	       paravirt_ops.name);
+	       pv_info.name);
 }
 
 char *memory_setup(void)
 {
-	return paravirt_ops.memory_setup();
+	return pv_init_ops.memory_setup();
 }
 
 /* Simple instruction patching code. */
-#define DEF_NATIVE(name, code)					\
-	extern const char start_##name[], end_##name[];		\
-	asm("start_" #name ": " code "; end_" #name ":")
+#define DEF_NATIVE(ops, name, code)					\
+	extern const char start_##ops##_##name[], end_##ops##_##name[];	\
+	asm("start_" #ops "_" #name ": " code "; end_" #ops "_" #name ":")
 
-DEF_NATIVE(irq_disable, "cli");
-DEF_NATIVE(irq_enable, "sti");
-DEF_NATIVE(restore_fl, "push %eax; popf");
-DEF_NATIVE(save_fl, "pushf; pop %eax");
-DEF_NATIVE(iret, "iret");
-DEF_NATIVE(irq_enable_sysexit, "sti; sysexit");
-DEF_NATIVE(read_cr2, "mov %cr2, %eax");
-DEF_NATIVE(write_cr3, "mov %eax, %cr3");
-DEF_NATIVE(read_cr3, "mov %cr3, %eax");
-DEF_NATIVE(clts, "clts");
-DEF_NATIVE(read_tsc, "rdtsc");
+DEF_NATIVE(pv_irq_ops, irq_disable, "cli");
+DEF_NATIVE(pv_irq_ops, irq_enable, "sti");
+DEF_NATIVE(pv_irq_ops, restore_fl, "push %eax; popf");
+DEF_NATIVE(pv_irq_ops, save_fl, "pushf; pop %eax");
+DEF_NATIVE(pv_cpu_ops, iret, "iret");
+DEF_NATIVE(pv_cpu_ops, irq_enable_sysexit, "sti; sysexit");
+DEF_NATIVE(pv_mmu_ops, read_cr2, "mov %cr2, %eax");
+DEF_NATIVE(pv_mmu_ops, write_cr3, "mov %eax, %cr3");
+DEF_NATIVE(pv_mmu_ops, read_cr3, "mov %cr3, %eax");
+DEF_NATIVE(pv_cpu_ops, clts, "clts");
+DEF_NATIVE(pv_cpu_ops, read_tsc, "rdtsc");
 
-DEF_NATIVE(ud2a, "ud2a");
+/* Undefined instruction for dealing with missing ops pointers. */
+static const unsigned char ud2a[] = { 0x0f, 0x0b };
 
 static unsigned native_patch(u8 type, u16 clobbers, void *ibuf,
 			     unsigned long addr, unsigned len)
@@ -76,37 +77,29 @@ static unsigned native_patch(u8 type, u16 clobbers, void *ibuf, | |||
76 | unsigned ret; | 77 | unsigned ret; |
77 | 78 | ||
78 | switch(type) { | 79 | switch(type) { |
79 | #define SITE(x) case PARAVIRT_PATCH(x): start = start_##x; end = end_##x; goto patch_site | 80 | #define SITE(ops, x) \ |
80 | SITE(irq_disable); | 81 | case PARAVIRT_PATCH(ops.x): \ |
81 | SITE(irq_enable); | 82 | start = start_##ops##_##x; \ |
82 | SITE(restore_fl); | 83 | end = end_##ops##_##x; \ |
83 | SITE(save_fl); | 84 | goto patch_site |
84 | SITE(iret); | 85 | |
85 | SITE(irq_enable_sysexit); | 86 | SITE(pv_irq_ops, irq_disable); |
86 | SITE(read_cr2); | 87 | SITE(pv_irq_ops, irq_enable); |
87 | SITE(read_cr3); | 88 | SITE(pv_irq_ops, restore_fl); |
88 | SITE(write_cr3); | 89 | SITE(pv_irq_ops, save_fl); |
89 | SITE(clts); | 90 | SITE(pv_cpu_ops, iret); |
90 | SITE(read_tsc); | 91 | SITE(pv_cpu_ops, irq_enable_sysexit); |
92 | SITE(pv_mmu_ops, read_cr2); | ||
93 | SITE(pv_mmu_ops, read_cr3); | ||
94 | SITE(pv_mmu_ops, write_cr3); | ||
95 | SITE(pv_cpu_ops, clts); | ||
96 | SITE(pv_cpu_ops, read_tsc); | ||
91 | #undef SITE | 97 | #undef SITE |
92 | 98 | ||
93 | patch_site: | 99 | patch_site: |
94 | ret = paravirt_patch_insns(ibuf, len, start, end); | 100 | ret = paravirt_patch_insns(ibuf, len, start, end); |
95 | break; | 101 | break; |
96 | 102 | ||
97 | case PARAVIRT_PATCH(make_pgd): | ||
98 | case PARAVIRT_PATCH(make_pte): | ||
99 | case PARAVIRT_PATCH(pgd_val): | ||
100 | case PARAVIRT_PATCH(pte_val): | ||
101 | #ifdef CONFIG_X86_PAE | ||
102 | case PARAVIRT_PATCH(make_pmd): | ||
103 | case PARAVIRT_PATCH(pmd_val): | ||
104 | #endif | ||
105 | /* These functions end up returning exactly what | ||
106 | they're passed, in the same registers. */ | ||
107 | ret = paravirt_patch_nop(); | ||
108 | break; | ||
109 | |||
110 | default: | 103 | default: |
111 | ret = paravirt_patch_default(type, clobbers, ibuf, addr, len); | 104 | ret = paravirt_patch_default(type, clobbers, ibuf, addr, len); |
112 | break; | 105 | break; |
@@ -150,7 +143,7 @@ unsigned paravirt_patch_call(void *insnbuf,
 	return 5;
 }
 
-unsigned paravirt_patch_jmp(const void *target, void *insnbuf,
+unsigned paravirt_patch_jmp(void *insnbuf, const void *target,
 			    unsigned long addr, unsigned len)
 {
 	struct branch *b = insnbuf;
@@ -165,22 +158,38 @@ unsigned paravirt_patch_jmp(void *insnbuf, const void *target,
 	return 5;
 }
 
+/* Neat trick to map patch type back to the call within the
+ * corresponding structure. */
+static void *get_call_destination(u8 type)
+{
+	struct paravirt_patch_template tmpl = {
+		.pv_init_ops = pv_init_ops,
+		.pv_misc_ops = pv_misc_ops,
+		.pv_time_ops = pv_time_ops,
+		.pv_cpu_ops = pv_cpu_ops,
+		.pv_irq_ops = pv_irq_ops,
+		.pv_apic_ops = pv_apic_ops,
+		.pv_mmu_ops = pv_mmu_ops,
+	};
+	return *((void **)&tmpl + type);
+}
+
 unsigned paravirt_patch_default(u8 type, u16 clobbers, void *insnbuf,
 				unsigned long addr, unsigned len)
 {
-	void *opfunc = *((void **)&paravirt_ops + type);
+	void *opfunc = get_call_destination(type);
 	unsigned ret;
 
 	if (opfunc == NULL)
 		/* If there's no function, patch it with a ud2a (BUG) */
-		ret = paravirt_patch_insns(insnbuf, len, start_ud2a, end_ud2a);
+		ret = paravirt_patch_insns(insnbuf, len, ud2a, ud2a+sizeof(ud2a));
 	else if (opfunc == paravirt_nop)
 		/* If the operation is a nop, then nop the callsite */
 		ret = paravirt_patch_nop();
-	else if (type == PARAVIRT_PATCH(iret) ||
-		 type == PARAVIRT_PATCH(irq_enable_sysexit))
+	else if (type == PARAVIRT_PATCH(pv_cpu_ops.iret) ||
+		 type == PARAVIRT_PATCH(pv_cpu_ops.irq_enable_sysexit))
 		/* If operation requires a jmp, then jmp */
-		ret = paravirt_patch_jmp(opfunc, insnbuf, addr, len);
+		ret = paravirt_patch_jmp(insnbuf, opfunc, addr, len);
 	else
 		/* Otherwise call the function; assume target could
 		   clobber any caller-save reg */
@@ -205,7 +214,7 @@ unsigned paravirt_patch_insns(void *insnbuf, unsigned len,
 
 void init_IRQ(void)
 {
-	paravirt_ops.init_IRQ();
+	pv_irq_ops.init_IRQ();
 }
 
 static void native_flush_tlb(void)
@@ -233,7 +242,7 @@ extern void native_irq_enable_sysexit(void);
 
 static int __init print_banner(void)
 {
-	paravirt_ops.banner();
+	pv_init_ops.banner();
 	return 0;
 }
 core_initcall(print_banner);
@@ -273,47 +282,53 @@ int paravirt_disable_iospace(void)
 	return ret;
 }
 
-struct paravirt_ops paravirt_ops = {
+struct pv_info pv_info = {
 	.name = "bare hardware",
 	.paravirt_enabled = 0,
 	.kernel_rpl = 0,
 	.shared_kernel_pmd = 1,	/* Only used when CONFIG_X86_PAE is set */
+};
 
-	.patch = native_patch,
+struct pv_init_ops pv_init_ops = {
+	.patch = native_patch,
 	.banner = default_banner,
 	.arch_setup = paravirt_nop,
 	.memory_setup = machine_specific_memory_setup,
+};
+
+struct pv_time_ops pv_time_ops = {
+	.time_init = hpet_time_init,
 	.get_wallclock = native_get_wallclock,
 	.set_wallclock = native_set_wallclock,
-	.time_init = hpet_time_init,
+	.sched_clock = native_sched_clock,
+	.get_cpu_khz = native_calculate_cpu_khz,
+};
+
+struct pv_irq_ops pv_irq_ops = {
 	.init_IRQ = native_init_IRQ,
+	.save_fl = native_save_fl,
+	.restore_fl = native_restore_fl,
+	.irq_disable = native_irq_disable,
+	.irq_enable = native_irq_enable,
+	.safe_halt = native_safe_halt,
+	.halt = native_halt,
+};
 
+struct pv_cpu_ops pv_cpu_ops = {
 	.cpuid = native_cpuid,
 	.get_debugreg = native_get_debugreg,
 	.set_debugreg = native_set_debugreg,
 	.clts = native_clts,
 	.read_cr0 = native_read_cr0,
 	.write_cr0 = native_write_cr0,
-	.read_cr2 = native_read_cr2,
-	.write_cr2 = native_write_cr2,
-	.read_cr3 = native_read_cr3,
-	.write_cr3 = native_write_cr3,
 	.read_cr4 = native_read_cr4,
 	.read_cr4_safe = native_read_cr4_safe,
 	.write_cr4 = native_write_cr4,
-	.save_fl = native_save_fl,
-	.restore_fl = native_restore_fl,
-	.irq_disable = native_irq_disable,
-	.irq_enable = native_irq_enable,
-	.safe_halt = native_safe_halt,
-	.halt = native_halt,
 	.wbinvd = native_wbinvd,
 	.read_msr = native_read_msr_safe,
 	.write_msr = native_write_msr_safe,
 	.read_tsc = native_read_tsc,
 	.read_pmc = native_read_pmc,
-	.sched_clock = native_sched_clock,
-	.get_cpu_khz = native_calculate_cpu_khz,
 	.load_tr_desc = native_load_tr_desc,
 	.set_ldt = native_set_ldt,
 	.load_gdt = native_load_gdt,
@@ -327,9 +342,14 @@ struct paravirt_ops paravirt_ops = {
 	.write_idt_entry = write_dt_entry,
 	.load_esp0 = native_load_esp0,
 
+	.irq_enable_sysexit = native_irq_enable_sysexit,
+	.iret = native_iret,
+
 	.set_iopl_mask = native_set_iopl_mask,
 	.io_delay = native_io_delay,
+};
 
+struct pv_apic_ops pv_apic_ops = {
 #ifdef CONFIG_X86_LOCAL_APIC
 	.apic_write = native_apic_write,
 	.apic_write_atomic = native_apic_write_atomic,
@@ -338,11 +358,21 @@ struct paravirt_ops paravirt_ops = {
 	.setup_secondary_clock = setup_secondary_APIC_clock,
 	.startup_ipi_hook = paravirt_nop,
 #endif
+};
+
+struct pv_misc_ops pv_misc_ops = {
 	.set_lazy_mode = paravirt_nop,
+};
 
+struct pv_mmu_ops pv_mmu_ops = {
 	.pagetable_setup_start = native_pagetable_setup_start,
 	.pagetable_setup_done = native_pagetable_setup_done,
 
+	.read_cr2 = native_read_cr2,
+	.write_cr2 = native_write_cr2,
+	.read_cr3 = native_read_cr3,
+	.write_cr3 = native_write_cr3,
+
 	.flush_tlb_user = native_flush_tlb,
 	.flush_tlb_kernel = native_flush_tlb_global,
 	.flush_tlb_single = native_flush_tlb_single,
@@ -381,12 +411,14 @@ struct paravirt_ops paravirt_ops = {
 	.make_pte = native_make_pte,
 	.make_pgd = native_make_pgd,
 
-	.irq_enable_sysexit = native_irq_enable_sysexit,
-	.iret = native_iret,
-
 	.dup_mmap = paravirt_nop,
 	.exit_mmap = paravirt_nop,
 	.activate_mm = paravirt_nop,
 };
 
-EXPORT_SYMBOL(paravirt_ops);
+EXPORT_SYMBOL_GPL(pv_time_ops);
+EXPORT_SYMBOL_GPL(pv_cpu_ops);
+EXPORT_SYMBOL_GPL(pv_mmu_ops);
+EXPORT_SYMBOL_GPL(pv_apic_ops);
+EXPORT_SYMBOL_GPL(pv_info);
+EXPORT_SYMBOL    (pv_irq_ops);
diff --git a/arch/x86/kernel/vmi_32.c b/arch/x86/kernel/vmi_32.c
index 18673e0f193b..67cea5c2e3e0 100644
--- a/arch/x86/kernel/vmi_32.c
+++ b/arch/x86/kernel/vmi_32.c
@@ -134,21 +134,21 @@ static unsigned vmi_patch(u8 type, u16 clobbers, void *insns,
 			  unsigned long eip, unsigned len)
 {
 	switch (type) {
-	case PARAVIRT_PATCH(irq_disable):
+	case PARAVIRT_PATCH(pv_irq_ops.irq_disable):
 		return patch_internal(VMI_CALL_DisableInterrupts, len,
 				      insns, eip);
-	case PARAVIRT_PATCH(irq_enable):
+	case PARAVIRT_PATCH(pv_irq_ops.irq_enable):
 		return patch_internal(VMI_CALL_EnableInterrupts, len,
 				      insns, eip);
-	case PARAVIRT_PATCH(restore_fl):
+	case PARAVIRT_PATCH(pv_irq_ops.restore_fl):
 		return patch_internal(VMI_CALL_SetInterruptMask, len,
 				      insns, eip);
-	case PARAVIRT_PATCH(save_fl):
+	case PARAVIRT_PATCH(pv_irq_ops.save_fl):
 		return patch_internal(VMI_CALL_GetInterruptMask, len,
 				      insns, eip);
-	case PARAVIRT_PATCH(iret):
+	case PARAVIRT_PATCH(pv_cpu_ops.iret):
 		return patch_internal(VMI_CALL_IRET, len, insns, eip);
-	case PARAVIRT_PATCH(irq_enable_sysexit):
+	case PARAVIRT_PATCH(pv_cpu_ops.irq_enable_sysexit):
 		return patch_internal(VMI_CALL_SYSEXIT, len, insns, eip);
 	default:
 		break;
@@ -690,9 +690,9 @@ do { \
 	reloc = call_vrom_long_func(vmi_rom, get_reloc,		\
 				    VMI_CALL_##vmicall);	\
 	if (rel->type == VMI_RELOCATION_CALL_REL)		\
-		paravirt_ops.opname = (void *)rel->eip;		\
+		opname = (void *)rel->eip;			\
 	else if (rel->type == VMI_RELOCATION_NOP)		\
-		paravirt_ops.opname = (void *)vmi_nop;		\
+		opname = (void *)vmi_nop;			\
 	else if (rel->type != VMI_RELOCATION_NONE)		\
 		printk(KERN_WARNING "VMI: Unknown relocation "	\
 				    "type %d for " #vmicall"\n",\
@@ -712,7 +712,7 @@ do { \
 			    VMI_CALL_##vmicall);		\
 	BUG_ON(rel->type == VMI_RELOCATION_JUMP_REL);		\
 	if (rel->type == VMI_RELOCATION_CALL_REL) {		\
-		paravirt_ops.opname = wrapper;			\
+		opname = wrapper;				\
 		vmi_ops.cache = (void *)rel->eip;		\
 	}							\
 } while (0)
@@ -732,11 +732,11 @@ static inline int __init activate_vmi(void)
 	}
 	savesegment(cs, kernel_cs);
 
-	paravirt_ops.paravirt_enabled = 1;
-	paravirt_ops.kernel_rpl = kernel_cs & SEGMENT_RPL_MASK;
+	pv_info.paravirt_enabled = 1;
+	pv_info.kernel_rpl = kernel_cs & SEGMENT_RPL_MASK;
+	pv_info.name = "vmi";
 
-	paravirt_ops.patch = vmi_patch;
-	paravirt_ops.name = "vmi";
+	pv_init_ops.patch = vmi_patch;
 
 	/*
	 * Many of these operations are ABI compatible with VMI.
@@ -754,26 +754,26 @@ static inline int __init activate_vmi(void)
	 */
 
 	/* CPUID is special, so very special it gets wrapped like a present */
-	para_wrap(cpuid, vmi_cpuid, cpuid, CPUID);
+	para_wrap(pv_cpu_ops.cpuid, vmi_cpuid, cpuid, CPUID);
 
-	para_fill(clts, CLTS);
-	para_fill(get_debugreg, GetDR);
-	para_fill(set_debugreg, SetDR);
-	para_fill(read_cr0, GetCR0);
-	para_fill(read_cr2, GetCR2);
-	para_fill(read_cr3, GetCR3);
-	para_fill(read_cr4, GetCR4);
-	para_fill(write_cr0, SetCR0);
-	para_fill(write_cr2, SetCR2);
-	para_fill(write_cr3, SetCR3);
-	para_fill(write_cr4, SetCR4);
-	para_fill(save_fl, GetInterruptMask);
-	para_fill(restore_fl, SetInterruptMask);
-	para_fill(irq_disable, DisableInterrupts);
-	para_fill(irq_enable, EnableInterrupts);
+	para_fill(pv_cpu_ops.clts, CLTS);
+	para_fill(pv_cpu_ops.get_debugreg, GetDR);
+	para_fill(pv_cpu_ops.set_debugreg, SetDR);
+	para_fill(pv_cpu_ops.read_cr0, GetCR0);
+	para_fill(pv_mmu_ops.read_cr2, GetCR2);
+	para_fill(pv_mmu_ops.read_cr3, GetCR3);
+	para_fill(pv_cpu_ops.read_cr4, GetCR4);
+	para_fill(pv_cpu_ops.write_cr0, SetCR0);
+	para_fill(pv_mmu_ops.write_cr2, SetCR2);
+	para_fill(pv_mmu_ops.write_cr3, SetCR3);
+	para_fill(pv_cpu_ops.write_cr4, SetCR4);
+	para_fill(pv_irq_ops.save_fl, GetInterruptMask);
+	para_fill(pv_irq_ops.restore_fl, SetInterruptMask);
+	para_fill(pv_irq_ops.irq_disable, DisableInterrupts);
+	para_fill(pv_irq_ops.irq_enable, EnableInterrupts);
 
-	para_fill(wbinvd, WBINVD);
-	para_fill(read_tsc, RDTSC);
+	para_fill(pv_cpu_ops.wbinvd, WBINVD);
+	para_fill(pv_cpu_ops.read_tsc, RDTSC);
 
 	/* The following we emulate with trap and emulate for now */
 	/* paravirt_ops.read_msr = vmi_rdmsr */
@@ -781,29 +781,29 @@ static inline int __init activate_vmi(void)
 	/* paravirt_ops.rdpmc = vmi_rdpmc */
 
 	/* TR interface doesn't pass TR value, wrap */
-	para_wrap(load_tr_desc, vmi_set_tr, set_tr, SetTR);
+	para_wrap(pv_cpu_ops.load_tr_desc, vmi_set_tr, set_tr, SetTR);
 
 	/* LDT is special, too */
-	para_wrap(set_ldt, vmi_set_ldt, _set_ldt, SetLDT);
+	para_wrap(pv_cpu_ops.set_ldt, vmi_set_ldt, _set_ldt, SetLDT);
 
-	para_fill(load_gdt, SetGDT);
-	para_fill(load_idt, SetIDT);
-	para_fill(store_gdt, GetGDT);
-	para_fill(store_idt, GetIDT);
-	para_fill(store_tr, GetTR);
-	paravirt_ops.load_tls = vmi_load_tls;
-	para_fill(write_ldt_entry, WriteLDTEntry);
-	para_fill(write_gdt_entry, WriteGDTEntry);
-	para_fill(write_idt_entry, WriteIDTEntry);
-	para_wrap(load_esp0, vmi_load_esp0, set_kernel_stack, UpdateKernelStack);
-	para_fill(set_iopl_mask, SetIOPLMask);
-	para_fill(io_delay, IODelay);
-	para_wrap(set_lazy_mode, vmi_set_lazy_mode, set_lazy_mode, SetLazyMode);
+	para_fill(pv_cpu_ops.load_gdt, SetGDT);
+	para_fill(pv_cpu_ops.load_idt, SetIDT);
+	para_fill(pv_cpu_ops.store_gdt, GetGDT);
+	para_fill(pv_cpu_ops.store_idt, GetIDT);
+	para_fill(pv_cpu_ops.store_tr, GetTR);
+	pv_cpu_ops.load_tls = vmi_load_tls;
+	para_fill(pv_cpu_ops.write_ldt_entry, WriteLDTEntry);
+	para_fill(pv_cpu_ops.write_gdt_entry, WriteGDTEntry);
+	para_fill(pv_cpu_ops.write_idt_entry, WriteIDTEntry);
+	para_wrap(pv_cpu_ops.load_esp0, vmi_load_esp0, set_kernel_stack, UpdateKernelStack);
+	para_fill(pv_cpu_ops.set_iopl_mask, SetIOPLMask);
+	para_fill(pv_cpu_ops.io_delay, IODelay);
+	para_wrap(pv_misc_ops.set_lazy_mode, vmi_set_lazy_mode, set_lazy_mode, SetLazyMode);
 
 	/* user and kernel flush are just handled with different flags to FlushTLB */
-	para_wrap(flush_tlb_user, vmi_flush_tlb_user, _flush_tlb, FlushTLB);
-	para_wrap(flush_tlb_kernel, vmi_flush_tlb_kernel, _flush_tlb, FlushTLB);
-	para_fill(flush_tlb_single, InvalPage);
+	para_wrap(pv_mmu_ops.flush_tlb_user, vmi_flush_tlb_user, _flush_tlb, FlushTLB);
+	para_wrap(pv_mmu_ops.flush_tlb_kernel, vmi_flush_tlb_kernel, _flush_tlb, FlushTLB);
+	para_fill(pv_mmu_ops.flush_tlb_single, InvalPage);
 
 	/*
	 * Until a standard flag format can be agreed on, we need to
@@ -819,41 +819,41 @@ static inline int __init activate_vmi(void)
 #endif
 
 	if (vmi_ops.set_pte) {
-		paravirt_ops.set_pte = vmi_set_pte;
-		paravirt_ops.set_pte_at = vmi_set_pte_at;
-		paravirt_ops.set_pmd = vmi_set_pmd;
+		pv_mmu_ops.set_pte = vmi_set_pte;
+		pv_mmu_ops.set_pte_at = vmi_set_pte_at;
+		pv_mmu_ops.set_pmd = vmi_set_pmd;
 #ifdef CONFIG_X86_PAE
-		paravirt_ops.set_pte_atomic = vmi_set_pte_atomic;
-		paravirt_ops.set_pte_present = vmi_set_pte_present;
-		paravirt_ops.set_pud = vmi_set_pud;
-		paravirt_ops.pte_clear = vmi_pte_clear;
-		paravirt_ops.pmd_clear = vmi_pmd_clear;
+		pv_mmu_ops.set_pte_atomic = vmi_set_pte_atomic;
+		pv_mmu_ops.set_pte_present = vmi_set_pte_present;
+		pv_mmu_ops.set_pud = vmi_set_pud;
+		pv_mmu_ops.pte_clear = vmi_pte_clear;
+		pv_mmu_ops.pmd_clear = vmi_pmd_clear;
 #endif
 	}
 
 	if (vmi_ops.update_pte) {
-		paravirt_ops.pte_update = vmi_update_pte;
-		paravirt_ops.pte_update_defer = vmi_update_pte_defer;
+		pv_mmu_ops.pte_update = vmi_update_pte;
+		pv_mmu_ops.pte_update_defer = vmi_update_pte_defer;
 	}
 
 	vmi_ops.allocate_page = vmi_get_function(VMI_CALL_AllocatePage);
 	if (vmi_ops.allocate_page) {
-		paravirt_ops.alloc_pt = vmi_allocate_pt;
-		paravirt_ops.alloc_pd = vmi_allocate_pd;
-		paravirt_ops.alloc_pd_clone = vmi_allocate_pd_clone;
+		pv_mmu_ops.alloc_pt = vmi_allocate_pt;
+		pv_mmu_ops.alloc_pd = vmi_allocate_pd;
+		pv_mmu_ops.alloc_pd_clone = vmi_allocate_pd_clone;
 	}
 
 	vmi_ops.release_page = vmi_get_function(VMI_CALL_ReleasePage);
 	if (vmi_ops.release_page) {
-		paravirt_ops.release_pt = vmi_release_pt;
-		paravirt_ops.release_pd = vmi_release_pd;
+		pv_mmu_ops.release_pt = vmi_release_pt;
+		pv_mmu_ops.release_pd = vmi_release_pd;
 	}
 
 	/* Set linear is needed in all cases */
 	vmi_ops.set_linear_mapping = vmi_get_function(VMI_CALL_SetLinearMapping);
 #ifdef CONFIG_HIGHPTE
 	if (vmi_ops.set_linear_mapping)
-		paravirt_ops.kmap_atomic_pte = vmi_kmap_atomic_pte;
+		pv_mmu_ops.kmap_atomic_pte = vmi_kmap_atomic_pte;
 #endif
 
 	/*
@@ -863,17 +863,17 @@ static inline int __init activate_vmi(void)
	 * the backend.  They are performance critical anyway, so requiring
	 * a patch is not a big problem.
	 */
-	paravirt_ops.irq_enable_sysexit = (void *)0xfeedbab0;
-	paravirt_ops.iret = (void *)0xbadbab0;
+	pv_cpu_ops.irq_enable_sysexit = (void *)0xfeedbab0;
+	pv_cpu_ops.iret = (void *)0xbadbab0;
 
 #ifdef CONFIG_SMP
-	para_wrap(startup_ipi_hook, vmi_startup_ipi_hook, set_initial_ap_state, SetInitialAPState);
+	para_wrap(pv_apic_ops.startup_ipi_hook, vmi_startup_ipi_hook, set_initial_ap_state, SetInitialAPState);
 #endif
 
 #ifdef CONFIG_X86_LOCAL_APIC
-	para_fill(apic_read, APICRead);
-	para_fill(apic_write, APICWrite);
-	para_fill(apic_write_atomic, APICWrite);
+	para_fill(pv_apic_ops.apic_read, APICRead);
+	para_fill(pv_apic_ops.apic_write, APICWrite);
+	para_fill(pv_apic_ops.apic_write_atomic, APICWrite);
 #endif
 
 	/*
@@ -891,15 +891,15 @@ static inline int __init activate_vmi(void)
 		vmi_timer_ops.set_alarm = vmi_get_function(VMI_CALL_SetAlarm);
 		vmi_timer_ops.cancel_alarm =
			 vmi_get_function(VMI_CALL_CancelAlarm);
-		paravirt_ops.time_init = vmi_time_init;
-		paravirt_ops.get_wallclock = vmi_get_wallclock;
-		paravirt_ops.set_wallclock = vmi_set_wallclock;
+		pv_time_ops.time_init = vmi_time_init;
+		pv_time_ops.get_wallclock = vmi_get_wallclock;
+		pv_time_ops.set_wallclock = vmi_set_wallclock;
 #ifdef CONFIG_X86_LOCAL_APIC
-		paravirt_ops.setup_boot_clock = vmi_time_bsp_init;
-		paravirt_ops.setup_secondary_clock = vmi_time_ap_init;
+		pv_apic_ops.setup_boot_clock = vmi_time_bsp_init;
+		pv_apic_ops.setup_secondary_clock = vmi_time_ap_init;
 #endif
-		paravirt_ops.sched_clock = vmi_sched_clock;
-		paravirt_ops.get_cpu_khz = vmi_cpu_khz;
+		pv_time_ops.sched_clock = vmi_sched_clock;
+		pv_time_ops.get_cpu_khz = vmi_cpu_khz;
 
 		/* We have true wallclock functions; disable CMOS clock sync */
 		no_sync_cmos_clock = 1;
@@ -908,7 +908,7 @@ static inline int __init activate_vmi(void)
 		disable_vmi_timer = 1;
 	}
 
-	para_fill(safe_halt, Halt);
+	para_fill(pv_irq_ops.safe_halt, Halt);
 
 	/*
	 * Alternative instruction rewriting doesn't happen soon enough
diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
index f01bfcd4bdee..3d3bf05dec7f 100644
--- a/arch/x86/xen/enlighten.c
+++ b/arch/x86/xen/enlighten.c
@@ -124,7 +124,7 @@ static void __init xen_vcpu_setup(int cpu)
 static void __init xen_banner(void)
 {
 	printk(KERN_INFO "Booting paravirtualized kernel on %s\n",
-	       paravirt_ops.name);
+	       pv_info.name);
 	printk(KERN_INFO "Hypervisor signature: %s\n", xen_start_info->magic);
 }
 
@@ -738,7 +738,7 @@ static __init void xen_pagetable_setup_start(pgd_t *base)
 	pgd_t *xen_pgd = (pgd_t *)xen_start_info->pt_base;
 
 	/* special set_pte for pagetable initialization */
-	paravirt_ops.set_pte = xen_set_pte_init;
+	pv_mmu_ops.set_pte = xen_set_pte_init;
 
 	init_mm.pgd = base;
 	/*
@@ -785,8 +785,8 @@ static __init void xen_pagetable_setup_done(pgd_t *base)
 {
 	/* This will work as long as patching hasn't happened yet
	   (which it hasn't) */
-	paravirt_ops.alloc_pt = xen_alloc_pt;
-	paravirt_ops.set_pte = xen_set_pte;
+	pv_mmu_ops.alloc_pt = xen_alloc_pt;
+	pv_mmu_ops.set_pte = xen_set_pte;
 
 	if (!xen_feature(XENFEAT_auto_translated_physmap)) {
 		/*
@@ -833,12 +833,12 @@ void __init xen_setup_vcpu_info_placement(void)
 	if (have_vcpu_info_placement) {
 		printk(KERN_INFO "Xen: using vcpu_info placement\n");
 
-		paravirt_ops.save_fl = xen_save_fl_direct;
-		paravirt_ops.restore_fl = xen_restore_fl_direct;
-		paravirt_ops.irq_disable = xen_irq_disable_direct;
-		paravirt_ops.irq_enable = xen_irq_enable_direct;
-		paravirt_ops.read_cr2 = xen_read_cr2_direct;
-		paravirt_ops.iret = xen_iret_direct;
+		pv_irq_ops.save_fl = xen_save_fl_direct;
+		pv_irq_ops.restore_fl = xen_restore_fl_direct;
+		pv_irq_ops.irq_disable = xen_irq_disable_direct;
+		pv_irq_ops.irq_enable = xen_irq_enable_direct;
+		pv_mmu_ops.read_cr2 = xen_read_cr2_direct;
+		pv_cpu_ops.iret = xen_iret_direct;
 	}
 }
 
@@ -850,8 +850,8 @@ static unsigned xen_patch(u8 type, u16 clobbers, void *insnbuf,
 
 	start = end = reloc = NULL;
 
-#define SITE(x)								\
-	case PARAVIRT_PATCH(x):						\
+#define SITE(op, x)							\
+	case PARAVIRT_PATCH(op.x):					\
 	if (have_vcpu_info_placement) {					\
 		start = (char *)xen_##x##_direct;			\
 		end = xen_##x##_direct_end;				\
@@ -860,10 +860,10 @@ static unsigned xen_patch(u8 type, u16 clobbers, void *insnbuf,
 	goto patch_site
 
 	switch (type) {
-	SITE(irq_enable);
-	SITE(irq_disable);
-	SITE(save_fl);
-	SITE(restore_fl);
+	SITE(pv_irq_ops, irq_enable);
+	SITE(pv_irq_ops, irq_disable);
+	SITE(pv_irq_ops, save_fl);
+	SITE(pv_irq_ops, restore_fl);
 #undef SITE
 
 	patch_site:
@@ -895,26 +895,32 @@ static unsigned xen_patch(u8 type, u16 clobbers, void *insnbuf,
 	return ret;
 }
 
-static const struct paravirt_ops xen_paravirt_ops __initdata = {
+static const struct pv_info xen_info __initdata = {
 	.paravirt_enabled = 1,
 	.shared_kernel_pmd = 0,
 
 	.name = "Xen",
-	.banner = xen_banner,
+};
 
+static const struct pv_init_ops xen_init_ops __initdata = {
 	.patch = xen_patch,
 
+	.banner = xen_banner,
 	.memory_setup = xen_memory_setup,
 	.arch_setup = xen_arch_setup,
-	.init_IRQ = xen_init_IRQ,
 	.post_allocator_init = xen_mark_init_mm_pinned,
+};
 
+static const struct pv_time_ops xen_time_ops __initdata = {
 	.time_init = xen_time_init,
+
 	.set_wallclock = xen_set_wallclock,
 	.get_wallclock = xen_get_wallclock,
 	.get_cpu_khz = xen_cpu_khz,
 	.sched_clock = xen_sched_clock,
+};
 
+static const struct pv_cpu_ops xen_cpu_ops __initdata = {
 	.cpuid = xen_cpuid,
 
 	.set_debugreg = xen_set_debugreg,
@@ -925,22 +931,10 @@ static const struct paravirt_ops xen_paravirt_ops __initdata = {
 	.read_cr0 = native_read_cr0,
 	.write_cr0 = native_write_cr0,
 
-	.read_cr2 = xen_read_cr2,
-	.write_cr2 = xen_write_cr2,
-
-	.read_cr3 = xen_read_cr3,
-	.write_cr3 = xen_write_cr3,
-
 	.read_cr4 = native_read_cr4,
 	.read_cr4_safe = native_read_cr4_safe,
 	.write_cr4 = xen_write_cr4,
 
-	.save_fl = xen_save_fl,
-	.restore_fl = xen_restore_fl,
-	.irq_disable = xen_irq_disable,
-	.irq_enable = xen_irq_enable,
-	.safe_halt = xen_safe_halt,
-	.halt = xen_halt,
 	.wbinvd = native_wbinvd,
 
 	.read_msr = native_read_msr_safe,
@@ -968,7 +962,19 @@ static const struct paravirt_ops xen_paravirt_ops __initdata = {
 
 	.set_iopl_mask = xen_set_iopl_mask,
 	.io_delay = xen_io_delay,
+};
+
+static const struct pv_irq_ops xen_irq_ops __initdata = {
+	.init_IRQ = xen_init_IRQ,
+	.save_fl = xen_save_fl,
+	.restore_fl = xen_restore_fl,
+	.irq_disable = xen_irq_disable,
+	.irq_enable = xen_irq_enable,
+	.safe_halt = xen_safe_halt,
+	.halt = xen_halt,
+};
 
+static const struct pv_apic_ops xen_apic_ops __initdata = {
 #ifdef CONFIG_X86_LOCAL_APIC
 	.apic_write = xen_apic_write,
 	.apic_write_atomic = xen_apic_write,
@@ -977,6 +983,17 @@ static const struct paravirt_ops xen_paravirt_ops __initdata = {
 	.setup_secondary_clock = paravirt_nop,
 	.startup_ipi_hook = paravirt_nop,
 #endif
+};
+
+static const struct pv_mmu_ops xen_mmu_ops __initdata = {
+	.pagetable_setup_start = xen_pagetable_setup_start,
+	.pagetable_setup_done = xen_pagetable_setup_done,
+
+	.read_cr2 = xen_read_cr2,
+	.write_cr2 = xen_write_cr2,
+
+	.read_cr3 = xen_read_cr3,
+	.write_cr3 = xen_write_cr3,
 
 	.flush_tlb_user = xen_flush_tlb,
 	.flush_tlb_kernel = xen_flush_tlb,
@@ -986,9 +1003,6 @@ static const struct paravirt_ops xen_paravirt_ops __initdata = {
 	.pte_update = paravirt_nop,
 	.pte_update_defer = paravirt_nop,
 
-	.pagetable_setup_start = xen_pagetable_setup_start,
-	.pagetable_setup_done = xen_pagetable_setup_done,
-
 	.alloc_pt = xen_alloc_pt_init,
 	.release_pt = xen_release_pt,
 	.alloc_pd = paravirt_nop,
@@ -1023,7 +1037,9 @@ static const struct paravirt_ops xen_paravirt_ops __initdata = {
 	.activate_mm = xen_activate_mm,
 	.dup_mmap = xen_dup_mmap,
 	.exit_mmap = xen_exit_mmap,
+};
 
+static const struct pv_misc_ops xen_misc_ops __initdata = {
 	.set_lazy_mode = xen_set_lazy_mode,
 };
 
@@ -1091,7 +1107,15 @@ asmlinkage void __init xen_start_kernel(void)
 	BUG_ON(memcmp(xen_start_info->magic, "xen-3.0", 7) != 0);
 
 	/* Install Xen paravirt ops */
-	paravirt_ops = xen_paravirt_ops;
+	pv_info = xen_info;
+	pv_init_ops = xen_init_ops;
+	pv_time_ops = xen_time_ops;
+	pv_cpu_ops = xen_cpu_ops;
+	pv_irq_ops = xen_irq_ops;
+	pv_apic_ops = xen_apic_ops;
+	pv_mmu_ops = xen_mmu_ops;
+	pv_misc_ops = xen_misc_ops;
+
 	machine_ops = xen_machine_ops;
 
 #ifdef CONFIG_SMP
@@ -1124,9 +1148,9 @@ asmlinkage void __init xen_start_kernel(void)
 	xen_setup_vcpu_info_placement();
 #endif
 
-	paravirt_ops.kernel_rpl = 1;
+	pv_info.kernel_rpl = 1;
 	if (xen_feature(XENFEAT_supervisor_mode_kernel))
-		paravirt_ops.kernel_rpl = 0;
+		pv_info.kernel_rpl = 0;
 
 	/* set the limit of our address space */
 	reserve_top_address(-HYPERVISOR_VIRT_START + 2 * PAGE_SIZE);
diff --git a/drivers/char/hvc_lguest.c b/drivers/char/hvc_lguest.c
index 3d6bd0baa56d..efccb2155830 100644
--- a/drivers/char/hvc_lguest.c
+++ b/drivers/char/hvc_lguest.c
@@ -115,7 +115,7 @@ static struct hv_ops lguest_cons = {
 * (0), and the struct hv_ops containing the put_chars() function. */
 static int __init cons_init(void)
 {
-	if (strcmp(paravirt_ops.name, "lguest") != 0)
+	if (strcmp(pv_info.name, "lguest") != 0)
 		return 0;
 
 	return hvc_instantiate(0, 0, &lguest_cons);
diff --git a/drivers/lguest/core.c b/drivers/lguest/core.c
index 4a315f08a567..a0788c12b392 100644
--- a/drivers/lguest/core.c
+++ b/drivers/lguest/core.c
@@ -248,8 +248,8 @@ static void unmap_switcher(void)
 }
 
 /*H:130 Our Guest is usually so well behaved; it never tries to do things it
- * isn't allowed to.  Unfortunately, "struct paravirt_ops" isn't quite
- * complete, because it doesn't contain replacements for the Intel I/O
+ * isn't allowed to.  Unfortunately, Linux's paravirtual infrastructure isn't
+ * quite complete, because it doesn't contain replacements for the Intel I/O
 * instructions.  As a result, the Guest sometimes fumbles across one during
 * the boot process as it probes for various things which are usually attached
 * to a PC.
@@ -694,7 +694,7 @@ static int __init init(void)
 
 	/* Lguest can't run under Xen, VMI or itself.  It does Tricky Stuff. */
 	if (paravirt_enabled()) {
-		printk("lguest is afraid of %s\n", paravirt_ops.name);
+		printk("lguest is afraid of %s\n", pv_info.name);
 		return -EPERM;
 	}
 
diff --git a/drivers/lguest/lguest.c b/drivers/lguest/lguest.c
index ee1c6d05c3d3..ca9b844f37c2 100644
--- a/drivers/lguest/lguest.c
+++ b/drivers/lguest/lguest.c
@@ -23,7 +23,7 @@
 *
 * So how does the kernel know it's a Guest?  The Guest starts at a special
 * entry point marked with a magic string, which sets up a few things then
- * calls here.  We replace the native functions in "struct paravirt_ops"
+ * calls here.  We replace the native functions in various "paravirt" structures
 * with our Guest versions, then boot like normal. :*/
 
 /*
@@ -331,7 +331,7 @@ static void lguest_load_tls(struct thread_struct *t, unsigned int cpu)
 }
 
 /*G:038 That's enough excitement for now, back to ploughing through each of
- * the paravirt_ops (we're about 1/3 of the way through).
+ * the different pv_ops structures (we're about 1/3 of the way through).
 *
 * This is the Local Descriptor Table, another weird Intel thingy.  Linux only
 * uses this for some strange applications like Wine.  We don't do anything
@@ -558,7 +558,7 @@ static void lguest_set_pte(pte_t *ptep, pte_t pteval)
 		lazy_hcall(LHCALL_FLUSH_TLB, 1, 0, 0);
 }
 
-/* Unfortunately for Lguest, the paravirt_ops for page tables were based on
+/* Unfortunately for Lguest, the pv_mmu_ops for page tables were based on
 * native page table operations.  On native hardware you can set a new page
 * table entry whenever you want, but if you want to remove one you have to do
 * a TLB flush (a TLB is a little cache of page table entries kept by the CPU).
@@ -782,7 +782,7 @@ static void lguest_time_init(void)
 	clocksource_register(&lguest_clock);
 
 	/* Now we've set up our clock, we can use it as the scheduler clock */
-	paravirt_ops.sched_clock = lguest_sched_clock;
+	pv_time_ops.sched_clock = lguest_sched_clock;
 
 	/* We can't set cpumask in the initializer: damn C limitations!  Set it
	 * here and register our timer device. */
@@ -902,7 +902,7 @@ static __init char *lguest_memory_setup(void)
 /*G:050
 * Patching (Powerfully Placating Performance Pedants)
 *
- * We have already seen that "struct paravirt_ops" lets us replace simple
+ * We have already seen that pv_ops structures let us replace simple
 * native instructions with calls to the appropriate back end all throughout
 * the kernel.  This allows the same kernel to run as a Guest and as a native
 * kernel, but it's slow because of all the indirect branches.
@@ -927,10 +927,10 @@ static const struct lguest_insns
 {
 	const char *start, *end;
 } lguest_insns[] = {
-	[PARAVIRT_PATCH(irq_disable)] = { lgstart_cli, lgend_cli },
-	[PARAVIRT_PATCH(irq_enable)] = { lgstart_sti, lgend_sti },
-	[PARAVIRT_PATCH(restore_fl)] = { lgstart_popf, lgend_popf },
-	[PARAVIRT_PATCH(save_fl)] = { lgstart_pushf, lgend_pushf },
+	[PARAVIRT_PATCH(pv_irq_ops.irq_disable)] = { lgstart_cli, lgend_cli },
+	[PARAVIRT_PATCH(pv_irq_ops.irq_enable)] = { lgstart_sti, lgend_sti },
+	[PARAVIRT_PATCH(pv_irq_ops.restore_fl)] = { lgstart_popf, lgend_popf },
+	[PARAVIRT_PATCH(pv_irq_ops.save_fl)] = { lgstart_pushf, lgend_pushf },
 };
 
 /* Now our patch routine is fairly simple (based on the native one in
@@ -957,9 +957,9 @@ static unsigned lguest_patch(u8 type, u16 clobber, void *ibuf,
 	return insn_len;
 }
 
-/*G:030 Once we get to lguest_init(), we know we're a Guest.  The paravirt_ops
- * structure in the kernel provides a single point for (almost) every routine
- * we have to override to avoid privileged instructions. */
+/*G:030 Once we get to lguest_init(), we know we're a Guest.  The pv_ops
+ * structures in the kernel provide points for (almost) every routine we have
+ * to override to avoid privileged instructions. */
 __init void lguest_init(void *boot)
 {
 	/* Copy boot parameters first: the Launcher put the physical location
@@ -974,54 +974,68 @@ __init void lguest_init(void *boot)
 
 	/* We're under lguest, paravirt is enabled, and we're running at
	 * privilege level 1, not 0 as normal. */
-	paravirt_ops.name = "lguest";
-	paravirt_ops.paravirt_enabled = 1;
-	paravirt_ops.kernel_rpl = 1;
+	pv_info.name = "lguest";
+	pv_info.paravirt_enabled = 1;
+	pv_info.kernel_rpl = 1;
 
 	/* We set up all the lguest overrides for sensitive operations.  These
	 * are detailed with the operations themselves. */
-	paravirt_ops.save_fl = save_fl;
-	paravirt_ops.restore_fl = restore_fl;
-	paravirt_ops.irq_disable = irq_disable;
-	paravirt_ops.irq_enable = irq_enable;
-	paravirt_ops.load_gdt = lguest_load_gdt;
-	paravirt_ops.memory_setup = lguest_memory_setup;
-	paravirt_ops.cpuid = lguest_cpuid;
-	paravirt_ops.write_cr3 = lguest_write_cr3;
-	paravirt_ops.flush_tlb_user = lguest_flush_tlb_user;
-	paravirt_ops.flush_tlb_single = lguest_flush_tlb_single;
-	paravirt_ops.flush_tlb_kernel = lguest_flush_tlb_kernel;
-	paravirt_ops.set_pte = lguest_set_pte;
-	paravirt_ops.set_pte_at = lguest_set_pte_at;
-	paravirt_ops.set_pmd = lguest_set_pmd;
+
+	/* interrupt-related operations */
+	pv_irq_ops.init_IRQ = lguest_init_IRQ;
+	pv_irq_ops.save_fl = save_fl;
+	pv_irq_ops.restore_fl = restore_fl;
+	pv_irq_ops.irq_disable = irq_disable;
+	pv_irq_ops.irq_enable = irq_enable;
+	pv_irq_ops.safe_halt = lguest_safe_halt;
+
+	/* init-time operations */
+	pv_init_ops.memory_setup = lguest_memory_setup;
+	pv_init_ops.patch = lguest_patch;
+
+	/* Intercepts of various cpu instructions */
+	pv_cpu_ops.load_gdt = lguest_load_gdt;
+	pv_cpu_ops.cpuid = lguest_cpuid;
+	pv_cpu_ops.load_idt = lguest_load_idt;
+	pv_cpu_ops.iret = lguest_iret;
+	pv_cpu_ops.load_esp0 = lguest_load_esp0;
+	pv_cpu_ops.load_tr_desc = lguest_load_tr_desc;
+	pv_cpu_ops.set_ldt = lguest_set_ldt;
+	pv_cpu_ops.load_tls = lguest_load_tls;
+	pv_cpu_ops.set_debugreg = lguest_set_debugreg;
+	pv_cpu_ops.clts = lguest_clts;
+	pv_cpu_ops.read_cr0 = lguest_read_cr0;
+	pv_cpu_ops.write_cr0 = lguest_write_cr0;
+	pv_cpu_ops.read_cr4 = lguest_read_cr4;
+	pv_cpu_ops.write_cr4 = lguest_write_cr4;
+	pv_cpu_ops.write_gdt_entry = lguest_write_gdt_entry;
+	pv_cpu_ops.write_idt_entry = lguest_write_idt_entry;
+	pv_cpu_ops.wbinvd = lguest_wbinvd;
+
+	/* pagetable management */
+	pv_mmu_ops.write_cr3 = lguest_write_cr3;
+	pv_mmu_ops.flush_tlb_user = lguest_flush_tlb_user;
+	pv_mmu_ops.flush_tlb_single = lguest_flush_tlb_single;
+	pv_mmu_ops.flush_tlb_kernel = lguest_flush_tlb_kernel;
+	pv_mmu_ops.set_pte = lguest_set_pte;
+	pv_mmu_ops.set_pte_at = lguest_set_pte_at;
+	pv_mmu_ops.set_pmd = lguest_set_pmd;
+	pv_mmu_ops.read_cr2 = lguest_read_cr2;
+	pv_mmu_ops.read_cr3 = lguest_read_cr3;
+
 #ifdef CONFIG_X86_LOCAL_APIC
-	paravirt_ops.apic_write = lguest_apic_write;
-	paravirt_ops.apic_write_atomic = lguest_apic_write;
-	paravirt_ops.apic_read = lguest_apic_read;
+	/* apic read/write intercepts */
+	pv_apic_ops.apic_write = lguest_apic_write;
+	pv_apic_ops.apic_write_atomic = lguest_apic_write;
+	pv_apic_ops.apic_read = lguest_apic_read;
 #endif
-	paravirt_ops.load_idt = lguest_load_idt;
-	paravirt_ops.iret = lguest_iret;
-	paravirt_ops.load_esp0 = lguest_load_esp0;
-	paravirt_ops.load_tr_desc = lguest_load_tr_desc;
-	paravirt_ops.set_ldt = lguest_set_ldt;
-	paravirt_ops.load_tls = lguest_load_tls;
-	paravirt_ops.set_debugreg = lguest_set_debugreg;
-	paravirt_ops.clts = lguest_clts;
-	paravirt_ops.read_cr0 = lguest_read_cr0;
-	paravirt_ops.write_cr0 = lguest_write_cr0;
-	paravirt_ops.init_IRQ = lguest_init_IRQ;
-	paravirt_ops.read_cr2 = lguest_read_cr2;
+
+	/* time operations */
+	pv_time_ops.get_wallclock = lguest_get_wallclock;
+	pv_time_ops.time_init = lguest_time_init;
+
+	pv_misc_ops.set_lazy_mode = lguest_lazy_mode;
+
1014 | paravirt_ops.read_cr3 = lguest_read_cr3; | ||
1015 | paravirt_ops.read_cr4 = lguest_read_cr4; | ||
1016 | paravirt_ops.write_cr4 = lguest_write_cr4; | ||
1017 | paravirt_ops.write_gdt_entry = lguest_write_gdt_entry; | ||
1018 | paravirt_ops.write_idt_entry = lguest_write_idt_entry; | ||
1019 | paravirt_ops.patch = lguest_patch; | ||
1020 | paravirt_ops.safe_halt = lguest_safe_halt; | ||
1021 | paravirt_ops.get_wallclock = lguest_get_wallclock; | ||
1022 | paravirt_ops.time_init = lguest_time_init; | ||
1023 | paravirt_ops.set_lazy_mode = lguest_lazy_mode; | ||
1024 | paravirt_ops.wbinvd = lguest_wbinvd; | ||
1025 | /* Now is a good time to look at the implementations of these functions | 1039 | /* Now is a good time to look at the implementations of these functions |
1026 | * before returning to the rest of lguest_init(). */ | 1040 | * before returning to the rest of lguest_init(). */ |
1027 | 1041 | ||
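The long run of assignments above follows one pattern: each pv_*_ops struct is instantiated once, pre-filled with native implementations, and a backend overrides only the members it must intercept. A self-contained sketch of that pattern, with made-up ops and a made-up guest hook (nothing here is kernel API):

#include <stdio.h>

struct demo_cpu_ops {
	void (*clts)(void);
	void (*wbinvd)(void);
};

static void native_clts(void)   { puts("native: clts"); }
static void native_wbinvd(void) { puts("native: wbinvd"); }
static void guest_wbinvd(void)  { puts("guest: wbinvd hypercall"); }

/* One global instance per group, pre-filled with native ops,
 * standing in for pv_cpu_ops. */
static struct demo_cpu_ops demo_cpu_ops = {
	.clts   = native_clts,
	.wbinvd = native_wbinvd,
};

static void demo_guest_init(void)
{
	/* Override only what must be intercepted; clts stays native. */
	demo_cpu_ops.wbinvd = guest_wbinvd;
}

int main(void)
{
	demo_guest_init();
	demo_cpu_ops.clts();
	demo_cpu_ops.wbinvd();
	return 0;
}

The payoff is the same as in lguest_init() above: ops the guest never touches keep their native behaviour for free.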
diff --git a/drivers/lguest/lguest_bus.c b/drivers/lguest/lguest_bus.c index 9e7752cc8002..57329788f8a7 100644 --- a/drivers/lguest/lguest_bus.c +++ b/drivers/lguest/lguest_bus.c | |||
@@ -201,7 +201,7 @@ static void scan_devices(void) | |||
201 | * "struct lguest_device_desc" array. */ | 201 | * "struct lguest_device_desc" array. */ |
202 | static int __init lguest_bus_init(void) | 202 | static int __init lguest_bus_init(void) |
203 | { | 203 | { |
204 | if (strcmp(paravirt_ops.name, "lguest") != 0) | 204 | if (strcmp(pv_info.name, "lguest") != 0) |
205 | return 0; | 205 | return 0; |
206 | 206 | ||
207 | /* Devices are in a single page above top of "normal" mem */ | 207 | /* Devices are in a single page above top of "normal" mem */ |
diff --git a/include/asm-x86/paravirt.h b/include/asm-x86/paravirt.h index 9fa3fa9e62d1..19726e12051e 100644 --- a/include/asm-x86/paravirt.h +++ b/include/asm-x86/paravirt.h | |||
@@ -33,19 +33,23 @@ enum paravirt_lazy_mode { | |||
33 | PARAVIRT_LAZY_FLUSH = 3, | 33 | PARAVIRT_LAZY_FLUSH = 3, |
34 | }; | 34 | }; |
35 | 35 | ||
36 | struct paravirt_ops | 36 | |
37 | { | 37 | /* general info */ |
38 | struct pv_info { | ||
38 | unsigned int kernel_rpl; | 39 | unsigned int kernel_rpl; |
39 | int shared_kernel_pmd; | 40 | int shared_kernel_pmd; |
40 | int paravirt_enabled; | 41 | int paravirt_enabled; |
41 | const char *name; | 42 | const char *name; |
43 | }; | ||
42 | 44 | ||
45 | struct pv_init_ops { | ||
43 | /* | 46 | /* |
44 | * Patch may replace one of the defined code sequences with arbitrary | 47 | * Patch may replace one of the defined code sequences with |
45 | * code, subject to the same register constraints. This generally | 48 | * arbitrary code, subject to the same register constraints. |
46 | * means the code is not free to clobber any registers other than EAX. | 49 | * This generally means the code is not free to clobber any |
47 | * The patch function should return the number of bytes of code | 50 | * registers other than EAX. The patch function should return |
48 | * generated, as we nop pad the rest in generic code. | 51 | * the number of bytes of code generated, as we nop pad the |
52 | * rest in generic code. | ||
49 | */ | 53 | */ |
50 | unsigned (*patch)(u8 type, u16 clobber, void *insnbuf, | 54 | unsigned (*patch)(u8 type, u16 clobber, void *insnbuf, |
51 | unsigned long addr, unsigned len); | 55 | unsigned long addr, unsigned len); |
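The patch hook's contract, restated as runnable (if simplified) C: the caller pre-loads insnbuf with the original instructions, the hook may overwrite them with a shorter sequence and reports how many bytes it emitted, and generic code nop-pads the rest — compare apply_paravirt() at the top of this commit. Only the argument list mirrors the real hook; the body and types are illustrative:

#include <stdio.h>
#include <string.h>

static unsigned demo_patch(unsigned char type, unsigned short clobber,
			   void *insnbuf, unsigned long addr, unsigned len)
{
	static const unsigned char cli = 0xfa;	/* x86 "cli" opcode */

	(void)type; (void)clobber; (void)addr;

	if (sizeof(cli) > len)
		return len;	/* no room: insnbuf still holds the original */

	memcpy(insnbuf, &cli, sizeof(cli));
	return sizeof(cli);	/* caller nop-pads the remaining bytes */
}

int main(void)
{
	/* pretend patch site: a 5-byte call placeholder */
	unsigned char site[5] = { 0xe8, 0, 0, 0, 0 };
	unsigned used = demo_patch(0, 0, site, 0, sizeof site);

	printf("emitted %u byte(s), %u left to nop-pad\n",
	       used, (unsigned)sizeof site - used);
	return 0;
}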
@@ -55,29 +59,28 @@ struct paravirt_ops | |||
55 | char *(*memory_setup)(void); | 59 | char *(*memory_setup)(void); |
56 | void (*post_allocator_init)(void); | 60 | void (*post_allocator_init)(void); |
57 | 61 | ||
58 | void (*init_IRQ)(void); | ||
59 | void (*time_init)(void); | ||
60 | |||
61 | /* | ||
62 | * Called before/after init_mm pagetable setup. setup_start | ||
63 | * may reset %cr3, and may pre-install parts of the pagetable; | ||
64 | * pagetable setup is expected to preserve any existing | ||
65 | * mapping. | ||
66 | */ | ||
67 | void (*pagetable_setup_start)(pgd_t *pgd_base); | ||
68 | void (*pagetable_setup_done)(pgd_t *pgd_base); | ||
69 | |||
70 | /* Print a banner to identify the environment */ | 62 | /* Print a banner to identify the environment */ |
71 | void (*banner)(void); | 63 | void (*banner)(void); |
64 | }; | ||
65 | |||
66 | |||
67 | struct pv_misc_ops { | ||
68 | /* Set deferred update mode, used for batching operations. */ | ||
69 | void (*set_lazy_mode)(enum paravirt_lazy_mode mode); | ||
70 | }; | ||
71 | |||
72 | struct pv_time_ops { | ||
73 | void (*time_init)(void); | ||
72 | 74 | ||
73 | /* Get and set time of day */ | 75 | /* Get and set time of day */ |
74 | unsigned long (*get_wallclock)(void); | 76 | unsigned long (*get_wallclock)(void); |
75 | int (*set_wallclock)(unsigned long); | 77 | int (*set_wallclock)(unsigned long); |
76 | 78 | ||
77 | /* cpuid emulation, mostly so that caps bits can be disabled */ | 79 | unsigned long long (*sched_clock)(void); |
78 | void (*cpuid)(unsigned int *eax, unsigned int *ebx, | 80 | unsigned long (*get_cpu_khz)(void); |
79 | unsigned int *ecx, unsigned int *edx); | 81 | }; |
80 | 82 | ||
83 | struct pv_cpu_ops { | ||
81 | /* hooks for various privileged instructions */ | 84 | /* hooks for various privileged instructions */ |
82 | unsigned long (*get_debugreg)(int regno); | 85 | unsigned long (*get_debugreg)(int regno); |
83 | void (*set_debugreg)(int regno, unsigned long value); | 86 | void (*set_debugreg)(int regno, unsigned long value); |
@@ -87,41 +90,10 @@ struct paravirt_ops | |||
87 | unsigned long (*read_cr0)(void); | 90 | unsigned long (*read_cr0)(void); |
88 | void (*write_cr0)(unsigned long); | 91 | void (*write_cr0)(unsigned long); |
89 | 92 | ||
90 | unsigned long (*read_cr2)(void); | ||
91 | void (*write_cr2)(unsigned long); | ||
92 | |||
93 | unsigned long (*read_cr3)(void); | ||
94 | void (*write_cr3)(unsigned long); | ||
95 | |||
96 | unsigned long (*read_cr4_safe)(void); | 93 | unsigned long (*read_cr4_safe)(void); |
97 | unsigned long (*read_cr4)(void); | 94 | unsigned long (*read_cr4)(void); |
98 | void (*write_cr4)(unsigned long); | 95 | void (*write_cr4)(unsigned long); |
99 | 96 | ||
100 | /* | ||
101 | * Get/set interrupt state. save_fl and restore_fl are only | ||
102 | * expected to use X86_EFLAGS_IF; all other bits | ||
103 | * returned from save_fl are undefined, and may be ignored by | ||
104 | * restore_fl. | ||
105 | */ | ||
106 | unsigned long (*save_fl)(void); | ||
107 | void (*restore_fl)(unsigned long); | ||
108 | void (*irq_disable)(void); | ||
109 | void (*irq_enable)(void); | ||
110 | void (*safe_halt)(void); | ||
111 | void (*halt)(void); | ||
112 | |||
113 | void (*wbinvd)(void); | ||
114 | |||
115 | /* MSR, PMC and TSC operations. | ||
116 | read_msr sets err to 0/-EFAULT; write_msr returns 0/-EFAULT. */ | ||
117 | u64 (*read_msr)(unsigned int msr, int *err); | ||
118 | int (*write_msr)(unsigned int msr, u64 val); | ||
119 | |||
120 | u64 (*read_tsc)(void); | ||
121 | u64 (*read_pmc)(void); | ||
122 | unsigned long long (*sched_clock)(void); | ||
123 | unsigned long (*get_cpu_khz)(void); | ||
124 | |||
125 | /* Segment descriptor handling */ | 97 | /* Segment descriptor handling */ |
126 | void (*load_tr_desc)(void); | 98 | void (*load_tr_desc)(void); |
127 | void (*load_gdt)(const struct Xgt_desc_struct *); | 99 | void (*load_gdt)(const struct Xgt_desc_struct *); |
@@ -140,18 +112,45 @@ struct paravirt_ops | |||
140 | void (*load_esp0)(struct tss_struct *tss, struct thread_struct *t); | 112 | void (*load_esp0)(struct tss_struct *tss, struct thread_struct *t); |
141 | 113 | ||
142 | void (*set_iopl_mask)(unsigned mask); | 114 | void (*set_iopl_mask)(unsigned mask); |
115 | |||
116 | void (*wbinvd)(void); | ||
143 | void (*io_delay)(void); | 117 | void (*io_delay)(void); |
144 | 118 | ||
119 | /* cpuid emulation, mostly so that caps bits can be disabled */ | ||
120 | void (*cpuid)(unsigned int *eax, unsigned int *ebx, | ||
121 | unsigned int *ecx, unsigned int *edx); | ||
122 | |||
123 | /* MSR, PMC and TSC operations. | ||
124 | read_msr sets err to 0/-EFAULT; write_msr returns 0/-EFAULT. */ | ||
125 | u64 (*read_msr)(unsigned int msr, int *err); | ||
126 | int (*write_msr)(unsigned int msr, u64 val); | ||
127 | |||
128 | u64 (*read_tsc)(void); | ||
129 | u64 (*read_pmc)(void); | ||
130 | |||
131 | /* These two are jmp targets, not actually called. */ | ||
132 | void (*irq_enable_sysexit)(void); | ||
133 | void (*iret)(void); | ||
134 | }; | ||
135 | |||
136 | struct pv_irq_ops { | ||
137 | void (*init_IRQ)(void); | ||
138 | |||
145 | /* | 139 | /* |
146 | * Hooks for intercepting the creation/use/destruction of an | 140 | * Get/set interrupt state. save_fl and restore_fl are only |
147 | * mm_struct. | 141 | * expected to use X86_EFLAGS_IF; all other bits |
142 | * returned from save_fl are undefined, and may be ignored by | ||
143 | * restore_fl. | ||
148 | */ | 144 | */ |
149 | void (*activate_mm)(struct mm_struct *prev, | 145 | unsigned long (*save_fl)(void); |
150 | struct mm_struct *next); | 146 | void (*restore_fl)(unsigned long); |
151 | void (*dup_mmap)(struct mm_struct *oldmm, | 147 | void (*irq_disable)(void); |
152 | struct mm_struct *mm); | 148 | void (*irq_enable)(void); |
153 | void (*exit_mmap)(struct mm_struct *mm); | 149 | void (*safe_halt)(void); |
150 | void (*halt)(void); | ||
151 | }; | ||
154 | 152 | ||
153 | struct pv_apic_ops { | ||
155 | #ifdef CONFIG_X86_LOCAL_APIC | 154 | #ifdef CONFIG_X86_LOCAL_APIC |
156 | /* | 155 | /* |
157 | * Direct APIC operations, principally for VMI. Ideally | 156 | * Direct APIC operations, principally for VMI. Ideally |
@@ -167,6 +166,34 @@ struct paravirt_ops | |||
167 | unsigned long start_eip, | 166 | unsigned long start_eip, |
168 | unsigned long start_esp); | 167 | unsigned long start_esp); |
169 | #endif | 168 | #endif |
169 | }; | ||
170 | |||
171 | struct pv_mmu_ops { | ||
172 | /* | ||
173 | * Called before/after init_mm pagetable setup. setup_start | ||
174 | * may reset %cr3, and may pre-install parts of the pagetable; | ||
175 | * pagetable setup is expected to preserve any existing | ||
176 | * mapping. | ||
177 | */ | ||
178 | void (*pagetable_setup_start)(pgd_t *pgd_base); | ||
179 | void (*pagetable_setup_done)(pgd_t *pgd_base); | ||
180 | |||
181 | unsigned long (*read_cr2)(void); | ||
182 | void (*write_cr2)(unsigned long); | ||
183 | |||
184 | unsigned long (*read_cr3)(void); | ||
185 | void (*write_cr3)(unsigned long); | ||
186 | |||
187 | /* | ||
188 | * Hooks for intercepting the creation/use/destruction of an | ||
189 | * mm_struct. | ||
190 | */ | ||
191 | void (*activate_mm)(struct mm_struct *prev, | ||
192 | struct mm_struct *next); | ||
193 | void (*dup_mmap)(struct mm_struct *oldmm, | ||
194 | struct mm_struct *mm); | ||
195 | void (*exit_mmap)(struct mm_struct *mm); | ||
196 | |||
170 | 197 | ||
171 | /* TLB operations */ | 198 | /* TLB operations */ |
172 | void (*flush_tlb_user)(void); | 199 | void (*flush_tlb_user)(void); |
@@ -191,15 +218,12 @@ struct paravirt_ops | |||
191 | void (*pte_update_defer)(struct mm_struct *mm, | 218 | void (*pte_update_defer)(struct mm_struct *mm, |
192 | unsigned long addr, pte_t *ptep); | 219 | unsigned long addr, pte_t *ptep); |
193 | 220 | ||
194 | #ifdef CONFIG_HIGHPTE | ||
195 | void *(*kmap_atomic_pte)(struct page *page, enum km_type type); | ||
196 | #endif | ||
197 | |||
198 | #ifdef CONFIG_X86_PAE | 221 | #ifdef CONFIG_X86_PAE |
199 | void (*set_pte_atomic)(pte_t *ptep, pte_t pteval); | 222 | void (*set_pte_atomic)(pte_t *ptep, pte_t pteval); |
200 | void (*set_pte_present)(struct mm_struct *mm, unsigned long addr, pte_t *ptep, pte_t pte); | 223 | void (*set_pte_present)(struct mm_struct *mm, unsigned long addr, |
224 | pte_t *ptep, pte_t pte); | ||
201 | void (*set_pud)(pud_t *pudp, pud_t pudval); | 225 | void (*set_pud)(pud_t *pudp, pud_t pudval); |
202 | void (*pte_clear)(struct mm_struct *mm, unsigned long addr, pte_t *ptep); | 226 | void (*pte_clear)(struct mm_struct *mm, unsigned long addr, pte_t *ptep); |
203 | void (*pmd_clear)(pmd_t *pmdp); | 227 | void (*pmd_clear)(pmd_t *pmdp); |
204 | 228 | ||
205 | unsigned long long (*pte_val)(pte_t); | 229 | unsigned long long (*pte_val)(pte_t); |
@@ -217,21 +241,40 @@ struct paravirt_ops | |||
217 | pgd_t (*make_pgd)(unsigned long pgd); | 241 | pgd_t (*make_pgd)(unsigned long pgd); |
218 | #endif | 242 | #endif |
219 | 243 | ||
220 | /* Set deferred update mode, used for batching operations. */ | 244 | #ifdef CONFIG_HIGHPTE |
221 | void (*set_lazy_mode)(enum paravirt_lazy_mode mode); | 245 | void *(*kmap_atomic_pte)(struct page *page, enum km_type type); |
246 | #endif | ||
247 | }; | ||
222 | 248 | ||
223 | /* These two are jmp targets, not actually called. */ | 249 | /* This contains all the paravirt structures: each function gets a |
224 | void (*irq_enable_sysexit)(void); | 250 | * convenient number from its offset, which we use to indicate what |
225 | void (*iret)(void); | 251 | * to patch. */
252 | struct paravirt_patch_template | ||
253 | { | ||
254 | struct pv_init_ops pv_init_ops; | ||
255 | struct pv_misc_ops pv_misc_ops; | ||
256 | struct pv_time_ops pv_time_ops; | ||
257 | struct pv_cpu_ops pv_cpu_ops; | ||
258 | struct pv_irq_ops pv_irq_ops; | ||
259 | struct pv_apic_ops pv_apic_ops; | ||
260 | struct pv_mmu_ops pv_mmu_ops; | ||
226 | }; | 261 | }; |
227 | 262 | ||
228 | extern struct paravirt_ops paravirt_ops; | 263 | extern struct pv_info pv_info; |
264 | extern struct pv_init_ops pv_init_ops; | ||
265 | extern struct pv_misc_ops pv_misc_ops; | ||
266 | extern struct pv_time_ops pv_time_ops; | ||
267 | extern struct pv_cpu_ops pv_cpu_ops; | ||
268 | extern struct pv_irq_ops pv_irq_ops; | ||
269 | extern struct pv_apic_ops pv_apic_ops; | ||
270 | extern struct pv_mmu_ops pv_mmu_ops; | ||
229 | 271 | ||
230 | #define PARAVIRT_PATCH(x) \ | 272 | #define PARAVIRT_PATCH(x) \ |
231 | (offsetof(struct paravirt_ops, x) / sizeof(void *)) | 273 | (offsetof(struct paravirt_patch_template, x) / sizeof(void *)) |
232 | 274 | ||
233 | #define paravirt_type(type) \ | 275 | #define paravirt_type(op) \ |
234 | [paravirt_typenum] "i" (PARAVIRT_PATCH(type)) | 276 | [paravirt_typenum] "i" (PARAVIRT_PATCH(op)), \ |
277 | [paravirt_opptr] "m" (op) | ||
235 | #define paravirt_clobber(clobber) \ | 278 | #define paravirt_clobber(clobber) \ |
236 | [paravirt_clobber] "i" (clobber) | 279 | [paravirt_clobber] "i" (clobber) |
237 | 280 | ||
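Since the type number is just offset / sizeof(void *), it can be turned back into the matching function pointer by viewing the template as a flat array of slots. A plain-C sketch of that round trip, assuming function pointers are pointer-sized as on x86 (all names invented):

#include <stdio.h>
#include <stddef.h>

struct demo_tmpl {
	void (*first)(void);
	void (*second)(void);
};

static void second_op(void) { puts("reached second_op"); }

int main(void)
{
	/* View the template as a flat array of pointer slots, the way
	 * the generic patcher recovers a target from its type number. */
	union {
		struct demo_tmpl tmpl;
		void (*slot[2])(void);
	} u = { .tmpl = { .first = NULL, .second = second_op } };

	size_t type = offsetof(struct demo_tmpl, second) / sizeof(void *);

	u.slot[type]();		/* calls second_op */
	return 0;
}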
@@ -258,7 +301,7 @@ unsigned paravirt_patch_call(void *insnbuf, | |||
258 | const void *target, u16 tgt_clobbers, | 301 | const void *target, u16 tgt_clobbers, |
259 | unsigned long addr, u16 site_clobbers, | 302 | unsigned long addr, u16 site_clobbers, |
260 | unsigned len); | 303 | unsigned len); |
261 | unsigned paravirt_patch_jmp(const void *target, void *insnbuf, | 304 | unsigned paravirt_patch_jmp(void *insnbuf, const void *target, |
262 | unsigned long addr, unsigned len); | 305 | unsigned long addr, unsigned len); |
263 | unsigned paravirt_patch_default(u8 type, u16 clobbers, void *insnbuf, | 306 | unsigned paravirt_patch_default(u8 type, u16 clobbers, void *insnbuf, |
264 | unsigned long addr, unsigned len); | 307 | unsigned long addr, unsigned len); |
@@ -271,14 +314,14 @@ int paravirt_disable_iospace(void); | |||
271 | /* | 314 | /* |
272 | * This generates an indirect call based on the operation type number. | 315 | * This generates an indirect call based on the operation type number. |
273 | * The type number, computed in PARAVIRT_PATCH, is derived from the | 316 | * The type number, computed in PARAVIRT_PATCH, is derived from the |
274 | * offset into the paravirt_ops structure, and can therefore be freely | 317 | * offset into the paravirt_patch_template structure, and can therefore be |
275 | * converted back into a structure offset. | 318 | * freely converted back into a structure offset. |
276 | */ | 319 | */ |
277 | #define PARAVIRT_CALL "call *(paravirt_ops+%c[paravirt_typenum]*4);" | 320 | #define PARAVIRT_CALL "call *%[paravirt_opptr];" |
278 | 321 | ||
279 | /* | 322 | /* |
280 | * These macros are intended to wrap calls into a paravirt_ops | 323 | * These macros are intended to wrap calls through one of the paravirt |
281 | * operation, so that they can be later identified and patched at | 324 | * ops structs, so that they can be later identified and patched at |
282 | * runtime. | 325 | * runtime. |
283 | * | 326 | * |
284 | * Normally, a call to a pv_op function is a simple indirect call: | 327 | * Normally, a call to a pv_op function is a simple indirect call: |
@@ -301,7 +344,7 @@ int paravirt_disable_iospace(void); | |||
301 | * The call instruction itself is marked by placing its start address | 344 | * The call instruction itself is marked by placing its start address |
302 | * and size into the .parainstructions section, so that | 345 | * and size into the .parainstructions section, so that |
303 | * apply_paravirt() in arch/i386/kernel/alternative.c can do the | 346 | * apply_paravirt() in arch/i386/kernel/alternative.c can do the |
304 | * appropriate patching under the control of the backend paravirt_ops | 347 | * appropriate patching under the control of the backend pv_init_ops |
305 | * implementation. | 348 | * implementation. |
306 | * | 349 | * |
307 | * Unfortunately there's no way to get gcc to generate the args setup | 350 | * Unfortunately there's no way to get gcc to generate the args setup |
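For reference, each .parainstructions entry records roughly the following; field names loosely follow the kernel's struct paravirt_patch_site, but the demo_ spelling below is invented:

struct demo_patch_site {
	unsigned char *instr;	 /* address of the wrapped call/jmp */
	unsigned char instrtype; /* PARAVIRT_PATCH() slot number */
	unsigned char len;	 /* bytes available at the site */
	unsigned short clobbers; /* registers the site may clobber */
};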
@@ -409,36 +452,36 @@ int paravirt_disable_iospace(void); | |||
409 | 452 | ||
410 | static inline int paravirt_enabled(void) | 453 | static inline int paravirt_enabled(void) |
411 | { | 454 | { |
412 | return paravirt_ops.paravirt_enabled; | 455 | return pv_info.paravirt_enabled; |
413 | } | 456 | } |
414 | 457 | ||
415 | static inline void load_esp0(struct tss_struct *tss, | 458 | static inline void load_esp0(struct tss_struct *tss, |
416 | struct thread_struct *thread) | 459 | struct thread_struct *thread) |
417 | { | 460 | { |
418 | PVOP_VCALL2(load_esp0, tss, thread); | 461 | PVOP_VCALL2(pv_cpu_ops.load_esp0, tss, thread); |
419 | } | 462 | } |
420 | 463 | ||
421 | #define ARCH_SETUP paravirt_ops.arch_setup(); | 464 | #define ARCH_SETUP pv_init_ops.arch_setup(); |
422 | static inline unsigned long get_wallclock(void) | 465 | static inline unsigned long get_wallclock(void) |
423 | { | 466 | { |
424 | return PVOP_CALL0(unsigned long, get_wallclock); | 467 | return PVOP_CALL0(unsigned long, pv_time_ops.get_wallclock); |
425 | } | 468 | } |
426 | 469 | ||
427 | static inline int set_wallclock(unsigned long nowtime) | 470 | static inline int set_wallclock(unsigned long nowtime) |
428 | { | 471 | { |
429 | return PVOP_CALL1(int, set_wallclock, nowtime); | 472 | return PVOP_CALL1(int, pv_time_ops.set_wallclock, nowtime); |
430 | } | 473 | } |
431 | 474 | ||
432 | static inline void (*choose_time_init(void))(void) | 475 | static inline void (*choose_time_init(void))(void) |
433 | { | 476 | { |
434 | return paravirt_ops.time_init; | 477 | return pv_time_ops.time_init; |
435 | } | 478 | } |
436 | 479 | ||
437 | /* The paravirtualized CPUID instruction. */ | 480 | /* The paravirtualized CPUID instruction. */ |
438 | static inline void __cpuid(unsigned int *eax, unsigned int *ebx, | 481 | static inline void __cpuid(unsigned int *eax, unsigned int *ebx, |
439 | unsigned int *ecx, unsigned int *edx) | 482 | unsigned int *ecx, unsigned int *edx) |
440 | { | 483 | { |
441 | PVOP_VCALL4(cpuid, eax, ebx, ecx, edx); | 484 | PVOP_VCALL4(pv_cpu_ops.cpuid, eax, ebx, ecx, edx); |
442 | } | 485 | } |
443 | 486 | ||
444 | /* | 487 | /* |
@@ -446,87 +489,87 @@ static inline void __cpuid(unsigned int *eax, unsigned int *ebx, | |||
446 | */ | 489 | */ |
447 | static inline unsigned long paravirt_get_debugreg(int reg) | 490 | static inline unsigned long paravirt_get_debugreg(int reg) |
448 | { | 491 | { |
449 | return PVOP_CALL1(unsigned long, get_debugreg, reg); | 492 | return PVOP_CALL1(unsigned long, pv_cpu_ops.get_debugreg, reg); |
450 | } | 493 | } |
451 | #define get_debugreg(var, reg) var = paravirt_get_debugreg(reg) | 494 | #define get_debugreg(var, reg) var = paravirt_get_debugreg(reg) |
452 | static inline void set_debugreg(unsigned long val, int reg) | 495 | static inline void set_debugreg(unsigned long val, int reg) |
453 | { | 496 | { |
454 | PVOP_VCALL2(set_debugreg, reg, val); | 497 | PVOP_VCALL2(pv_cpu_ops.set_debugreg, reg, val); |
455 | } | 498 | } |
456 | 499 | ||
457 | static inline void clts(void) | 500 | static inline void clts(void) |
458 | { | 501 | { |
459 | PVOP_VCALL0(clts); | 502 | PVOP_VCALL0(pv_cpu_ops.clts); |
460 | } | 503 | } |
461 | 504 | ||
462 | static inline unsigned long read_cr0(void) | 505 | static inline unsigned long read_cr0(void) |
463 | { | 506 | { |
464 | return PVOP_CALL0(unsigned long, read_cr0); | 507 | return PVOP_CALL0(unsigned long, pv_cpu_ops.read_cr0); |
465 | } | 508 | } |
466 | 509 | ||
467 | static inline void write_cr0(unsigned long x) | 510 | static inline void write_cr0(unsigned long x) |
468 | { | 511 | { |
469 | PVOP_VCALL1(write_cr0, x); | 512 | PVOP_VCALL1(pv_cpu_ops.write_cr0, x); |
470 | } | 513 | } |
471 | 514 | ||
472 | static inline unsigned long read_cr2(void) | 515 | static inline unsigned long read_cr2(void) |
473 | { | 516 | { |
474 | return PVOP_CALL0(unsigned long, read_cr2); | 517 | return PVOP_CALL0(unsigned long, pv_mmu_ops.read_cr2); |
475 | } | 518 | } |
476 | 519 | ||
477 | static inline void write_cr2(unsigned long x) | 520 | static inline void write_cr2(unsigned long x) |
478 | { | 521 | { |
479 | PVOP_VCALL1(write_cr2, x); | 522 | PVOP_VCALL1(pv_mmu_ops.write_cr2, x); |
480 | } | 523 | } |
481 | 524 | ||
482 | static inline unsigned long read_cr3(void) | 525 | static inline unsigned long read_cr3(void) |
483 | { | 526 | { |
484 | return PVOP_CALL0(unsigned long, read_cr3); | 527 | return PVOP_CALL0(unsigned long, pv_mmu_ops.read_cr3); |
485 | } | 528 | } |
486 | 529 | ||
487 | static inline void write_cr3(unsigned long x) | 530 | static inline void write_cr3(unsigned long x) |
488 | { | 531 | { |
489 | PVOP_VCALL1(write_cr3, x); | 532 | PVOP_VCALL1(pv_mmu_ops.write_cr3, x); |
490 | } | 533 | } |
491 | 534 | ||
492 | static inline unsigned long read_cr4(void) | 535 | static inline unsigned long read_cr4(void) |
493 | { | 536 | { |
494 | return PVOP_CALL0(unsigned long, read_cr4); | 537 | return PVOP_CALL0(unsigned long, pv_cpu_ops.read_cr4); |
495 | } | 538 | } |
496 | static inline unsigned long read_cr4_safe(void) | 539 | static inline unsigned long read_cr4_safe(void) |
497 | { | 540 | { |
498 | return PVOP_CALL0(unsigned long, read_cr4_safe); | 541 | return PVOP_CALL0(unsigned long, pv_cpu_ops.read_cr4_safe); |
499 | } | 542 | } |
500 | 543 | ||
501 | static inline void write_cr4(unsigned long x) | 544 | static inline void write_cr4(unsigned long x) |
502 | { | 545 | { |
503 | PVOP_VCALL1(write_cr4, x); | 546 | PVOP_VCALL1(pv_cpu_ops.write_cr4, x); |
504 | } | 547 | } |
505 | 548 | ||
506 | static inline void raw_safe_halt(void) | 549 | static inline void raw_safe_halt(void) |
507 | { | 550 | { |
508 | PVOP_VCALL0(safe_halt); | 551 | PVOP_VCALL0(pv_irq_ops.safe_halt); |
509 | } | 552 | } |
510 | 553 | ||
511 | static inline void halt(void) | 554 | static inline void halt(void) |
512 | { | 555 | { |
513 | PVOP_VCALL0(safe_halt); | 556 | PVOP_VCALL0(pv_irq_ops.safe_halt); |
514 | } | 557 | } |
515 | 558 | ||
516 | static inline void wbinvd(void) | 559 | static inline void wbinvd(void) |
517 | { | 560 | { |
518 | PVOP_VCALL0(wbinvd); | 561 | PVOP_VCALL0(pv_cpu_ops.wbinvd); |
519 | } | 562 | } |
520 | 563 | ||
521 | #define get_kernel_rpl() (paravirt_ops.kernel_rpl) | 564 | #define get_kernel_rpl() (pv_info.kernel_rpl) |
522 | 565 | ||
523 | static inline u64 paravirt_read_msr(unsigned msr, int *err) | 566 | static inline u64 paravirt_read_msr(unsigned msr, int *err) |
524 | { | 567 | { |
525 | return PVOP_CALL2(u64, read_msr, msr, err); | 568 | return PVOP_CALL2(u64, pv_cpu_ops.read_msr, msr, err); |
526 | } | 569 | } |
527 | static inline int paravirt_write_msr(unsigned msr, unsigned low, unsigned high) | 570 | static inline int paravirt_write_msr(unsigned msr, unsigned low, unsigned high) |
528 | { | 571 | { |
529 | return PVOP_CALL3(int, write_msr, msr, low, high); | 572 | return PVOP_CALL3(int, pv_cpu_ops.write_msr, msr, low, high); |
530 | } | 573 | } |
531 | 574 | ||
532 | /* These should all do BUG_ON(_err), but our headers are too tangled. */ | 575 | /* These should all do BUG_ON(_err), but our headers are too tangled. */ |
@@ -560,7 +603,7 @@ static inline int paravirt_write_msr(unsigned msr, unsigned low, unsigned high) | |||
560 | 603 | ||
561 | static inline u64 paravirt_read_tsc(void) | 604 | static inline u64 paravirt_read_tsc(void) |
562 | { | 605 | { |
563 | return PVOP_CALL0(u64, read_tsc); | 606 | return PVOP_CALL0(u64, pv_cpu_ops.read_tsc); |
564 | } | 607 | } |
565 | 608 | ||
566 | #define rdtscl(low) do { \ | 609 | #define rdtscl(low) do { \ |
@@ -572,15 +615,15 @@ static inline u64 paravirt_read_tsc(void) | |||
572 | 615 | ||
573 | static inline unsigned long long paravirt_sched_clock(void) | 616 | static inline unsigned long long paravirt_sched_clock(void) |
574 | { | 617 | { |
575 | return PVOP_CALL0(unsigned long long, sched_clock); | 618 | return PVOP_CALL0(unsigned long long, pv_time_ops.sched_clock); |
576 | } | 619 | } |
577 | #define calculate_cpu_khz() (paravirt_ops.get_cpu_khz()) | 620 | #define calculate_cpu_khz() (pv_time_ops.get_cpu_khz()) |
578 | 621 | ||
579 | #define write_tsc(val1,val2) wrmsr(0x10, val1, val2) | 622 | #define write_tsc(val1,val2) wrmsr(0x10, val1, val2) |
580 | 623 | ||
581 | static inline unsigned long long paravirt_read_pmc(int counter) | 624 | static inline unsigned long long paravirt_read_pmc(int counter) |
582 | { | 625 | { |
583 | return PVOP_CALL1(u64, read_pmc, counter); | 626 | return PVOP_CALL1(u64, pv_cpu_ops.read_pmc, counter); |
584 | } | 627 | } |
585 | 628 | ||
586 | #define rdpmc(counter,low,high) do { \ | 629 | #define rdpmc(counter,low,high) do { \ |
@@ -591,61 +634,61 @@ static inline unsigned long long paravirt_read_pmc(int counter) | |||
591 | 634 | ||
592 | static inline void load_TR_desc(void) | 635 | static inline void load_TR_desc(void) |
593 | { | 636 | { |
594 | PVOP_VCALL0(load_tr_desc); | 637 | PVOP_VCALL0(pv_cpu_ops.load_tr_desc); |
595 | } | 638 | } |
596 | static inline void load_gdt(const struct Xgt_desc_struct *dtr) | 639 | static inline void load_gdt(const struct Xgt_desc_struct *dtr) |
597 | { | 640 | { |
598 | PVOP_VCALL1(load_gdt, dtr); | 641 | PVOP_VCALL1(pv_cpu_ops.load_gdt, dtr); |
599 | } | 642 | } |
600 | static inline void load_idt(const struct Xgt_desc_struct *dtr) | 643 | static inline void load_idt(const struct Xgt_desc_struct *dtr) |
601 | { | 644 | { |
602 | PVOP_VCALL1(load_idt, dtr); | 645 | PVOP_VCALL1(pv_cpu_ops.load_idt, dtr); |
603 | } | 646 | } |
604 | static inline void set_ldt(const void *addr, unsigned entries) | 647 | static inline void set_ldt(const void *addr, unsigned entries) |
605 | { | 648 | { |
606 | PVOP_VCALL2(set_ldt, addr, entries); | 649 | PVOP_VCALL2(pv_cpu_ops.set_ldt, addr, entries); |
607 | } | 650 | } |
608 | static inline void store_gdt(struct Xgt_desc_struct *dtr) | 651 | static inline void store_gdt(struct Xgt_desc_struct *dtr) |
609 | { | 652 | { |
610 | PVOP_VCALL1(store_gdt, dtr); | 653 | PVOP_VCALL1(pv_cpu_ops.store_gdt, dtr); |
611 | } | 654 | } |
612 | static inline void store_idt(struct Xgt_desc_struct *dtr) | 655 | static inline void store_idt(struct Xgt_desc_struct *dtr) |
613 | { | 656 | { |
614 | PVOP_VCALL1(store_idt, dtr); | 657 | PVOP_VCALL1(pv_cpu_ops.store_idt, dtr); |
615 | } | 658 | } |
616 | static inline unsigned long paravirt_store_tr(void) | 659 | static inline unsigned long paravirt_store_tr(void) |
617 | { | 660 | { |
618 | return PVOP_CALL0(unsigned long, store_tr); | 661 | return PVOP_CALL0(unsigned long, pv_cpu_ops.store_tr); |
619 | } | 662 | } |
620 | #define store_tr(tr) ((tr) = paravirt_store_tr()) | 663 | #define store_tr(tr) ((tr) = paravirt_store_tr()) |
621 | static inline void load_TLS(struct thread_struct *t, unsigned cpu) | 664 | static inline void load_TLS(struct thread_struct *t, unsigned cpu) |
622 | { | 665 | { |
623 | PVOP_VCALL2(load_tls, t, cpu); | 666 | PVOP_VCALL2(pv_cpu_ops.load_tls, t, cpu); |
624 | } | 667 | } |
625 | static inline void write_ldt_entry(void *dt, int entry, u32 low, u32 high) | 668 | static inline void write_ldt_entry(void *dt, int entry, u32 low, u32 high) |
626 | { | 669 | { |
627 | PVOP_VCALL4(write_ldt_entry, dt, entry, low, high); | 670 | PVOP_VCALL4(pv_cpu_ops.write_ldt_entry, dt, entry, low, high); |
628 | } | 671 | } |
629 | static inline void write_gdt_entry(void *dt, int entry, u32 low, u32 high) | 672 | static inline void write_gdt_entry(void *dt, int entry, u32 low, u32 high) |
630 | { | 673 | { |
631 | PVOP_VCALL4(write_gdt_entry, dt, entry, low, high); | 674 | PVOP_VCALL4(pv_cpu_ops.write_gdt_entry, dt, entry, low, high); |
632 | } | 675 | } |
633 | static inline void write_idt_entry(void *dt, int entry, u32 low, u32 high) | 676 | static inline void write_idt_entry(void *dt, int entry, u32 low, u32 high) |
634 | { | 677 | { |
635 | PVOP_VCALL4(write_idt_entry, dt, entry, low, high); | 678 | PVOP_VCALL4(pv_cpu_ops.write_idt_entry, dt, entry, low, high); |
636 | } | 679 | } |
637 | static inline void set_iopl_mask(unsigned mask) | 680 | static inline void set_iopl_mask(unsigned mask) |
638 | { | 681 | { |
639 | PVOP_VCALL1(set_iopl_mask, mask); | 682 | PVOP_VCALL1(pv_cpu_ops.set_iopl_mask, mask); |
640 | } | 683 | } |
641 | 684 | ||
642 | /* The paravirtualized I/O functions */ | 685 | /* The paravirtualized I/O functions */ |
643 | static inline void slow_down_io(void) { | 686 | static inline void slow_down_io(void) { |
644 | paravirt_ops.io_delay(); | 687 | pv_cpu_ops.io_delay(); |
645 | #ifdef REALLY_SLOW_IO | 688 | #ifdef REALLY_SLOW_IO |
646 | paravirt_ops.io_delay(); | 689 | pv_cpu_ops.io_delay(); |
647 | paravirt_ops.io_delay(); | 690 | pv_cpu_ops.io_delay(); |
648 | paravirt_ops.io_delay(); | 691 | pv_cpu_ops.io_delay(); |
649 | #endif | 692 | #endif |
650 | } | 693 | } |
651 | 694 | ||
@@ -655,121 +698,120 @@ static inline void slow_down_io(void) { | |||
655 | */ | 698 | */ |
656 | static inline void apic_write(unsigned long reg, unsigned long v) | 699 | static inline void apic_write(unsigned long reg, unsigned long v) |
657 | { | 700 | { |
658 | PVOP_VCALL2(apic_write, reg, v); | 701 | PVOP_VCALL2(pv_apic_ops.apic_write, reg, v); |
659 | } | 702 | } |
660 | 703 | ||
661 | static inline void apic_write_atomic(unsigned long reg, unsigned long v) | 704 | static inline void apic_write_atomic(unsigned long reg, unsigned long v) |
662 | { | 705 | { |
663 | PVOP_VCALL2(apic_write_atomic, reg, v); | 706 | PVOP_VCALL2(pv_apic_ops.apic_write_atomic, reg, v); |
664 | } | 707 | } |
665 | 708 | ||
666 | static inline unsigned long apic_read(unsigned long reg) | 709 | static inline unsigned long apic_read(unsigned long reg) |
667 | { | 710 | { |
668 | return PVOP_CALL1(unsigned long, apic_read, reg); | 711 | return PVOP_CALL1(unsigned long, pv_apic_ops.apic_read, reg); |
669 | } | 712 | } |
670 | 713 | ||
671 | static inline void setup_boot_clock(void) | 714 | static inline void setup_boot_clock(void) |
672 | { | 715 | { |
673 | PVOP_VCALL0(setup_boot_clock); | 716 | PVOP_VCALL0(pv_apic_ops.setup_boot_clock); |
674 | } | 717 | } |
675 | 718 | ||
676 | static inline void setup_secondary_clock(void) | 719 | static inline void setup_secondary_clock(void) |
677 | { | 720 | { |
678 | PVOP_VCALL0(setup_secondary_clock); | 721 | PVOP_VCALL0(pv_apic_ops.setup_secondary_clock); |
679 | } | 722 | } |
680 | #endif | 723 | #endif |
681 | 724 | ||
682 | static inline void paravirt_post_allocator_init(void) | 725 | static inline void paravirt_post_allocator_init(void) |
683 | { | 726 | { |
684 | if (paravirt_ops.post_allocator_init) | 727 | if (pv_init_ops.post_allocator_init) |
685 | (*paravirt_ops.post_allocator_init)(); | 728 | (*pv_init_ops.post_allocator_init)(); |
686 | } | 729 | } |
687 | 730 | ||
688 | static inline void paravirt_pagetable_setup_start(pgd_t *base) | 731 | static inline void paravirt_pagetable_setup_start(pgd_t *base) |
689 | { | 732 | { |
690 | if (paravirt_ops.pagetable_setup_start) | 733 | (*pv_mmu_ops.pagetable_setup_start)(base); |
691 | (*paravirt_ops.pagetable_setup_start)(base); | ||
692 | } | 734 | } |
693 | 735 | ||
694 | static inline void paravirt_pagetable_setup_done(pgd_t *base) | 736 | static inline void paravirt_pagetable_setup_done(pgd_t *base) |
695 | { | 737 | { |
696 | if (paravirt_ops.pagetable_setup_done) | 738 | (*pv_mmu_ops.pagetable_setup_done)(base); |
697 | (*paravirt_ops.pagetable_setup_done)(base); | ||
698 | } | 739 | } |
699 | 740 | ||
700 | #ifdef CONFIG_SMP | 741 | #ifdef CONFIG_SMP |
701 | static inline void startup_ipi_hook(int phys_apicid, unsigned long start_eip, | 742 | static inline void startup_ipi_hook(int phys_apicid, unsigned long start_eip, |
702 | unsigned long start_esp) | 743 | unsigned long start_esp) |
703 | { | 744 | { |
704 | PVOP_VCALL3(startup_ipi_hook, phys_apicid, start_eip, start_esp); | 745 | PVOP_VCALL3(pv_apic_ops.startup_ipi_hook, |
746 | phys_apicid, start_eip, start_esp); | ||
705 | } | 747 | } |
706 | #endif | 748 | #endif |
707 | 749 | ||
708 | static inline void paravirt_activate_mm(struct mm_struct *prev, | 750 | static inline void paravirt_activate_mm(struct mm_struct *prev, |
709 | struct mm_struct *next) | 751 | struct mm_struct *next) |
710 | { | 752 | { |
711 | PVOP_VCALL2(activate_mm, prev, next); | 753 | PVOP_VCALL2(pv_mmu_ops.activate_mm, prev, next); |
712 | } | 754 | } |
713 | 755 | ||
714 | static inline void arch_dup_mmap(struct mm_struct *oldmm, | 756 | static inline void arch_dup_mmap(struct mm_struct *oldmm, |
715 | struct mm_struct *mm) | 757 | struct mm_struct *mm) |
716 | { | 758 | { |
717 | PVOP_VCALL2(dup_mmap, oldmm, mm); | 759 | PVOP_VCALL2(pv_mmu_ops.dup_mmap, oldmm, mm); |
718 | } | 760 | } |
719 | 761 | ||
720 | static inline void arch_exit_mmap(struct mm_struct *mm) | 762 | static inline void arch_exit_mmap(struct mm_struct *mm) |
721 | { | 763 | { |
722 | PVOP_VCALL1(exit_mmap, mm); | 764 | PVOP_VCALL1(pv_mmu_ops.exit_mmap, mm); |
723 | } | 765 | } |
724 | 766 | ||
725 | static inline void __flush_tlb(void) | 767 | static inline void __flush_tlb(void) |
726 | { | 768 | { |
727 | PVOP_VCALL0(flush_tlb_user); | 769 | PVOP_VCALL0(pv_mmu_ops.flush_tlb_user); |
728 | } | 770 | } |
729 | static inline void __flush_tlb_global(void) | 771 | static inline void __flush_tlb_global(void) |
730 | { | 772 | { |
731 | PVOP_VCALL0(flush_tlb_kernel); | 773 | PVOP_VCALL0(pv_mmu_ops.flush_tlb_kernel); |
732 | } | 774 | } |
733 | static inline void __flush_tlb_single(unsigned long addr) | 775 | static inline void __flush_tlb_single(unsigned long addr) |
734 | { | 776 | { |
735 | PVOP_VCALL1(flush_tlb_single, addr); | 777 | PVOP_VCALL1(pv_mmu_ops.flush_tlb_single, addr); |
736 | } | 778 | } |
737 | 779 | ||
738 | static inline void flush_tlb_others(cpumask_t cpumask, struct mm_struct *mm, | 780 | static inline void flush_tlb_others(cpumask_t cpumask, struct mm_struct *mm, |
739 | unsigned long va) | 781 | unsigned long va) |
740 | { | 782 | { |
741 | PVOP_VCALL3(flush_tlb_others, &cpumask, mm, va); | 783 | PVOP_VCALL3(pv_mmu_ops.flush_tlb_others, &cpumask, mm, va); |
742 | } | 784 | } |
743 | 785 | ||
744 | static inline void paravirt_alloc_pt(struct mm_struct *mm, unsigned pfn) | 786 | static inline void paravirt_alloc_pt(struct mm_struct *mm, unsigned pfn) |
745 | { | 787 | { |
746 | PVOP_VCALL2(alloc_pt, mm, pfn); | 788 | PVOP_VCALL2(pv_mmu_ops.alloc_pt, mm, pfn); |
747 | } | 789 | } |
748 | static inline void paravirt_release_pt(unsigned pfn) | 790 | static inline void paravirt_release_pt(unsigned pfn) |
749 | { | 791 | { |
750 | PVOP_VCALL1(release_pt, pfn); | 792 | PVOP_VCALL1(pv_mmu_ops.release_pt, pfn); |
751 | } | 793 | } |
752 | 794 | ||
753 | static inline void paravirt_alloc_pd(unsigned pfn) | 795 | static inline void paravirt_alloc_pd(unsigned pfn) |
754 | { | 796 | { |
755 | PVOP_VCALL1(alloc_pd, pfn); | 797 | PVOP_VCALL1(pv_mmu_ops.alloc_pd, pfn); |
756 | } | 798 | } |
757 | 799 | ||
758 | static inline void paravirt_alloc_pd_clone(unsigned pfn, unsigned clonepfn, | 800 | static inline void paravirt_alloc_pd_clone(unsigned pfn, unsigned clonepfn, |
759 | unsigned start, unsigned count) | 801 | unsigned start, unsigned count) |
760 | { | 802 | { |
761 | PVOP_VCALL4(alloc_pd_clone, pfn, clonepfn, start, count); | 803 | PVOP_VCALL4(pv_mmu_ops.alloc_pd_clone, pfn, clonepfn, start, count); |
762 | } | 804 | } |
763 | static inline void paravirt_release_pd(unsigned pfn) | 805 | static inline void paravirt_release_pd(unsigned pfn) |
764 | { | 806 | { |
765 | PVOP_VCALL1(release_pd, pfn); | 807 | PVOP_VCALL1(pv_mmu_ops.release_pd, pfn); |
766 | } | 808 | } |
767 | 809 | ||
768 | #ifdef CONFIG_HIGHPTE | 810 | #ifdef CONFIG_HIGHPTE |
769 | static inline void *kmap_atomic_pte(struct page *page, enum km_type type) | 811 | static inline void *kmap_atomic_pte(struct page *page, enum km_type type) |
770 | { | 812 | { |
771 | unsigned long ret; | 813 | unsigned long ret; |
772 | ret = PVOP_CALL2(unsigned long, kmap_atomic_pte, page, type); | 814 | ret = PVOP_CALL2(unsigned long, pv_mmu_ops.kmap_atomic_pte, page, type); |
773 | return (void *)ret; | 815 | return (void *)ret; |
774 | } | 816 | } |
775 | #endif | 817 | #endif |
@@ -777,162 +819,171 @@ static inline void *kmap_atomic_pte(struct page *page, enum km_type type) | |||
777 | static inline void pte_update(struct mm_struct *mm, unsigned long addr, | 819 | static inline void pte_update(struct mm_struct *mm, unsigned long addr, |
778 | pte_t *ptep) | 820 | pte_t *ptep) |
779 | { | 821 | { |
780 | PVOP_VCALL3(pte_update, mm, addr, ptep); | 822 | PVOP_VCALL3(pv_mmu_ops.pte_update, mm, addr, ptep); |
781 | } | 823 | } |
782 | 824 | ||
783 | static inline void pte_update_defer(struct mm_struct *mm, unsigned long addr, | 825 | static inline void pte_update_defer(struct mm_struct *mm, unsigned long addr, |
784 | pte_t *ptep) | 826 | pte_t *ptep) |
785 | { | 827 | { |
786 | PVOP_VCALL3(pte_update_defer, mm, addr, ptep); | 828 | PVOP_VCALL3(pv_mmu_ops.pte_update_defer, mm, addr, ptep); |
787 | } | 829 | } |
788 | 830 | ||
789 | #ifdef CONFIG_X86_PAE | 831 | #ifdef CONFIG_X86_PAE |
790 | static inline pte_t __pte(unsigned long long val) | 832 | static inline pte_t __pte(unsigned long long val) |
791 | { | 833 | { |
792 | unsigned long long ret = PVOP_CALL2(unsigned long long, make_pte, | 834 | unsigned long long ret = PVOP_CALL2(unsigned long long, |
835 | pv_mmu_ops.make_pte, | ||
793 | val, val >> 32); | 836 | val, val >> 32); |
794 | return (pte_t) { ret, ret >> 32 }; | 837 | return (pte_t) { ret, ret >> 32 }; |
795 | } | 838 | } |
796 | 839 | ||
797 | static inline pmd_t __pmd(unsigned long long val) | 840 | static inline pmd_t __pmd(unsigned long long val) |
798 | { | 841 | { |
799 | return (pmd_t) { PVOP_CALL2(unsigned long long, make_pmd, val, val >> 32) }; | 842 | return (pmd_t) { PVOP_CALL2(unsigned long long, pv_mmu_ops.make_pmd, |
843 | val, val >> 32) }; | ||
800 | } | 844 | } |
801 | 845 | ||
802 | static inline pgd_t __pgd(unsigned long long val) | 846 | static inline pgd_t __pgd(unsigned long long val) |
803 | { | 847 | { |
804 | return (pgd_t) { PVOP_CALL2(unsigned long long, make_pgd, val, val >> 32) }; | 848 | return (pgd_t) { PVOP_CALL2(unsigned long long, pv_mmu_ops.make_pgd, |
849 | val, val >> 32) }; | ||
805 | } | 850 | } |
806 | 851 | ||
807 | static inline unsigned long long pte_val(pte_t x) | 852 | static inline unsigned long long pte_val(pte_t x) |
808 | { | 853 | { |
809 | return PVOP_CALL2(unsigned long long, pte_val, x.pte_low, x.pte_high); | 854 | return PVOP_CALL2(unsigned long long, pv_mmu_ops.pte_val, |
855 | x.pte_low, x.pte_high); | ||
810 | } | 856 | } |
811 | 857 | ||
812 | static inline unsigned long long pmd_val(pmd_t x) | 858 | static inline unsigned long long pmd_val(pmd_t x) |
813 | { | 859 | { |
814 | return PVOP_CALL2(unsigned long long, pmd_val, x.pmd, x.pmd >> 32); | 860 | return PVOP_CALL2(unsigned long long, pv_mmu_ops.pmd_val, |
861 | x.pmd, x.pmd >> 32); | ||
815 | } | 862 | } |
816 | 863 | ||
817 | static inline unsigned long long pgd_val(pgd_t x) | 864 | static inline unsigned long long pgd_val(pgd_t x) |
818 | { | 865 | { |
819 | return PVOP_CALL2(unsigned long long, pgd_val, x.pgd, x.pgd >> 32); | 866 | return PVOP_CALL2(unsigned long long, pv_mmu_ops.pgd_val, |
867 | x.pgd, x.pgd >> 32); | ||
820 | } | 868 | } |
821 | 869 | ||
822 | static inline void set_pte(pte_t *ptep, pte_t pteval) | 870 | static inline void set_pte(pte_t *ptep, pte_t pteval) |
823 | { | 871 | { |
824 | PVOP_VCALL3(set_pte, ptep, pteval.pte_low, pteval.pte_high); | 872 | PVOP_VCALL3(pv_mmu_ops.set_pte, ptep, pteval.pte_low, pteval.pte_high); |
825 | } | 873 | } |
826 | 874 | ||
827 | static inline void set_pte_at(struct mm_struct *mm, unsigned long addr, | 875 | static inline void set_pte_at(struct mm_struct *mm, unsigned long addr, |
828 | pte_t *ptep, pte_t pteval) | 876 | pte_t *ptep, pte_t pteval) |
829 | { | 877 | { |
830 | /* 5 arg words */ | 878 | /* 5 arg words */ |
831 | paravirt_ops.set_pte_at(mm, addr, ptep, pteval); | 879 | pv_mmu_ops.set_pte_at(mm, addr, ptep, pteval); |
832 | } | 880 | } |
833 | 881 | ||
834 | static inline void set_pte_atomic(pte_t *ptep, pte_t pteval) | 882 | static inline void set_pte_atomic(pte_t *ptep, pte_t pteval) |
835 | { | 883 | { |
836 | PVOP_VCALL3(set_pte_atomic, ptep, pteval.pte_low, pteval.pte_high); | 884 | PVOP_VCALL3(pv_mmu_ops.set_pte_atomic, ptep, |
885 | pteval.pte_low, pteval.pte_high); | ||
837 | } | 886 | } |
838 | 887 | ||
839 | static inline void set_pte_present(struct mm_struct *mm, unsigned long addr, | 888 | static inline void set_pte_present(struct mm_struct *mm, unsigned long addr, |
840 | pte_t *ptep, pte_t pte) | 889 | pte_t *ptep, pte_t pte) |
841 | { | 890 | { |
842 | /* 5 arg words */ | 891 | /* 5 arg words */ |
843 | paravirt_ops.set_pte_present(mm, addr, ptep, pte); | 892 | pv_mmu_ops.set_pte_present(mm, addr, ptep, pte); |
844 | } | 893 | } |
845 | 894 | ||
846 | static inline void set_pmd(pmd_t *pmdp, pmd_t pmdval) | 895 | static inline void set_pmd(pmd_t *pmdp, pmd_t pmdval) |
847 | { | 896 | { |
848 | PVOP_VCALL3(set_pmd, pmdp, pmdval.pmd, pmdval.pmd >> 32); | 897 | PVOP_VCALL3(pv_mmu_ops.set_pmd, pmdp, |
898 | pmdval.pmd, pmdval.pmd >> 32); | ||
849 | } | 899 | } |
850 | 900 | ||
851 | static inline void set_pud(pud_t *pudp, pud_t pudval) | 901 | static inline void set_pud(pud_t *pudp, pud_t pudval) |
852 | { | 902 | { |
853 | PVOP_VCALL3(set_pud, pudp, pudval.pgd.pgd, pudval.pgd.pgd >> 32); | 903 | PVOP_VCALL3(pv_mmu_ops.set_pud, pudp, |
904 | pudval.pgd.pgd, pudval.pgd.pgd >> 32); | ||
854 | } | 905 | } |
855 | 906 | ||
856 | static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep) | 907 | static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep) |
857 | { | 908 | { |
858 | PVOP_VCALL3(pte_clear, mm, addr, ptep); | 909 | PVOP_VCALL3(pv_mmu_ops.pte_clear, mm, addr, ptep); |
859 | } | 910 | } |
860 | 911 | ||
861 | static inline void pmd_clear(pmd_t *pmdp) | 912 | static inline void pmd_clear(pmd_t *pmdp) |
862 | { | 913 | { |
863 | PVOP_VCALL1(pmd_clear, pmdp); | 914 | PVOP_VCALL1(pv_mmu_ops.pmd_clear, pmdp); |
864 | } | 915 | } |
865 | 916 | ||
866 | #else /* !CONFIG_X86_PAE */ | 917 | #else /* !CONFIG_X86_PAE */ |
867 | 918 | ||
868 | static inline pte_t __pte(unsigned long val) | 919 | static inline pte_t __pte(unsigned long val) |
869 | { | 920 | { |
870 | return (pte_t) { PVOP_CALL1(unsigned long, make_pte, val) }; | 921 | return (pte_t) { PVOP_CALL1(unsigned long, pv_mmu_ops.make_pte, val) }; |
871 | } | 922 | } |
872 | 923 | ||
873 | static inline pgd_t __pgd(unsigned long val) | 924 | static inline pgd_t __pgd(unsigned long val) |
874 | { | 925 | { |
875 | return (pgd_t) { PVOP_CALL1(unsigned long, make_pgd, val) }; | 926 | return (pgd_t) { PVOP_CALL1(unsigned long, pv_mmu_ops.make_pgd, val) }; |
876 | } | 927 | } |
877 | 928 | ||
878 | static inline unsigned long pte_val(pte_t x) | 929 | static inline unsigned long pte_val(pte_t x) |
879 | { | 930 | { |
880 | return PVOP_CALL1(unsigned long, pte_val, x.pte_low); | 931 | return PVOP_CALL1(unsigned long, pv_mmu_ops.pte_val, x.pte_low); |
881 | } | 932 | } |
882 | 933 | ||
883 | static inline unsigned long pgd_val(pgd_t x) | 934 | static inline unsigned long pgd_val(pgd_t x) |
884 | { | 935 | { |
885 | return PVOP_CALL1(unsigned long, pgd_val, x.pgd); | 936 | return PVOP_CALL1(unsigned long, pv_mmu_ops.pgd_val, x.pgd); |
886 | } | 937 | } |
887 | 938 | ||
888 | static inline void set_pte(pte_t *ptep, pte_t pteval) | 939 | static inline void set_pte(pte_t *ptep, pte_t pteval) |
889 | { | 940 | { |
890 | PVOP_VCALL2(set_pte, ptep, pteval.pte_low); | 941 | PVOP_VCALL2(pv_mmu_ops.set_pte, ptep, pteval.pte_low); |
891 | } | 942 | } |
892 | 943 | ||
893 | static inline void set_pte_at(struct mm_struct *mm, unsigned long addr, | 944 | static inline void set_pte_at(struct mm_struct *mm, unsigned long addr, |
894 | pte_t *ptep, pte_t pteval) | 945 | pte_t *ptep, pte_t pteval) |
895 | { | 946 | { |
896 | PVOP_VCALL4(set_pte_at, mm, addr, ptep, pteval.pte_low); | 947 | PVOP_VCALL4(pv_mmu_ops.set_pte_at, mm, addr, ptep, pteval.pte_low); |
897 | } | 948 | } |
898 | 949 | ||
899 | static inline void set_pmd(pmd_t *pmdp, pmd_t pmdval) | 950 | static inline void set_pmd(pmd_t *pmdp, pmd_t pmdval) |
900 | { | 951 | { |
901 | PVOP_VCALL2(set_pmd, pmdp, pmdval.pud.pgd.pgd); | 952 | PVOP_VCALL2(pv_mmu_ops.set_pmd, pmdp, pmdval.pud.pgd.pgd); |
902 | } | 953 | } |
903 | #endif /* CONFIG_X86_PAE */ | 954 | #endif /* CONFIG_X86_PAE */ |
904 | 955 | ||
905 | #define __HAVE_ARCH_ENTER_LAZY_CPU_MODE | 956 | #define __HAVE_ARCH_ENTER_LAZY_CPU_MODE |
906 | static inline void arch_enter_lazy_cpu_mode(void) | 957 | static inline void arch_enter_lazy_cpu_mode(void) |
907 | { | 958 | { |
908 | PVOP_VCALL1(set_lazy_mode, PARAVIRT_LAZY_CPU); | 959 | PVOP_VCALL1(pv_misc_ops.set_lazy_mode, PARAVIRT_LAZY_CPU); |
909 | } | 960 | } |
910 | 961 | ||
911 | static inline void arch_leave_lazy_cpu_mode(void) | 962 | static inline void arch_leave_lazy_cpu_mode(void) |
912 | { | 963 | { |
913 | PVOP_VCALL1(set_lazy_mode, PARAVIRT_LAZY_NONE); | 964 | PVOP_VCALL1(pv_misc_ops.set_lazy_mode, PARAVIRT_LAZY_NONE); |
914 | } | 965 | } |
915 | 966 | ||
916 | static inline void arch_flush_lazy_cpu_mode(void) | 967 | static inline void arch_flush_lazy_cpu_mode(void) |
917 | { | 968 | { |
918 | PVOP_VCALL1(set_lazy_mode, PARAVIRT_LAZY_FLUSH); | 969 | PVOP_VCALL1(pv_misc_ops.set_lazy_mode, PARAVIRT_LAZY_FLUSH); |
919 | } | 970 | } |
920 | 971 | ||
921 | 972 | ||
922 | #define __HAVE_ARCH_ENTER_LAZY_MMU_MODE | 973 | #define __HAVE_ARCH_ENTER_LAZY_MMU_MODE |
923 | static inline void arch_enter_lazy_mmu_mode(void) | 974 | static inline void arch_enter_lazy_mmu_mode(void) |
924 | { | 975 | { |
925 | PVOP_VCALL1(set_lazy_mode, PARAVIRT_LAZY_MMU); | 976 | PVOP_VCALL1(pv_misc_ops.set_lazy_mode, PARAVIRT_LAZY_MMU); |
926 | } | 977 | } |
927 | 978 | ||
928 | static inline void arch_leave_lazy_mmu_mode(void) | 979 | static inline void arch_leave_lazy_mmu_mode(void) |
929 | { | 980 | { |
930 | PVOP_VCALL1(set_lazy_mode, PARAVIRT_LAZY_NONE); | 981 | PVOP_VCALL1(pv_misc_ops.set_lazy_mode, PARAVIRT_LAZY_NONE); |
931 | } | 982 | } |
932 | 983 | ||
933 | static inline void arch_flush_lazy_mmu_mode(void) | 984 | static inline void arch_flush_lazy_mmu_mode(void) |
934 | { | 985 | { |
935 | PVOP_VCALL1(set_lazy_mode, PARAVIRT_LAZY_FLUSH); | 986 | PVOP_VCALL1(pv_misc_ops.set_lazy_mode, PARAVIRT_LAZY_FLUSH); |
936 | } | 987 | } |
937 | 988 | ||
938 | void _paravirt_nop(void); | 989 | void _paravirt_nop(void); |
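A toy model of what set_lazy_mode() makes possible: while a lazy mode is active the backend queues updates, then applies them in one batch when the mode is left or explicitly flushed. The hypercall is simulated with printf and every name is invented:

#include <stdio.h>

enum demo_lazy_mode { DEMO_LAZY_NONE, DEMO_LAZY_MMU, DEMO_LAZY_FLUSH };

static enum demo_lazy_mode demo_mode;
static int demo_pending;

static void demo_flush(void)
{
	if (demo_pending)
		printf("one batched hypercall applies %d updates\n",
		       demo_pending);
	demo_pending = 0;
}

static void demo_set_lazy_mode(enum demo_lazy_mode mode)
{
	if (mode == DEMO_LAZY_NONE || mode == DEMO_LAZY_FLUSH)
		demo_flush();
	if (mode != DEMO_LAZY_FLUSH)	/* FLUSH keeps the current mode */
		demo_mode = mode;
}

static void demo_set_pte(void)
{
	if (demo_mode == DEMO_LAZY_MMU)
		demo_pending++;		/* queue while lazy */
	else
		puts("immediate hypercall for one update");
}

int main(void)
{
	demo_set_lazy_mode(DEMO_LAZY_MMU);	/* arch_enter_lazy_mmu_mode */
	demo_set_pte();
	demo_set_pte();
	demo_set_lazy_mode(DEMO_LAZY_NONE);	/* arch_leave_lazy_mmu_mode */
	return 0;
}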
@@ -957,7 +1008,7 @@ static inline unsigned long __raw_local_save_flags(void) | |||
957 | PARAVIRT_CALL | 1008 | PARAVIRT_CALL |
958 | "popl %%edx; popl %%ecx") | 1009 | "popl %%edx; popl %%ecx") |
959 | : "=a"(f) | 1010 | : "=a"(f) |
960 | : paravirt_type(save_fl), | 1011 | : paravirt_type(pv_irq_ops.save_fl), |
961 | paravirt_clobber(CLBR_EAX) | 1012 | paravirt_clobber(CLBR_EAX) |
962 | : "memory", "cc"); | 1013 | : "memory", "cc"); |
963 | return f; | 1014 | return f; |
@@ -970,7 +1021,7 @@ static inline void raw_local_irq_restore(unsigned long f) | |||
970 | "popl %%edx; popl %%ecx") | 1021 | "popl %%edx; popl %%ecx") |
971 | : "=a"(f) | 1022 | : "=a"(f) |
972 | : "0"(f), | 1023 | : "0"(f), |
973 | paravirt_type(restore_fl), | 1024 | paravirt_type(pv_irq_ops.restore_fl), |
974 | paravirt_clobber(CLBR_EAX) | 1025 | paravirt_clobber(CLBR_EAX) |
975 | : "memory", "cc"); | 1026 | : "memory", "cc"); |
976 | } | 1027 | } |
@@ -981,7 +1032,7 @@ static inline void raw_local_irq_disable(void) | |||
981 | PARAVIRT_CALL | 1032 | PARAVIRT_CALL |
982 | "popl %%edx; popl %%ecx") | 1033 | "popl %%edx; popl %%ecx") |
983 | : | 1034 | : |
984 | : paravirt_type(irq_disable), | 1035 | : paravirt_type(pv_irq_ops.irq_disable), |
985 | paravirt_clobber(CLBR_EAX) | 1036 | paravirt_clobber(CLBR_EAX) |
986 | : "memory", "eax", "cc"); | 1037 | : "memory", "eax", "cc"); |
987 | } | 1038 | } |
@@ -992,7 +1043,7 @@ static inline void raw_local_irq_enable(void) | |||
992 | PARAVIRT_CALL | 1043 | PARAVIRT_CALL |
993 | "popl %%edx; popl %%ecx") | 1044 | "popl %%edx; popl %%ecx") |
994 | : | 1045 | : |
995 | : paravirt_type(irq_enable), | 1046 | : paravirt_type(pv_irq_ops.irq_enable), |
996 | paravirt_clobber(CLBR_EAX) | 1047 | paravirt_clobber(CLBR_EAX) |
997 | : "memory", "eax", "cc"); | 1048 | : "memory", "eax", "cc"); |
998 | } | 1049 | } |
@@ -1008,21 +1059,23 @@ static inline unsigned long __raw_local_irq_save(void) | |||
1008 | 1059 | ||
1009 | #define CLI_STRING \ | 1060 | #define CLI_STRING \ |
1010 | _paravirt_alt("pushl %%ecx; pushl %%edx;" \ | 1061 | _paravirt_alt("pushl %%ecx; pushl %%edx;" \ |
1011 | "call *paravirt_ops+%c[paravirt_cli_type]*4;" \ | 1062 | "call *%[paravirt_cli_opptr];" \ |
1012 | "popl %%edx; popl %%ecx", \ | 1063 | "popl %%edx; popl %%ecx", \ |
1013 | "%c[paravirt_cli_type]", "%c[paravirt_clobber]") | 1064 | "%c[paravirt_cli_type]", "%c[paravirt_clobber]") |
1014 | 1065 | ||
1015 | #define STI_STRING \ | 1066 | #define STI_STRING \ |
1016 | _paravirt_alt("pushl %%ecx; pushl %%edx;" \ | 1067 | _paravirt_alt("pushl %%ecx; pushl %%edx;" \ |
1017 | "call *paravirt_ops+%c[paravirt_sti_type]*4;" \ | 1068 | "call *%[paravirt_sti_opptr];" \ |
1018 | "popl %%edx; popl %%ecx", \ | 1069 | "popl %%edx; popl %%ecx", \ |
1019 | "%c[paravirt_sti_type]", "%c[paravirt_clobber]") | 1070 | "%c[paravirt_sti_type]", "%c[paravirt_clobber]") |
1020 | 1071 | ||
1021 | #define CLI_STI_CLOBBERS , "%eax" | 1072 | #define CLI_STI_CLOBBERS , "%eax" |
1022 | #define CLI_STI_INPUT_ARGS \ | 1073 | #define CLI_STI_INPUT_ARGS \ |
1023 | , \ | 1074 | , \ |
1024 | [paravirt_cli_type] "i" (PARAVIRT_PATCH(irq_disable)), \ | 1075 | [paravirt_cli_type] "i" (PARAVIRT_PATCH(pv_irq_ops.irq_disable)), \ |
1025 | [paravirt_sti_type] "i" (PARAVIRT_PATCH(irq_enable)), \ | 1076 | [paravirt_cli_opptr] "m" (pv_irq_ops.irq_disable), \ |
1077 | [paravirt_sti_type] "i" (PARAVIRT_PATCH(pv_irq_ops.irq_enable)), \ | ||
1078 | [paravirt_sti_opptr] "m" (pv_irq_ops.irq_enable), \ | ||
1026 | paravirt_clobber(CLBR_EAX) | 1079 | paravirt_clobber(CLBR_EAX) |
1027 | 1080 | ||
1028 | /* Make sure as little as possible of this mess escapes. */ | 1081 | /* Make sure as little as possible of this mess escapes. */ |
@@ -1042,7 +1095,7 @@ static inline unsigned long __raw_local_irq_save(void) | |||
1042 | 1095 | ||
1043 | #else /* __ASSEMBLY__ */ | 1096 | #else /* __ASSEMBLY__ */ |
1044 | 1097 | ||
1045 | #define PARA_PATCH(off) ((off) / 4) | 1098 | #define PARA_PATCH(struct, off) ((PARAVIRT_PATCH_##struct + (off)) / 4) |
1046 | 1099 | ||
1047 | #define PARA_SITE(ptype, clobbers, ops) \ | 1100 | #define PARA_SITE(ptype, clobbers, ops) \ |
1048 | 771:; \ | 1101 | 771:; \ |
@@ -1055,29 +1108,29 @@ static inline unsigned long __raw_local_irq_save(void) | |||
1055 | .short clobbers; \ | 1108 | .short clobbers; \ |
1056 | .popsection | 1109 | .popsection |
1057 | 1110 | ||
1058 | #define INTERRUPT_RETURN \ | 1111 | #define INTERRUPT_RETURN \ |
1059 | PARA_SITE(PARA_PATCH(PARAVIRT_iret), CLBR_NONE, \ | 1112 | PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_iret), CLBR_NONE, \ |
1060 | jmp *%cs:paravirt_ops+PARAVIRT_iret) | 1113 | jmp *%cs:pv_cpu_ops+PV_CPU_iret) |
1061 | 1114 | ||
1062 | #define DISABLE_INTERRUPTS(clobbers) \ | 1115 | #define DISABLE_INTERRUPTS(clobbers) \ |
1063 | PARA_SITE(PARA_PATCH(PARAVIRT_irq_disable), clobbers, \ | 1116 | PARA_SITE(PARA_PATCH(pv_irq_ops, PV_IRQ_irq_disable), clobbers, \ |
1064 | pushl %eax; pushl %ecx; pushl %edx; \ | 1117 | pushl %eax; pushl %ecx; pushl %edx; \ |
1065 | call *%cs:paravirt_ops+PARAVIRT_irq_disable; \ | 1118 | call *%cs:pv_irq_ops+PV_IRQ_irq_disable; \ |
1066 | popl %edx; popl %ecx; popl %eax) \ | 1119 | popl %edx; popl %ecx; popl %eax) \ |
1067 | 1120 | ||
1068 | #define ENABLE_INTERRUPTS(clobbers) \ | 1121 | #define ENABLE_INTERRUPTS(clobbers) \ |
1069 | PARA_SITE(PARA_PATCH(PARAVIRT_irq_enable), clobbers, \ | 1122 | PARA_SITE(PARA_PATCH(pv_irq_ops, PV_IRQ_irq_enable), clobbers, \ |
1070 | pushl %eax; pushl %ecx; pushl %edx; \ | 1123 | pushl %eax; pushl %ecx; pushl %edx; \ |
1071 | call *%cs:paravirt_ops+PARAVIRT_irq_enable; \ | 1124 | call *%cs:pv_irq_ops+PV_IRQ_irq_enable; \ |
1072 | popl %edx; popl %ecx; popl %eax) | 1125 | popl %edx; popl %ecx; popl %eax) |
1073 | 1126 | ||
1074 | #define ENABLE_INTERRUPTS_SYSEXIT \ | 1127 | #define ENABLE_INTERRUPTS_SYSEXIT \ |
1075 | PARA_SITE(PARA_PATCH(PARAVIRT_irq_enable_sysexit), CLBR_NONE, \ | 1128 | PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_irq_enable_sysexit), CLBR_NONE,\ |
1076 | jmp *%cs:paravirt_ops+PARAVIRT_irq_enable_sysexit) | 1129 | jmp *%cs:pv_cpu_ops+PV_CPU_irq_enable_sysexit) |
1077 | 1130 | ||
1078 | #define GET_CR0_INTO_EAX \ | 1131 | #define GET_CR0_INTO_EAX \ |
1079 | push %ecx; push %edx; \ | 1132 | push %ecx; push %edx; \ |
1080 | call *paravirt_ops+PARAVIRT_read_cr0; \ | 1133 | call *pv_cpu_ops+PV_CPU_read_cr0; \ |
1081 | pop %edx; pop %ecx | 1134 | pop %edx; pop %ecx |
1082 | 1135 | ||
1083 | #endif /* __ASSEMBLY__ */ | 1136 | #endif /* __ASSEMBLY__ */ |
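The reworked PARA_PATCH relies on asm-offsets generating one byte offset per struct inside paravirt_patch_template; adding the member's offset and dividing by the pointer size recovers the same slot number the C-side PARAVIRT_PATCH computes. A sketch with invented offsets:

#include <stdio.h>

/* Stand-ins for what asm-offsets now emits: a byte offset for each
 * struct within paravirt_patch_template, plus each member's byte
 * offset within its struct.  Both values are made up for the demo. */
#define DEMO_PARAVIRT_PATCH_pv_irq_ops	48
#define DEMO_PV_IRQ_irq_disable		 8

#define DEMO_PARA_PATCH(strct, off)	(((strct) + (off)) / 4)

int main(void)
{
	printf("irq_disable patch slot = %d\n",
	       DEMO_PARA_PATCH(DEMO_PARAVIRT_PATCH_pv_irq_ops,
			       DEMO_PV_IRQ_irq_disable));	/* (48+8)/4 = 14 */
	return 0;
}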
diff --git a/include/asm-x86/pgtable-3level-defs.h b/include/asm-x86/pgtable-3level-defs.h index c0df89f66e8b..448ac9516314 100644 --- a/include/asm-x86/pgtable-3level-defs.h +++ b/include/asm-x86/pgtable-3level-defs.h | |||
@@ -2,7 +2,7 @@ | |||
2 | #define _I386_PGTABLE_3LEVEL_DEFS_H | 2 | #define _I386_PGTABLE_3LEVEL_DEFS_H |
3 | 3 | ||
4 | #ifdef CONFIG_PARAVIRT | 4 | #ifdef CONFIG_PARAVIRT |
5 | #define SHARED_KERNEL_PMD (paravirt_ops.shared_kernel_pmd) | 5 | #define SHARED_KERNEL_PMD (pv_info.shared_kernel_pmd) |
6 | #else | 6 | #else |
7 | #define SHARED_KERNEL_PMD 1 | 7 | #define SHARED_KERNEL_PMD 1 |
8 | #endif | 8 | #endif |