path: root/include/asm-x86
author    Linus Torvalds <torvalds@woody.linux-foundation.org>    2007-10-17 14:10:11 -0400
committer Linus Torvalds <torvalds@woody.linux-foundation.org>    2007-10-17 14:10:11 -0400
commit    fb9fc395174138983a49f2da982ed14caabbe741 (patch)
tree      5d5d3643ee6853a899205613da272cc343fdc1a4 /include/asm-x86
parent    0eafaae84e21ac033815cc9f33c3ae889cd7ccfe (diff)
parent    ace2e92e193126711cb3a83a3752b2c5b8396950 (diff)
Merge branch 'xen-upstream' of git://git.kernel.org/pub/scm/linux/kernel/git/jeremy/xen
* 'xen-upstream' of git://git.kernel.org/pub/scm/linux/kernel/git/jeremy/xen:
  xfs: eagerly remove vmap mappings to avoid upsetting Xen
  xen: add some debug output for failed multicalls
  xen: fix incorrect vcpu_register_vcpu_info hypercall argument
  xen: ask the hypervisor how much space it needs reserved
  xen: lock pte pages while pinning/unpinning
  xen: deal with stale cr3 values when unpinning pagetables
  xen: add batch completion callbacks
  xen: yield to IPI target if necessary
  Clean up duplicate includes in arch/i386/xen/
  remove dead code in pgtable_cache_init
  paravirt: clean up lazy mode handling
  paravirt: refactor struct paravirt_ops into smaller pv_*_ops
Diffstat (limited to 'include/asm-x86')
-rw-r--r--  include/asm-x86/paravirt.h               487
-rw-r--r--  include/asm-x86/pgtable-3level-defs.h      2
2 files changed, 278 insertions, 211 deletions
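
Before the diff itself, a rough picture of the new layout may help. The following is a minimal user-space C sketch, not kernel code: it borrows the structure and macro names that appear in the diff (pv_cpu_ops, pv_irq_ops, paravirt_patch_template, PARAVIRT_PATCH) but simplifies their members, purely to show how embedding the per-area tables in one template turns every operation into a pointer-sized slot number derived from its offset.

/*
 * Minimal stand-alone sketch (not kernel code) of the idea in this merge:
 * one big ops table is split into per-area tables, and a "patch template"
 * that embeds them all gives every operation a stable slot number.
 */
#include <stddef.h>
#include <stdio.h>

struct pv_irq_ops {
        unsigned long (*save_fl)(void);
        void (*irq_disable)(void);
        void (*irq_enable)(void);
};

struct pv_cpu_ops {
        void (*cpuid)(unsigned int *eax, unsigned int *ebx,
                      unsigned int *ecx, unsigned int *edx);
        void (*wbinvd)(void);
};

/* All per-area tables laid out back to back, as in the diff below. */
struct paravirt_patch_template {
        struct pv_cpu_ops pv_cpu_ops;
        struct pv_irq_ops pv_irq_ops;
};

/* Same offset-to-slot-number trick as PARAVIRT_PATCH() in the diff. */
#define PARAVIRT_PATCH(x) \
        (offsetof(struct paravirt_patch_template, x) / sizeof(void *))

int main(void)
{
        printf("slot of pv_cpu_ops.wbinvd:      %zu\n",
               PARAVIRT_PATCH(pv_cpu_ops.wbinvd));
        printf("slot of pv_irq_ops.irq_disable: %zu\n",
               PARAVIRT_PATCH(pv_irq_ops.irq_disable));
        return 0;
}

The slot number is what gets recorded in the .parainstructions section at each call site, so the patcher can later recover which field of the template (and hence which operation) a site refers to.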
diff --git a/include/asm-x86/paravirt.h b/include/asm-x86/paravirt.h
index 9fa3fa9e62d1..f59d370c5df4 100644
--- a/include/asm-x86/paravirt.h
+++ b/include/asm-x86/paravirt.h
@@ -25,27 +25,22 @@ struct tss_struct;
 struct mm_struct;
 struct desc_struct;
 
-/* Lazy mode for batching updates / context switch */
-enum paravirt_lazy_mode {
-        PARAVIRT_LAZY_NONE = 0,
-        PARAVIRT_LAZY_MMU = 1,
-        PARAVIRT_LAZY_CPU = 2,
-        PARAVIRT_LAZY_FLUSH = 3,
-};
-
-struct paravirt_ops
-{
+/* general info */
+struct pv_info {
         unsigned int kernel_rpl;
         int shared_kernel_pmd;
         int paravirt_enabled;
         const char *name;
+};
 
+struct pv_init_ops {
         /*
-         * Patch may replace one of the defined code sequences with arbitrary
-         * code, subject to the same register constraints.  This generally
-         * means the code is not free to clobber any registers other than EAX.
-         * The patch function should return the number of bytes of code
-         * generated, as we nop pad the rest in generic code.
+         * Patch may replace one of the defined code sequences with
+         * arbitrary code, subject to the same register constraints.
+         * This generally means the code is not free to clobber any
+         * registers other than EAX.  The patch function should return
+         * the number of bytes of code generated, as we nop pad the
+         * rest in generic code.
          */
         unsigned (*patch)(u8 type, u16 clobber, void *insnbuf,
                           unsigned long addr, unsigned len);
@@ -55,29 +50,29 @@ struct paravirt_ops
         char *(*memory_setup)(void);
         void (*post_allocator_init)(void);
 
-        void (*init_IRQ)(void);
-        void (*time_init)(void);
-
-        /*
-         * Called before/after init_mm pagetable setup. setup_start
-         * may reset %cr3, and may pre-install parts of the pagetable;
-         * pagetable setup is expected to preserve any existing
-         * mapping.
-         */
-        void (*pagetable_setup_start)(pgd_t *pgd_base);
-        void (*pagetable_setup_done)(pgd_t *pgd_base);
-
         /* Print a banner to identify the environment */
         void (*banner)(void);
+};
+
+
+struct pv_lazy_ops {
+        /* Set deferred update mode, used for batching operations. */
+        void (*enter)(void);
+        void (*leave)(void);
+};
+
+struct pv_time_ops {
+        void (*time_init)(void);
 
         /* Set and set time of day */
         unsigned long (*get_wallclock)(void);
         int (*set_wallclock)(unsigned long);
 
-        /* cpuid emulation, mostly so that caps bits can be disabled */
-        void (*cpuid)(unsigned int *eax, unsigned int *ebx,
-                      unsigned int *ecx, unsigned int *edx);
+        unsigned long long (*sched_clock)(void);
+        unsigned long (*get_cpu_khz)(void);
+};
 
+struct pv_cpu_ops {
         /* hooks for various privileged instructions */
         unsigned long (*get_debugreg)(int regno);
         void (*set_debugreg)(int regno, unsigned long value);
@@ -87,41 +82,10 @@ struct paravirt_ops
         unsigned long (*read_cr0)(void);
         void (*write_cr0)(unsigned long);
 
-        unsigned long (*read_cr2)(void);
-        void (*write_cr2)(unsigned long);
-
-        unsigned long (*read_cr3)(void);
-        void (*write_cr3)(unsigned long);
-
         unsigned long (*read_cr4_safe)(void);
         unsigned long (*read_cr4)(void);
         void (*write_cr4)(unsigned long);
 
-        /*
-         * Get/set interrupt state.  save_fl and restore_fl are only
-         * expected to use X86_EFLAGS_IF; all other bits
-         * returned from save_fl are undefined, and may be ignored by
-         * restore_fl.
-         */
-        unsigned long (*save_fl)(void);
-        void (*restore_fl)(unsigned long);
-        void (*irq_disable)(void);
-        void (*irq_enable)(void);
-        void (*safe_halt)(void);
-        void (*halt)(void);
-
-        void (*wbinvd)(void);
-
-        /* MSR, PMC and TSR operations.
-           err = 0/-EFAULT.  wrmsr returns 0/-EFAULT. */
-        u64 (*read_msr)(unsigned int msr, int *err);
-        int (*write_msr)(unsigned int msr, u64 val);
-
-        u64 (*read_tsc)(void);
-        u64 (*read_pmc)(void);
-        unsigned long long (*sched_clock)(void);
-        unsigned long (*get_cpu_khz)(void);
-
         /* Segment descriptor handling */
         void (*load_tr_desc)(void);
         void (*load_gdt)(const struct Xgt_desc_struct *);
@@ -140,18 +104,47 @@ struct paravirt_ops
         void (*load_esp0)(struct tss_struct *tss, struct thread_struct *t);
 
         void (*set_iopl_mask)(unsigned mask);
+
+        void (*wbinvd)(void);
         void (*io_delay)(void);
 
+        /* cpuid emulation, mostly so that caps bits can be disabled */
+        void (*cpuid)(unsigned int *eax, unsigned int *ebx,
+                      unsigned int *ecx, unsigned int *edx);
+
+        /* MSR, PMC and TSR operations.
+           err = 0/-EFAULT.  wrmsr returns 0/-EFAULT. */
+        u64 (*read_msr)(unsigned int msr, int *err);
+        int (*write_msr)(unsigned int msr, u64 val);
+
+        u64 (*read_tsc)(void);
+        u64 (*read_pmc)(void);
+
+        /* These two are jmp to, not actually called. */
+        void (*irq_enable_sysexit)(void);
+        void (*iret)(void);
+
+        struct pv_lazy_ops lazy_mode;
+};
+
+struct pv_irq_ops {
+        void (*init_IRQ)(void);
+
         /*
-         * Hooks for intercepting the creation/use/destruction of an
-         * mm_struct.
+         * Get/set interrupt state.  save_fl and restore_fl are only
+         * expected to use X86_EFLAGS_IF; all other bits
+         * returned from save_fl are undefined, and may be ignored by
+         * restore_fl.
          */
-        void (*activate_mm)(struct mm_struct *prev,
-                            struct mm_struct *next);
-        void (*dup_mmap)(struct mm_struct *oldmm,
-                         struct mm_struct *mm);
-        void (*exit_mmap)(struct mm_struct *mm);
+        unsigned long (*save_fl)(void);
+        void (*restore_fl)(unsigned long);
+        void (*irq_disable)(void);
+        void (*irq_enable)(void);
+        void (*safe_halt)(void);
+        void (*halt)(void);
+};
 
+struct pv_apic_ops {
 #ifdef CONFIG_X86_LOCAL_APIC
         /*
          * Direct APIC operations, principally for VMI.  Ideally
@@ -167,6 +160,34 @@ struct paravirt_ops
                                  unsigned long start_eip,
                                  unsigned long start_esp);
 #endif
+};
+
+struct pv_mmu_ops {
+        /*
+         * Called before/after init_mm pagetable setup. setup_start
+         * may reset %cr3, and may pre-install parts of the pagetable;
+         * pagetable setup is expected to preserve any existing
+         * mapping.
+         */
+        void (*pagetable_setup_start)(pgd_t *pgd_base);
+        void (*pagetable_setup_done)(pgd_t *pgd_base);
+
+        unsigned long (*read_cr2)(void);
+        void (*write_cr2)(unsigned long);
+
+        unsigned long (*read_cr3)(void);
+        void (*write_cr3)(unsigned long);
+
+        /*
+         * Hooks for intercepting the creation/use/destruction of an
+         * mm_struct.
+         */
+        void (*activate_mm)(struct mm_struct *prev,
+                            struct mm_struct *next);
+        void (*dup_mmap)(struct mm_struct *oldmm,
+                         struct mm_struct *mm);
+        void (*exit_mmap)(struct mm_struct *mm);
+
 
         /* TLB operations */
         void (*flush_tlb_user)(void);
@@ -191,15 +212,12 @@ struct paravirt_ops
         void (*pte_update_defer)(struct mm_struct *mm,
                                  unsigned long addr, pte_t *ptep);
 
-#ifdef CONFIG_HIGHPTE
-        void *(*kmap_atomic_pte)(struct page *page, enum km_type type);
-#endif
-
 #ifdef CONFIG_X86_PAE
         void (*set_pte_atomic)(pte_t *ptep, pte_t pteval);
-        void (*set_pte_present)(struct mm_struct *mm, unsigned long addr, pte_t *ptep, pte_t pte);
+        void (*set_pte_present)(struct mm_struct *mm, unsigned long addr,
+                                pte_t *ptep, pte_t pte);
         void (*set_pud)(pud_t *pudp, pud_t pudval);
         void (*pte_clear)(struct mm_struct *mm, unsigned long addr, pte_t *ptep);
         void (*pmd_clear)(pmd_t *pmdp);
 
         unsigned long long (*pte_val)(pte_t);
@@ -217,21 +235,40 @@ struct paravirt_ops
         pgd_t (*make_pgd)(unsigned long pgd);
 #endif
 
-        /* Set deferred update mode, used for batching operations. */
-        void (*set_lazy_mode)(enum paravirt_lazy_mode mode);
+#ifdef CONFIG_HIGHPTE
+        void *(*kmap_atomic_pte)(struct page *page, enum km_type type);
+#endif
 
-        /* These two are jmp to, not actually called. */
-        void (*irq_enable_sysexit)(void);
-        void (*iret)(void);
+        struct pv_lazy_ops lazy_mode;
 };
 
-extern struct paravirt_ops paravirt_ops;
+/* This contains all the paravirt structures: we get a convenient
+ * number for each function using the offset which we use to indicate
+ * what to patch. */
+struct paravirt_patch_template
+{
+        struct pv_init_ops pv_init_ops;
+        struct pv_time_ops pv_time_ops;
+        struct pv_cpu_ops pv_cpu_ops;
+        struct pv_irq_ops pv_irq_ops;
+        struct pv_apic_ops pv_apic_ops;
+        struct pv_mmu_ops pv_mmu_ops;
+};
+
+extern struct pv_info pv_info;
+extern struct pv_init_ops pv_init_ops;
+extern struct pv_time_ops pv_time_ops;
+extern struct pv_cpu_ops pv_cpu_ops;
+extern struct pv_irq_ops pv_irq_ops;
+extern struct pv_apic_ops pv_apic_ops;
+extern struct pv_mmu_ops pv_mmu_ops;
 
 #define PARAVIRT_PATCH(x)                                       \
-        (offsetof(struct paravirt_ops, x) / sizeof(void *))
+        (offsetof(struct paravirt_patch_template, x) / sizeof(void *))
 
-#define paravirt_type(type)                                     \
-        [paravirt_typenum] "i" (PARAVIRT_PATCH(type))
+#define paravirt_type(op)                                       \
+        [paravirt_typenum] "i" (PARAVIRT_PATCH(op)),            \
+        [paravirt_opptr] "m" (op)
 #define paravirt_clobber(clobber)                               \
         [paravirt_clobber] "i" (clobber)
 
@@ -258,7 +295,7 @@ unsigned paravirt_patch_call(void *insnbuf,
                              const void *target, u16 tgt_clobbers,
                              unsigned long addr, u16 site_clobbers,
                              unsigned len);
-unsigned paravirt_patch_jmp(const void *target, void *insnbuf,
+unsigned paravirt_patch_jmp(void *insnbuf, const void *target,
                             unsigned long addr, unsigned len);
 unsigned paravirt_patch_default(u8 type, u16 clobbers, void *insnbuf,
                                 unsigned long addr, unsigned len);
@@ -271,14 +308,14 @@ int paravirt_disable_iospace(void);
 /*
  * This generates an indirect call based on the operation type number.
  * The type number, computed in PARAVIRT_PATCH, is derived from the
- * offset into the paravirt_ops structure, and can therefore be freely
- * converted back into a structure offset.
+ * offset into the paravirt_patch_template structure, and can therefore be
+ * freely converted back into a structure offset.
  */
-#define PARAVIRT_CALL   "call *(paravirt_ops+%c[paravirt_typenum]*4);"
+#define PARAVIRT_CALL   "call *%[paravirt_opptr];"
 
 /*
- * These macros are intended to wrap calls into a paravirt_ops
- * operation, so that they can be later identified and patched at
+ * These macros are intended to wrap calls through one of the paravirt
+ * ops structs, so that they can be later identified and patched at
  * runtime.
  *
  * Normally, a call to a pv_op function is a simple indirect call:
@@ -301,7 +338,7 @@ int paravirt_disable_iospace(void);
  * The call instruction itself is marked by placing its start address
  * and size into the .parainstructions section, so that
  * apply_paravirt() in arch/i386/kernel/alternative.c can do the
- * appropriate patching under the control of the backend paravirt_ops
+ * appropriate patching under the control of the backend pv_init_ops
  * implementation.
  *
  * Unfortunately there's no way to get gcc to generate the args setup
@@ -409,36 +446,36 @@ int paravirt_disable_iospace(void);
 
 static inline int paravirt_enabled(void)
 {
-        return paravirt_ops.paravirt_enabled;
+        return pv_info.paravirt_enabled;
 }
 
 static inline void load_esp0(struct tss_struct *tss,
                              struct thread_struct *thread)
 {
-        PVOP_VCALL2(load_esp0, tss, thread);
+        PVOP_VCALL2(pv_cpu_ops.load_esp0, tss, thread);
 }
 
-#define ARCH_SETUP                      paravirt_ops.arch_setup();
+#define ARCH_SETUP                      pv_init_ops.arch_setup();
 static inline unsigned long get_wallclock(void)
 {
-        return PVOP_CALL0(unsigned long, get_wallclock);
+        return PVOP_CALL0(unsigned long, pv_time_ops.get_wallclock);
 }
 
 static inline int set_wallclock(unsigned long nowtime)
 {
-        return PVOP_CALL1(int, set_wallclock, nowtime);
+        return PVOP_CALL1(int, pv_time_ops.set_wallclock, nowtime);
 }
 
 static inline void (*choose_time_init(void))(void)
 {
-        return paravirt_ops.time_init;
+        return pv_time_ops.time_init;
 }
 
 /* The paravirtualized CPUID instruction. */
 static inline void __cpuid(unsigned int *eax, unsigned int *ebx,
                            unsigned int *ecx, unsigned int *edx)
 {
-        PVOP_VCALL4(cpuid, eax, ebx, ecx, edx);
+        PVOP_VCALL4(pv_cpu_ops.cpuid, eax, ebx, ecx, edx);
 }
 
 /*
@@ -446,87 +483,87 @@ static inline void __cpuid(unsigned int *eax, unsigned int *ebx,
  */
 static inline unsigned long paravirt_get_debugreg(int reg)
 {
-        return PVOP_CALL1(unsigned long, get_debugreg, reg);
+        return PVOP_CALL1(unsigned long, pv_cpu_ops.get_debugreg, reg);
 }
 #define get_debugreg(var, reg) var = paravirt_get_debugreg(reg)
 static inline void set_debugreg(unsigned long val, int reg)
 {
-        PVOP_VCALL2(set_debugreg, reg, val);
+        PVOP_VCALL2(pv_cpu_ops.set_debugreg, reg, val);
 }
 
 static inline void clts(void)
 {
-        PVOP_VCALL0(clts);
+        PVOP_VCALL0(pv_cpu_ops.clts);
 }
 
 static inline unsigned long read_cr0(void)
 {
-        return PVOP_CALL0(unsigned long, read_cr0);
+        return PVOP_CALL0(unsigned long, pv_cpu_ops.read_cr0);
 }
 
 static inline void write_cr0(unsigned long x)
 {
-        PVOP_VCALL1(write_cr0, x);
+        PVOP_VCALL1(pv_cpu_ops.write_cr0, x);
 }
 
 static inline unsigned long read_cr2(void)
 {
-        return PVOP_CALL0(unsigned long, read_cr2);
+        return PVOP_CALL0(unsigned long, pv_mmu_ops.read_cr2);
 }
 
 static inline void write_cr2(unsigned long x)
 {
-        PVOP_VCALL1(write_cr2, x);
+        PVOP_VCALL1(pv_mmu_ops.write_cr2, x);
 }
 
 static inline unsigned long read_cr3(void)
 {
-        return PVOP_CALL0(unsigned long, read_cr3);
+        return PVOP_CALL0(unsigned long, pv_mmu_ops.read_cr3);
 }
 
 static inline void write_cr3(unsigned long x)
 {
-        PVOP_VCALL1(write_cr3, x);
+        PVOP_VCALL1(pv_mmu_ops.write_cr3, x);
 }
 
 static inline unsigned long read_cr4(void)
 {
-        return PVOP_CALL0(unsigned long, read_cr4);
+        return PVOP_CALL0(unsigned long, pv_cpu_ops.read_cr4);
 }
 static inline unsigned long read_cr4_safe(void)
 {
-        return PVOP_CALL0(unsigned long, read_cr4_safe);
+        return PVOP_CALL0(unsigned long, pv_cpu_ops.read_cr4_safe);
 }
 
 static inline void write_cr4(unsigned long x)
 {
-        PVOP_VCALL1(write_cr4, x);
+        PVOP_VCALL1(pv_cpu_ops.write_cr4, x);
 }
 
 static inline void raw_safe_halt(void)
 {
-        PVOP_VCALL0(safe_halt);
+        PVOP_VCALL0(pv_irq_ops.safe_halt);
 }
 
 static inline void halt(void)
 {
-        PVOP_VCALL0(safe_halt);
+        PVOP_VCALL0(pv_irq_ops.safe_halt);
 }
 
 static inline void wbinvd(void)
 {
-        PVOP_VCALL0(wbinvd);
+        PVOP_VCALL0(pv_cpu_ops.wbinvd);
 }
 
-#define get_kernel_rpl()  (paravirt_ops.kernel_rpl)
+#define get_kernel_rpl()  (pv_info.kernel_rpl)
 
 static inline u64 paravirt_read_msr(unsigned msr, int *err)
 {
-        return PVOP_CALL2(u64, read_msr, msr, err);
+        return PVOP_CALL2(u64, pv_cpu_ops.read_msr, msr, err);
 }
 static inline int paravirt_write_msr(unsigned msr, unsigned low, unsigned high)
 {
-        return PVOP_CALL3(int, write_msr, msr, low, high);
+        return PVOP_CALL3(int, pv_cpu_ops.write_msr, msr, low, high);
 }
 
 /* These should all do BUG_ON(_err), but our headers are too tangled. */
@@ -560,7 +597,7 @@ static inline int paravirt_write_msr(unsigned msr, unsigned low, unsigned high)
 
 static inline u64 paravirt_read_tsc(void)
 {
-        return PVOP_CALL0(u64, read_tsc);
+        return PVOP_CALL0(u64, pv_cpu_ops.read_tsc);
 }
 
 #define rdtscl(low) do {                        \
@@ -572,15 +609,15 @@ static inline u64 paravirt_read_tsc(void)
 
 static inline unsigned long long paravirt_sched_clock(void)
 {
-        return PVOP_CALL0(unsigned long long, sched_clock);
+        return PVOP_CALL0(unsigned long long, pv_time_ops.sched_clock);
 }
-#define calculate_cpu_khz() (paravirt_ops.get_cpu_khz())
+#define calculate_cpu_khz() (pv_time_ops.get_cpu_khz())
 
 #define write_tsc(val1,val2) wrmsr(0x10, val1, val2)
 
 static inline unsigned long long paravirt_read_pmc(int counter)
 {
-        return PVOP_CALL1(u64, read_pmc, counter);
+        return PVOP_CALL1(u64, pv_cpu_ops.read_pmc, counter);
 }
 
 #define rdpmc(counter,low,high) do {            \
@@ -591,61 +628,61 @@ static inline unsigned long long paravirt_read_pmc(int counter)
 
 static inline void load_TR_desc(void)
 {
-        PVOP_VCALL0(load_tr_desc);
+        PVOP_VCALL0(pv_cpu_ops.load_tr_desc);
 }
 static inline void load_gdt(const struct Xgt_desc_struct *dtr)
 {
-        PVOP_VCALL1(load_gdt, dtr);
+        PVOP_VCALL1(pv_cpu_ops.load_gdt, dtr);
 }
 static inline void load_idt(const struct Xgt_desc_struct *dtr)
 {
-        PVOP_VCALL1(load_idt, dtr);
+        PVOP_VCALL1(pv_cpu_ops.load_idt, dtr);
 }
 static inline void set_ldt(const void *addr, unsigned entries)
 {
-        PVOP_VCALL2(set_ldt, addr, entries);
+        PVOP_VCALL2(pv_cpu_ops.set_ldt, addr, entries);
 }
 static inline void store_gdt(struct Xgt_desc_struct *dtr)
 {
-        PVOP_VCALL1(store_gdt, dtr);
+        PVOP_VCALL1(pv_cpu_ops.store_gdt, dtr);
 }
 static inline void store_idt(struct Xgt_desc_struct *dtr)
 {
-        PVOP_VCALL1(store_idt, dtr);
+        PVOP_VCALL1(pv_cpu_ops.store_idt, dtr);
 }
 static inline unsigned long paravirt_store_tr(void)
 {
-        return PVOP_CALL0(unsigned long, store_tr);
+        return PVOP_CALL0(unsigned long, pv_cpu_ops.store_tr);
 }
 #define store_tr(tr)    ((tr) = paravirt_store_tr())
 static inline void load_TLS(struct thread_struct *t, unsigned cpu)
 {
-        PVOP_VCALL2(load_tls, t, cpu);
+        PVOP_VCALL2(pv_cpu_ops.load_tls, t, cpu);
 }
 static inline void write_ldt_entry(void *dt, int entry, u32 low, u32 high)
 {
-        PVOP_VCALL4(write_ldt_entry, dt, entry, low, high);
+        PVOP_VCALL4(pv_cpu_ops.write_ldt_entry, dt, entry, low, high);
 }
 static inline void write_gdt_entry(void *dt, int entry, u32 low, u32 high)
 {
-        PVOP_VCALL4(write_gdt_entry, dt, entry, low, high);
+        PVOP_VCALL4(pv_cpu_ops.write_gdt_entry, dt, entry, low, high);
 }
 static inline void write_idt_entry(void *dt, int entry, u32 low, u32 high)
 {
-        PVOP_VCALL4(write_idt_entry, dt, entry, low, high);
+        PVOP_VCALL4(pv_cpu_ops.write_idt_entry, dt, entry, low, high);
 }
 static inline void set_iopl_mask(unsigned mask)
 {
-        PVOP_VCALL1(set_iopl_mask, mask);
+        PVOP_VCALL1(pv_cpu_ops.set_iopl_mask, mask);
 }
 
 /* The paravirtualized I/O functions */
 static inline void slow_down_io(void) {
-        paravirt_ops.io_delay();
+        pv_cpu_ops.io_delay();
 #ifdef REALLY_SLOW_IO
-        paravirt_ops.io_delay();
-        paravirt_ops.io_delay();
-        paravirt_ops.io_delay();
+        pv_cpu_ops.io_delay();
+        pv_cpu_ops.io_delay();
+        pv_cpu_ops.io_delay();
 #endif
 }
 
@@ -655,121 +692,120 @@ static inline void slow_down_io(void) {
655 */ 692 */
656static inline void apic_write(unsigned long reg, unsigned long v) 693static inline void apic_write(unsigned long reg, unsigned long v)
657{ 694{
658 PVOP_VCALL2(apic_write, reg, v); 695 PVOP_VCALL2(pv_apic_ops.apic_write, reg, v);
659} 696}
660 697
661static inline void apic_write_atomic(unsigned long reg, unsigned long v) 698static inline void apic_write_atomic(unsigned long reg, unsigned long v)
662{ 699{
663 PVOP_VCALL2(apic_write_atomic, reg, v); 700 PVOP_VCALL2(pv_apic_ops.apic_write_atomic, reg, v);
664} 701}
665 702
666static inline unsigned long apic_read(unsigned long reg) 703static inline unsigned long apic_read(unsigned long reg)
667{ 704{
668 return PVOP_CALL1(unsigned long, apic_read, reg); 705 return PVOP_CALL1(unsigned long, pv_apic_ops.apic_read, reg);
669} 706}
670 707
671static inline void setup_boot_clock(void) 708static inline void setup_boot_clock(void)
672{ 709{
673 PVOP_VCALL0(setup_boot_clock); 710 PVOP_VCALL0(pv_apic_ops.setup_boot_clock);
674} 711}
675 712
676static inline void setup_secondary_clock(void) 713static inline void setup_secondary_clock(void)
677{ 714{
678 PVOP_VCALL0(setup_secondary_clock); 715 PVOP_VCALL0(pv_apic_ops.setup_secondary_clock);
679} 716}
680#endif 717#endif
681 718
682static inline void paravirt_post_allocator_init(void) 719static inline void paravirt_post_allocator_init(void)
683{ 720{
684 if (paravirt_ops.post_allocator_init) 721 if (pv_init_ops.post_allocator_init)
685 (*paravirt_ops.post_allocator_init)(); 722 (*pv_init_ops.post_allocator_init)();
686} 723}
687 724
688static inline void paravirt_pagetable_setup_start(pgd_t *base) 725static inline void paravirt_pagetable_setup_start(pgd_t *base)
689{ 726{
690 if (paravirt_ops.pagetable_setup_start) 727 (*pv_mmu_ops.pagetable_setup_start)(base);
691 (*paravirt_ops.pagetable_setup_start)(base);
692} 728}
693 729
694static inline void paravirt_pagetable_setup_done(pgd_t *base) 730static inline void paravirt_pagetable_setup_done(pgd_t *base)
695{ 731{
696 if (paravirt_ops.pagetable_setup_done) 732 (*pv_mmu_ops.pagetable_setup_done)(base);
697 (*paravirt_ops.pagetable_setup_done)(base);
698} 733}
699 734
700#ifdef CONFIG_SMP 735#ifdef CONFIG_SMP
701static inline void startup_ipi_hook(int phys_apicid, unsigned long start_eip, 736static inline void startup_ipi_hook(int phys_apicid, unsigned long start_eip,
702 unsigned long start_esp) 737 unsigned long start_esp)
703{ 738{
704 PVOP_VCALL3(startup_ipi_hook, phys_apicid, start_eip, start_esp); 739 PVOP_VCALL3(pv_apic_ops.startup_ipi_hook,
740 phys_apicid, start_eip, start_esp);
705} 741}
706#endif 742#endif
707 743
708static inline void paravirt_activate_mm(struct mm_struct *prev, 744static inline void paravirt_activate_mm(struct mm_struct *prev,
709 struct mm_struct *next) 745 struct mm_struct *next)
710{ 746{
711 PVOP_VCALL2(activate_mm, prev, next); 747 PVOP_VCALL2(pv_mmu_ops.activate_mm, prev, next);
712} 748}
713 749
714static inline void arch_dup_mmap(struct mm_struct *oldmm, 750static inline void arch_dup_mmap(struct mm_struct *oldmm,
715 struct mm_struct *mm) 751 struct mm_struct *mm)
716{ 752{
717 PVOP_VCALL2(dup_mmap, oldmm, mm); 753 PVOP_VCALL2(pv_mmu_ops.dup_mmap, oldmm, mm);
718} 754}
719 755
720static inline void arch_exit_mmap(struct mm_struct *mm) 756static inline void arch_exit_mmap(struct mm_struct *mm)
721{ 757{
722 PVOP_VCALL1(exit_mmap, mm); 758 PVOP_VCALL1(pv_mmu_ops.exit_mmap, mm);
723} 759}
724 760
725static inline void __flush_tlb(void) 761static inline void __flush_tlb(void)
726{ 762{
727 PVOP_VCALL0(flush_tlb_user); 763 PVOP_VCALL0(pv_mmu_ops.flush_tlb_user);
728} 764}
729static inline void __flush_tlb_global(void) 765static inline void __flush_tlb_global(void)
730{ 766{
731 PVOP_VCALL0(flush_tlb_kernel); 767 PVOP_VCALL0(pv_mmu_ops.flush_tlb_kernel);
732} 768}
733static inline void __flush_tlb_single(unsigned long addr) 769static inline void __flush_tlb_single(unsigned long addr)
734{ 770{
735 PVOP_VCALL1(flush_tlb_single, addr); 771 PVOP_VCALL1(pv_mmu_ops.flush_tlb_single, addr);
736} 772}
737 773
738static inline void flush_tlb_others(cpumask_t cpumask, struct mm_struct *mm, 774static inline void flush_tlb_others(cpumask_t cpumask, struct mm_struct *mm,
739 unsigned long va) 775 unsigned long va)
740{ 776{
741 PVOP_VCALL3(flush_tlb_others, &cpumask, mm, va); 777 PVOP_VCALL3(pv_mmu_ops.flush_tlb_others, &cpumask, mm, va);
742} 778}
743 779
744static inline void paravirt_alloc_pt(struct mm_struct *mm, unsigned pfn) 780static inline void paravirt_alloc_pt(struct mm_struct *mm, unsigned pfn)
745{ 781{
746 PVOP_VCALL2(alloc_pt, mm, pfn); 782 PVOP_VCALL2(pv_mmu_ops.alloc_pt, mm, pfn);
747} 783}
748static inline void paravirt_release_pt(unsigned pfn) 784static inline void paravirt_release_pt(unsigned pfn)
749{ 785{
750 PVOP_VCALL1(release_pt, pfn); 786 PVOP_VCALL1(pv_mmu_ops.release_pt, pfn);
751} 787}
752 788
753static inline void paravirt_alloc_pd(unsigned pfn) 789static inline void paravirt_alloc_pd(unsigned pfn)
754{ 790{
755 PVOP_VCALL1(alloc_pd, pfn); 791 PVOP_VCALL1(pv_mmu_ops.alloc_pd, pfn);
756} 792}
757 793
758static inline void paravirt_alloc_pd_clone(unsigned pfn, unsigned clonepfn, 794static inline void paravirt_alloc_pd_clone(unsigned pfn, unsigned clonepfn,
759 unsigned start, unsigned count) 795 unsigned start, unsigned count)
760{ 796{
761 PVOP_VCALL4(alloc_pd_clone, pfn, clonepfn, start, count); 797 PVOP_VCALL4(pv_mmu_ops.alloc_pd_clone, pfn, clonepfn, start, count);
762} 798}
763static inline void paravirt_release_pd(unsigned pfn) 799static inline void paravirt_release_pd(unsigned pfn)
764{ 800{
765 PVOP_VCALL1(release_pd, pfn); 801 PVOP_VCALL1(pv_mmu_ops.release_pd, pfn);
766} 802}
767 803
768#ifdef CONFIG_HIGHPTE 804#ifdef CONFIG_HIGHPTE
769static inline void *kmap_atomic_pte(struct page *page, enum km_type type) 805static inline void *kmap_atomic_pte(struct page *page, enum km_type type)
770{ 806{
771 unsigned long ret; 807 unsigned long ret;
772 ret = PVOP_CALL2(unsigned long, kmap_atomic_pte, page, type); 808 ret = PVOP_CALL2(unsigned long, pv_mmu_ops.kmap_atomic_pte, page, type);
773 return (void *)ret; 809 return (void *)ret;
774} 810}
775#endif 811#endif
@@ -777,162 +813,191 @@ static inline void *kmap_atomic_pte(struct page *page, enum km_type type)
777static inline void pte_update(struct mm_struct *mm, unsigned long addr, 813static inline void pte_update(struct mm_struct *mm, unsigned long addr,
778 pte_t *ptep) 814 pte_t *ptep)
779{ 815{
780 PVOP_VCALL3(pte_update, mm, addr, ptep); 816 PVOP_VCALL3(pv_mmu_ops.pte_update, mm, addr, ptep);
781} 817}
782 818
783static inline void pte_update_defer(struct mm_struct *mm, unsigned long addr, 819static inline void pte_update_defer(struct mm_struct *mm, unsigned long addr,
784 pte_t *ptep) 820 pte_t *ptep)
785{ 821{
786 PVOP_VCALL3(pte_update_defer, mm, addr, ptep); 822 PVOP_VCALL3(pv_mmu_ops.pte_update_defer, mm, addr, ptep);
787} 823}
788 824
789#ifdef CONFIG_X86_PAE 825#ifdef CONFIG_X86_PAE
790static inline pte_t __pte(unsigned long long val) 826static inline pte_t __pte(unsigned long long val)
791{ 827{
792 unsigned long long ret = PVOP_CALL2(unsigned long long, make_pte, 828 unsigned long long ret = PVOP_CALL2(unsigned long long,
829 pv_mmu_ops.make_pte,
793 val, val >> 32); 830 val, val >> 32);
794 return (pte_t) { ret, ret >> 32 }; 831 return (pte_t) { ret, ret >> 32 };
795} 832}
796 833
797static inline pmd_t __pmd(unsigned long long val) 834static inline pmd_t __pmd(unsigned long long val)
798{ 835{
799 return (pmd_t) { PVOP_CALL2(unsigned long long, make_pmd, val, val >> 32) }; 836 return (pmd_t) { PVOP_CALL2(unsigned long long, pv_mmu_ops.make_pmd,
837 val, val >> 32) };
800} 838}
801 839
802static inline pgd_t __pgd(unsigned long long val) 840static inline pgd_t __pgd(unsigned long long val)
803{ 841{
804 return (pgd_t) { PVOP_CALL2(unsigned long long, make_pgd, val, val >> 32) }; 842 return (pgd_t) { PVOP_CALL2(unsigned long long, pv_mmu_ops.make_pgd,
843 val, val >> 32) };
805} 844}
806 845
807static inline unsigned long long pte_val(pte_t x) 846static inline unsigned long long pte_val(pte_t x)
808{ 847{
809 return PVOP_CALL2(unsigned long long, pte_val, x.pte_low, x.pte_high); 848 return PVOP_CALL2(unsigned long long, pv_mmu_ops.pte_val,
849 x.pte_low, x.pte_high);
810} 850}
811 851
812static inline unsigned long long pmd_val(pmd_t x) 852static inline unsigned long long pmd_val(pmd_t x)
813{ 853{
814 return PVOP_CALL2(unsigned long long, pmd_val, x.pmd, x.pmd >> 32); 854 return PVOP_CALL2(unsigned long long, pv_mmu_ops.pmd_val,
855 x.pmd, x.pmd >> 32);
815} 856}
816 857
817static inline unsigned long long pgd_val(pgd_t x) 858static inline unsigned long long pgd_val(pgd_t x)
818{ 859{
819 return PVOP_CALL2(unsigned long long, pgd_val, x.pgd, x.pgd >> 32); 860 return PVOP_CALL2(unsigned long long, pv_mmu_ops.pgd_val,
861 x.pgd, x.pgd >> 32);
820} 862}
821 863
822static inline void set_pte(pte_t *ptep, pte_t pteval) 864static inline void set_pte(pte_t *ptep, pte_t pteval)
823{ 865{
824 PVOP_VCALL3(set_pte, ptep, pteval.pte_low, pteval.pte_high); 866 PVOP_VCALL3(pv_mmu_ops.set_pte, ptep, pteval.pte_low, pteval.pte_high);
825} 867}
826 868
827static inline void set_pte_at(struct mm_struct *mm, unsigned long addr, 869static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
828 pte_t *ptep, pte_t pteval) 870 pte_t *ptep, pte_t pteval)
829{ 871{
830 /* 5 arg words */ 872 /* 5 arg words */
831 paravirt_ops.set_pte_at(mm, addr, ptep, pteval); 873 pv_mmu_ops.set_pte_at(mm, addr, ptep, pteval);
832} 874}
833 875
834static inline void set_pte_atomic(pte_t *ptep, pte_t pteval) 876static inline void set_pte_atomic(pte_t *ptep, pte_t pteval)
835{ 877{
836 PVOP_VCALL3(set_pte_atomic, ptep, pteval.pte_low, pteval.pte_high); 878 PVOP_VCALL3(pv_mmu_ops.set_pte_atomic, ptep,
879 pteval.pte_low, pteval.pte_high);
837} 880}
838 881
839static inline void set_pte_present(struct mm_struct *mm, unsigned long addr, 882static inline void set_pte_present(struct mm_struct *mm, unsigned long addr,
840 pte_t *ptep, pte_t pte) 883 pte_t *ptep, pte_t pte)
841{ 884{
842 /* 5 arg words */ 885 /* 5 arg words */
843 paravirt_ops.set_pte_present(mm, addr, ptep, pte); 886 pv_mmu_ops.set_pte_present(mm, addr, ptep, pte);
844} 887}
845 888
846static inline void set_pmd(pmd_t *pmdp, pmd_t pmdval) 889static inline void set_pmd(pmd_t *pmdp, pmd_t pmdval)
847{ 890{
848 PVOP_VCALL3(set_pmd, pmdp, pmdval.pmd, pmdval.pmd >> 32); 891 PVOP_VCALL3(pv_mmu_ops.set_pmd, pmdp,
892 pmdval.pmd, pmdval.pmd >> 32);
849} 893}
850 894
851static inline void set_pud(pud_t *pudp, pud_t pudval) 895static inline void set_pud(pud_t *pudp, pud_t pudval)
852{ 896{
853 PVOP_VCALL3(set_pud, pudp, pudval.pgd.pgd, pudval.pgd.pgd >> 32); 897 PVOP_VCALL3(pv_mmu_ops.set_pud, pudp,
898 pudval.pgd.pgd, pudval.pgd.pgd >> 32);
854} 899}
855 900
856static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep) 901static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
857{ 902{
858 PVOP_VCALL3(pte_clear, mm, addr, ptep); 903 PVOP_VCALL3(pv_mmu_ops.pte_clear, mm, addr, ptep);
859} 904}
860 905
861static inline void pmd_clear(pmd_t *pmdp) 906static inline void pmd_clear(pmd_t *pmdp)
862{ 907{
863 PVOP_VCALL1(pmd_clear, pmdp); 908 PVOP_VCALL1(pv_mmu_ops.pmd_clear, pmdp);
864} 909}
865 910
866#else /* !CONFIG_X86_PAE */ 911#else /* !CONFIG_X86_PAE */
867 912
868static inline pte_t __pte(unsigned long val) 913static inline pte_t __pte(unsigned long val)
869{ 914{
870 return (pte_t) { PVOP_CALL1(unsigned long, make_pte, val) }; 915 return (pte_t) { PVOP_CALL1(unsigned long, pv_mmu_ops.make_pte, val) };
871} 916}
872 917
873static inline pgd_t __pgd(unsigned long val) 918static inline pgd_t __pgd(unsigned long val)
874{ 919{
875 return (pgd_t) { PVOP_CALL1(unsigned long, make_pgd, val) }; 920 return (pgd_t) { PVOP_CALL1(unsigned long, pv_mmu_ops.make_pgd, val) };
876} 921}
877 922
878static inline unsigned long pte_val(pte_t x) 923static inline unsigned long pte_val(pte_t x)
879{ 924{
880 return PVOP_CALL1(unsigned long, pte_val, x.pte_low); 925 return PVOP_CALL1(unsigned long, pv_mmu_ops.pte_val, x.pte_low);
881} 926}
882 927
883static inline unsigned long pgd_val(pgd_t x) 928static inline unsigned long pgd_val(pgd_t x)
884{ 929{
885 return PVOP_CALL1(unsigned long, pgd_val, x.pgd); 930 return PVOP_CALL1(unsigned long, pv_mmu_ops.pgd_val, x.pgd);
886} 931}
887 932
888static inline void set_pte(pte_t *ptep, pte_t pteval) 933static inline void set_pte(pte_t *ptep, pte_t pteval)
889{ 934{
890 PVOP_VCALL2(set_pte, ptep, pteval.pte_low); 935 PVOP_VCALL2(pv_mmu_ops.set_pte, ptep, pteval.pte_low);
891} 936}
892 937
893static inline void set_pte_at(struct mm_struct *mm, unsigned long addr, 938static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
894 pte_t *ptep, pte_t pteval) 939 pte_t *ptep, pte_t pteval)
895{ 940{
896 PVOP_VCALL4(set_pte_at, mm, addr, ptep, pteval.pte_low); 941 PVOP_VCALL4(pv_mmu_ops.set_pte_at, mm, addr, ptep, pteval.pte_low);
897} 942}
898 943
899static inline void set_pmd(pmd_t *pmdp, pmd_t pmdval) 944static inline void set_pmd(pmd_t *pmdp, pmd_t pmdval)
900{ 945{
901 PVOP_VCALL2(set_pmd, pmdp, pmdval.pud.pgd.pgd); 946 PVOP_VCALL2(pv_mmu_ops.set_pmd, pmdp, pmdval.pud.pgd.pgd);
902} 947}
903#endif /* CONFIG_X86_PAE */ 948#endif /* CONFIG_X86_PAE */
904 949
950/* Lazy mode for batching updates / context switch */
951enum paravirt_lazy_mode {
952 PARAVIRT_LAZY_NONE,
953 PARAVIRT_LAZY_MMU,
954 PARAVIRT_LAZY_CPU,
955};
956
957enum paravirt_lazy_mode paravirt_get_lazy_mode(void);
958void paravirt_enter_lazy_cpu(void);
959void paravirt_leave_lazy_cpu(void);
960void paravirt_enter_lazy_mmu(void);
961void paravirt_leave_lazy_mmu(void);
962void paravirt_leave_lazy(enum paravirt_lazy_mode mode);
963
905#define __HAVE_ARCH_ENTER_LAZY_CPU_MODE 964#define __HAVE_ARCH_ENTER_LAZY_CPU_MODE
906static inline void arch_enter_lazy_cpu_mode(void) 965static inline void arch_enter_lazy_cpu_mode(void)
907{ 966{
908 PVOP_VCALL1(set_lazy_mode, PARAVIRT_LAZY_CPU); 967 PVOP_VCALL0(pv_cpu_ops.lazy_mode.enter);
909} 968}
910 969
911static inline void arch_leave_lazy_cpu_mode(void) 970static inline void arch_leave_lazy_cpu_mode(void)
912{ 971{
913 PVOP_VCALL1(set_lazy_mode, PARAVIRT_LAZY_NONE); 972 PVOP_VCALL0(pv_cpu_ops.lazy_mode.leave);
914} 973}
915 974
916static inline void arch_flush_lazy_cpu_mode(void) 975static inline void arch_flush_lazy_cpu_mode(void)
917{ 976{
918 PVOP_VCALL1(set_lazy_mode, PARAVIRT_LAZY_FLUSH); 977 if (unlikely(paravirt_get_lazy_mode() == PARAVIRT_LAZY_CPU)) {
978 arch_leave_lazy_cpu_mode();
979 arch_enter_lazy_cpu_mode();
980 }
919} 981}
920 982
921 983
922#define __HAVE_ARCH_ENTER_LAZY_MMU_MODE 984#define __HAVE_ARCH_ENTER_LAZY_MMU_MODE
923static inline void arch_enter_lazy_mmu_mode(void) 985static inline void arch_enter_lazy_mmu_mode(void)
924{ 986{
925 PVOP_VCALL1(set_lazy_mode, PARAVIRT_LAZY_MMU); 987 PVOP_VCALL0(pv_mmu_ops.lazy_mode.enter);
926} 988}
927 989
928static inline void arch_leave_lazy_mmu_mode(void) 990static inline void arch_leave_lazy_mmu_mode(void)
929{ 991{
930 PVOP_VCALL1(set_lazy_mode, PARAVIRT_LAZY_NONE); 992 PVOP_VCALL0(pv_mmu_ops.lazy_mode.leave);
931} 993}
932 994
933static inline void arch_flush_lazy_mmu_mode(void) 995static inline void arch_flush_lazy_mmu_mode(void)
934{ 996{
935 PVOP_VCALL1(set_lazy_mode, PARAVIRT_LAZY_FLUSH); 997 if (unlikely(paravirt_get_lazy_mode() == PARAVIRT_LAZY_MMU)) {
998 arch_leave_lazy_mmu_mode();
999 arch_enter_lazy_mmu_mode();
1000 }
936} 1001}
937 1002
938void _paravirt_nop(void); 1003void _paravirt_nop(void);
@@ -957,7 +1022,7 @@ static inline unsigned long __raw_local_save_flags(void)
                       PARAVIRT_CALL
                       "popl %%edx; popl %%ecx")
                      : "=a"(f)
-                     : paravirt_type(save_fl),
+                     : paravirt_type(pv_irq_ops.save_fl),
                        paravirt_clobber(CLBR_EAX)
                      : "memory", "cc");
         return f;
@@ -970,7 +1035,7 @@ static inline void raw_local_irq_restore(unsigned long f)
970 "popl %%edx; popl %%ecx") 1035 "popl %%edx; popl %%ecx")
971 : "=a"(f) 1036 : "=a"(f)
972 : "0"(f), 1037 : "0"(f),
973 paravirt_type(restore_fl), 1038 paravirt_type(pv_irq_ops.restore_fl),
974 paravirt_clobber(CLBR_EAX) 1039 paravirt_clobber(CLBR_EAX)
975 : "memory", "cc"); 1040 : "memory", "cc");
976} 1041}
@@ -981,7 +1046,7 @@ static inline void raw_local_irq_disable(void)
                       PARAVIRT_CALL
                       "popl %%edx; popl %%ecx")
                      :
-                     : paravirt_type(irq_disable),
+                     : paravirt_type(pv_irq_ops.irq_disable),
                        paravirt_clobber(CLBR_EAX)
                      : "memory", "eax", "cc");
 }
@@ -992,7 +1057,7 @@ static inline void raw_local_irq_enable(void)
                       PARAVIRT_CALL
                       "popl %%edx; popl %%ecx")
                      :
-                     : paravirt_type(irq_enable),
+                     : paravirt_type(pv_irq_ops.irq_enable),
                        paravirt_clobber(CLBR_EAX)
                      : "memory", "eax", "cc");
 }
@@ -1008,21 +1073,23 @@ static inline unsigned long __raw_local_irq_save(void)
 
 #define CLI_STRING                                                      \
         _paravirt_alt("pushl %%ecx; pushl %%edx;"                       \
-                      "call *paravirt_ops+%c[paravirt_cli_type]*4;"     \
+                      "call *%[paravirt_cli_opptr];"                    \
                       "popl %%edx; popl %%ecx",                         \
                       "%c[paravirt_cli_type]", "%c[paravirt_clobber]")
 
 #define STI_STRING                                                      \
         _paravirt_alt("pushl %%ecx; pushl %%edx;"                       \
-                      "call *paravirt_ops+%c[paravirt_sti_type]*4;"     \
+                      "call *%[paravirt_sti_opptr];"                    \
                       "popl %%edx; popl %%ecx",                         \
                       "%c[paravirt_sti_type]", "%c[paravirt_clobber]")
 
 #define CLI_STI_CLOBBERS , "%eax"
 #define CLI_STI_INPUT_ARGS                                              \
         ,                                                               \
-        [paravirt_cli_type] "i" (PARAVIRT_PATCH(irq_disable)),          \
-        [paravirt_sti_type] "i" (PARAVIRT_PATCH(irq_enable)),           \
+        [paravirt_cli_type] "i" (PARAVIRT_PATCH(pv_irq_ops.irq_disable)), \
+        [paravirt_cli_opptr] "m" (pv_irq_ops.irq_disable),              \
+        [paravirt_sti_type] "i" (PARAVIRT_PATCH(pv_irq_ops.irq_enable)), \
+        [paravirt_sti_opptr] "m" (pv_irq_ops.irq_enable),               \
         paravirt_clobber(CLBR_EAX)
 
 /* Make sure as little as possible of this mess escapes. */
@@ -1042,7 +1109,7 @@ static inline unsigned long __raw_local_irq_save(void)
 
 #else  /* __ASSEMBLY__ */
 
-#define PARA_PATCH(off) ((off) / 4)
+#define PARA_PATCH(struct, off)        ((PARAVIRT_PATCH_##struct + (off)) / 4)
 
 #define PARA_SITE(ptype, clobbers, ops)         \
 771:;                                           \
@@ -1055,29 +1122,29 @@ static inline unsigned long __raw_local_irq_save(void)
          .short clobbers;               \
         .popsection
 
 #define INTERRUPT_RETURN                                        \
-        PARA_SITE(PARA_PATCH(PARAVIRT_iret), CLBR_NONE,         \
-                  jmp *%cs:paravirt_ops+PARAVIRT_iret)
+        PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_iret), CLBR_NONE,       \
+                  jmp *%cs:pv_cpu_ops+PV_CPU_iret)
 
 #define DISABLE_INTERRUPTS(clobbers)                            \
-        PARA_SITE(PARA_PATCH(PARAVIRT_irq_disable), clobbers,   \
+        PARA_SITE(PARA_PATCH(pv_irq_ops, PV_IRQ_irq_disable), clobbers, \
                   pushl %eax; pushl %ecx; pushl %edx;           \
-                  call *%cs:paravirt_ops+PARAVIRT_irq_disable;  \
+                  call *%cs:pv_irq_ops+PV_IRQ_irq_disable;      \
                   popl %edx; popl %ecx; popl %eax)              \
 
 #define ENABLE_INTERRUPTS(clobbers)                             \
-        PARA_SITE(PARA_PATCH(PARAVIRT_irq_enable), clobbers,    \
+        PARA_SITE(PARA_PATCH(pv_irq_ops, PV_IRQ_irq_enable), clobbers,  \
                   pushl %eax; pushl %ecx; pushl %edx;           \
-                  call *%cs:paravirt_ops+PARAVIRT_irq_enable;   \
+                  call *%cs:pv_irq_ops+PV_IRQ_irq_enable;       \
                   popl %edx; popl %ecx; popl %eax)
 
 #define ENABLE_INTERRUPTS_SYSEXIT                       \
-        PARA_SITE(PARA_PATCH(PARAVIRT_irq_enable_sysexit), CLBR_NONE,   \
-                  jmp *%cs:paravirt_ops+PARAVIRT_irq_enable_sysexit)
+        PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_irq_enable_sysexit), CLBR_NONE,\
+                  jmp *%cs:pv_cpu_ops+PV_CPU_irq_enable_sysexit)
 
 #define GET_CR0_INTO_EAX                        \
         push %ecx; push %edx;                   \
-        call *paravirt_ops+PARAVIRT_read_cr0;   \
+        call *pv_cpu_ops+PV_CPU_read_cr0;       \
         pop %edx; pop %ecx
 
 #endif /* __ASSEMBLY__ */
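
One behavioural change in the lazy-mode hunks above (old lines 905-936) is worth calling out: PARAVIRT_LAZY_FLUSH is gone, and arch_flush_lazy_cpu_mode()/arch_flush_lazy_mmu_mode() are now expressed as "leave, then re-enter" whenever the CPU is currently in that lazy mode. The following is a stand-alone C sketch of that control flow only; the static variable stands in for the kernel's real per-CPU bookkeeping and the functions are simplified placeholders, not the actual implementations.

enum paravirt_lazy_mode {
        PARAVIRT_LAZY_NONE,
        PARAVIRT_LAZY_MMU,
        PARAVIRT_LAZY_CPU,
};

static enum paravirt_lazy_mode lazy_mode = PARAVIRT_LAZY_NONE;

static enum paravirt_lazy_mode paravirt_get_lazy_mode(void)
{
        return lazy_mode;
}

static void arch_enter_lazy_mmu_mode(void)
{
        lazy_mode = PARAVIRT_LAZY_MMU;  /* would call pv_mmu_ops.lazy_mode.enter */
}

static void arch_leave_lazy_mmu_mode(void)
{
        lazy_mode = PARAVIRT_LAZY_NONE; /* would call pv_mmu_ops.lazy_mode.leave */
}

static void arch_flush_lazy_mmu_mode(void)
{
        /* Same shape as the new inline in the diff: leave, then re-enter. */
        if (paravirt_get_lazy_mode() == PARAVIRT_LAZY_MMU) {
                arch_leave_lazy_mmu_mode();
                arch_enter_lazy_mmu_mode();
        }
}

int main(void)
{
        arch_enter_lazy_mmu_mode();
        arch_flush_lazy_mmu_mode();     /* leaves and immediately re-enters */
        arch_leave_lazy_mmu_mode();
        return 0;
}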
diff --git a/include/asm-x86/pgtable-3level-defs.h b/include/asm-x86/pgtable-3level-defs.h
index c0df89f66e8b..448ac9516314 100644
--- a/include/asm-x86/pgtable-3level-defs.h
+++ b/include/asm-x86/pgtable-3level-defs.h
@@ -2,7 +2,7 @@
 #define _I386_PGTABLE_3LEVEL_DEFS_H
 
 #ifdef CONFIG_PARAVIRT
-#define SHARED_KERNEL_PMD       (paravirt_ops.shared_kernel_pmd)
+#define SHARED_KERNEL_PMD       (pv_info.shared_kernel_pmd)
 #else
 #define SHARED_KERNEL_PMD       1
 #endif
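
Taken together, the refactor means a backend now fills in several small per-area tables rather than one monolithic paravirt_ops. The sketch below is ordinary user-space C intended only to illustrate that registration pattern under the new layout; the structure layouts are simplified and the xen_* functions are hypothetical placeholders, not the real Xen implementations.

#include <stdio.h>

struct pv_irq_ops {
        void (*irq_disable)(void);
        void (*irq_enable)(void);
};

struct pv_cpu_ops {
        void (*wbinvd)(void);
};

static void native_irq_disable(void) { puts("native: cli"); }
static void native_irq_enable(void)  { puts("native: sti"); }
static void native_wbinvd(void)      { puts("native: wbinvd"); }

/* Defaults, as the native kernel would provide them. */
static struct pv_irq_ops pv_irq_ops = { native_irq_disable, native_irq_enable };
static struct pv_cpu_ops pv_cpu_ops = { native_wbinvd };

/* Hypothetical backend: overrides only the IRQ table, leaves CPU ops alone. */
static void xen_irq_disable(void) { puts("backend: mask event delivery"); }
static void xen_irq_enable(void)  { puts("backend: unmask event delivery"); }

int main(void)
{
        pv_irq_ops.irq_disable = xen_irq_disable;
        pv_irq_ops.irq_enable  = xen_irq_enable;

        pv_irq_ops.irq_disable();       /* goes through the overridden table */
        pv_cpu_ops.wbinvd();            /* still the native default */
        pv_irq_ops.irq_enable();
        return 0;
}

Because each call site in the header names a specific table and field (for example pv_cpu_ops.wbinvd or pv_irq_ops.irq_disable), a backend that only cares about one area no longer has to touch, or even know about, the others.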