Diffstat (limited to 'include/asm-x86/paravirt.h')
-rw-r--r--	include/asm-x86/paravirt.h	1085
1 files changed, 1085 insertions, 0 deletions

diff --git a/include/asm-x86/paravirt.h b/include/asm-x86/paravirt.h
new file mode 100644
index 000000000000..9fa3fa9e62d1
--- /dev/null
+++ b/include/asm-x86/paravirt.h
@@ -0,0 +1,1085 @@
#ifndef __ASM_PARAVIRT_H
#define __ASM_PARAVIRT_H
/* Various instructions on x86 need to be replaced for
 * para-virtualization: those hooks are defined here. */

#ifdef CONFIG_PARAVIRT
#include <asm/page.h>

/* Bitmask of what can be clobbered: usually at least eax. */
#define CLBR_NONE 0x0
#define CLBR_EAX 0x1
#define CLBR_ECX 0x2
#define CLBR_EDX 0x4
#define CLBR_ANY 0x7

#ifndef __ASSEMBLY__
#include <linux/types.h>
#include <linux/cpumask.h>
#include <asm/kmap_types.h>

struct page;
struct thread_struct;
struct Xgt_desc_struct;
struct tss_struct;
struct mm_struct;
struct desc_struct;

/* Lazy mode for batching updates / context switch */
enum paravirt_lazy_mode {
        PARAVIRT_LAZY_NONE = 0,
        PARAVIRT_LAZY_MMU = 1,
        PARAVIRT_LAZY_CPU = 2,
        PARAVIRT_LAZY_FLUSH = 3,
};
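
/*
 * Illustrative use of these modes (a sketch, not part of this patch;
 * the enter/leave/flush wrappers live near the end of this file):
 * batching a run of pagetable updates might look like
 *
 *	arch_enter_lazy_mmu_mode();
 *	for (addr = start; addr < end; addr += PAGE_SIZE, ptep++)
 *		set_pte_at(mm, addr, ptep, pteval);
 *	arch_leave_lazy_mmu_mode();
 *
 * A backend may queue the individual updates while the mode is active
 * and hand them to the hypervisor as a single batch on leave.
 */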

struct paravirt_ops
{
        unsigned int kernel_rpl;
        int shared_kernel_pmd;
        int paravirt_enabled;
        const char *name;

        /*
         * Patch may replace one of the defined code sequences with arbitrary
         * code, subject to the same register constraints. This generally
         * means the code is not free to clobber any registers other than EAX.
         * The patch function should return the number of bytes of code
         * generated, as we nop pad the rest in generic code.
         */
        unsigned (*patch)(u8 type, u16 clobber, void *insnbuf,
                          unsigned long addr, unsigned len);

        /* Basic arch-specific setup */
        void (*arch_setup)(void);
        char *(*memory_setup)(void);
        void (*post_allocator_init)(void);

        void (*init_IRQ)(void);
        void (*time_init)(void);

        /*
         * Called before/after init_mm pagetable setup. setup_start
         * may reset %cr3, and may pre-install parts of the pagetable;
         * pagetable setup is expected to preserve any existing
         * mapping.
         */
        void (*pagetable_setup_start)(pgd_t *pgd_base);
        void (*pagetable_setup_done)(pgd_t *pgd_base);

        /* Print a banner to identify the environment */
        void (*banner)(void);

        /* Get and set time of day */
        unsigned long (*get_wallclock)(void);
        int (*set_wallclock)(unsigned long);

        /* cpuid emulation, mostly so that caps bits can be disabled */
        void (*cpuid)(unsigned int *eax, unsigned int *ebx,
                      unsigned int *ecx, unsigned int *edx);

        /* hooks for various privileged instructions */
        unsigned long (*get_debugreg)(int regno);
        void (*set_debugreg)(int regno, unsigned long value);

        void (*clts)(void);

        unsigned long (*read_cr0)(void);
        void (*write_cr0)(unsigned long);

        unsigned long (*read_cr2)(void);
        void (*write_cr2)(unsigned long);

        unsigned long (*read_cr3)(void);
        void (*write_cr3)(unsigned long);

        unsigned long (*read_cr4_safe)(void);
        unsigned long (*read_cr4)(void);
        void (*write_cr4)(unsigned long);

        /*
         * Get/set interrupt state. save_fl and restore_fl are only
         * expected to use X86_EFLAGS_IF; all other bits
         * returned from save_fl are undefined, and may be ignored by
         * restore_fl.
         */
        unsigned long (*save_fl)(void);
        void (*restore_fl)(unsigned long);
        void (*irq_disable)(void);
        void (*irq_enable)(void);
        void (*safe_halt)(void);
        void (*halt)(void);

        void (*wbinvd)(void);

        /* MSR, PMC and TSC operations.
           read_msr sets err to 0/-EFAULT; write_msr returns 0/-EFAULT. */
        u64 (*read_msr)(unsigned int msr, int *err);
        int (*write_msr)(unsigned int msr, u64 val);

        u64 (*read_tsc)(void);
        u64 (*read_pmc)(int counter);
        unsigned long long (*sched_clock)(void);
        unsigned long (*get_cpu_khz)(void);

        /* Segment descriptor handling */
        void (*load_tr_desc)(void);
        void (*load_gdt)(const struct Xgt_desc_struct *);
        void (*load_idt)(const struct Xgt_desc_struct *);
        void (*store_gdt)(struct Xgt_desc_struct *);
        void (*store_idt)(struct Xgt_desc_struct *);
        void (*set_ldt)(const void *desc, unsigned entries);
        unsigned long (*store_tr)(void);
        void (*load_tls)(struct thread_struct *t, unsigned int cpu);
        void (*write_ldt_entry)(struct desc_struct *,
                                int entrynum, u32 low, u32 high);
        void (*write_gdt_entry)(struct desc_struct *,
                                int entrynum, u32 low, u32 high);
        void (*write_idt_entry)(struct desc_struct *,
                                int entrynum, u32 low, u32 high);
        void (*load_esp0)(struct tss_struct *tss, struct thread_struct *t);

        void (*set_iopl_mask)(unsigned mask);
        void (*io_delay)(void);

        /*
         * Hooks for intercepting the creation/use/destruction of an
         * mm_struct.
         */
        void (*activate_mm)(struct mm_struct *prev,
                            struct mm_struct *next);
        void (*dup_mmap)(struct mm_struct *oldmm,
                         struct mm_struct *mm);
        void (*exit_mmap)(struct mm_struct *mm);

#ifdef CONFIG_X86_LOCAL_APIC
        /*
         * Direct APIC operations, principally for VMI. Ideally
         * these shouldn't be in this interface.
         */
        void (*apic_write)(unsigned long reg, unsigned long v);
        void (*apic_write_atomic)(unsigned long reg, unsigned long v);
        unsigned long (*apic_read)(unsigned long reg);
        void (*setup_boot_clock)(void);
        void (*setup_secondary_clock)(void);

        void (*startup_ipi_hook)(int phys_apicid,
                                 unsigned long start_eip,
                                 unsigned long start_esp);
#endif

        /* TLB operations */
        void (*flush_tlb_user)(void);
        void (*flush_tlb_kernel)(void);
        void (*flush_tlb_single)(unsigned long addr);
        void (*flush_tlb_others)(const cpumask_t *cpus, struct mm_struct *mm,
                                 unsigned long va);

        /* Hooks for allocating/releasing pagetable pages */
        void (*alloc_pt)(struct mm_struct *mm, u32 pfn);
        void (*alloc_pd)(u32 pfn);
        void (*alloc_pd_clone)(u32 pfn, u32 clonepfn, u32 start, u32 count);
        void (*release_pt)(u32 pfn);
        void (*release_pd)(u32 pfn);

        /* Pagetable manipulation functions */
        void (*set_pte)(pte_t *ptep, pte_t pteval);
        void (*set_pte_at)(struct mm_struct *mm, unsigned long addr,
                           pte_t *ptep, pte_t pteval);
        void (*set_pmd)(pmd_t *pmdp, pmd_t pmdval);
        void (*pte_update)(struct mm_struct *mm, unsigned long addr, pte_t *ptep);
        void (*pte_update_defer)(struct mm_struct *mm,
                                 unsigned long addr, pte_t *ptep);

#ifdef CONFIG_HIGHPTE
        void *(*kmap_atomic_pte)(struct page *page, enum km_type type);
#endif

#ifdef CONFIG_X86_PAE
        void (*set_pte_atomic)(pte_t *ptep, pte_t pteval);
        void (*set_pte_present)(struct mm_struct *mm, unsigned long addr,
                                pte_t *ptep, pte_t pte);
        void (*set_pud)(pud_t *pudp, pud_t pudval);
        void (*pte_clear)(struct mm_struct *mm, unsigned long addr, pte_t *ptep);
        void (*pmd_clear)(pmd_t *pmdp);

        unsigned long long (*pte_val)(pte_t);
        unsigned long long (*pmd_val)(pmd_t);
        unsigned long long (*pgd_val)(pgd_t);

        pte_t (*make_pte)(unsigned long long pte);
        pmd_t (*make_pmd)(unsigned long long pmd);
        pgd_t (*make_pgd)(unsigned long long pgd);
#else
        unsigned long (*pte_val)(pte_t);
        unsigned long (*pgd_val)(pgd_t);

        pte_t (*make_pte)(unsigned long pte);
        pgd_t (*make_pgd)(unsigned long pgd);
#endif

        /* Set deferred update mode, used for batching operations. */
        void (*set_lazy_mode)(enum paravirt_lazy_mode mode);

        /* These two are jumped to, not actually called. */
        void (*irq_enable_sysexit)(void);
        void (*iret)(void);
};

extern struct paravirt_ops paravirt_ops;
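
/*
 * A rough sketch of how a backend populates this single global
 * instance (member values here are illustrative only; the native
 * instance lives in arch/i386/kernel/paravirt.c):
 *
 *	struct paravirt_ops paravirt_ops = {
 *		.name			= "bare hardware",
 *		.paravirt_enabled	= 0,
 *		.kernel_rpl		= 0,
 *		.irq_disable		= native_irq_disable,
 *		.irq_enable		= native_irq_enable,
 *		...one pointer per hook...
 *	};
 *
 * A hypervisor port (VMI, Xen, lguest) overwrites these pointers with
 * its own implementations early in boot.
 */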

#define PARAVIRT_PATCH(x)					\
        (offsetof(struct paravirt_ops, x) / sizeof(void *))

#define paravirt_type(type)					\
        [paravirt_typenum] "i" (PARAVIRT_PATCH(type))
#define paravirt_clobber(clobber)				\
        [paravirt_clobber] "i" (clobber)

/*
 * Generate some code, and mark it as patchable by the
 * apply_paravirt() alternate instruction patcher.
 */
#define _paravirt_alt(insn_string, type, clobber)	\
        "771:\n\t" insn_string "\n" "772:\n"		\
        ".pushsection .parainstructions,\"a\"\n"	\
        "  .long 771b\n"				\
        "  .byte " type "\n"				\
        "  .byte 772b-771b\n"				\
        "  .short " clobber "\n"			\
        ".popsection\n"

/* Generate patchable code, with the default asm parameters. */
#define paravirt_alt(insn_string)					\
        _paravirt_alt(insn_string, "%c[paravirt_typenum]", "%c[paravirt_clobber]")

unsigned paravirt_patch_nop(void);
unsigned paravirt_patch_ignore(unsigned len);
unsigned paravirt_patch_call(void *insnbuf,
                             const void *target, u16 tgt_clobbers,
                             unsigned long addr, u16 site_clobbers,
                             unsigned len);
unsigned paravirt_patch_jmp(const void *target, void *insnbuf,
                            unsigned long addr, unsigned len);
unsigned paravirt_patch_default(u8 type, u16 clobbers, void *insnbuf,
                                unsigned long addr, unsigned len);

unsigned paravirt_patch_insns(void *insnbuf, unsigned len,
                              const char *start, const char *end);

int paravirt_disable_iospace(void);

/*
 * This generates an indirect call based on the operation type number.
 * The type number, computed in PARAVIRT_PATCH, is derived from the
 * offset into the paravirt_ops structure, and can therefore be freely
 * converted back into a structure offset.
 */
#define PARAVIRT_CALL "call *(paravirt_ops+%c[paravirt_typenum]*4);"
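
/*
 * A worked example (sketch): for the save_fl hook,
 * PARAVIRT_PATCH(save_fl) == offsetof(struct paravirt_ops, save_fl)/4,
 * so the macro emits
 *
 *	call *(paravirt_ops + PARAVIRT_PATCH(save_fl)*4)
 *
 * i.e. an indirect call through the save_fl slot of the structure.
 * The patcher can turn the same number back into the member and so
 * decide how to rewrite the site.
 */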

/*
 * These macros are intended to wrap calls into a paravirt_ops
 * operation, so that they can be later identified and patched at
 * runtime.
 *
 * Normally, a call to a pv_op function is a simple indirect call:
 * paravirt_ops.operation(args...).
 *
 * Unfortunately, an indirect call is a relatively slow operation for
 * modern CPUs, because the processor cannot always predict the
 * destination address. Here, though, the address is a runtime
 * constant, so at the very least we can patch the call to be a simple
 * direct call, or ideally, patch an inline implementation into the
 * callsite. (Direct calls are essentially free, because the call and
 * return addresses are completely predictable.)
 *
 * These macros rely on the standard gcc "regparm(3)" calling
 * convention, in which the first three arguments are placed in %eax,
 * %edx, %ecx (in that order), and the remaining arguments are placed
 * on the stack. All caller-save registers (eax,edx,ecx) are expected
 * to be modified (either clobbered or used for return values).
 *
 * The call instruction itself is marked by placing its start address
 * and size into the .parainstructions section, so that
 * apply_paravirt() in arch/i386/kernel/alternative.c can do the
 * appropriate patching under the control of the backend paravirt_ops
 * implementation.
 *
 * Unfortunately there's no way to get gcc to generate the args setup
 * for the call, and then allow the call itself to be generated by an
 * inline asm. Because of this, we must do the complete arg setup and
 * return value handling from within these macros. This is fairly
 * cumbersome.
 *
 * There are 5 sets of PVOP_* macros for dealing with 0-4 arguments.
 * It could be extended to more arguments, but there would be little
 * to be gained from that. For each number of arguments, there are
 * the two VCALL and CALL variants for void and non-void functions.
 *
 * When there is a return value, the invoker of the macro must specify
 * the return type. The macro then uses sizeof() on that type to
 * determine whether it's a 32- or 64-bit value, and places the return
 * value in the right register(s) (just %eax for 32-bit, and %edx:%eax
 * for 64-bit).
 *
 * 64-bit arguments are passed as a pair of adjacent 32-bit arguments,
 * in low,high order.
 *
 * Small structures are passed and returned in registers. The macro
 * calling convention can't deal with this directly, so the wrapper
 * functions must handle it.
 *
 * These PVOP_* macros are only defined within this header. This
 * means that all uses must be wrapped in inline functions. This also
 * makes sure the incoming and outgoing types are always correct.
 */
#define __PVOP_CALL(rettype, op, pre, post, ...)			\
        ({								\
                rettype __ret;						\
                unsigned long __eax, __edx, __ecx;			\
                if (sizeof(rettype) > sizeof(unsigned long)) {		\
                        asm volatile(pre				\
                                     paravirt_alt(PARAVIRT_CALL)	\
                                     post				\
                                     : "=a" (__eax), "=d" (__edx),	\
                                       "=c" (__ecx)			\
                                     : paravirt_type(op),		\
                                       paravirt_clobber(CLBR_ANY),	\
                                       ##__VA_ARGS__			\
                                     : "memory", "cc");			\
                        __ret = (rettype)((((u64)__edx) << 32) | __eax); \
                } else {						\
                        asm volatile(pre				\
                                     paravirt_alt(PARAVIRT_CALL)	\
                                     post				\
                                     : "=a" (__eax), "=d" (__edx),	\
                                       "=c" (__ecx)			\
                                     : paravirt_type(op),		\
                                       paravirt_clobber(CLBR_ANY),	\
                                       ##__VA_ARGS__			\
                                     : "memory", "cc");			\
                        __ret = (rettype)__eax;				\
                }							\
                __ret;							\
        })
#define __PVOP_VCALL(op, pre, post, ...)				\
        ({								\
                unsigned long __eax, __edx, __ecx;			\
                asm volatile(pre					\
                             paravirt_alt(PARAVIRT_CALL)		\
                             post					\
                             : "=a" (__eax), "=d" (__edx), "=c" (__ecx) \
                             : paravirt_type(op),			\
                               paravirt_clobber(CLBR_ANY),		\
                               ##__VA_ARGS__				\
                             : "memory", "cc");				\
        })

#define PVOP_CALL0(rettype, op)						\
        __PVOP_CALL(rettype, op, "", "")
#define PVOP_VCALL0(op)							\
        __PVOP_VCALL(op, "", "")

#define PVOP_CALL1(rettype, op, arg1)					\
        __PVOP_CALL(rettype, op, "", "", "0" ((u32)(arg1)))
#define PVOP_VCALL1(op, arg1)						\
        __PVOP_VCALL(op, "", "", "0" ((u32)(arg1)))

#define PVOP_CALL2(rettype, op, arg1, arg2)				\
        __PVOP_CALL(rettype, op, "", "", "0" ((u32)(arg1)), "1" ((u32)(arg2)))
#define PVOP_VCALL2(op, arg1, arg2)					\
        __PVOP_VCALL(op, "", "", "0" ((u32)(arg1)), "1" ((u32)(arg2)))

#define PVOP_CALL3(rettype, op, arg1, arg2, arg3)			\
        __PVOP_CALL(rettype, op, "", "", "0" ((u32)(arg1)),		\
                    "1" ((u32)(arg2)), "2" ((u32)(arg3)))
#define PVOP_VCALL3(op, arg1, arg2, arg3)				\
        __PVOP_VCALL(op, "", "", "0" ((u32)(arg1)), "1" ((u32)(arg2)),	\
                     "2" ((u32)(arg3)))

#define PVOP_CALL4(rettype, op, arg1, arg2, arg3, arg4)			\
        __PVOP_CALL(rettype, op,					\
                    "push %[_arg4];", "lea 4(%%esp),%%esp;",		\
                    "0" ((u32)(arg1)), "1" ((u32)(arg2)),		\
                    "2" ((u32)(arg3)), [_arg4] "mr" ((u32)(arg4)))
#define PVOP_VCALL4(op, arg1, arg2, arg3, arg4)				\
        __PVOP_VCALL(op,						\
                     "push %[_arg4];", "lea 4(%%esp),%%esp;",		\
                     "0" ((u32)(arg1)), "1" ((u32)(arg2)),		\
                     "2" ((u32)(arg3)), [_arg4] "mr" ((u32)(arg4)))

static inline int paravirt_enabled(void)
{
        return paravirt_ops.paravirt_enabled;
}

static inline void load_esp0(struct tss_struct *tss,
                             struct thread_struct *thread)
{
        PVOP_VCALL2(load_esp0, tss, thread);
}

#define ARCH_SETUP	paravirt_ops.arch_setup();
static inline unsigned long get_wallclock(void)
{
        return PVOP_CALL0(unsigned long, get_wallclock);
}

static inline int set_wallclock(unsigned long nowtime)
{
        return PVOP_CALL1(int, set_wallclock, nowtime);
}

static inline void (*choose_time_init(void))(void)
{
        return paravirt_ops.time_init;
}

/* The paravirtualized CPUID instruction. */
static inline void __cpuid(unsigned int *eax, unsigned int *ebx,
                           unsigned int *ecx, unsigned int *edx)
{
        PVOP_VCALL4(cpuid, eax, ebx, ecx, edx);
}

/*
 * These special macros can be used to get or set a debugging register
 */
static inline unsigned long paravirt_get_debugreg(int reg)
{
        return PVOP_CALL1(unsigned long, get_debugreg, reg);
}
#define get_debugreg(var, reg) var = paravirt_get_debugreg(reg)
static inline void set_debugreg(unsigned long val, int reg)
{
        PVOP_VCALL2(set_debugreg, reg, val);
}

static inline void clts(void)
{
        PVOP_VCALL0(clts);
}

static inline unsigned long read_cr0(void)
{
        return PVOP_CALL0(unsigned long, read_cr0);
}

static inline void write_cr0(unsigned long x)
{
        PVOP_VCALL1(write_cr0, x);
}

static inline unsigned long read_cr2(void)
{
        return PVOP_CALL0(unsigned long, read_cr2);
}

static inline void write_cr2(unsigned long x)
{
        PVOP_VCALL1(write_cr2, x);
}

static inline unsigned long read_cr3(void)
{
        return PVOP_CALL0(unsigned long, read_cr3);
}

static inline void write_cr3(unsigned long x)
{
        PVOP_VCALL1(write_cr3, x);
}

static inline unsigned long read_cr4(void)
{
        return PVOP_CALL0(unsigned long, read_cr4);
}
static inline unsigned long read_cr4_safe(void)
{
        return PVOP_CALL0(unsigned long, read_cr4_safe);
}

static inline void write_cr4(unsigned long x)
{
        PVOP_VCALL1(write_cr4, x);
}

static inline void raw_safe_halt(void)
{
        PVOP_VCALL0(safe_halt);
}

static inline void halt(void)
{
        PVOP_VCALL0(halt);
}

static inline void wbinvd(void)
{
        PVOP_VCALL0(wbinvd);
}

#define get_kernel_rpl()  (paravirt_ops.kernel_rpl)

static inline u64 paravirt_read_msr(unsigned msr, int *err)
{
        return PVOP_CALL2(u64, read_msr, msr, err);
}
static inline int paravirt_write_msr(unsigned msr, unsigned low, unsigned high)
{
        return PVOP_CALL3(int, write_msr, msr, low, high);
}

/* These should all do BUG_ON(_err), but our headers are too tangled. */
#define rdmsr(msr,val1,val2) do {			\
        int _err;					\
        u64 _l = paravirt_read_msr(msr, &_err);		\
        val1 = (u32)_l;					\
        val2 = _l >> 32;				\
} while(0)

#define wrmsr(msr,val1,val2) do {			\
        paravirt_write_msr(msr, val1, val2);		\
} while(0)

#define rdmsrl(msr,val) do {				\
        int _err;					\
        val = paravirt_read_msr(msr, &_err);		\
} while(0)

#define wrmsrl(msr,val)		wrmsr(msr, (u32)((u64)(val)), ((u64)(val))>>32)
#define wrmsr_safe(msr,a,b)	paravirt_write_msr(msr, a, b)

/* rdmsr with exception handling */
#define rdmsr_safe(msr,a,b) ({				\
        int _err;					\
        u64 _l = paravirt_read_msr(msr, &_err);		\
        (*a) = (u32)_l;					\
        (*b) = _l >> 32;				\
        _err; })
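
/*
 * Typical use of the exception-safe variant (the MSR name here is only
 * illustrative):
 *
 *	u32 lo, hi;
 *	if (rdmsr_safe(MSR_IA32_APICBASE, &lo, &hi))
 *		return -EIO;	// the access faulted
 *
 * The plain rdmsr/wrmsr forms discard the error, which is why the
 * comment above says they should really BUG_ON() it.
 */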


static inline u64 paravirt_read_tsc(void)
{
        return PVOP_CALL0(u64, read_tsc);
}

#define rdtscl(low) do {				\
        u64 _l = paravirt_read_tsc();			\
        low = (int)_l;					\
} while(0)

#define rdtscll(val) (val = paravirt_read_tsc())

static inline unsigned long long paravirt_sched_clock(void)
{
        return PVOP_CALL0(unsigned long long, sched_clock);
}
#define calculate_cpu_khz() (paravirt_ops.get_cpu_khz())

#define write_tsc(val1,val2) wrmsr(0x10, val1, val2)	/* MSR 0x10 is the TSC */

static inline unsigned long long paravirt_read_pmc(int counter)
{
        return PVOP_CALL1(u64, read_pmc, counter);
}

#define rdpmc(counter,low,high) do {			\
        u64 _l = paravirt_read_pmc(counter);		\
        low = (u32)_l;					\
        high = _l >> 32;				\
} while(0)

static inline void load_TR_desc(void)
{
        PVOP_VCALL0(load_tr_desc);
}
static inline void load_gdt(const struct Xgt_desc_struct *dtr)
{
        PVOP_VCALL1(load_gdt, dtr);
}
static inline void load_idt(const struct Xgt_desc_struct *dtr)
{
        PVOP_VCALL1(load_idt, dtr);
}
static inline void set_ldt(const void *addr, unsigned entries)
{
        PVOP_VCALL2(set_ldt, addr, entries);
}
static inline void store_gdt(struct Xgt_desc_struct *dtr)
{
        PVOP_VCALL1(store_gdt, dtr);
}
static inline void store_idt(struct Xgt_desc_struct *dtr)
{
        PVOP_VCALL1(store_idt, dtr);
}
static inline unsigned long paravirt_store_tr(void)
{
        return PVOP_CALL0(unsigned long, store_tr);
}
#define store_tr(tr)	((tr) = paravirt_store_tr())
static inline void load_TLS(struct thread_struct *t, unsigned cpu)
{
        PVOP_VCALL2(load_tls, t, cpu);
}
static inline void write_ldt_entry(void *dt, int entry, u32 low, u32 high)
{
        PVOP_VCALL4(write_ldt_entry, dt, entry, low, high);
}
static inline void write_gdt_entry(void *dt, int entry, u32 low, u32 high)
{
        PVOP_VCALL4(write_gdt_entry, dt, entry, low, high);
}
static inline void write_idt_entry(void *dt, int entry, u32 low, u32 high)
{
        PVOP_VCALL4(write_idt_entry, dt, entry, low, high);
}
static inline void set_iopl_mask(unsigned mask)
{
        PVOP_VCALL1(set_iopl_mask, mask);
}

/* The paravirtualized I/O functions */
static inline void slow_down_io(void) {
        paravirt_ops.io_delay();
#ifdef REALLY_SLOW_IO
        paravirt_ops.io_delay();
        paravirt_ops.io_delay();
        paravirt_ops.io_delay();
#endif
}

#ifdef CONFIG_X86_LOCAL_APIC
/*
 * Basic functions accessing APICs.
 */
static inline void apic_write(unsigned long reg, unsigned long v)
{
        PVOP_VCALL2(apic_write, reg, v);
}

static inline void apic_write_atomic(unsigned long reg, unsigned long v)
{
        PVOP_VCALL2(apic_write_atomic, reg, v);
}

static inline unsigned long apic_read(unsigned long reg)
{
        return PVOP_CALL1(unsigned long, apic_read, reg);
}

static inline void setup_boot_clock(void)
{
        PVOP_VCALL0(setup_boot_clock);
}

static inline void setup_secondary_clock(void)
{
        PVOP_VCALL0(setup_secondary_clock);
}
#endif

static inline void paravirt_post_allocator_init(void)
{
        if (paravirt_ops.post_allocator_init)
                (*paravirt_ops.post_allocator_init)();
}

static inline void paravirt_pagetable_setup_start(pgd_t *base)
{
        if (paravirt_ops.pagetable_setup_start)
                (*paravirt_ops.pagetable_setup_start)(base);
}

static inline void paravirt_pagetable_setup_done(pgd_t *base)
{
        if (paravirt_ops.pagetable_setup_done)
                (*paravirt_ops.pagetable_setup_done)(base);
}

#ifdef CONFIG_SMP
static inline void startup_ipi_hook(int phys_apicid, unsigned long start_eip,
                                    unsigned long start_esp)
{
        PVOP_VCALL3(startup_ipi_hook, phys_apicid, start_eip, start_esp);
}
#endif

static inline void paravirt_activate_mm(struct mm_struct *prev,
                                        struct mm_struct *next)
{
        PVOP_VCALL2(activate_mm, prev, next);
}

static inline void arch_dup_mmap(struct mm_struct *oldmm,
                                 struct mm_struct *mm)
{
        PVOP_VCALL2(dup_mmap, oldmm, mm);
}

static inline void arch_exit_mmap(struct mm_struct *mm)
{
        PVOP_VCALL1(exit_mmap, mm);
}

static inline void __flush_tlb(void)
{
        PVOP_VCALL0(flush_tlb_user);
}
static inline void __flush_tlb_global(void)
{
        PVOP_VCALL0(flush_tlb_kernel);
}
static inline void __flush_tlb_single(unsigned long addr)
{
        PVOP_VCALL1(flush_tlb_single, addr);
}

static inline void flush_tlb_others(cpumask_t cpumask, struct mm_struct *mm,
                                    unsigned long va)
{
        PVOP_VCALL3(flush_tlb_others, &cpumask, mm, va);
}

static inline void paravirt_alloc_pt(struct mm_struct *mm, unsigned pfn)
{
        PVOP_VCALL2(alloc_pt, mm, pfn);
}
static inline void paravirt_release_pt(unsigned pfn)
{
        PVOP_VCALL1(release_pt, pfn);
}

static inline void paravirt_alloc_pd(unsigned pfn)
{
        PVOP_VCALL1(alloc_pd, pfn);
}

static inline void paravirt_alloc_pd_clone(unsigned pfn, unsigned clonepfn,
                                           unsigned start, unsigned count)
{
        PVOP_VCALL4(alloc_pd_clone, pfn, clonepfn, start, count);
}
static inline void paravirt_release_pd(unsigned pfn)
{
        PVOP_VCALL1(release_pd, pfn);
}

#ifdef CONFIG_HIGHPTE
static inline void *kmap_atomic_pte(struct page *page, enum km_type type)
{
        unsigned long ret;
        ret = PVOP_CALL2(unsigned long, kmap_atomic_pte, page, type);
        return (void *)ret;
}
#endif

static inline void pte_update(struct mm_struct *mm, unsigned long addr,
                              pte_t *ptep)
{
        PVOP_VCALL3(pte_update, mm, addr, ptep);
}

static inline void pte_update_defer(struct mm_struct *mm, unsigned long addr,
                                    pte_t *ptep)
{
        PVOP_VCALL3(pte_update_defer, mm, addr, ptep);
}

#ifdef CONFIG_X86_PAE
static inline pte_t __pte(unsigned long long val)
{
        unsigned long long ret = PVOP_CALL2(unsigned long long, make_pte,
                                            val, val >> 32);
        return (pte_t) { ret, ret >> 32 };
}

static inline pmd_t __pmd(unsigned long long val)
{
        return (pmd_t) { PVOP_CALL2(unsigned long long, make_pmd,
                                    val, val >> 32) };
}

static inline pgd_t __pgd(unsigned long long val)
{
        return (pgd_t) { PVOP_CALL2(unsigned long long, make_pgd,
                                    val, val >> 32) };
}

static inline unsigned long long pte_val(pte_t x)
{
        return PVOP_CALL2(unsigned long long, pte_val, x.pte_low, x.pte_high);
}

static inline unsigned long long pmd_val(pmd_t x)
{
        return PVOP_CALL2(unsigned long long, pmd_val, x.pmd, x.pmd >> 32);
}

static inline unsigned long long pgd_val(pgd_t x)
{
        return PVOP_CALL2(unsigned long long, pgd_val, x.pgd, x.pgd >> 32);
}

static inline void set_pte(pte_t *ptep, pte_t pteval)
{
        PVOP_VCALL3(set_pte, ptep, pteval.pte_low, pteval.pte_high);
}

static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
                              pte_t *ptep, pte_t pteval)
{
        /* 5 arg words */
        paravirt_ops.set_pte_at(mm, addr, ptep, pteval);
}

static inline void set_pte_atomic(pte_t *ptep, pte_t pteval)
{
        PVOP_VCALL3(set_pte_atomic, ptep, pteval.pte_low, pteval.pte_high);
}

static inline void set_pte_present(struct mm_struct *mm, unsigned long addr,
                                   pte_t *ptep, pte_t pte)
{
        /* 5 arg words */
        paravirt_ops.set_pte_present(mm, addr, ptep, pte);
}

static inline void set_pmd(pmd_t *pmdp, pmd_t pmdval)
{
        PVOP_VCALL3(set_pmd, pmdp, pmdval.pmd, pmdval.pmd >> 32);
}

static inline void set_pud(pud_t *pudp, pud_t pudval)
{
        PVOP_VCALL3(set_pud, pudp, pudval.pgd.pgd, pudval.pgd.pgd >> 32);
}

static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
        PVOP_VCALL3(pte_clear, mm, addr, ptep);
}

static inline void pmd_clear(pmd_t *pmdp)
{
        PVOP_VCALL1(pmd_clear, pmdp);
}

#else  /* !CONFIG_X86_PAE */

static inline pte_t __pte(unsigned long val)
{
        return (pte_t) { PVOP_CALL1(unsigned long, make_pte, val) };
}

static inline pgd_t __pgd(unsigned long val)
{
        return (pgd_t) { PVOP_CALL1(unsigned long, make_pgd, val) };
}

static inline unsigned long pte_val(pte_t x)
{
        return PVOP_CALL1(unsigned long, pte_val, x.pte_low);
}

static inline unsigned long pgd_val(pgd_t x)
{
        return PVOP_CALL1(unsigned long, pgd_val, x.pgd);
}

static inline void set_pte(pte_t *ptep, pte_t pteval)
{
        PVOP_VCALL2(set_pte, ptep, pteval.pte_low);
}

static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
                              pte_t *ptep, pte_t pteval)
{
        PVOP_VCALL4(set_pte_at, mm, addr, ptep, pteval.pte_low);
}

static inline void set_pmd(pmd_t *pmdp, pmd_t pmdval)
{
        PVOP_VCALL2(set_pmd, pmdp, pmdval.pud.pgd.pgd);
}
#endif	/* CONFIG_X86_PAE */

#define __HAVE_ARCH_ENTER_LAZY_CPU_MODE
static inline void arch_enter_lazy_cpu_mode(void)
{
        PVOP_VCALL1(set_lazy_mode, PARAVIRT_LAZY_CPU);
}

static inline void arch_leave_lazy_cpu_mode(void)
{
        PVOP_VCALL1(set_lazy_mode, PARAVIRT_LAZY_NONE);
}

static inline void arch_flush_lazy_cpu_mode(void)
{
        PVOP_VCALL1(set_lazy_mode, PARAVIRT_LAZY_FLUSH);
}


#define __HAVE_ARCH_ENTER_LAZY_MMU_MODE
static inline void arch_enter_lazy_mmu_mode(void)
{
        PVOP_VCALL1(set_lazy_mode, PARAVIRT_LAZY_MMU);
}

static inline void arch_leave_lazy_mmu_mode(void)
{
        PVOP_VCALL1(set_lazy_mode, PARAVIRT_LAZY_NONE);
}

static inline void arch_flush_lazy_mmu_mode(void)
{
        PVOP_VCALL1(set_lazy_mode, PARAVIRT_LAZY_FLUSH);
}

void _paravirt_nop(void);
#define paravirt_nop	((void *)_paravirt_nop)

/* These all sit in the .parainstructions section to tell us what to patch. */
struct paravirt_patch_site {
        u8 *instr;		/* original instructions */
        u8 instrtype;		/* type of this instruction */
        u8 len;			/* length of original instruction */
        u16 clobbers;		/* what registers you may clobber */
};

extern struct paravirt_patch_site __parainstructions[],
        __parainstructions_end[];
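
/*
 * apply_paravirt() in arch/i386/kernel/alternative.c walks these
 * entries; in rough outline (a sketch only, details vary):
 *
 *	struct paravirt_patch_site *p;
 *	char insnbuf[MAX_PATCH_LEN];
 *
 *	for (p = __parainstructions; p < __parainstructions_end; p++) {
 *		unsigned used;
 *
 *		memcpy(insnbuf, p->instr, p->len);
 *		used = paravirt_ops.patch(p->instrtype, p->clobbers,
 *					  insnbuf, (unsigned long)p->instr,
 *					  p->len);
 *		add_nops(insnbuf + used, p->len - used);
 *		memcpy(p->instr, insnbuf, p->len);
 *	}
 *
 * i.e. each site is handed to the backend's patch hook, and whatever
 * it does not fill is NOP-padded, as promised in the struct
 * paravirt_ops comment above.
 */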

static inline unsigned long __raw_local_save_flags(void)
{
        unsigned long f;

        asm volatile(paravirt_alt("pushl %%ecx; pushl %%edx;"
                                  PARAVIRT_CALL
                                  "popl %%edx; popl %%ecx")
                     : "=a"(f)
                     : paravirt_type(save_fl),
                       paravirt_clobber(CLBR_EAX)
                     : "memory", "cc");
        return f;
}

static inline void raw_local_irq_restore(unsigned long f)
{
        asm volatile(paravirt_alt("pushl %%ecx; pushl %%edx;"
                                  PARAVIRT_CALL
                                  "popl %%edx; popl %%ecx")
                     : "=a"(f)
                     : "0"(f),
                       paravirt_type(restore_fl),
                       paravirt_clobber(CLBR_EAX)
                     : "memory", "cc");
}

static inline void raw_local_irq_disable(void)
{
        asm volatile(paravirt_alt("pushl %%ecx; pushl %%edx;"
                                  PARAVIRT_CALL
                                  "popl %%edx; popl %%ecx")
                     :
                     : paravirt_type(irq_disable),
                       paravirt_clobber(CLBR_EAX)
                     : "memory", "eax", "cc");
}

static inline void raw_local_irq_enable(void)
{
        asm volatile(paravirt_alt("pushl %%ecx; pushl %%edx;"
                                  PARAVIRT_CALL
                                  "popl %%edx; popl %%ecx")
                     :
                     : paravirt_type(irq_enable),
                       paravirt_clobber(CLBR_EAX)
                     : "memory", "eax", "cc");
}

static inline unsigned long __raw_local_irq_save(void)
{
        unsigned long f;

        f = __raw_local_save_flags();
        raw_local_irq_disable();
        return f;
}

#define CLI_STRING							\
        _paravirt_alt("pushl %%ecx; pushl %%edx;"			\
                      "call *paravirt_ops+%c[paravirt_cli_type]*4;"	\
                      "popl %%edx; popl %%ecx",				\
                      "%c[paravirt_cli_type]", "%c[paravirt_clobber]")

#define STI_STRING							\
        _paravirt_alt("pushl %%ecx; pushl %%edx;"			\
                      "call *paravirt_ops+%c[paravirt_sti_type]*4;"	\
                      "popl %%edx; popl %%ecx",				\
                      "%c[paravirt_sti_type]", "%c[paravirt_clobber]")

#define CLI_STI_CLOBBERS , "%eax"
#define CLI_STI_INPUT_ARGS						\
        ,								\
        [paravirt_cli_type] "i" (PARAVIRT_PATCH(irq_disable)),		\
        [paravirt_sti_type] "i" (PARAVIRT_PATCH(irq_enable)),		\
        paravirt_clobber(CLBR_EAX)

/* Make sure as little as possible of this mess escapes. */
#undef PARAVIRT_CALL
#undef __PVOP_CALL
#undef __PVOP_VCALL
#undef PVOP_VCALL0
#undef PVOP_CALL0
#undef PVOP_VCALL1
#undef PVOP_CALL1
#undef PVOP_VCALL2
#undef PVOP_CALL2
#undef PVOP_VCALL3
#undef PVOP_CALL3
#undef PVOP_VCALL4
#undef PVOP_CALL4

#else  /* __ASSEMBLY__ */

#define PARA_PATCH(off)	((off) / 4)

#define PARA_SITE(ptype, clobbers, ops)		\
771:;						\
        ops;					\
772:;						\
        .pushsection .parainstructions,"a";	\
         .long 771b;				\
         .byte ptype;				\
         .byte 772b-771b;			\
         .short clobbers;			\
        .popsection

#define INTERRUPT_RETURN					\
        PARA_SITE(PARA_PATCH(PARAVIRT_iret), CLBR_NONE,		\
                  jmp *%cs:paravirt_ops+PARAVIRT_iret)

#define DISABLE_INTERRUPTS(clobbers)					\
        PARA_SITE(PARA_PATCH(PARAVIRT_irq_disable), clobbers,		\
                  pushl %eax; pushl %ecx; pushl %edx;			\
                  call *%cs:paravirt_ops+PARAVIRT_irq_disable;		\
                  popl %edx; popl %ecx; popl %eax)

#define ENABLE_INTERRUPTS(clobbers)					\
        PARA_SITE(PARA_PATCH(PARAVIRT_irq_enable), clobbers,		\
                  pushl %eax; pushl %ecx; pushl %edx;			\
                  call *%cs:paravirt_ops+PARAVIRT_irq_enable;		\
                  popl %edx; popl %ecx; popl %eax)

#define ENABLE_INTERRUPTS_SYSEXIT					\
        PARA_SITE(PARA_PATCH(PARAVIRT_irq_enable_sysexit), CLBR_NONE,	\
                  jmp *%cs:paravirt_ops+PARAVIRT_irq_enable_sysexit)

#define GET_CR0_INTO_EAX			\
        push %ecx; push %edx;			\
        call *paravirt_ops+PARAVIRT_read_cr0;	\
        pop %edx; pop %ecx

#endif /* __ASSEMBLY__ */
#endif /* CONFIG_PARAVIRT */
#endif /* __ASM_PARAVIRT_H */