Diffstat (limited to 'include/asm-x86/paravirt.h')
 include/asm-x86/paravirt.h | 615 ++++++++++++++++++++++++++++++++------------
 1 file changed, 432 insertions(+), 183 deletions(-)
diff --git a/include/asm-x86/paravirt.h b/include/asm-x86/paravirt.h
index f59d370c5df4..d6236eb46466 100644
--- a/include/asm-x86/paravirt.h
+++ b/include/asm-x86/paravirt.h
@@ -5,22 +5,37 @@
 
 #ifdef CONFIG_PARAVIRT
 #include <asm/page.h>
+#include <asm/asm.h>
 
 /* Bitmask of what can be clobbered: usually at least eax. */
-#define CLBR_NONE 0x0
-#define CLBR_EAX 0x1
-#define CLBR_ECX 0x2
-#define CLBR_EDX 0x4
-#define CLBR_ANY 0x7
+#define CLBR_NONE 0
+#define CLBR_EAX (1 << 0)
+#define CLBR_ECX (1 << 1)
+#define CLBR_EDX (1 << 2)
+
+#ifdef CONFIG_X86_64
+#define CLBR_RSI (1 << 3)
+#define CLBR_RDI (1 << 4)
+#define CLBR_R8 (1 << 5)
+#define CLBR_R9 (1 << 6)
+#define CLBR_R10 (1 << 7)
+#define CLBR_R11 (1 << 8)
+#define CLBR_ANY ((1 << 9) - 1)
+#include <asm/desc_defs.h>
+#else
+/* CLBR_ANY should match all regs platform has. For i386, that's just it */
+#define CLBR_ANY ((1 << 3) - 1)
+#endif /* X86_64 */
 
 #ifndef __ASSEMBLY__
 #include <linux/types.h>
 #include <linux/cpumask.h>
 #include <asm/kmap_types.h>
+#include <asm/desc_defs.h>
 
 struct page;
 struct thread_struct;
-struct Xgt_desc_struct;
+struct desc_ptr;
 struct tss_struct;
 struct mm_struct;
 struct desc_struct;
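Each CLBR_* constant is now a single bit, so a clobber mask can name any subset of registers instead of the three fixed i386 values. A minimal sketch of how such a mask is meant to be read (the helper below is hypothetical, not part of the patch):

    /* Hypothetical helper: may the code patched into this site trash
     * %edx?  If the bit is clear, the replacement code must preserve
     * %edx itself. */
    static inline int may_clobber_edx(unsigned clobbers)
    {
            return clobbers & CLBR_EDX;
    }

    /* A site that scratches only %eax and %ecx: */
    unsigned site_clobbers = CLBR_EAX | CLBR_ECX;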
@@ -86,22 +101,27 @@ struct pv_cpu_ops {
 	unsigned long (*read_cr4)(void);
 	void (*write_cr4)(unsigned long);
 
+#ifdef CONFIG_X86_64
+	unsigned long (*read_cr8)(void);
+	void (*write_cr8)(unsigned long);
+#endif
+
 	/* Segment descriptor handling */
 	void (*load_tr_desc)(void);
-	void (*load_gdt)(const struct Xgt_desc_struct *);
-	void (*load_idt)(const struct Xgt_desc_struct *);
-	void (*store_gdt)(struct Xgt_desc_struct *);
-	void (*store_idt)(struct Xgt_desc_struct *);
+	void (*load_gdt)(const struct desc_ptr *);
+	void (*load_idt)(const struct desc_ptr *);
+	void (*store_gdt)(struct desc_ptr *);
+	void (*store_idt)(struct desc_ptr *);
 	void (*set_ldt)(const void *desc, unsigned entries);
 	unsigned long (*store_tr)(void);
 	void (*load_tls)(struct thread_struct *t, unsigned int cpu);
-	void (*write_ldt_entry)(struct desc_struct *,
-				int entrynum, u32 low, u32 high);
+	void (*write_ldt_entry)(struct desc_struct *ldt, int entrynum,
+				const void *desc);
 	void (*write_gdt_entry)(struct desc_struct *,
-				int entrynum, u32 low, u32 high);
-	void (*write_idt_entry)(struct desc_struct *,
-				int entrynum, u32 low, u32 high);
-	void (*load_esp0)(struct tss_struct *tss, struct thread_struct *t);
+				int entrynum, const void *desc, int size);
+	void (*write_idt_entry)(gate_desc *,
+				int entrynum, const gate_desc *gate);
+	void (*load_sp0)(struct tss_struct *tss, struct thread_struct *t);
 
 	void (*set_iopl_mask)(unsigned mask);
 
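pv_cpu_ops is a plain table of function pointers, so a hypervisor backend overrides individual entries during early boot. A hedged sketch in the style of the real backends (the demo_* names are illustrative, not from the patch):

    /* Illustrative only: hook the renamed load_sp0 and the
     * desc_ptr-based load_gdt with guest-specific implementations. */
    static void demo_load_sp0(struct tss_struct *tss, struct thread_struct *t);
    static void demo_load_gdt(const struct desc_ptr *dtr);

    static void __init demo_paravirt_setup(void)
    {
            pv_cpu_ops.load_sp0 = demo_load_sp0;    /* kernel stack switch */
            pv_cpu_ops.load_gdt = demo_load_gdt;    /* takes a desc_ptr now */
    }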
@@ -115,15 +135,18 @@ struct pv_cpu_ops {
 	/* MSR, PMC and TSR operations.
 	   err = 0/-EFAULT.  wrmsr returns 0/-EFAULT. */
 	u64 (*read_msr)(unsigned int msr, int *err);
-	int (*write_msr)(unsigned int msr, u64 val);
+	int (*write_msr)(unsigned int msr, unsigned low, unsigned high);
 
 	u64 (*read_tsc)(void);
-	u64 (*read_pmc)(void);
+	u64 (*read_pmc)(int counter);
+	unsigned long long (*read_tscp)(unsigned int *aux);
 
 	/* These two are jmp to, not actually called. */
-	void (*irq_enable_sysexit)(void);
+	void (*irq_enable_syscall_ret)(void);
 	void (*iret)(void);
 
+	void (*swapgs)(void);
+
 	struct pv_lazy_ops lazy_mode;
 };
 
@@ -150,9 +173,9 @@ struct pv_apic_ops {
 	 * Direct APIC operations, principally for VMI.  Ideally
 	 * these shouldn't be in this interface.
 	 */
-	void (*apic_write)(unsigned long reg, unsigned long v);
-	void (*apic_write_atomic)(unsigned long reg, unsigned long v);
-	unsigned long (*apic_read)(unsigned long reg);
+	void (*apic_write)(unsigned long reg, u32 v);
+	void (*apic_write_atomic)(unsigned long reg, u32 v);
+	u32 (*apic_read)(unsigned long reg);
 	void (*setup_boot_clock)(void);
 	void (*setup_secondary_clock)(void);
 
@@ -198,7 +221,7 @@ struct pv_mmu_ops {
 
 	/* Hooks for allocating/releasing pagetable pages */
 	void (*alloc_pt)(struct mm_struct *mm, u32 pfn);
-	void (*alloc_pd)(u32 pfn);
+	void (*alloc_pd)(struct mm_struct *mm, u32 pfn);
 	void (*alloc_pd_clone)(u32 pfn, u32 clonepfn, u32 start, u32 count);
 	void (*release_pt)(u32 pfn);
 	void (*release_pd)(u32 pfn);
@@ -212,28 +235,34 @@ struct pv_mmu_ops {
 	void (*pte_update_defer)(struct mm_struct *mm,
 				 unsigned long addr, pte_t *ptep);
 
+	pteval_t (*pte_val)(pte_t);
+	pte_t (*make_pte)(pteval_t pte);
+
+	pgdval_t (*pgd_val)(pgd_t);
+	pgd_t (*make_pgd)(pgdval_t pgd);
+
+#if PAGETABLE_LEVELS >= 3
 #ifdef CONFIG_X86_PAE
 	void (*set_pte_atomic)(pte_t *ptep, pte_t pteval);
 	void (*set_pte_present)(struct mm_struct *mm, unsigned long addr,
 				pte_t *ptep, pte_t pte);
-	void (*set_pud)(pud_t *pudp, pud_t pudval);
 	void (*pte_clear)(struct mm_struct *mm, unsigned long addr, pte_t *ptep);
 	void (*pmd_clear)(pmd_t *pmdp);
 
-	unsigned long long (*pte_val)(pte_t);
-	unsigned long long (*pmd_val)(pmd_t);
-	unsigned long long (*pgd_val)(pgd_t);
+#endif	/* CONFIG_X86_PAE */
 
-	pte_t (*make_pte)(unsigned long long pte);
-	pmd_t (*make_pmd)(unsigned long long pmd);
-	pgd_t (*make_pgd)(unsigned long long pgd);
-#else
-	unsigned long (*pte_val)(pte_t);
-	unsigned long (*pgd_val)(pgd_t);
+	void (*set_pud)(pud_t *pudp, pud_t pudval);
 
-	pte_t (*make_pte)(unsigned long pte);
-	pgd_t (*make_pgd)(unsigned long pgd);
-#endif
+	pmdval_t (*pmd_val)(pmd_t);
+	pmd_t (*make_pmd)(pmdval_t pmd);
+
+#if PAGETABLE_LEVELS == 4
+	pudval_t (*pud_val)(pud_t);
+	pud_t (*make_pud)(pudval_t pud);
+
+	void (*set_pgd)(pgd_t *pudp, pgd_t pgdval);
+#endif	/* PAGETABLE_LEVELS == 4 */
+#endif	/* PAGETABLE_LEVELS >= 3 */
 
 #ifdef CONFIG_HIGHPTE
 	void *(*kmap_atomic_pte)(struct page *page, enum km_type type);
@@ -279,7 +308,8 @@ extern struct pv_mmu_ops pv_mmu_ops;
 #define _paravirt_alt(insn_string, type, clobber)	\
 	"771:\n\t" insn_string "\n" "772:\n"		\
 	".pushsection .parainstructions,\"a\"\n"	\
-	"  .long 771b\n"				\
+	_ASM_ALIGN "\n"					\
+	_ASM_PTR " 771b\n"				\
 	"  .byte " type "\n"				\
 	"  .byte 772b-771b\n"				\
 	"  .short " clobber "\n"			\
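Switching from a hard-coded .long to _ASM_ALIGN/_ASM_PTR makes the emitted record pointer-sized on both 32- and 64-bit builds. Each record is decoded through struct paravirt_patch_site, declared further down in this header; roughly:

    struct paravirt_patch_site {
            u8 *instr;      /* emitted by _ASM_PTR " 771b" */
            u8 instrtype;   /* .byte type: which pv_op is called here */
            u8 len;         /* .byte 772b-771b: length of the site */
            u16 clobbers;   /* .short clobber: CLBR_* mask for the site */
    };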
@@ -289,6 +319,11 @@ extern struct pv_mmu_ops pv_mmu_ops;
 #define paravirt_alt(insn_string)					\
 	_paravirt_alt(insn_string, "%c[paravirt_typenum]", "%c[paravirt_clobber]")
 
+/* Simple instruction patching code. */
+#define DEF_NATIVE(ops, name, code)					\
+	extern const char start_##ops##_##name[], end_##ops##_##name[];	\
+	asm("start_" #ops "_" #name ": " code "; end_" #ops "_" #name ":")
+
 unsigned paravirt_patch_nop(void);
 unsigned paravirt_patch_ignore(unsigned len);
 unsigned paravirt_patch_call(void *insnbuf,
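DEF_NATIVE brackets a snippet of native code with start/end labels so the patcher can later copy it over the indirect call. Its use in the kernel's patching code looks roughly like this (a sketch, not the exact file contents):

    /* Native replacements for the hottest operations. */
    DEF_NATIVE(pv_irq_ops, irq_disable, "cli");
    DEF_NATIVE(pv_irq_ops, irq_enable, "sti");
    DEF_NATIVE(pv_cpu_ops, iret, "iret");

    /* ...and inside a native_patch() case, copy the snippet if it fits: */
    return paravirt_patch_insns(ibuf, len,
                                start_pv_irq_ops_irq_disable,
                                end_pv_irq_ops_irq_disable);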
@@ -303,6 +338,9 @@ unsigned paravirt_patch_default(u8 type, u16 clobbers, void *insnbuf,
 unsigned paravirt_patch_insns(void *insnbuf, unsigned len,
 			      const char *start, const char *end);
 
+unsigned native_patch(u8 type, u16 clobbers, void *ibuf,
+		      unsigned long addr, unsigned len);
+
 int paravirt_disable_iospace(void);
 
 /*
@@ -319,7 +357,7 @@ int paravirt_disable_iospace(void);
  * runtime.
  *
  * Normally, a call to a pv_op function is a simple indirect call:
- * (paravirt_ops.operations)(args...).
+ * (pv_op_struct.operations)(args...).
  *
  * Unfortunately, this is a relatively slow operation for modern CPUs,
  * because it cannot necessarily determine what the destination
@@ -329,11 +367,17 @@ int paravirt_disable_iospace(void);
  * calls are essentially free, because the call and return addresses
  * are completely predictable.)
  *
- * These macros rely on the standard gcc "regparm(3)" calling
+ * For i386, these macros rely on the standard gcc "regparm(3)" calling
  * convention, in which the first three arguments are placed in %eax,
  * %edx, %ecx (in that order), and the remaining arguments are placed
  * on the stack.  All caller-save registers (eax,edx,ecx) are expected
  * to be modified (either clobbered or used for return values).
+ * X86_64, on the other hand, already specifies a register-based calling
+ * convention, returning in %rax, with parameters going in %rdi, %rsi,
+ * %rdx, and %rcx.  Note that for this reason, x86_64 does not need any
+ * special handling for dealing with four arguments, unlike i386.
+ * However, x86_64 also has to clobber all caller-saved registers, which
+ * unfortunately are quite a few (r8 - r11).
  *
  * The call instruction itself is marked by placing its start address
  * and size into the .parainstructions section, so that
@@ -356,10 +400,12 @@ int paravirt_disable_iospace(void);
  * the return type.  The macro then uses sizeof() on that type to
  * determine whether its a 32 or 64 bit value, and places the return
  * in the right register(s) (just %eax for 32-bit, and %edx:%eax for
- * 64-bit).
+ * 64-bit).  For x86_64 machines, it just returns in %rax regardless of
+ * the return value size.
  *
  * 64-bit arguments are passed as a pair of adjacent 32-bit arguments
- * in low,high order.
+ * in low,high order.  (This pairing applies to i386; x86_64 passes a
+ * 64-bit argument in a single register.)
  *
  * Small structures are passed and returned in registers.  The macro
  * calling convention can't directly deal with this, so the wrapper
@@ -369,46 +415,67 @@ int paravirt_disable_iospace(void);
  * means that all uses must be wrapped in inline functions.  This also
  * makes sure the incoming and outgoing types are always correct.
  */
+#ifdef CONFIG_X86_32
+#define PVOP_VCALL_ARGS			unsigned long __eax, __edx, __ecx
+#define PVOP_CALL_ARGS			PVOP_VCALL_ARGS
+#define PVOP_VCALL_CLOBBERS		"=a" (__eax), "=d" (__edx),	\
+					"=c" (__ecx)
+#define PVOP_CALL_CLOBBERS		PVOP_VCALL_CLOBBERS
+#define EXTRA_CLOBBERS
+#define VEXTRA_CLOBBERS
+#else
+#define PVOP_VCALL_ARGS		unsigned long __edi, __esi, __edx, __ecx
+#define PVOP_CALL_ARGS		PVOP_VCALL_ARGS, __eax
+#define PVOP_VCALL_CLOBBERS	"=D" (__edi),				\
+				"=S" (__esi), "=d" (__edx),		\
+				"=c" (__ecx)
+
+#define PVOP_CALL_CLOBBERS	PVOP_VCALL_CLOBBERS, "=a" (__eax)
+
+#define EXTRA_CLOBBERS	 , "r8", "r9", "r10", "r11"
+#define VEXTRA_CLOBBERS	 , "rax", "r8", "r9", "r10", "r11"
+#endif
+
 #define __PVOP_CALL(rettype, op, pre, post, ...)			\
 	({								\
 		rettype __ret;						\
-		unsigned long __eax, __edx, __ecx;			\
+		PVOP_CALL_ARGS;						\
+		/* This is 32-bit specific, but is okay in 64-bit */	\
+		/* since this condition will never hold */		\
 		if (sizeof(rettype) > sizeof(unsigned long)) {		\
 			asm volatile(pre				\
 				     paravirt_alt(PARAVIRT_CALL)	\
 				     post				\
-				     : "=a" (__eax), "=d" (__edx),	\
-				       "=c" (__ecx)			\
+				     : PVOP_CALL_CLOBBERS		\
 				     : paravirt_type(op),		\
 				       paravirt_clobber(CLBR_ANY),	\
 				       ##__VA_ARGS__			\
-				     : "memory", "cc");			\
+				     : "memory", "cc" EXTRA_CLOBBERS);	\
 			__ret = (rettype)((((u64)__edx) << 32) | __eax); \
 		} else {						\
 			asm volatile(pre				\
 				     paravirt_alt(PARAVIRT_CALL)	\
 				     post				\
-				     : "=a" (__eax), "=d" (__edx),	\
-				       "=c" (__ecx)			\
+				     : PVOP_CALL_CLOBBERS		\
 				     : paravirt_type(op),		\
 				       paravirt_clobber(CLBR_ANY),	\
 				       ##__VA_ARGS__			\
-				     : "memory", "cc");			\
+				     : "memory", "cc" EXTRA_CLOBBERS);	\
 			__ret = (rettype)__eax;				\
 		}							\
 		__ret;							\
 	})
 #define __PVOP_VCALL(op, pre, post, ...)				\
 	({								\
-		unsigned long __eax, __edx, __ecx;			\
+		PVOP_VCALL_ARGS;					\
 		asm volatile(pre					\
 			     paravirt_alt(PARAVIRT_CALL)		\
 			     post					\
-			     : "=a" (__eax), "=d" (__edx), "=c" (__ecx) \
+			     : PVOP_VCALL_CLOBBERS			\
 			     : paravirt_type(op),			\
 			       paravirt_clobber(CLBR_ANY),		\
 			       ##__VA_ARGS__				\
-			     : "memory", "cc");				\
+			     : "memory", "cc" VEXTRA_CLOBBERS);		\
 	})
 
 #define PVOP_CALL0(rettype, op)						\
@@ -417,22 +484,26 @@ int paravirt_disable_iospace(void);
 	__PVOP_VCALL(op, "", "")
 
 #define PVOP_CALL1(rettype, op, arg1)					\
-	__PVOP_CALL(rettype, op, "", "", "0" ((u32)(arg1)))
+	__PVOP_CALL(rettype, op, "", "", "0" ((unsigned long)(arg1)))
 #define PVOP_VCALL1(op, arg1)						\
-	__PVOP_VCALL(op, "", "", "0" ((u32)(arg1)))
+	__PVOP_VCALL(op, "", "", "0" ((unsigned long)(arg1)))
 
 #define PVOP_CALL2(rettype, op, arg1, arg2)				\
-	__PVOP_CALL(rettype, op, "", "", "0" ((u32)(arg1)), "1" ((u32)(arg2)))
+	__PVOP_CALL(rettype, op, "", "", "0" ((unsigned long)(arg1)),	\
+	"1" ((unsigned long)(arg2)))
 #define PVOP_VCALL2(op, arg1, arg2)					\
-	__PVOP_VCALL(op, "", "", "0" ((u32)(arg1)), "1" ((u32)(arg2)))
+	__PVOP_VCALL(op, "", "", "0" ((unsigned long)(arg1)),		\
+	"1" ((unsigned long)(arg2)))
 
 #define PVOP_CALL3(rettype, op, arg1, arg2, arg3)			\
-	__PVOP_CALL(rettype, op, "", "", "0" ((u32)(arg1)),		\
-		    "1"((u32)(arg2)), "2"((u32)(arg3)))
+	__PVOP_CALL(rettype, op, "", "", "0" ((unsigned long)(arg1)),	\
+	"1"((unsigned long)(arg2)), "2"((unsigned long)(arg3)))
 #define PVOP_VCALL3(op, arg1, arg2, arg3)				\
-	__PVOP_VCALL(op, "", "", "0" ((u32)(arg1)), "1"((u32)(arg2)),	\
-		    "2"((u32)(arg3)))
+	__PVOP_VCALL(op, "", "", "0" ((unsigned long)(arg1)),		\
+	"1"((unsigned long)(arg2)), "2"((unsigned long)(arg3)))
 
+/* This is the only difference in x86_64. We can make it much simpler */
+#ifdef CONFIG_X86_32
 #define PVOP_CALL4(rettype, op, arg1, arg2, arg3, arg4)			\
 	__PVOP_CALL(rettype, op,					\
 		    "push %[_arg4];", "lea 4(%%esp),%%esp;",		\
@@ -443,16 +514,26 @@ int paravirt_disable_iospace(void);
 		    "push %[_arg4];", "lea 4(%%esp),%%esp;",		\
 		    "0" ((u32)(arg1)), "1" ((u32)(arg2)),		\
 		    "2" ((u32)(arg3)), [_arg4] "mr" ((u32)(arg4)))
+#else
+#define PVOP_CALL4(rettype, op, arg1, arg2, arg3, arg4)			\
+	__PVOP_CALL(rettype, op, "", "", "0" ((unsigned long)(arg1)),	\
+	"1"((unsigned long)(arg2)), "2"((unsigned long)(arg3)),		\
+	"3"((unsigned long)(arg4)))
+#define PVOP_VCALL4(op, arg1, arg2, arg3, arg4)				\
+	__PVOP_VCALL(op, "", "", "0" ((unsigned long)(arg1)),		\
+	"1"((unsigned long)(arg2)), "2"((unsigned long)(arg3)),		\
+	"3"((unsigned long)(arg4)))
+#endif
 
 static inline int paravirt_enabled(void)
 {
 	return pv_info.paravirt_enabled;
 }
 
-static inline void load_esp0(struct tss_struct *tss,
+static inline void load_sp0(struct tss_struct *tss,
 			     struct thread_struct *thread)
 {
-	PVOP_VCALL2(pv_cpu_ops.load_esp0, tss, thread);
+	PVOP_VCALL2(pv_cpu_ops.load_sp0, tss, thread);
 }
 
 #define ARCH_SETUP			pv_init_ops.arch_setup();
@@ -540,6 +621,18 @@ static inline void write_cr4(unsigned long x)
 	PVOP_VCALL1(pv_cpu_ops.write_cr4, x);
 }
 
+#ifdef CONFIG_X86_64
+static inline unsigned long read_cr8(void)
+{
+	return PVOP_CALL0(unsigned long, pv_cpu_ops.read_cr8);
+}
+
+static inline void write_cr8(unsigned long x)
+{
+	PVOP_VCALL1(pv_cpu_ops.write_cr8, x);
+}
+#endif
+
 static inline void raw_safe_halt(void)
 {
 	PVOP_VCALL0(pv_irq_ops.safe_halt);
@@ -613,8 +706,6 @@ static inline unsigned long long paravirt_sched_clock(void)
 }
 #define calculate_cpu_khz() (pv_time_ops.get_cpu_khz())
 
-#define write_tsc(val1,val2) wrmsr(0x10, val1, val2)
-
 static inline unsigned long long paravirt_read_pmc(int counter)
 {
 	return PVOP_CALL1(u64, pv_cpu_ops.read_pmc, counter);
@@ -626,15 +717,36 @@ static inline unsigned long long paravirt_read_pmc(int counter)
 		high = _l >> 32;			\
 } while(0)
 
+static inline unsigned long long paravirt_rdtscp(unsigned int *aux)
+{
+	return PVOP_CALL1(u64, pv_cpu_ops.read_tscp, aux);
+}
+
+#define rdtscp(low, high, aux)				\
+do {							\
+	int __aux;					\
+	unsigned long __val = paravirt_rdtscp(&__aux);	\
+	(low) = (u32)__val;				\
+	(high) = (u32)(__val >> 32);			\
+	(aux) = __aux;					\
+} while (0)
+
+#define rdtscpll(val, aux)				\
+do {							\
+	unsigned long __aux;				\
+	val = paravirt_rdtscp(&__aux);			\
+	(aux) = __aux;					\
+} while (0)
+
 static inline void load_TR_desc(void)
 {
 	PVOP_VCALL0(pv_cpu_ops.load_tr_desc);
 }
-static inline void load_gdt(const struct Xgt_desc_struct *dtr)
+static inline void load_gdt(const struct desc_ptr *dtr)
 {
 	PVOP_VCALL1(pv_cpu_ops.load_gdt, dtr);
 }
-static inline void load_idt(const struct Xgt_desc_struct *dtr)
+static inline void load_idt(const struct desc_ptr *dtr)
 {
 	PVOP_VCALL1(pv_cpu_ops.load_idt, dtr);
 }
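read_tscp mirrors the rdtscp instruction: the TSC arrives in edx:eax and the TSC_AUX signature in ecx. Assuming the macros above, usage would look like:

    unsigned long long tsc;
    unsigned int aux;
    u32 lo, hi;

    rdtscpll(tsc, aux);     /* full 64-bit counter plus the aux word */
    rdtscp(lo, hi, aux);    /* same, split into low and high halves */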
@@ -642,11 +754,11 @@ static inline void set_ldt(const void *addr, unsigned entries)
 {
 	PVOP_VCALL2(pv_cpu_ops.set_ldt, addr, entries);
 }
-static inline void store_gdt(struct Xgt_desc_struct *dtr)
+static inline void store_gdt(struct desc_ptr *dtr)
 {
 	PVOP_VCALL1(pv_cpu_ops.store_gdt, dtr);
 }
-static inline void store_idt(struct Xgt_desc_struct *dtr)
+static inline void store_idt(struct desc_ptr *dtr)
 {
 	PVOP_VCALL1(pv_cpu_ops.store_idt, dtr);
 }
@@ -659,17 +771,22 @@ static inline void load_TLS(struct thread_struct *t, unsigned cpu)
 {
 	PVOP_VCALL2(pv_cpu_ops.load_tls, t, cpu);
 }
-static inline void write_ldt_entry(void *dt, int entry, u32 low, u32 high)
+
+static inline void write_ldt_entry(struct desc_struct *dt, int entry,
+				   const void *desc)
 {
-	PVOP_VCALL4(pv_cpu_ops.write_ldt_entry, dt, entry, low, high);
+	PVOP_VCALL3(pv_cpu_ops.write_ldt_entry, dt, entry, desc);
 }
-static inline void write_gdt_entry(void *dt, int entry, u32 low, u32 high)
+
+static inline void write_gdt_entry(struct desc_struct *dt, int entry,
+				   void *desc, int type)
 {
-	PVOP_VCALL4(pv_cpu_ops.write_gdt_entry, dt, entry, low, high);
+	PVOP_VCALL4(pv_cpu_ops.write_gdt_entry, dt, entry, desc, type);
 }
-static inline void write_idt_entry(void *dt, int entry, u32 low, u32 high)
+
+static inline void write_idt_entry(gate_desc *dt, int entry, const gate_desc *g)
 {
-	PVOP_VCALL4(pv_cpu_ops.write_idt_entry, dt, entry, low, high);
+	PVOP_VCALL3(pv_cpu_ops.write_idt_entry, dt, entry, g);
 }
 static inline void set_iopl_mask(unsigned mask)
 {
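Passing whole descriptors instead of u32 pairs lets one prototype cover the 8-byte i386 gates and the 16-byte x86_64 gates. The native backend then reduces to a copy; roughly (a sketch of the matching native helper):

    static inline void native_write_idt_entry(gate_desc *idt, int entry,
                                              const gate_desc *gate)
    {
            memcpy(&idt[entry], gate, sizeof(*gate));   /* 8 or 16 bytes */
    }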
@@ -690,17 +807,17 @@ static inline void slow_down_io(void) {
 /*
  * Basic functions accessing APICs.
  */
-static inline void apic_write(unsigned long reg, unsigned long v)
+static inline void apic_write(unsigned long reg, u32 v)
 {
 	PVOP_VCALL2(pv_apic_ops.apic_write, reg, v);
 }
 
-static inline void apic_write_atomic(unsigned long reg, unsigned long v)
+static inline void apic_write_atomic(unsigned long reg, u32 v)
 {
 	PVOP_VCALL2(pv_apic_ops.apic_write_atomic, reg, v);
 }
 
-static inline unsigned long apic_read(unsigned long reg)
+static inline u32 apic_read(unsigned long reg)
 {
 	return PVOP_CALL1(unsigned long, pv_apic_ops.apic_read, reg);
 }
@@ -786,9 +903,9 @@ static inline void paravirt_release_pt(unsigned pfn)
 	PVOP_VCALL1(pv_mmu_ops.release_pt, pfn);
 }
 
-static inline void paravirt_alloc_pd(unsigned pfn)
+static inline void paravirt_alloc_pd(struct mm_struct *mm, unsigned pfn)
 {
-	PVOP_VCALL1(pv_mmu_ops.alloc_pd, pfn);
+	PVOP_VCALL2(pv_mmu_ops.alloc_pd, mm, pfn);
 }
 
 static inline void paravirt_alloc_pd_clone(unsigned pfn, unsigned clonepfn,
@@ -822,128 +939,236 @@ static inline void pte_update_defer(struct mm_struct *mm, unsigned long addr,
 	PVOP_VCALL3(pv_mmu_ops.pte_update_defer, mm, addr, ptep);
 }
 
-#ifdef CONFIG_X86_PAE
-static inline pte_t __pte(unsigned long long val)
-{
-	unsigned long long ret = PVOP_CALL2(unsigned long long,
-					    pv_mmu_ops.make_pte,
-					    val, val >> 32);
-	return (pte_t) { ret, ret >> 32 };
-}
-
-static inline pmd_t __pmd(unsigned long long val)
-{
-	return (pmd_t) { PVOP_CALL2(unsigned long long, pv_mmu_ops.make_pmd,
-				    val, val >> 32) };
-}
-
-static inline pgd_t __pgd(unsigned long long val)
-{
-	return (pgd_t) { PVOP_CALL2(unsigned long long, pv_mmu_ops.make_pgd,
-				    val, val >> 32) };
-}
-
-static inline unsigned long long pte_val(pte_t x)
-{
-	return PVOP_CALL2(unsigned long long, pv_mmu_ops.pte_val,
-			  x.pte_low, x.pte_high);
-}
-
-static inline unsigned long long pmd_val(pmd_t x)
-{
-	return PVOP_CALL2(unsigned long long, pv_mmu_ops.pmd_val,
-			  x.pmd, x.pmd >> 32);
-}
-
-static inline unsigned long long pgd_val(pgd_t x)
-{
-	return PVOP_CALL2(unsigned long long, pv_mmu_ops.pgd_val,
-			  x.pgd, x.pgd >> 32);
-}
-
-static inline void set_pte(pte_t *ptep, pte_t pteval)
-{
-	PVOP_VCALL3(pv_mmu_ops.set_pte, ptep, pteval.pte_low, pteval.pte_high);
-}
-
-static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
-			      pte_t *ptep, pte_t pteval)
-{
-	/* 5 arg words */
-	pv_mmu_ops.set_pte_at(mm, addr, ptep, pteval);
-}
-
-static inline void set_pte_atomic(pte_t *ptep, pte_t pteval)
-{
-	PVOP_VCALL3(pv_mmu_ops.set_pte_atomic, ptep,
-		    pteval.pte_low, pteval.pte_high);
-}
-
-static inline void set_pte_present(struct mm_struct *mm, unsigned long addr,
-				   pte_t *ptep, pte_t pte)
-{
-	/* 5 arg words */
-	pv_mmu_ops.set_pte_present(mm, addr, ptep, pte);
-}
-
-static inline void set_pmd(pmd_t *pmdp, pmd_t pmdval)
-{
-	PVOP_VCALL3(pv_mmu_ops.set_pmd, pmdp,
-		    pmdval.pmd, pmdval.pmd >> 32);
-}
-
-static inline void set_pud(pud_t *pudp, pud_t pudval)
-{
-	PVOP_VCALL3(pv_mmu_ops.set_pud, pudp,
-		    pudval.pgd.pgd, pudval.pgd.pgd >> 32);
-}
-
-static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
-{
-	PVOP_VCALL3(pv_mmu_ops.pte_clear, mm, addr, ptep);
-}
-
-static inline void pmd_clear(pmd_t *pmdp)
-{
-	PVOP_VCALL1(pv_mmu_ops.pmd_clear, pmdp);
-}
-
-#else  /* !CONFIG_X86_PAE */
-
-static inline pte_t __pte(unsigned long val)
-{
-	return (pte_t) { PVOP_CALL1(unsigned long, pv_mmu_ops.make_pte, val) };
-}
-
-static inline pgd_t __pgd(unsigned long val)
-{
-	return (pgd_t) { PVOP_CALL1(unsigned long, pv_mmu_ops.make_pgd, val) };
-}
-
-static inline unsigned long pte_val(pte_t x)
-{
-	return PVOP_CALL1(unsigned long, pv_mmu_ops.pte_val, x.pte_low);
-}
-
-static inline unsigned long pgd_val(pgd_t x)
-{
-	return PVOP_CALL1(unsigned long, pv_mmu_ops.pgd_val, x.pgd);
-}
-
-static inline void set_pte(pte_t *ptep, pte_t pteval)
-{
-	PVOP_VCALL2(pv_mmu_ops.set_pte, ptep, pteval.pte_low);
-}
-
-static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
-			      pte_t *ptep, pte_t pteval)
-{
-	PVOP_VCALL4(pv_mmu_ops.set_pte_at, mm, addr, ptep, pteval.pte_low);
-}
-
-static inline void set_pmd(pmd_t *pmdp, pmd_t pmdval)
-{
-	PVOP_VCALL2(pv_mmu_ops.set_pmd, pmdp, pmdval.pud.pgd.pgd);
-}
+static inline pte_t __pte(pteval_t val)
+{
+	pteval_t ret;
+
+	if (sizeof(pteval_t) > sizeof(long))
+		ret = PVOP_CALL2(pteval_t,
+				 pv_mmu_ops.make_pte,
+				 val, (u64)val >> 32);
+	else
+		ret = PVOP_CALL1(pteval_t,
+				 pv_mmu_ops.make_pte,
+				 val);
+
+	return (pte_t) { .pte = ret };
+}
+
+static inline pteval_t pte_val(pte_t pte)
+{
+	pteval_t ret;
+
+	if (sizeof(pteval_t) > sizeof(long))
+		ret = PVOP_CALL2(pteval_t, pv_mmu_ops.pte_val,
+				 pte.pte, (u64)pte.pte >> 32);
+	else
+		ret = PVOP_CALL1(pteval_t, pv_mmu_ops.pte_val,
+				 pte.pte);
+
+	return ret;
+}
+
+static inline pgd_t __pgd(pgdval_t val)
+{
+	pgdval_t ret;
+
+	if (sizeof(pgdval_t) > sizeof(long))
+		ret = PVOP_CALL2(pgdval_t, pv_mmu_ops.make_pgd,
+				 val, (u64)val >> 32);
+	else
+		ret = PVOP_CALL1(pgdval_t, pv_mmu_ops.make_pgd,
+				 val);
+
+	return (pgd_t) { ret };
+}
+
+static inline pgdval_t pgd_val(pgd_t pgd)
+{
+	pgdval_t ret;
+
+	if (sizeof(pgdval_t) > sizeof(long))
+		ret = PVOP_CALL2(pgdval_t, pv_mmu_ops.pgd_val,
+				 pgd.pgd, (u64)pgd.pgd >> 32);
+	else
+		ret = PVOP_CALL1(pgdval_t, pv_mmu_ops.pgd_val,
+				 pgd.pgd);
+
+	return ret;
+}
+
+static inline void set_pte(pte_t *ptep, pte_t pte)
+{
+	if (sizeof(pteval_t) > sizeof(long))
+		PVOP_VCALL3(pv_mmu_ops.set_pte, ptep,
+			    pte.pte, (u64)pte.pte >> 32);
+	else
+		PVOP_VCALL2(pv_mmu_ops.set_pte, ptep,
+			    pte.pte);
+}
+
+static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
+			      pte_t *ptep, pte_t pte)
+{
+	if (sizeof(pteval_t) > sizeof(long))
+		/* 5 arg words */
+		pv_mmu_ops.set_pte_at(mm, addr, ptep, pte);
+	else
+		PVOP_VCALL4(pv_mmu_ops.set_pte_at, mm, addr, ptep, pte.pte);
+}
+
+static inline void set_pmd(pmd_t *pmdp, pmd_t pmd)
+{
+	pmdval_t val = native_pmd_val(pmd);
+
+	if (sizeof(pmdval_t) > sizeof(long))
+		PVOP_VCALL3(pv_mmu_ops.set_pmd, pmdp, val, (u64)val >> 32);
+	else
+		PVOP_VCALL2(pv_mmu_ops.set_pmd, pmdp, val);
+}
+
+#if PAGETABLE_LEVELS >= 3
+static inline pmd_t __pmd(pmdval_t val)
+{
+	pmdval_t ret;
+
+	if (sizeof(pmdval_t) > sizeof(long))
+		ret = PVOP_CALL2(pmdval_t, pv_mmu_ops.make_pmd,
+				 val, (u64)val >> 32);
+	else
+		ret = PVOP_CALL1(pmdval_t, pv_mmu_ops.make_pmd,
+				 val);
+
+	return (pmd_t) { ret };
+}
+
+static inline pmdval_t pmd_val(pmd_t pmd)
+{
+	pmdval_t ret;
+
+	if (sizeof(pmdval_t) > sizeof(long))
+		ret = PVOP_CALL2(pmdval_t, pv_mmu_ops.pmd_val,
+				 pmd.pmd, (u64)pmd.pmd >> 32);
+	else
+		ret = PVOP_CALL1(pmdval_t, pv_mmu_ops.pmd_val,
+				 pmd.pmd);
+
+	return ret;
+}
+
+static inline void set_pud(pud_t *pudp, pud_t pud)
+{
+	pudval_t val = native_pud_val(pud);
+
+	if (sizeof(pudval_t) > sizeof(long))
+		PVOP_VCALL3(pv_mmu_ops.set_pud, pudp,
+			    val, (u64)val >> 32);
+	else
+		PVOP_VCALL2(pv_mmu_ops.set_pud, pudp,
+			    val);
+}
+#if PAGETABLE_LEVELS == 4
+static inline pud_t __pud(pudval_t val)
+{
+	pudval_t ret;
+
+	if (sizeof(pudval_t) > sizeof(long))
+		ret = PVOP_CALL2(pudval_t, pv_mmu_ops.make_pud,
+				 val, (u64)val >> 32);
+	else
+		ret = PVOP_CALL1(pudval_t, pv_mmu_ops.make_pud,
+				 val);
+
+	return (pud_t) { ret };
+}
+
+static inline pudval_t pud_val(pud_t pud)
+{
+	pudval_t ret;
+
+	if (sizeof(pudval_t) > sizeof(long))
+		ret = PVOP_CALL2(pudval_t, pv_mmu_ops.pud_val,
+				 pud.pud, (u64)pud.pud >> 32);
+	else
+		ret = PVOP_CALL1(pudval_t, pv_mmu_ops.pud_val,
+				 pud.pud);
+
+	return ret;
+}
+
+static inline void set_pgd(pgd_t *pgdp, pgd_t pgd)
+{
+	pgdval_t val = native_pgd_val(pgd);
+
+	if (sizeof(pgdval_t) > sizeof(long))
+		PVOP_VCALL3(pv_mmu_ops.set_pgd, pgdp,
+			    val, (u64)val >> 32);
+	else
+		PVOP_VCALL2(pv_mmu_ops.set_pgd, pgdp,
+			    val);
+}
+
+static inline void pgd_clear(pgd_t *pgdp)
+{
+	set_pgd(pgdp, __pgd(0));
+}
+
+static inline void pud_clear(pud_t *pudp)
+{
+	set_pud(pudp, __pud(0));
+}
+
+#endif	/* PAGETABLE_LEVELS == 4 */
+
+#endif	/* PAGETABLE_LEVELS >= 3 */
+
+#ifdef CONFIG_X86_PAE
+/* Special-case pte-setting operations for PAE, which can't update a
+   64-bit pte atomically */
+static inline void set_pte_atomic(pte_t *ptep, pte_t pte)
+{
+	PVOP_VCALL3(pv_mmu_ops.set_pte_atomic, ptep,
+		    pte.pte, pte.pte >> 32);
+}
+
+static inline void set_pte_present(struct mm_struct *mm, unsigned long addr,
+				   pte_t *ptep, pte_t pte)
+{
+	/* 5 arg words */
+	pv_mmu_ops.set_pte_present(mm, addr, ptep, pte);
+}
+
+static inline void pte_clear(struct mm_struct *mm, unsigned long addr,
+			     pte_t *ptep)
+{
+	PVOP_VCALL3(pv_mmu_ops.pte_clear, mm, addr, ptep);
+}
+
+static inline void pmd_clear(pmd_t *pmdp)
+{
+	PVOP_VCALL1(pv_mmu_ops.pmd_clear, pmdp);
+}
+#else  /* !CONFIG_X86_PAE */
+static inline void set_pte_atomic(pte_t *ptep, pte_t pte)
+{
+	set_pte(ptep, pte);
+}
+
+static inline void set_pte_present(struct mm_struct *mm, unsigned long addr,
+				   pte_t *ptep, pte_t pte)
+{
+	set_pte(ptep, pte);
+}
+
+static inline void pte_clear(struct mm_struct *mm, unsigned long addr,
+			     pte_t *ptep)
+{
+	set_pte_at(mm, addr, ptep, __pte(0));
+}
+
+static inline void pmd_clear(pmd_t *pmdp)
+{
+	set_pmd(pmdp, __pmd(0));
+}
 #endif	/* CONFIG_X86_PAE */
 
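The sizeof(pteval_t) > sizeof(long) test is a compile-time constant: it is true only for 32-bit PAE, where a pte is 64 bits but long is 32, so gcc keeps exactly one branch of each helper and discards the other. For the native case the hooks are identity transforms; a hedged sketch (the demo_* names are illustrative):

    /* Sketch: native pte hooks just pass the value through, so after
     * patching, the call site collapses to a register move. */
    static pteval_t demo_pte_val(pte_t pte)
    {
            return pte.pte;
    }

    static pte_t demo_make_pte(pteval_t val)
    {
            return (pte_t) { .pte = val };
    }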
@@ -1014,52 +1239,68 @@ struct paravirt_patch_site {
 extern struct paravirt_patch_site __parainstructions[],
 	__parainstructions_end[];
 
+#ifdef CONFIG_X86_32
+#define PV_SAVE_REGS "pushl %%ecx; pushl %%edx;"
+#define PV_RESTORE_REGS "popl %%edx; popl %%ecx"
+#define PV_FLAGS_ARG "0"
+#define PV_EXTRA_CLOBBERS
+#define PV_VEXTRA_CLOBBERS
+#else
+/* Saving every register here would be too much; instead we clobber
+ * all caller-saved registers except the argument register. */
+#define PV_SAVE_REGS "pushq %%rdi;"
+#define PV_RESTORE_REGS "popq %%rdi;"
+#define PV_EXTRA_CLOBBERS EXTRA_CLOBBERS, "rcx" , "rdx"
+#define PV_VEXTRA_CLOBBERS EXTRA_CLOBBERS, "rdi", "rcx" , "rdx"
+#define PV_FLAGS_ARG "D"
+#endif
+
 static inline unsigned long __raw_local_save_flags(void)
 {
 	unsigned long f;
 
-	asm volatile(paravirt_alt("pushl %%ecx; pushl %%edx;"
+	asm volatile(paravirt_alt(PV_SAVE_REGS
 				  PARAVIRT_CALL
-				  "popl %%edx; popl %%ecx")
+				  PV_RESTORE_REGS)
 		     : "=a"(f)
 		     : paravirt_type(pv_irq_ops.save_fl),
 		       paravirt_clobber(CLBR_EAX)
-		     : "memory", "cc");
+		     : "memory", "cc" PV_VEXTRA_CLOBBERS);
 	return f;
 }
 
 static inline void raw_local_irq_restore(unsigned long f)
 {
-	asm volatile(paravirt_alt("pushl %%ecx; pushl %%edx;"
+	asm volatile(paravirt_alt(PV_SAVE_REGS
 				  PARAVIRT_CALL
-				  "popl %%edx; popl %%ecx")
+				  PV_RESTORE_REGS)
 		     : "=a"(f)
-		     : "0"(f),
+		     : PV_FLAGS_ARG(f),
 		       paravirt_type(pv_irq_ops.restore_fl),
 		       paravirt_clobber(CLBR_EAX)
-		     : "memory", "cc");
+		     : "memory", "cc" PV_EXTRA_CLOBBERS);
 }
 
 static inline void raw_local_irq_disable(void)
 {
-	asm volatile(paravirt_alt("pushl %%ecx; pushl %%edx;"
+	asm volatile(paravirt_alt(PV_SAVE_REGS
 				  PARAVIRT_CALL
-				  "popl %%edx; popl %%ecx")
+				  PV_RESTORE_REGS)
 		     :
 		     : paravirt_type(pv_irq_ops.irq_disable),
 		       paravirt_clobber(CLBR_EAX)
-		     : "memory", "eax", "cc");
+		     : "memory", "eax", "cc" PV_EXTRA_CLOBBERS);
 }
 
 static inline void raw_local_irq_enable(void)
 {
-	asm volatile(paravirt_alt("pushl %%ecx; pushl %%edx;"
+	asm volatile(paravirt_alt(PV_SAVE_REGS
 				  PARAVIRT_CALL
-				  "popl %%edx; popl %%ecx")
+				  PV_RESTORE_REGS)
 		     :
 		     : paravirt_type(pv_irq_ops.irq_enable),
 		       paravirt_clobber(CLBR_EAX)
-		     : "memory", "eax", "cc");
+		     : "memory", "eax", "cc" PV_EXTRA_CLOBBERS);
 }
 
 static inline unsigned long __raw_local_irq_save(void)
@@ -1071,27 +1312,6 @@ static inline unsigned long __raw_local_irq_save(void)
 	return f;
 }
 
-#define CLI_STRING							\
-	_paravirt_alt("pushl %%ecx; pushl %%edx;"			\
-		      "call *%[paravirt_cli_opptr];"			\
-		      "popl %%edx; popl %%ecx",				\
-		      "%c[paravirt_cli_type]", "%c[paravirt_clobber]")
-
-#define STI_STRING							\
-	_paravirt_alt("pushl %%ecx; pushl %%edx;"			\
-		      "call *%[paravirt_sti_opptr];"			\
-		      "popl %%edx; popl %%ecx",				\
-		      "%c[paravirt_sti_type]", "%c[paravirt_clobber]")
-
-#define CLI_STI_CLOBBERS , "%eax"
-#define CLI_STI_INPUT_ARGS						\
-	,								\
-	[paravirt_cli_type] "i" (PARAVIRT_PATCH(pv_irq_ops.irq_disable)),	\
-	[paravirt_cli_opptr] "m" (pv_irq_ops.irq_disable),		\
-	[paravirt_sti_type] "i" (PARAVIRT_PATCH(pv_irq_ops.irq_enable)),	\
-	[paravirt_sti_opptr] "m" (pv_irq_ops.irq_enable),		\
-	paravirt_clobber(CLBR_EAX)
-
 /* Make sure as little as possible of this mess escapes. */
 #undef PARAVIRT_CALL
 #undef __PVOP_CALL
@@ -1109,43 +1329,72 @@ static inline unsigned long __raw_local_irq_save(void)
 
 #else  /* __ASSEMBLY__ */
 
-#define PARA_PATCH(struct, off)	((PARAVIRT_PATCH_##struct + (off)) / 4)
-
-#define PARA_SITE(ptype, clobbers, ops)		\
+#define _PVSITE(ptype, clobbers, ops, word, algn)	\
 771:;						\
 	ops;					\
 772:;						\
 	.pushsection .parainstructions,"a";	\
-	 .long 771b;				\
+	 .align	algn;				\
+	 word 771b;				\
 	 .byte ptype;				\
 	 .byte 772b-771b;			\
 	 .short clobbers;			\
 	.popsection
 
+
+#ifdef CONFIG_X86_64
+#define PV_SAVE_REGS   pushq %rax; pushq %rdi; pushq %rcx; pushq %rdx
+#define PV_RESTORE_REGS popq %rdx; popq %rcx; popq %rdi; popq %rax
+#define PARA_PATCH(struct, off)        ((PARAVIRT_PATCH_##struct + (off)) / 8)
+#define PARA_SITE(ptype, clobbers, ops) _PVSITE(ptype, clobbers, ops, .quad, 8)
+#else
+#define PV_SAVE_REGS   pushl %eax; pushl %edi; pushl %ecx; pushl %edx
+#define PV_RESTORE_REGS popl %edx; popl %ecx; popl %edi; popl %eax
+#define PARA_PATCH(struct, off)        ((PARAVIRT_PATCH_##struct + (off)) / 4)
+#define PARA_SITE(ptype, clobbers, ops) _PVSITE(ptype, clobbers, ops, .long, 4)
+#endif
+
 #define INTERRUPT_RETURN						\
 	PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_iret), CLBR_NONE,	\
 		  jmp *%cs:pv_cpu_ops+PV_CPU_iret)
 
 #define DISABLE_INTERRUPTS(clobbers)					\
 	PARA_SITE(PARA_PATCH(pv_irq_ops, PV_IRQ_irq_disable), clobbers,	\
-		  pushl %eax; pushl %ecx; pushl %edx;			\
+		  PV_SAVE_REGS;						\
 		  call *%cs:pv_irq_ops+PV_IRQ_irq_disable;		\
-		  popl %edx; popl %ecx; popl %eax)			\
+		  PV_RESTORE_REGS;)					\
 
 #define ENABLE_INTERRUPTS(clobbers)					\
 	PARA_SITE(PARA_PATCH(pv_irq_ops, PV_IRQ_irq_enable), clobbers,	\
-		  pushl %eax; pushl %ecx; pushl %edx;			\
+		  PV_SAVE_REGS;						\
 		  call *%cs:pv_irq_ops+PV_IRQ_irq_enable;		\
-		  popl %edx; popl %ecx; popl %eax)
+		  PV_RESTORE_REGS;)
+
+#define ENABLE_INTERRUPTS_SYSCALL_RET					\
+	PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_irq_enable_syscall_ret),\
+		  CLBR_NONE,						\
+		  jmp *%cs:pv_cpu_ops+PV_CPU_irq_enable_syscall_ret)
 
-#define ENABLE_INTERRUPTS_SYSEXIT					\
-	PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_irq_enable_sysexit), CLBR_NONE,\
-		  jmp *%cs:pv_cpu_ops+PV_CPU_irq_enable_sysexit)
 
+#ifdef CONFIG_X86_32
 #define GET_CR0_INTO_EAX			\
 	push %ecx; push %edx;			\
 	call *pv_cpu_ops+PV_CPU_read_cr0;	\
 	pop %edx; pop %ecx
+#else
+#define SWAPGS								\
+	PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_swapgs), CLBR_NONE,	\
+		  PV_SAVE_REGS;						\
+		  call *pv_cpu_ops+PV_CPU_swapgs;			\
+		  PV_RESTORE_REGS					\
+		 )
+
+#define GET_CR2_INTO_RCX				\
+	call *pv_mmu_ops+PV_MMU_read_cr2;		\
+	movq %rax, %rcx;				\
+	xorq %rax, %rax;
+
+#endif
 
 #endif /* __ASSEMBLY__ */
 #endif /* CONFIG_PARAVIRT */