Diffstat (limited to 'arch/x86/include/asm/paravirt.h')
-rw-r--r--	arch/x86/include/asm/paravirt.h	484
1 file changed, 309 insertions(+), 175 deletions(-)
diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h
index e299287e8e33..7727aa8b7dda 100644
--- a/arch/x86/include/asm/paravirt.h
+++ b/arch/x86/include/asm/paravirt.h
@@ -4,7 +4,7 @@
  * para-virtualization: those hooks are defined here. */
 
 #ifdef CONFIG_PARAVIRT
-#include <asm/page.h>
+#include <asm/pgtable_types.h>
 #include <asm/asm.h>
 
 /* Bitmask of what can be clobbered: usually at least eax. */
@@ -12,21 +12,38 @@
 #define CLBR_EAX  (1 << 0)
 #define CLBR_ECX  (1 << 1)
 #define CLBR_EDX  (1 << 2)
+#define CLBR_EDI  (1 << 3)
 
-#ifdef CONFIG_X86_64
-#define CLBR_RSI  (1 << 3)
-#define CLBR_RDI  (1 << 4)
+#ifdef CONFIG_X86_32
+/* CLBR_ANY should match all regs platform has. For i386, that's just it */
+#define CLBR_ANY  ((1 << 4) - 1)
+
+#define CLBR_ARG_REGS	(CLBR_EAX | CLBR_EDX | CLBR_ECX)
+#define CLBR_RET_REG	(CLBR_EAX | CLBR_EDX)
+#define CLBR_SCRATCH	(0)
+#else
+#define CLBR_RAX  CLBR_EAX
+#define CLBR_RCX  CLBR_ECX
+#define CLBR_RDX  CLBR_EDX
+#define CLBR_RDI  CLBR_EDI
+#define CLBR_RSI  (1 << 4)
 #define CLBR_R8   (1 << 5)
 #define CLBR_R9   (1 << 6)
 #define CLBR_R10  (1 << 7)
 #define CLBR_R11  (1 << 8)
+
 #define CLBR_ANY  ((1 << 9) - 1)
+
+#define CLBR_ARG_REGS	(CLBR_RDI | CLBR_RSI | CLBR_RDX | \
+			 CLBR_RCX | CLBR_R8 | CLBR_R9)
+#define CLBR_RET_REG	(CLBR_RAX)
+#define CLBR_SCRATCH	(CLBR_R10 | CLBR_R11)
+
 #include <asm/desc_defs.h>
-#else
-/* CLBR_ANY should match all regs platform has. For i386, that's just it */
-#define CLBR_ANY  ((1 << 3) - 1)
 #endif /* X86_64 */
 
+#define CLBR_CALLEE_SAVE ((CLBR_ARG_REGS | CLBR_SCRATCH) & ~CLBR_RET_REG)
+
 #ifndef __ASSEMBLY__
 #include <linux/types.h>
 #include <linux/cpumask.h>
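Review note: the new mask taxonomy is easiest to sanity-check by evaluating it. A minimal userspace sketch (mask values copied from the hunk above, not part of the patch) shows that on 32-bit the callee-save set collapses to just %ecx, which is why the register-saving thunks introduced near the end of this patch push only that one register:

#include <assert.h>

#define CLBR_EAX	(1 << 0)
#define CLBR_ECX	(1 << 1)
#define CLBR_EDX	(1 << 2)
#define CLBR_ARG_REGS	(CLBR_EAX | CLBR_EDX | CLBR_ECX)
#define CLBR_RET_REG	(CLBR_EAX | CLBR_EDX)
#define CLBR_SCRATCH	(0)
#define CLBR_CALLEE_SAVE ((CLBR_ARG_REGS | CLBR_SCRATCH) & ~CLBR_RET_REG)

int main(void)
{
	/* argument regs plus scratch regs, minus the return regs: %ecx only */
	assert(CLBR_CALLEE_SAVE == CLBR_ECX);
	return 0;
}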
@@ -40,6 +57,14 @@ struct tss_struct;
 struct mm_struct;
 struct desc_struct;
 
+/*
+ * Wrapper type for pointers to code which uses the non-standard
+ * calling convention.  See PV_CALLEE_SAVE_REGS_THUNK below.
+ */
+struct paravirt_callee_save {
+	void *func;
+};
+
 /* general info */
 struct pv_info {
 	unsigned int kernel_rpl;
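Review note: the one-member struct costs nothing at runtime, but it means a plain C function pointer can no longer be assigned to one of these slots by accident. A hypothetical backend (names invented here) would install its op through the PV_CALLEE_SAVE() helper added later in this patch:

/* hypothetical backend implementation, normal C calling convention */
static unsigned long my_save_fl(void)
{
	return 0;
}
PV_CALLEE_SAVE_REGS_THUNK(my_save_fl);

static void my_backend_init(void)
{
	/* a bare "= my_save_fl" would now fail to compile */
	pv_irq_ops.save_fl = PV_CALLEE_SAVE(my_save_fl);
}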
@@ -189,11 +214,15 @@ struct pv_irq_ops {
 	 * expected to use X86_EFLAGS_IF; all other bits
 	 * returned from save_fl are undefined, and may be ignored by
 	 * restore_fl.
+	 *
+	 * NOTE: callers of these functions expect the callee to preserve
+	 * more registers than the standard C calling convention requires.
 	 */
-	unsigned long (*save_fl)(void);
-	void (*restore_fl)(unsigned long);
-	void (*irq_disable)(void);
-	void (*irq_enable)(void);
+	struct paravirt_callee_save save_fl;
+	struct paravirt_callee_save restore_fl;
+	struct paravirt_callee_save irq_disable;
+	struct paravirt_callee_save irq_enable;
+
 	void (*safe_halt)(void);
 	void (*halt)(void);
 
@@ -244,7 +273,8 @@ struct pv_mmu_ops {
 	void (*flush_tlb_user)(void);
 	void (*flush_tlb_kernel)(void);
 	void (*flush_tlb_single)(unsigned long addr);
-	void (*flush_tlb_others)(const cpumask_t *cpus, struct mm_struct *mm,
+	void (*flush_tlb_others)(const struct cpumask *cpus,
+				 struct mm_struct *mm,
 				 unsigned long va);
 
 	/* Hooks for allocating and freeing a pagetable top-level */
@@ -278,18 +308,15 @@ struct pv_mmu_ops {
 	void (*ptep_modify_prot_commit)(struct mm_struct *mm, unsigned long addr,
 					pte_t *ptep, pte_t pte);
 
-	pteval_t (*pte_val)(pte_t);
-	pteval_t (*pte_flags)(pte_t);
-	pte_t (*make_pte)(pteval_t pte);
+	struct paravirt_callee_save pte_val;
+	struct paravirt_callee_save make_pte;
 
-	pgdval_t (*pgd_val)(pgd_t);
-	pgd_t (*make_pgd)(pgdval_t pgd);
+	struct paravirt_callee_save pgd_val;
+	struct paravirt_callee_save make_pgd;
 
 #if PAGETABLE_LEVELS >= 3
 #ifdef CONFIG_X86_PAE
 	void (*set_pte_atomic)(pte_t *ptep, pte_t pteval);
-	void (*set_pte_present)(struct mm_struct *mm, unsigned long addr,
-				pte_t *ptep, pte_t pte);
 	void (*pte_clear)(struct mm_struct *mm, unsigned long addr,
 			  pte_t *ptep);
 	void (*pmd_clear)(pmd_t *pmdp);
@@ -298,12 +325,12 @@ struct pv_mmu_ops {
 
 	void (*set_pud)(pud_t *pudp, pud_t pudval);
 
-	pmdval_t (*pmd_val)(pmd_t);
-	pmd_t (*make_pmd)(pmdval_t pmd);
+	struct paravirt_callee_save pmd_val;
+	struct paravirt_callee_save make_pmd;
 
 #if PAGETABLE_LEVELS == 4
-	pudval_t (*pud_val)(pud_t);
-	pud_t (*make_pud)(pudval_t pud);
+	struct paravirt_callee_save pud_val;
+	struct paravirt_callee_save make_pud;
 
 	void (*set_pgd)(pgd_t *pudp, pgd_t pgdval);
 #endif	/* PAGETABLE_LEVELS == 4 */
@@ -360,7 +387,7 @@ extern struct pv_lock_ops pv_lock_ops;
 
 #define paravirt_type(op)				\
 	[paravirt_typenum] "i" (PARAVIRT_PATCH(op)),	\
-	[paravirt_opptr] "m" (op)
+	[paravirt_opptr] "i" (&(op))
 #define paravirt_clobber(clobber)		\
 	[paravirt_clobber] "i" (clobber)
 
@@ -388,6 +415,8 @@ extern struct pv_lock_ops pv_lock_ops;
 	asm("start_" #ops "_" #name ": " code "; end_" #ops "_" #name ":")
 
 unsigned paravirt_patch_nop(void);
+unsigned paravirt_patch_ident_32(void *insnbuf, unsigned len);
+unsigned paravirt_patch_ident_64(void *insnbuf, unsigned len);
 unsigned paravirt_patch_ignore(unsigned len);
 unsigned paravirt_patch_call(void *insnbuf,
 			     const void *target, u16 tgt_clobbers,
@@ -412,7 +441,7 @@ int paravirt_disable_iospace(void);
  * offset into the paravirt_patch_template structure, and can therefore be
  * freely converted back into a structure offset.
  */
-#define PARAVIRT_CALL	"call *%[paravirt_opptr];"
+#define PARAVIRT_CALL	"call *%c[paravirt_opptr];"
 
 /*
  * These macros are intended to wrap calls through one of the paravirt
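Review note: this change and the paravirt_type() change above belong together. The operand is now an immediate holding the address of the op slot, and the %c output modifier prints that constant without the leading '$', so the template assembles to an indirect call through a fixed memory location. A cut-down illustration of the idiom (userspace sketch, not kernel code):

static void (*my_op)(void);

static void call_my_op(void)
{
	/* emits "call *my_op" -- a fixed, patchable memory operand */
	asm volatile("call *%c[opptr]"
		     : : [opptr] "i" (&my_op) : "memory");
}

With the old "m" (op) constraint, the compiler was free to pick an arbitrary addressing mode for the operand; the immediate form pins the call site down for the runtime patcher.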
@@ -479,25 +508,45 @@ int paravirt_disable_iospace(void);
  * makes sure the incoming and outgoing types are always correct.
  */
 #ifdef CONFIG_X86_32
-#define PVOP_VCALL_ARGS			unsigned long __eax, __edx, __ecx
+#define PVOP_VCALL_ARGS				\
+	unsigned long __eax = __eax, __edx = __edx, __ecx = __ecx
 #define PVOP_CALL_ARGS			PVOP_VCALL_ARGS
+
+#define PVOP_CALL_ARG1(x)		"a" ((unsigned long)(x))
+#define PVOP_CALL_ARG2(x)		"d" ((unsigned long)(x))
+#define PVOP_CALL_ARG3(x)		"c" ((unsigned long)(x))
+
 #define PVOP_VCALL_CLOBBERS		"=a" (__eax), "=d" (__edx),	\
 					"=c" (__ecx)
 #define PVOP_CALL_CLOBBERS		PVOP_VCALL_CLOBBERS
+
+#define PVOP_VCALLEE_CLOBBERS		"=a" (__eax), "=d" (__edx)
+#define PVOP_CALLEE_CLOBBERS		PVOP_VCALLEE_CLOBBERS
+
 #define EXTRA_CLOBBERS
 #define VEXTRA_CLOBBERS
-#else
-#define PVOP_VCALL_ARGS		unsigned long __edi, __esi, __edx, __ecx
+#else  /* CONFIG_X86_64 */
+#define PVOP_VCALL_ARGS					\
+	unsigned long __edi = __edi, __esi = __esi,	\
+		__edx = __edx, __ecx = __ecx
 #define PVOP_CALL_ARGS		PVOP_VCALL_ARGS, __eax
+
+#define PVOP_CALL_ARG1(x)		"D" ((unsigned long)(x))
+#define PVOP_CALL_ARG2(x)		"S" ((unsigned long)(x))
+#define PVOP_CALL_ARG3(x)		"d" ((unsigned long)(x))
+#define PVOP_CALL_ARG4(x)		"c" ((unsigned long)(x))
+
 #define PVOP_VCALL_CLOBBERS	"=D" (__edi),				\
 				"=S" (__esi), "=d" (__edx),		\
 				"=c" (__ecx)
-
 #define PVOP_CALL_CLOBBERS	PVOP_VCALL_CLOBBERS, "=a" (__eax)
 
+#define PVOP_VCALLEE_CLOBBERS	"=a" (__eax)
+#define PVOP_CALLEE_CLOBBERS	PVOP_VCALLEE_CLOBBERS
+
 #define EXTRA_CLOBBERS	 , "r8", "r9", "r10", "r11"
 #define VEXTRA_CLOBBERS	 , "rax", "r8", "r9", "r10", "r11"
-#endif
+#endif	/* CONFIG_X86_32 */
 
 #ifdef CONFIG_PARAVIRT_DEBUG
 #define PVOP_TEST_NULL(op)	BUG_ON(op == NULL)
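Review note: the odd-looking '__eax = __eax' self-initialization is a GCC idiom: these variables only ever carry values out of the asm as outputs, and tying each one to itself keeps -Wuninitialized quiet without emitting any initialization code. In isolation (sketch):

static unsigned long pvop_arg_demo(void)
{
	unsigned long __eax = __eax;	/* no warning, no generated code */

	asm("mov $42, %0" : "=a" (__eax));
	return __eax;
}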
@@ -505,10 +554,11 @@ int paravirt_disable_iospace(void);
 #define PVOP_TEST_NULL(op)	((void)op)
 #endif
 
-#define __PVOP_CALL(rettype, op, pre, post, ...)			\
+#define ____PVOP_CALL(rettype, op, clbr, call_clbr, extra_clbr,		\
+		      pre, post, ...)					\
 	({								\
 		rettype __ret;						\
 		PVOP_CALL_ARGS;						\
 		PVOP_TEST_NULL(op);					\
 		/* This is 32-bit specific, but is okay in 64-bit */	\
 		/* since this condition will never hold */		\
@@ -516,70 +566,113 @@ int paravirt_disable_iospace(void);
 		asm volatile(pre					\
 			     paravirt_alt(PARAVIRT_CALL)		\
 			     post					\
-			     : PVOP_CALL_CLOBBERS			\
+			     : call_clbr				\
 			     : paravirt_type(op),			\
-			       paravirt_clobber(CLBR_ANY),		\
+			       paravirt_clobber(clbr),			\
 			       ##__VA_ARGS__				\
-			     : "memory", "cc" EXTRA_CLOBBERS);		\
+			     : "memory", "cc" extra_clbr);		\
 		__ret = (rettype)((((u64)__edx) << 32) | __eax);	\
 	} else {							\
 		asm volatile(pre					\
 			     paravirt_alt(PARAVIRT_CALL)		\
 			     post					\
-			     : PVOP_CALL_CLOBBERS			\
+			     : call_clbr				\
 			     : paravirt_type(op),			\
-			       paravirt_clobber(CLBR_ANY),		\
+			       paravirt_clobber(clbr),			\
 			       ##__VA_ARGS__				\
-			     : "memory", "cc" EXTRA_CLOBBERS);		\
+			     : "memory", "cc" extra_clbr);		\
 		__ret = (rettype)__eax;					\
 	}								\
 	__ret;								\
 	})
-#define __PVOP_VCALL(op, pre, post, ...)				\
+
+#define __PVOP_CALL(rettype, op, pre, post, ...)			\
+	____PVOP_CALL(rettype, op, CLBR_ANY, PVOP_CALL_CLOBBERS,	\
+		      EXTRA_CLOBBERS, pre, post, ##__VA_ARGS__)
+
+#define __PVOP_CALLEESAVE(rettype, op, pre, post, ...)			\
+	____PVOP_CALL(rettype, op.func, CLBR_RET_REG,			\
+		      PVOP_CALLEE_CLOBBERS, ,				\
+		      pre, post, ##__VA_ARGS__)
+
+
+#define ____PVOP_VCALL(op, clbr, call_clbr, extra_clbr, pre, post, ...)	\
 	({								\
 		PVOP_VCALL_ARGS;					\
 		PVOP_TEST_NULL(op);					\
 		asm volatile(pre					\
 			     paravirt_alt(PARAVIRT_CALL)		\
 			     post					\
-			     : PVOP_VCALL_CLOBBERS			\
+			     : call_clbr				\
 			     : paravirt_type(op),			\
-			       paravirt_clobber(CLBR_ANY),		\
+			       paravirt_clobber(clbr),			\
 			       ##__VA_ARGS__				\
-			     : "memory", "cc" VEXTRA_CLOBBERS);		\
+			     : "memory", "cc" extra_clbr);		\
 	})
 
+#define __PVOP_VCALL(op, pre, post, ...)				\
+	____PVOP_VCALL(op, CLBR_ANY, PVOP_VCALL_CLOBBERS,		\
+		       VEXTRA_CLOBBERS,					\
+		       pre, post, ##__VA_ARGS__)
+
+#define __PVOP_VCALLEESAVE(op, pre, post, ...)				\
+	____PVOP_VCALL(op.func, CLBR_RET_REG,				\
+		       PVOP_VCALLEE_CLOBBERS, ,				\
+		       pre, post, ##__VA_ARGS__)
+
+
+
 #define PVOP_CALL0(rettype, op)						\
 	__PVOP_CALL(rettype, op, "", "")
 #define PVOP_VCALL0(op)							\
 	__PVOP_VCALL(op, "", "")
 
+#define PVOP_CALLEE0(rettype, op)					\
+	__PVOP_CALLEESAVE(rettype, op, "", "")
+#define PVOP_VCALLEE0(op)						\
+	__PVOP_VCALLEESAVE(op, "", "")
+
+
 #define PVOP_CALL1(rettype, op, arg1)					\
-	__PVOP_CALL(rettype, op, "", "", "0" ((unsigned long)(arg1)))
+	__PVOP_CALL(rettype, op, "", "", PVOP_CALL_ARG1(arg1))
 #define PVOP_VCALL1(op, arg1)						\
-	__PVOP_VCALL(op, "", "", "0" ((unsigned long)(arg1)))
+	__PVOP_VCALL(op, "", "", PVOP_CALL_ARG1(arg1))
+
+#define PVOP_CALLEE1(rettype, op, arg1)					\
+	__PVOP_CALLEESAVE(rettype, op, "", "", PVOP_CALL_ARG1(arg1))
+#define PVOP_VCALLEE1(op, arg1)						\
+	__PVOP_VCALLEESAVE(op, "", "", PVOP_CALL_ARG1(arg1))
+
 
 #define PVOP_CALL2(rettype, op, arg1, arg2)				\
-	__PVOP_CALL(rettype, op, "", "", "0" ((unsigned long)(arg1)),	\
-		    "1" ((unsigned long)(arg2)))
+	__PVOP_CALL(rettype, op, "", "", PVOP_CALL_ARG1(arg1),		\
+		    PVOP_CALL_ARG2(arg2))
 #define PVOP_VCALL2(op, arg1, arg2)					\
-	__PVOP_VCALL(op, "", "", "0" ((unsigned long)(arg1)),		\
-		     "1" ((unsigned long)(arg2)))
+	__PVOP_VCALL(op, "", "", PVOP_CALL_ARG1(arg1),			\
+		     PVOP_CALL_ARG2(arg2))
+
+#define PVOP_CALLEE2(rettype, op, arg1, arg2)				\
+	__PVOP_CALLEESAVE(rettype, op, "", "", PVOP_CALL_ARG1(arg1),	\
+			  PVOP_CALL_ARG2(arg2))
+#define PVOP_VCALLEE2(op, arg1, arg2)					\
+	__PVOP_VCALLEESAVE(op, "", "", PVOP_CALL_ARG1(arg1),		\
+			   PVOP_CALL_ARG2(arg2))
+
 
 #define PVOP_CALL3(rettype, op, arg1, arg2, arg3)			\
-	__PVOP_CALL(rettype, op, "", "", "0" ((unsigned long)(arg1)),	\
-		    "1"((unsigned long)(arg2)), "2"((unsigned long)(arg3)))
+	__PVOP_CALL(rettype, op, "", "", PVOP_CALL_ARG1(arg1),		\
+		    PVOP_CALL_ARG2(arg2), PVOP_CALL_ARG3(arg3))
 #define PVOP_VCALL3(op, arg1, arg2, arg3)				\
-	__PVOP_VCALL(op, "", "", "0" ((unsigned long)(arg1)),		\
-		     "1"((unsigned long)(arg2)), "2"((unsigned long)(arg3)))
+	__PVOP_VCALL(op, "", "", PVOP_CALL_ARG1(arg1),			\
+		     PVOP_CALL_ARG2(arg2), PVOP_CALL_ARG3(arg3))
 
 /* This is the only difference in x86_64. We can make it much simpler */
 #ifdef CONFIG_X86_32
 #define PVOP_CALL4(rettype, op, arg1, arg2, arg3, arg4)			\
 	__PVOP_CALL(rettype, op,					\
 		    "push %[_arg4];", "lea 4(%%esp),%%esp;",		\
-		    "0" ((u32)(arg1)), "1" ((u32)(arg2)),		\
-		    "2" ((u32)(arg3)), [_arg4] "mr" ((u32)(arg4)))
+		    PVOP_CALL_ARG1(arg1), PVOP_CALL_ARG2(arg2),		\
+		    PVOP_CALL_ARG3(arg3), [_arg4] "mr" ((u32)(arg4)))
 #define PVOP_VCALL4(op, arg1, arg2, arg3, arg4)				\
 	__PVOP_VCALL(op,						\
 		     "push %[_arg4];", "lea 4(%%esp),%%esp;",		\
@@ -587,13 +680,13 @@ int paravirt_disable_iospace(void);
 		     "2" ((u32)(arg3)), [_arg4] "mr" ((u32)(arg4)))
 #else
 #define PVOP_CALL4(rettype, op, arg1, arg2, arg3, arg4)			\
-	__PVOP_CALL(rettype, op, "", "", "0" ((unsigned long)(arg1)),	\
-		    "1"((unsigned long)(arg2)), "2"((unsigned long)(arg3)), \
-		    "3"((unsigned long)(arg4)))
+	__PVOP_CALL(rettype, op, "", "",				\
+		    PVOP_CALL_ARG1(arg1), PVOP_CALL_ARG2(arg2),		\
+		    PVOP_CALL_ARG3(arg3), PVOP_CALL_ARG4(arg4))
 #define PVOP_VCALL4(op, arg1, arg2, arg3, arg4)				\
-	__PVOP_VCALL(op, "", "", "0" ((unsigned long)(arg1)),		\
-		     "1"((unsigned long)(arg2)), "2"((unsigned long)(arg3)), \
-		     "3"((unsigned long)(arg4)))
+	__PVOP_VCALL(op, "", "",					\
+		     PVOP_CALL_ARG1(arg1), PVOP_CALL_ARG2(arg2),	\
+		     PVOP_CALL_ARG3(arg3), PVOP_CALL_ARG4(arg4))
 #endif
 
 static inline int paravirt_enabled(void)
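Review note: hand-expanding one of the new variants shows what the callee-save convention buys at a call site. On 64-bit, PVOP_CALLEE1(pteval_t, pv_mmu_ops.pte_val, v) boils down to roughly (simplified sketch, details elided):

pteval_t ret;
unsigned long __eax = __eax;

asm volatile(paravirt_alt(PARAVIRT_CALL)	/* patchable indirect call */
	     : "=a" (__eax)			/* only the return reg is lost */
	     : paravirt_type(pv_mmu_ops.pte_val),
	       paravirt_clobber(CLBR_RET_REG),
	       "D" ((unsigned long)(v))
	     : "memory", "cc");			/* no r8-r11, no rdi/rsi */
ret = (pteval_t)__eax;

Because the asm no longer lists the argument and scratch registers as clobbered, the compiler may keep live values in them across the call.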
@@ -984,10 +1077,11 @@ static inline void __flush_tlb_single(unsigned long addr)
 	PVOP_VCALL1(pv_mmu_ops.flush_tlb_single, addr);
 }
 
-static inline void flush_tlb_others(cpumask_t cpumask, struct mm_struct *mm,
+static inline void flush_tlb_others(const struct cpumask *cpumask,
+				    struct mm_struct *mm,
 				    unsigned long va)
 {
-	PVOP_VCALL3(pv_mmu_ops.flush_tlb_others, &cpumask, mm, va);
+	PVOP_VCALL3(pv_mmu_ops.flush_tlb_others, cpumask, mm, va);
 }
 
 static inline int paravirt_pgd_alloc(struct mm_struct *mm)
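Review note: at NR_CPUS=4096 a cpumask_t is 512 bytes, all of which the old by-value signature copied onto the stack for every remote flush. Callers now hand over a pointer; a hypothetical call site:

/* flush va on one remote CPU; cpumask_of() yields a const struct
 * cpumask * without materializing a mask on the stack */
flush_tlb_others(cpumask_of(cpu), mm, va);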
@@ -1059,13 +1153,13 @@ static inline pte_t __pte(pteval_t val)
 	pteval_t ret;
 
 	if (sizeof(pteval_t) > sizeof(long))
-		ret = PVOP_CALL2(pteval_t,
-				 pv_mmu_ops.make_pte,
-				 val, (u64)val >> 32);
+		ret = PVOP_CALLEE2(pteval_t,
+				   pv_mmu_ops.make_pte,
+				   val, (u64)val >> 32);
 	else
-		ret = PVOP_CALL1(pteval_t,
-				 pv_mmu_ops.make_pte,
-				 val);
+		ret = PVOP_CALLEE1(pteval_t,
+				   pv_mmu_ops.make_pte,
+				   val);
 
 	return (pte_t) { .pte = ret };
 }
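Review note: the sizeof(pteval_t) > sizeof(long) test is resolved entirely at compile time; it is true only on 32-bit PAE, where a pte is 64 bits wide and must travel in two 32-bit argument registers. The split in the two-argument branch is the conventional one:

/* sketch: how the PVOP_CALLEE2 branch carves up a PAE pte */
u32 lo = (u32)val;		/* PVOP_CALL_ARG1 -> %eax */
u32 hi = (u32)(val >> 32);	/* PVOP_CALL_ARG2 -> %edx */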
@@ -1075,29 +1169,12 @@ static inline pteval_t pte_val(pte_t pte)
 	pteval_t ret;
 
 	if (sizeof(pteval_t) > sizeof(long))
-		ret = PVOP_CALL2(pteval_t, pv_mmu_ops.pte_val,
-				 pte.pte, (u64)pte.pte >> 32);
-	else
-		ret = PVOP_CALL1(pteval_t, pv_mmu_ops.pte_val,
-				 pte.pte);
-
-	return ret;
-}
-
-static inline pteval_t pte_flags(pte_t pte)
-{
-	pteval_t ret;
-
-	if (sizeof(pteval_t) > sizeof(long))
-		ret = PVOP_CALL2(pteval_t, pv_mmu_ops.pte_flags,
-				 pte.pte, (u64)pte.pte >> 32);
+		ret = PVOP_CALLEE2(pteval_t, pv_mmu_ops.pte_val,
+				   pte.pte, (u64)pte.pte >> 32);
 	else
-		ret = PVOP_CALL1(pteval_t, pv_mmu_ops.pte_flags,
-				 pte.pte);
+		ret = PVOP_CALLEE1(pteval_t, pv_mmu_ops.pte_val,
+				   pte.pte);
 
-#ifdef CONFIG_PARAVIRT_DEBUG
-	BUG_ON(ret & PTE_PFN_MASK);
-#endif
 	return ret;
 }
 
@@ -1106,11 +1183,11 @@ static inline pgd_t __pgd(pgdval_t val)
 	pgdval_t ret;
 
 	if (sizeof(pgdval_t) > sizeof(long))
-		ret = PVOP_CALL2(pgdval_t, pv_mmu_ops.make_pgd,
-				 val, (u64)val >> 32);
+		ret = PVOP_CALLEE2(pgdval_t, pv_mmu_ops.make_pgd,
+				   val, (u64)val >> 32);
 	else
-		ret = PVOP_CALL1(pgdval_t, pv_mmu_ops.make_pgd,
-				 val);
+		ret = PVOP_CALLEE1(pgdval_t, pv_mmu_ops.make_pgd,
+				   val);
 
 	return (pgd_t) { ret };
 }
@@ -1120,11 +1197,11 @@ static inline pgdval_t pgd_val(pgd_t pgd)
 	pgdval_t ret;
 
 	if (sizeof(pgdval_t) > sizeof(long))
-		ret = PVOP_CALL2(pgdval_t, pv_mmu_ops.pgd_val,
-				 pgd.pgd, (u64)pgd.pgd >> 32);
+		ret = PVOP_CALLEE2(pgdval_t, pv_mmu_ops.pgd_val,
+				   pgd.pgd, (u64)pgd.pgd >> 32);
 	else
-		ret = PVOP_CALL1(pgdval_t, pv_mmu_ops.pgd_val,
-				 pgd.pgd);
+		ret = PVOP_CALLEE1(pgdval_t, pv_mmu_ops.pgd_val,
+				   pgd.pgd);
 
 	return ret;
 }
@@ -1188,11 +1265,11 @@ static inline pmd_t __pmd(pmdval_t val)
 	pmdval_t ret;
 
 	if (sizeof(pmdval_t) > sizeof(long))
-		ret = PVOP_CALL2(pmdval_t, pv_mmu_ops.make_pmd,
-				 val, (u64)val >> 32);
+		ret = PVOP_CALLEE2(pmdval_t, pv_mmu_ops.make_pmd,
+				   val, (u64)val >> 32);
 	else
-		ret = PVOP_CALL1(pmdval_t, pv_mmu_ops.make_pmd,
-				 val);
+		ret = PVOP_CALLEE1(pmdval_t, pv_mmu_ops.make_pmd,
+				   val);
 
 	return (pmd_t) { ret };
 }
@@ -1202,11 +1279,11 @@ static inline pmdval_t pmd_val(pmd_t pmd)
 	pmdval_t ret;
 
 	if (sizeof(pmdval_t) > sizeof(long))
-		ret = PVOP_CALL2(pmdval_t, pv_mmu_ops.pmd_val,
-				 pmd.pmd, (u64)pmd.pmd >> 32);
+		ret = PVOP_CALLEE2(pmdval_t, pv_mmu_ops.pmd_val,
+				   pmd.pmd, (u64)pmd.pmd >> 32);
 	else
-		ret = PVOP_CALL1(pmdval_t, pv_mmu_ops.pmd_val,
-				 pmd.pmd);
+		ret = PVOP_CALLEE1(pmdval_t, pv_mmu_ops.pmd_val,
+				   pmd.pmd);
 
 	return ret;
 }
@@ -1228,11 +1305,11 @@ static inline pud_t __pud(pudval_t val)
 	pudval_t ret;
 
 	if (sizeof(pudval_t) > sizeof(long))
-		ret = PVOP_CALL2(pudval_t, pv_mmu_ops.make_pud,
-				 val, (u64)val >> 32);
+		ret = PVOP_CALLEE2(pudval_t, pv_mmu_ops.make_pud,
+				   val, (u64)val >> 32);
 	else
-		ret = PVOP_CALL1(pudval_t, pv_mmu_ops.make_pud,
-				 val);
+		ret = PVOP_CALLEE1(pudval_t, pv_mmu_ops.make_pud,
+				   val);
 
 	return (pud_t) { ret };
 }
@@ -1242,11 +1319,11 @@ static inline pudval_t pud_val(pud_t pud)
 	pudval_t ret;
 
 	if (sizeof(pudval_t) > sizeof(long))
-		ret = PVOP_CALL2(pudval_t, pv_mmu_ops.pud_val,
-				 pud.pud, (u64)pud.pud >> 32);
+		ret = PVOP_CALLEE2(pudval_t, pv_mmu_ops.pud_val,
+				   pud.pud, (u64)pud.pud >> 32);
 	else
-		ret = PVOP_CALL1(pudval_t, pv_mmu_ops.pud_val,
-				 pud.pud);
+		ret = PVOP_CALLEE1(pudval_t, pv_mmu_ops.pud_val,
+				   pud.pud);
 
 	return ret;
 }
@@ -1286,13 +1363,6 @@ static inline void set_pte_atomic(pte_t *ptep, pte_t pte)
 		    pte.pte, pte.pte >> 32);
 }
 
-static inline void set_pte_present(struct mm_struct *mm, unsigned long addr,
-				   pte_t *ptep, pte_t pte)
-{
-	/* 5 arg words */
-	pv_mmu_ops.set_pte_present(mm, addr, ptep, pte);
-}
-
 static inline void pte_clear(struct mm_struct *mm, unsigned long addr,
 			     pte_t *ptep)
 {
@@ -1309,12 +1379,6 @@ static inline void set_pte_atomic(pte_t *ptep, pte_t pte)
 	set_pte(ptep, pte);
 }
 
-static inline void set_pte_present(struct mm_struct *mm, unsigned long addr,
-				   pte_t *ptep, pte_t pte)
-{
-	set_pte(ptep, pte);
-}
-
 static inline void pte_clear(struct mm_struct *mm, unsigned long addr,
 			     pte_t *ptep)
 {
@@ -1374,9 +1438,10 @@ static inline void __set_fixmap(unsigned /* enum fixed_addresses */ idx,
 }
 
 void _paravirt_nop(void);
-#define paravirt_nop	((void *)_paravirt_nop)
+u32 _paravirt_ident_32(u32);
+u64 _paravirt_ident_64(u64);
 
-void paravirt_use_bytelocks(void);
+#define paravirt_nop	((void *)_paravirt_nop)
 
 #ifdef CONFIG_SMP
 
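Review note: the identity functions declared here live in the companion paravirt.c change, and they are as trivial as the names suggest -- which is the point: paravirt_patch_ident_32/64 (declared above) can recognize ops that point at them and patch the call site into a plain register move, or nothing at all:

/* from the matching paravirt.c hunk, shown for context */
u32 _paravirt_ident_32(u32 x)
{
	return x;
}

u64 _paravirt_ident_64(u64 x)
{
	return x;
}

On native hardware, pte_val, make_pte and friends are exactly these identities, so the paravirt indirection costs nothing once patched.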
@@ -1426,12 +1491,37 @@ extern struct paravirt_patch_site __parainstructions[],
 	__parainstructions_end[];
 
 #ifdef CONFIG_X86_32
-#define PV_SAVE_REGS "pushl %%ecx; pushl %%edx;"
-#define PV_RESTORE_REGS "popl %%edx; popl %%ecx"
+#define PV_SAVE_REGS "pushl %ecx; pushl %edx;"
+#define PV_RESTORE_REGS "popl %edx; popl %ecx;"
+
+/* save and restore all caller-save registers, except return value */
+#define PV_SAVE_ALL_CALLER_REGS		"pushl %ecx;"
+#define PV_RESTORE_ALL_CALLER_REGS	"popl  %ecx;"
+
 #define PV_FLAGS_ARG "0"
 #define PV_EXTRA_CLOBBERS
 #define PV_VEXTRA_CLOBBERS
 #else
+/* save and restore all caller-save registers, except return value */
+#define PV_SAVE_ALL_CALLER_REGS						\
+	"push %rcx;"							\
+	"push %rdx;"							\
+	"push %rsi;"							\
+	"push %rdi;"							\
+	"push %r8;"							\
+	"push %r9;"							\
+	"push %r10;"							\
+	"push %r11;"
+#define PV_RESTORE_ALL_CALLER_REGS					\
+	"pop %r11;"							\
+	"pop %r10;"							\
+	"pop %r9;"							\
+	"pop %r8;"							\
+	"pop %rdi;"							\
+	"pop %rsi;"							\
+	"pop %rdx;"							\
+	"pop %rcx;"
+
 /* We save some registers, but all of them, that's too much. We clobber all
  * caller saved registers but the argument parameter */
 #define PV_SAVE_REGS "pushq %%rdi;"
@@ -1441,52 +1531,76 @@ extern struct paravirt_patch_site __parainstructions[],
 #define PV_FLAGS_ARG "D"
 #endif
 
+/*
+ * Generate a thunk around a function which saves all caller-save
+ * registers except for the return value.  This allows C functions to
+ * be called from assembler code where fewer than normal registers are
+ * available.  It may also help code generation around calls from C
+ * code if the common case doesn't use many registers.
+ *
+ * When a callee is wrapped in a thunk, the caller can assume that all
+ * arg regs and all scratch registers are preserved across the
+ * call.  The return value in rax/eax will not be saved, even for void
+ * functions.
+ */
+#define PV_CALLEE_SAVE_REGS_THUNK(func)					\
+	extern typeof(func) __raw_callee_save_##func;			\
+	static void *__##func##__ __used = func;			\
+									\
+	asm(".pushsection .text;"					\
+	    "__raw_callee_save_" #func ": "				\
+	    PV_SAVE_ALL_CALLER_REGS					\
+	    "call " #func ";"						\
+	    PV_RESTORE_ALL_CALLER_REGS					\
+	    "ret;"							\
+	    ".popsection")
+
+/* Get a reference to a callee-save function */
+#define PV_CALLEE_SAVE(func)						\
+	((struct paravirt_callee_save) { __raw_callee_save_##func })
+
+/* Promise that "func" already uses the right calling convention */
+#define __PV_IS_CALLEE_SAVE(func)			\
+	((struct paravirt_callee_save) { func })
+
 static inline unsigned long __raw_local_save_flags(void)
 {
 	unsigned long f;
 
-	asm volatile(paravirt_alt(PV_SAVE_REGS
-				  PARAVIRT_CALL
-				  PV_RESTORE_REGS)
+	asm volatile(paravirt_alt(PARAVIRT_CALL)
 		     : "=a"(f)
 		     : paravirt_type(pv_irq_ops.save_fl),
 		       paravirt_clobber(CLBR_EAX)
-		     : "memory", "cc" PV_VEXTRA_CLOBBERS);
+		     : "memory", "cc");
	return f;
 }
 
 static inline void raw_local_irq_restore(unsigned long f)
 {
-	asm volatile(paravirt_alt(PV_SAVE_REGS
-				  PARAVIRT_CALL
-				  PV_RESTORE_REGS)
+	asm volatile(paravirt_alt(PARAVIRT_CALL)
 		     : "=a"(f)
 		     : PV_FLAGS_ARG(f),
 		       paravirt_type(pv_irq_ops.restore_fl),
 		       paravirt_clobber(CLBR_EAX)
-		     : "memory", "cc" PV_EXTRA_CLOBBERS);
+		     : "memory", "cc");
 }
 
 static inline void raw_local_irq_disable(void)
 {
-	asm volatile(paravirt_alt(PV_SAVE_REGS
-				  PARAVIRT_CALL
-				  PV_RESTORE_REGS)
+	asm volatile(paravirt_alt(PARAVIRT_CALL)
 		     :
 		     : paravirt_type(pv_irq_ops.irq_disable),
 		       paravirt_clobber(CLBR_EAX)
-		     : "memory", "eax", "cc" PV_EXTRA_CLOBBERS);
+		     : "memory", "eax", "cc");
 }
 
 static inline void raw_local_irq_enable(void)
 {
-	asm volatile(paravirt_alt(PV_SAVE_REGS
-				  PARAVIRT_CALL
-				  PV_RESTORE_REGS)
+	asm volatile(paravirt_alt(PARAVIRT_CALL)
 		     :
 		     : paravirt_type(pv_irq_ops.irq_enable),
 		       paravirt_clobber(CLBR_EAX)
-		     : "memory", "eax", "cc" PV_EXTRA_CLOBBERS);
+		     : "memory", "eax", "cc");
 }
 
 static inline unsigned long __raw_local_irq_save(void)
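Review note: spelled out for a concrete function, PV_CALLEE_SAVE_REGS_THUNK(func) on 32-bit emits nothing more than (top-level asm, as in the macro above):

asm(".pushsection .text;"
    "__raw_callee_save_func: "
    "pushl %ecx;"		/* PV_SAVE_ALL_CALLER_REGS */
    "call func;"
    "popl  %ecx;"		/* PV_RESTORE_ALL_CALLER_REGS */
    "ret;"
    ".popsection");

The static __##func##__ pointer in the macro exists only to mark func as referenced, so a static callee is not discarded before the asm-level call to it is resolved.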
@@ -1529,33 +1643,49 @@ static inline unsigned long __raw_local_irq_save(void)
 	.popsection
 
 
+#define COND_PUSH(set, mask, reg)			\
+	.if ((~(set)) & mask); push %reg; .endif
+#define COND_POP(set, mask, reg)			\
+	.if ((~(set)) & mask); pop %reg; .endif
+
 #ifdef CONFIG_X86_64
-#define PV_SAVE_REGS				\
-	push %rax;				\
-	push %rcx;				\
-	push %rdx;				\
-	push %rsi;				\
-	push %rdi;				\
-	push %r8;				\
-	push %r9;				\
-	push %r10;				\
-	push %r11
-#define PV_RESTORE_REGS				\
-	pop %r11;				\
-	pop %r10;				\
-	pop %r9;				\
-	pop %r8;				\
-	pop %rdi;				\
-	pop %rsi;				\
-	pop %rdx;				\
-	pop %rcx;				\
-	pop %rax
+
+#define PV_SAVE_REGS(set)			\
+	COND_PUSH(set, CLBR_RAX, rax);		\
+	COND_PUSH(set, CLBR_RCX, rcx);		\
+	COND_PUSH(set, CLBR_RDX, rdx);		\
+	COND_PUSH(set, CLBR_RSI, rsi);		\
+	COND_PUSH(set, CLBR_RDI, rdi);		\
+	COND_PUSH(set, CLBR_R8, r8);		\
+	COND_PUSH(set, CLBR_R9, r9);		\
+	COND_PUSH(set, CLBR_R10, r10);		\
+	COND_PUSH(set, CLBR_R11, r11)
+#define PV_RESTORE_REGS(set)			\
+	COND_POP(set, CLBR_R11, r11);		\
+	COND_POP(set, CLBR_R10, r10);		\
+	COND_POP(set, CLBR_R9, r9);		\
+	COND_POP(set, CLBR_R8, r8);		\
+	COND_POP(set, CLBR_RDI, rdi);		\
+	COND_POP(set, CLBR_RSI, rsi);		\
+	COND_POP(set, CLBR_RDX, rdx);		\
+	COND_POP(set, CLBR_RCX, rcx);		\
+	COND_POP(set, CLBR_RAX, rax)
+
 #define PARA_PATCH(struct, off)        ((PARAVIRT_PATCH_##struct + (off)) / 8)
 #define PARA_SITE(ptype, clobbers, ops) _PVSITE(ptype, clobbers, ops, .quad, 8)
 #define PARA_INDIRECT(addr)	*addr(%rip)
 #else
-#define PV_SAVE_REGS	pushl %eax; pushl %edi; pushl %ecx; pushl %edx
-#define PV_RESTORE_REGS	popl %edx; popl %ecx; popl %edi; popl %eax
+#define PV_SAVE_REGS(set)			\
+	COND_PUSH(set, CLBR_EAX, eax);		\
+	COND_PUSH(set, CLBR_EDI, edi);		\
+	COND_PUSH(set, CLBR_ECX, ecx);		\
+	COND_PUSH(set, CLBR_EDX, edx)
+#define PV_RESTORE_REGS(set)			\
+	COND_POP(set, CLBR_EDX, edx);		\
+	COND_POP(set, CLBR_ECX, ecx);		\
+	COND_POP(set, CLBR_EDI, edi);		\
+	COND_POP(set, CLBR_EAX, eax)
+
 #define PARA_PATCH(struct, off)        ((PARAVIRT_PATCH_##struct + (off)) / 4)
 #define PARA_SITE(ptype, clobbers, ops) _PVSITE(ptype, clobbers, ops, .long, 4)
 #define PARA_INDIRECT(addr)	*%cs:addr
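Review note: mind the inverted test in COND_PUSH/COND_POP -- a register is pushed only when its bit is absent from 'set'. In other words, 'set' names the registers the call site is allowed to clobber, and everything outside it gets preserved. Worked through for the 32-bit side:

/*
 * PV_SAVE_REGS(CLBR_EAX) assembles to:
 *
 *	.if ((~CLBR_EAX) & CLBR_EAX); push %eax; .endif    -> 0: skipped
 *	.if ((~CLBR_EAX) & CLBR_EDI); push %edi; .endif    -> 1: pushed
 *	.if ((~CLBR_EAX) & CLBR_ECX); push %ecx; .endif    -> 1: pushed
 *	.if ((~CLBR_EAX) & CLBR_EDX); push %edx; .endif    -> 1: pushed
 *
 * while PV_SAVE_REGS(clobbers | CLBR_CALLEE_SAVE), as used by
 * DISABLE_INTERRUPTS/ENABLE_INTERRUPTS below, emits no pushes at all
 * when the site already declares CLBR_ANY.
 */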
@@ -1567,15 +1697,15 @@ static inline unsigned long __raw_local_irq_save(void)
 
 #define DISABLE_INTERRUPTS(clobbers)					\
 	PARA_SITE(PARA_PATCH(pv_irq_ops, PV_IRQ_irq_disable), clobbers, \
-		  PV_SAVE_REGS;						\
+		  PV_SAVE_REGS(clobbers | CLBR_CALLEE_SAVE);		\
 		  call PARA_INDIRECT(pv_irq_ops+PV_IRQ_irq_disable);	\
-		  PV_RESTORE_REGS;)
+		  PV_RESTORE_REGS(clobbers | CLBR_CALLEE_SAVE);)
 
 #define ENABLE_INTERRUPTS(clobbers)					\
 	PARA_SITE(PARA_PATCH(pv_irq_ops, PV_IRQ_irq_enable), clobbers,	\
-		  PV_SAVE_REGS;						\
+		  PV_SAVE_REGS(clobbers | CLBR_CALLEE_SAVE);		\
 		  call PARA_INDIRECT(pv_irq_ops+PV_IRQ_irq_enable);	\
-		  PV_RESTORE_REGS;)
+		  PV_RESTORE_REGS(clobbers | CLBR_CALLEE_SAVE);)
 
 #define USERGS_SYSRET32							\
 	PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_usergs_sysret32),	\
@@ -1605,11 +1735,15 @@ static inline unsigned long __raw_local_irq_save(void)
 	PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_swapgs), CLBR_NONE,	\
 		  swapgs)
 
+/*
+ * Note: swapgs is very special, and in practice is either going to be
+ * implemented with a single "swapgs" instruction or something very
+ * special.  Either way, we don't need to save any registers for
+ * it.
+ */
 #define SWAPGS								\
 	PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_swapgs), CLBR_NONE,	\
-		  PV_SAVE_REGS;						\
-		  call PARA_INDIRECT(pv_cpu_ops+PV_CPU_swapgs);		\
-		  PV_RESTORE_REGS					\
+		  call PARA_INDIRECT(pv_cpu_ops+PV_CPU_swapgs)		\
 	)
 
 #define GET_CR2_INTO_RCX				\