diff options
author | Andi Kleen <ak@linux.intel.com> | 2013-10-22 12:07:56 -0400 |
---|---|---|
committer | H. Peter Anvin <hpa@linux.intel.com> | 2014-01-30 01:17:17 -0500 |
commit | a2e7f0e3a4f0f23fe4cd8cc22da547872f0170bb (patch) | |
tree | c13215deb885bace1fbfe9d084d8d5781e8f7c62 /arch/x86 | |
parent | 824a2870098fa5364d49d4cd5a1f41544d9f6c65 (diff) |
x86, asmlinkage, paravirt: Make paravirt thunks global
The paravirt thunks rely on a hack: a static reference to a static
function is used to refer to that function from a top-level statement.
This assumes that gcc always generates static function names in a specific
format, which is not necessarily true.
Simply make these functions global and asmlinkage or __visible. This way the
static __used variables are not needed and everything works.
Functions with arguments are made __visible to keep the register calling
convention on 32-bit.
Changed in paravirt and in all of its users (Xen and vSMP).
v2: Use __visible for functions with arguments
Cc: Jeremy Fitzhardinge <jeremy@goop.org>
Cc: Ido Yariv <ido@wizery.com>
Signed-off-by: Andi Kleen <ak@linux.intel.com>
Link: http://lkml.kernel.org/r/1382458079-24450-5-git-send-email-andi@firstfloor.org
Signed-off-by: H. Peter Anvin <hpa@linux.intel.com>
Diffstat (limited to 'arch/x86')
-rw-r--r-- | arch/x86/include/asm/paravirt.h | 2 | ||||
-rw-r--r-- | arch/x86/kernel/vsmp_64.c | 8 | ||||
-rw-r--r-- | arch/x86/xen/irq.c | 8 | ||||
-rw-r--r-- | arch/x86/xen/mmu.c | 16 |
4 files changed, 17 insertions, 17 deletions
diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h index 401f350ef71b..cd6e1610e29e 100644 --- a/arch/x86/include/asm/paravirt.h +++ b/arch/x86/include/asm/paravirt.h | |||
@@ -781,9 +781,9 @@ static __always_inline void __ticket_unlock_kick(struct arch_spinlock *lock, | |||
781 | */ | 781 | */ |
782 | #define PV_CALLEE_SAVE_REGS_THUNK(func) \ | 782 | #define PV_CALLEE_SAVE_REGS_THUNK(func) \ |
783 | extern typeof(func) __raw_callee_save_##func; \ | 783 | extern typeof(func) __raw_callee_save_##func; \ |
784 | static void *__##func##__ __used = func; \ | ||
785 | \ | 784 | \ |
786 | asm(".pushsection .text;" \ | 785 | asm(".pushsection .text;" \ |
786 | ".globl __raw_callee_save_" #func " ; " \ | ||
787 | "__raw_callee_save_" #func ": " \ | 787 | "__raw_callee_save_" #func ": " \ |
788 | PV_SAVE_ALL_CALLER_REGS \ | 788 | PV_SAVE_ALL_CALLER_REGS \ |
789 | "call " #func ";" \ | 789 | "call " #func ";" \ |
diff --git a/arch/x86/kernel/vsmp_64.c b/arch/x86/kernel/vsmp_64.c index 992f890283e9..f6584a90aba3 100644 --- a/arch/x86/kernel/vsmp_64.c +++ b/arch/x86/kernel/vsmp_64.c | |||
@@ -33,7 +33,7 @@ | |||
33 | * and vice versa. | 33 | * and vice versa. |
34 | */ | 34 | */ |
35 | 35 | ||
36 | static unsigned long vsmp_save_fl(void) | 36 | asmlinkage unsigned long vsmp_save_fl(void) |
37 | { | 37 | { |
38 | unsigned long flags = native_save_fl(); | 38 | unsigned long flags = native_save_fl(); |
39 | 39 | ||
@@ -43,7 +43,7 @@ static unsigned long vsmp_save_fl(void) | |||
43 | } | 43 | } |
44 | PV_CALLEE_SAVE_REGS_THUNK(vsmp_save_fl); | 44 | PV_CALLEE_SAVE_REGS_THUNK(vsmp_save_fl); |
45 | 45 | ||
46 | static void vsmp_restore_fl(unsigned long flags) | 46 | __visible void vsmp_restore_fl(unsigned long flags) |
47 | { | 47 | { |
48 | if (flags & X86_EFLAGS_IF) | 48 | if (flags & X86_EFLAGS_IF) |
49 | flags &= ~X86_EFLAGS_AC; | 49 | flags &= ~X86_EFLAGS_AC; |
@@ -53,7 +53,7 @@ static void vsmp_restore_fl(unsigned long flags) | |||
53 | } | 53 | } |
54 | PV_CALLEE_SAVE_REGS_THUNK(vsmp_restore_fl); | 54 | PV_CALLEE_SAVE_REGS_THUNK(vsmp_restore_fl); |
55 | 55 | ||
56 | static void vsmp_irq_disable(void) | 56 | asmlinkage void vsmp_irq_disable(void) |
57 | { | 57 | { |
58 | unsigned long flags = native_save_fl(); | 58 | unsigned long flags = native_save_fl(); |
59 | 59 | ||
@@ -61,7 +61,7 @@ static void vsmp_irq_disable(void) | |||
61 | } | 61 | } |
62 | PV_CALLEE_SAVE_REGS_THUNK(vsmp_irq_disable); | 62 | PV_CALLEE_SAVE_REGS_THUNK(vsmp_irq_disable); |
63 | 63 | ||
64 | static void vsmp_irq_enable(void) | 64 | asmlinkage void vsmp_irq_enable(void) |
65 | { | 65 | { |
66 | unsigned long flags = native_save_fl(); | 66 | unsigned long flags = native_save_fl(); |
67 | 67 | ||
diff --git a/arch/x86/xen/irq.c b/arch/x86/xen/irq.c index 0da7f863056f..f56c23b60a6c 100644 --- a/arch/x86/xen/irq.c +++ b/arch/x86/xen/irq.c | |||
@@ -22,7 +22,7 @@ void xen_force_evtchn_callback(void) | |||
22 | (void)HYPERVISOR_xen_version(0, NULL); | 22 | (void)HYPERVISOR_xen_version(0, NULL); |
23 | } | 23 | } |
24 | 24 | ||
25 | static unsigned long xen_save_fl(void) | 25 | asmlinkage unsigned long xen_save_fl(void) |
26 | { | 26 | { |
27 | struct vcpu_info *vcpu; | 27 | struct vcpu_info *vcpu; |
28 | unsigned long flags; | 28 | unsigned long flags; |
@@ -40,7 +40,7 @@ static unsigned long xen_save_fl(void) | |||
40 | } | 40 | } |
41 | PV_CALLEE_SAVE_REGS_THUNK(xen_save_fl); | 41 | PV_CALLEE_SAVE_REGS_THUNK(xen_save_fl); |
42 | 42 | ||
43 | static void xen_restore_fl(unsigned long flags) | 43 | __visible void xen_restore_fl(unsigned long flags) |
44 | { | 44 | { |
45 | struct vcpu_info *vcpu; | 45 | struct vcpu_info *vcpu; |
46 | 46 | ||
@@ -62,7 +62,7 @@ static void xen_restore_fl(unsigned long flags) | |||
62 | } | 62 | } |
63 | PV_CALLEE_SAVE_REGS_THUNK(xen_restore_fl); | 63 | PV_CALLEE_SAVE_REGS_THUNK(xen_restore_fl); |
64 | 64 | ||
65 | static void xen_irq_disable(void) | 65 | asmlinkage void xen_irq_disable(void) |
66 | { | 66 | { |
67 | /* There's a one instruction preempt window here. We need to | 67 | /* There's a one instruction preempt window here. We need to |
68 | make sure we're don't switch CPUs between getting the vcpu | 68 | make sure we're don't switch CPUs between getting the vcpu |
@@ -73,7 +73,7 @@ static void xen_irq_disable(void) | |||
73 | } | 73 | } |
74 | PV_CALLEE_SAVE_REGS_THUNK(xen_irq_disable); | 74 | PV_CALLEE_SAVE_REGS_THUNK(xen_irq_disable); |
75 | 75 | ||
76 | static void xen_irq_enable(void) | 76 | asmlinkage void xen_irq_enable(void) |
77 | { | 77 | { |
78 | struct vcpu_info *vcpu; | 78 | struct vcpu_info *vcpu; |
79 | 79 | ||
diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c index ce563be09cc1..648512c50cc7 100644 --- a/arch/x86/xen/mmu.c +++ b/arch/x86/xen/mmu.c | |||
@@ -431,7 +431,7 @@ static pteval_t iomap_pte(pteval_t val) | |||
431 | return val; | 431 | return val; |
432 | } | 432 | } |
433 | 433 | ||
434 | static pteval_t xen_pte_val(pte_t pte) | 434 | __visible pteval_t xen_pte_val(pte_t pte) |
435 | { | 435 | { |
436 | pteval_t pteval = pte.pte; | 436 | pteval_t pteval = pte.pte; |
437 | #if 0 | 437 | #if 0 |
@@ -448,7 +448,7 @@ static pteval_t xen_pte_val(pte_t pte) | |||
448 | } | 448 | } |
449 | PV_CALLEE_SAVE_REGS_THUNK(xen_pte_val); | 449 | PV_CALLEE_SAVE_REGS_THUNK(xen_pte_val); |
450 | 450 | ||
451 | static pgdval_t xen_pgd_val(pgd_t pgd) | 451 | __visible pgdval_t xen_pgd_val(pgd_t pgd) |
452 | { | 452 | { |
453 | return pte_mfn_to_pfn(pgd.pgd); | 453 | return pte_mfn_to_pfn(pgd.pgd); |
454 | } | 454 | } |
@@ -479,7 +479,7 @@ void xen_set_pat(u64 pat) | |||
479 | WARN_ON(pat != 0x0007010600070106ull); | 479 | WARN_ON(pat != 0x0007010600070106ull); |
480 | } | 480 | } |
481 | 481 | ||
482 | static pte_t xen_make_pte(pteval_t pte) | 482 | __visible pte_t xen_make_pte(pteval_t pte) |
483 | { | 483 | { |
484 | phys_addr_t addr = (pte & PTE_PFN_MASK); | 484 | phys_addr_t addr = (pte & PTE_PFN_MASK); |
485 | #if 0 | 485 | #if 0 |
@@ -514,14 +514,14 @@ static pte_t xen_make_pte(pteval_t pte) | |||
514 | } | 514 | } |
515 | PV_CALLEE_SAVE_REGS_THUNK(xen_make_pte); | 515 | PV_CALLEE_SAVE_REGS_THUNK(xen_make_pte); |
516 | 516 | ||
517 | static pgd_t xen_make_pgd(pgdval_t pgd) | 517 | __visible pgd_t xen_make_pgd(pgdval_t pgd) |
518 | { | 518 | { |
519 | pgd = pte_pfn_to_mfn(pgd); | 519 | pgd = pte_pfn_to_mfn(pgd); |
520 | return native_make_pgd(pgd); | 520 | return native_make_pgd(pgd); |
521 | } | 521 | } |
522 | PV_CALLEE_SAVE_REGS_THUNK(xen_make_pgd); | 522 | PV_CALLEE_SAVE_REGS_THUNK(xen_make_pgd); |
523 | 523 | ||
524 | static pmdval_t xen_pmd_val(pmd_t pmd) | 524 | __visible pmdval_t xen_pmd_val(pmd_t pmd) |
525 | { | 525 | { |
526 | return pte_mfn_to_pfn(pmd.pmd); | 526 | return pte_mfn_to_pfn(pmd.pmd); |
527 | } | 527 | } |
@@ -580,7 +580,7 @@ static void xen_pmd_clear(pmd_t *pmdp) | |||
580 | } | 580 | } |
581 | #endif /* CONFIG_X86_PAE */ | 581 | #endif /* CONFIG_X86_PAE */ |
582 | 582 | ||
583 | static pmd_t xen_make_pmd(pmdval_t pmd) | 583 | __visible pmd_t xen_make_pmd(pmdval_t pmd) |
584 | { | 584 | { |
585 | pmd = pte_pfn_to_mfn(pmd); | 585 | pmd = pte_pfn_to_mfn(pmd); |
586 | return native_make_pmd(pmd); | 586 | return native_make_pmd(pmd); |
@@ -588,13 +588,13 @@ static pmd_t xen_make_pmd(pmdval_t pmd) | |||
588 | PV_CALLEE_SAVE_REGS_THUNK(xen_make_pmd); | 588 | PV_CALLEE_SAVE_REGS_THUNK(xen_make_pmd); |
589 | 589 | ||
590 | #if PAGETABLE_LEVELS == 4 | 590 | #if PAGETABLE_LEVELS == 4 |
591 | static pudval_t xen_pud_val(pud_t pud) | 591 | __visible pudval_t xen_pud_val(pud_t pud) |
592 | { | 592 | { |
593 | return pte_mfn_to_pfn(pud.pud); | 593 | return pte_mfn_to_pfn(pud.pud); |
594 | } | 594 | } |
595 | PV_CALLEE_SAVE_REGS_THUNK(xen_pud_val); | 595 | PV_CALLEE_SAVE_REGS_THUNK(xen_pud_val); |
596 | 596 | ||
597 | static pud_t xen_make_pud(pudval_t pud) | 597 | __visible pud_t xen_make_pud(pudval_t pud) |
598 | { | 598 | { |
599 | pud = pte_pfn_to_mfn(pud); | 599 | pud = pte_pfn_to_mfn(pud); |
600 | 600 | ||