about summary refs log tree commit diff stats
diff options
context:
space:
mode:
author    Juergen Gross <jgross@suse.com>  2018-10-30 02:33:01 -0400
committer Ingo Molnar <mingo@kernel.org>  2018-10-30 04:55:31 -0400
commit    7847c7be0481558f17e3ef3b03f573677fd30d29 (patch)
tree      db1bce432d81333b4b335dc865daff40fd1ce6ad
parent    f77084d96355f5fba8e2c1fb3a51a393b1570de7 (diff)
x86/paravirt: Remove unused _paravirt_ident_32
There is no user of _paravirt_ident_32 left in the tree. Remove it
together with the related paravirt_patch_ident_32().

paravirt_patch_ident_64() can be moved inside CONFIG_PARAVIRT_XXL=y.

Signed-off-by: Juergen Gross <jgross@suse.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: akataria@vmware.com
Cc: boris.ostrovsky@oracle.com
Cc: rusty@rustcorp.com.au
Cc: virtualization@lists.linux-foundation.org
Cc: xen-devel@lists.xenproject.org
Link: http://lkml.kernel.org/r/20181030063301.15054-1-jgross@suse.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
-rw-r--r--  arch/x86/include/asm/paravirt_types.h   2
-rw-r--r--  arch/x86/kernel/paravirt.c             26
-rw-r--r--  arch/x86/kernel/paravirt_patch_32.c    18
-rw-r--r--  arch/x86/kernel/paravirt_patch_64.c    20
4 files changed, 19 insertions, 47 deletions
diff --git a/arch/x86/include/asm/paravirt_types.h b/arch/x86/include/asm/paravirt_types.h
index fba54ca23b2a..26942ad63830 100644
--- a/arch/x86/include/asm/paravirt_types.h
+++ b/arch/x86/include/asm/paravirt_types.h
@@ -361,7 +361,6 @@ extern struct paravirt_patch_template pv_ops;
361 __visible extern const char start_##ops##_##name[], end_##ops##_##name[]; \ 361 __visible extern const char start_##ops##_##name[], end_##ops##_##name[]; \
362 asm(NATIVE_LABEL("start_", ops, name) code NATIVE_LABEL("end_", ops, name)) 362 asm(NATIVE_LABEL("start_", ops, name) code NATIVE_LABEL("end_", ops, name))
363 363
364unsigned paravirt_patch_ident_32(void *insnbuf, unsigned len);
365unsigned paravirt_patch_ident_64(void *insnbuf, unsigned len); 364unsigned paravirt_patch_ident_64(void *insnbuf, unsigned len);
366unsigned paravirt_patch_default(u8 type, void *insnbuf, 365unsigned paravirt_patch_default(u8 type, void *insnbuf,
367 unsigned long addr, unsigned len); 366 unsigned long addr, unsigned len);
@@ -651,7 +650,6 @@ void paravirt_leave_lazy_mmu(void);
651void paravirt_flush_lazy_mmu(void); 650void paravirt_flush_lazy_mmu(void);
652 651
653void _paravirt_nop(void); 652void _paravirt_nop(void);
654u32 _paravirt_ident_32(u32);
655u64 _paravirt_ident_64(u64); 653u64 _paravirt_ident_64(u64);
656 654
657#define paravirt_nop ((void *)_paravirt_nop) 655#define paravirt_nop ((void *)_paravirt_nop)
diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c
index 45123b116c05..c0e0101133f3 100644
--- a/arch/x86/kernel/paravirt.c
+++ b/arch/x86/kernel/paravirt.c
@@ -56,17 +56,6 @@ asm (".pushsection .entry.text, \"ax\"\n"
56 ".type _paravirt_nop, @function\n\t" 56 ".type _paravirt_nop, @function\n\t"
57 ".popsection"); 57 ".popsection");
58 58
59/* identity function, which can be inlined */
60u32 notrace _paravirt_ident_32(u32 x)
61{
62 return x;
63}
64
65u64 notrace _paravirt_ident_64(u64 x)
66{
67 return x;
68}
69
70void __init default_banner(void) 59void __init default_banner(void)
71{ 60{
72 printk(KERN_INFO "Booting paravirtualized kernel on %s\n", 61 printk(KERN_INFO "Booting paravirtualized kernel on %s\n",
@@ -102,6 +91,12 @@ static unsigned paravirt_patch_call(void *insnbuf, const void *target,
102} 91}
103 92
104#ifdef CONFIG_PARAVIRT_XXL 93#ifdef CONFIG_PARAVIRT_XXL
94/* identity function, which can be inlined */
95u64 notrace _paravirt_ident_64(u64 x)
96{
97 return x;
98}
99
105static unsigned paravirt_patch_jmp(void *insnbuf, const void *target, 100static unsigned paravirt_patch_jmp(void *insnbuf, const void *target,
106 unsigned long addr, unsigned len) 101 unsigned long addr, unsigned len)
107{ 102{
@@ -146,13 +141,11 @@ unsigned paravirt_patch_default(u8 type, void *insnbuf,
146 else if (opfunc == _paravirt_nop) 141 else if (opfunc == _paravirt_nop)
147 ret = 0; 142 ret = 0;
148 143
144#ifdef CONFIG_PARAVIRT_XXL
149 /* identity functions just return their single argument */ 145 /* identity functions just return their single argument */
150 else if (opfunc == _paravirt_ident_32)
151 ret = paravirt_patch_ident_32(insnbuf, len);
152 else if (opfunc == _paravirt_ident_64) 146 else if (opfunc == _paravirt_ident_64)
153 ret = paravirt_patch_ident_64(insnbuf, len); 147 ret = paravirt_patch_ident_64(insnbuf, len);
154 148
155#ifdef CONFIG_PARAVIRT_XXL
156 else if (type == PARAVIRT_PATCH(cpu.iret) || 149 else if (type == PARAVIRT_PATCH(cpu.iret) ||
157 type == PARAVIRT_PATCH(cpu.usergs_sysret64)) 150 type == PARAVIRT_PATCH(cpu.usergs_sysret64))
158 /* If operation requires a jmp, then jmp */ 151 /* If operation requires a jmp, then jmp */
@@ -309,13 +302,8 @@ struct pv_info pv_info = {
309#endif 302#endif
310}; 303};
311 304
312#if defined(CONFIG_X86_32) && !defined(CONFIG_X86_PAE)
313/* 32-bit pagetable entries */
314#define PTE_IDENT __PV_IS_CALLEE_SAVE(_paravirt_ident_32)
315#else
316/* 64-bit pagetable entries */ 305/* 64-bit pagetable entries */
317#define PTE_IDENT __PV_IS_CALLEE_SAVE(_paravirt_ident_64) 306#define PTE_IDENT __PV_IS_CALLEE_SAVE(_paravirt_ident_64)
318#endif
319 307
320struct paravirt_patch_template pv_ops = { 308struct paravirt_patch_template pv_ops = {
321 /* Init ops. */ 309 /* Init ops. */
diff --git a/arch/x86/kernel/paravirt_patch_32.c b/arch/x86/kernel/paravirt_patch_32.c
index 6368c22fa1fa..de138d3912e4 100644
--- a/arch/x86/kernel/paravirt_patch_32.c
+++ b/arch/x86/kernel/paravirt_patch_32.c
@@ -10,24 +10,18 @@ DEF_NATIVE(cpu, iret, "iret");
10DEF_NATIVE(mmu, read_cr2, "mov %cr2, %eax"); 10DEF_NATIVE(mmu, read_cr2, "mov %cr2, %eax");
11DEF_NATIVE(mmu, write_cr3, "mov %eax, %cr3"); 11DEF_NATIVE(mmu, write_cr3, "mov %eax, %cr3");
12DEF_NATIVE(mmu, read_cr3, "mov %cr3, %eax"); 12DEF_NATIVE(mmu, read_cr3, "mov %cr3, %eax");
13#endif
14
15#if defined(CONFIG_PARAVIRT_SPINLOCKS)
16DEF_NATIVE(lock, queued_spin_unlock, "movb $0, (%eax)");
17DEF_NATIVE(lock, vcpu_is_preempted, "xor %eax, %eax");
18#endif
19
20unsigned paravirt_patch_ident_32(void *insnbuf, unsigned len)
21{
22 /* arg in %eax, return in %eax */
23 return 0;
24}
25 13
26unsigned paravirt_patch_ident_64(void *insnbuf, unsigned len) 14unsigned paravirt_patch_ident_64(void *insnbuf, unsigned len)
27{ 15{
28 /* arg in %edx:%eax, return in %edx:%eax */ 16 /* arg in %edx:%eax, return in %edx:%eax */
29 return 0; 17 return 0;
30} 18}
19#endif
20
21#if defined(CONFIG_PARAVIRT_SPINLOCKS)
22DEF_NATIVE(lock, queued_spin_unlock, "movb $0, (%eax)");
23DEF_NATIVE(lock, vcpu_is_preempted, "xor %eax, %eax");
24#endif
31 25
32extern bool pv_is_native_spin_unlock(void); 26extern bool pv_is_native_spin_unlock(void);
33extern bool pv_is_native_vcpu_is_preempted(void); 27extern bool pv_is_native_vcpu_is_preempted(void);
diff --git a/arch/x86/kernel/paravirt_patch_64.c b/arch/x86/kernel/paravirt_patch_64.c
index 7ca9cb726f4d..9d9e04b31077 100644
--- a/arch/x86/kernel/paravirt_patch_64.c
+++ b/arch/x86/kernel/paravirt_patch_64.c
@@ -15,27 +15,19 @@ DEF_NATIVE(cpu, wbinvd, "wbinvd");
15 15
16DEF_NATIVE(cpu, usergs_sysret64, "swapgs; sysretq"); 16DEF_NATIVE(cpu, usergs_sysret64, "swapgs; sysretq");
17DEF_NATIVE(cpu, swapgs, "swapgs"); 17DEF_NATIVE(cpu, swapgs, "swapgs");
18#endif
19
20DEF_NATIVE(, mov32, "mov %edi, %eax");
21DEF_NATIVE(, mov64, "mov %rdi, %rax"); 18DEF_NATIVE(, mov64, "mov %rdi, %rax");
22 19
23#if defined(CONFIG_PARAVIRT_SPINLOCKS)
24DEF_NATIVE(lock, queued_spin_unlock, "movb $0, (%rdi)");
25DEF_NATIVE(lock, vcpu_is_preempted, "xor %eax, %eax");
26#endif
27
28unsigned paravirt_patch_ident_32(void *insnbuf, unsigned len)
29{
30 return paravirt_patch_insns(insnbuf, len,
31 start__mov32, end__mov32);
32}
33
34unsigned paravirt_patch_ident_64(void *insnbuf, unsigned len) 20unsigned paravirt_patch_ident_64(void *insnbuf, unsigned len)
35{ 21{
36 return paravirt_patch_insns(insnbuf, len, 22 return paravirt_patch_insns(insnbuf, len,
37 start__mov64, end__mov64); 23 start__mov64, end__mov64);
38} 24}
25#endif
26
27#if defined(CONFIG_PARAVIRT_SPINLOCKS)
28DEF_NATIVE(lock, queued_spin_unlock, "movb $0, (%rdi)");
29DEF_NATIVE(lock, vcpu_is_preempted, "xor %eax, %eax");
30#endif
39 31
40extern bool pv_is_native_spin_unlock(void); 32extern bool pv_is_native_spin_unlock(void);
41extern bool pv_is_native_vcpu_is_preempted(void); 33extern bool pv_is_native_vcpu_is_preempted(void);