author    Boris Ostrovsky <boris.ostrovsky@oracle.com>    2015-11-19 16:55:46 -0500
committer Ingo Molnar <mingo@kernel.org>                  2015-11-23 04:48:16 -0500
commit    88c15ec90ff16880efab92b519436ee17b198477 (patch)
tree      042a7c81586762934c1e66ea390e1e07f191d142
parent    5fdf5d37f40a3b18c0d613463867f71c017b75ef (diff)
x86/paravirt: Remove the unused irq_enable_sysexit pv op
As a result of the commit "x86/xen: Avoid fast syscall path for Xen PV
guests", the irq_enable_sysexit pv op is no longer called by Xen PV
guests. Since they were its only user, we can safely remove it.

Signed-off-by: Boris Ostrovsky <boris.ostrovsky@oracle.com>
Reviewed-by: Borislav Petkov <bp@suse.de>
Acked-by: Andy Lutomirski <luto@kernel.org>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Andy Lutomirski <luto@amacapital.net>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Brian Gerst <brgerst@gmail.com>
Cc: Denys Vlasenko <dvlasenk@redhat.com>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: david.vrabel@citrix.com
Cc: konrad.wilk@oracle.com
Cc: virtualization@lists.linux-foundation.org
Cc: xen-devel@lists.xenproject.org
Link: http://lkml.kernel.org/r/1447970147-1733-3-git-send-email-boris.ostrovsky@oracle.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
-rw-r--r--  arch/x86/entry/entry_32.S              |  8
-rw-r--r--  arch/x86/include/asm/paravirt.h        |  7
-rw-r--r--  arch/x86/include/asm/paravirt_types.h  |  9
-rw-r--r--  arch/x86/kernel/asm-offsets.c          |  3
-rw-r--r--  arch/x86/kernel/paravirt.c             |  7
-rw-r--r--  arch/x86/kernel/paravirt_patch_32.c    |  2
-rw-r--r--  arch/x86/kernel/paravirt_patch_64.c    |  1
-rw-r--r--  arch/x86/xen/enlighten.c               |  3
-rw-r--r--  arch/x86/xen/xen-asm_32.S              | 14
-rw-r--r--  arch/x86/xen/xen-ops.h                 |  3
10 files changed, 2 insertions(+), 55 deletions(-)
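
Background for readers new to the pv-ops machinery this patch trims: pv_cpu_ops is a table of function pointers. A native boot points each entry at the bare-metal implementation, a Xen PV guest overrides the entries it must handle differently, and call sites go through the table (and may be patched inline at boot). A minimal sketch of the pattern in plain C (the names below are illustrative stand-ins, not the kernel's actual definitions):

	#include <stdio.h>

	/* Toy stand-in for pv_cpu_ops: a table of overridable operations. */
	struct cpu_ops {
		void (*return_to_user)(void);	/* plays the role of irq_enable_sysexit */
	};

	static void native_return_to_user(void)
	{
		puts("sti; sysexit");		/* what bare metal executes */
	}

	static void xen_pv_return_to_user(void)
	{
		puts("fixed-up iret");		/* a ring-1 guest cannot sysexit */
	}

	static struct cpu_ops cpu_ops = {
		.return_to_user = native_return_to_user,
	};

	int main(void)
	{
		cpu_ops.return_to_user = xen_pv_return_to_user;	/* guest boot overrides the op */
		cpu_ops.return_to_user();	/* call sites only see the pointer */
		return 0;
	}

Once Xen stopped overriding irq_enable_sysexit, routing two native instructions through a pointer bought nothing, so the patch open-codes them at the sole call site (first hunk below) and deletes the op everywhere else.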
diff --git a/arch/x86/entry/entry_32.S b/arch/x86/entry/entry_32.S
index 0870825a9568..9870c972d345 100644
--- a/arch/x86/entry/entry_32.S
+++ b/arch/x86/entry/entry_32.S
@@ -329,7 +329,8 @@ sysenter_past_esp:
 	 * Return back to the vDSO, which will pop ecx and edx.
 	 * Don't bother with DS and ES (they already contain __USER_DS).
 	 */
-	ENABLE_INTERRUPTS_SYSEXIT
+	sti
+	sysexit
 
 .pushsection .fixup, "ax"
 2:	movl	$0, PT_FS(%esp)
@@ -552,11 +553,6 @@ ENTRY(native_iret)
 	iret
 	_ASM_EXTABLE(native_iret, iret_exc)
 END(native_iret)
-
-ENTRY(native_irq_enable_sysexit)
-	sti
-	sysexit
-END(native_irq_enable_sysexit)
 #endif
 
 ENTRY(overflow)
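
The open-coded replacement keeps the old op's "atomically enable interrupts and return to userspace" guarantee without any indirection: STI inhibits interrupt delivery until after the next instruction completes (the STI shadow), so nothing can be delivered between the two instructions. An annotated copy of the new sequence (the comments are ours, not part of the patch):

	sti			/* interrupts stay inhibited for one more instruction */
	sysexit			/* ... so we reach user mode before any can be taken */

The same property is what made the native patch string "sti; sysexit" in paravirt_patch_32.c (further down) safe, and why the now-deleted native_irq_enable_sysexit stub contained exactly these two instructions.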
diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h
index 10d0596433f8..c28518e163f5 100644
--- a/arch/x86/include/asm/paravirt.h
+++ b/arch/x86/include/asm/paravirt.h
@@ -932,13 +932,6 @@ extern void default_banner(void);
 	push %ecx; push %edx;				\
 	call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0);	\
 	pop %edx; pop %ecx
-
-#define ENABLE_INTERRUPTS_SYSEXIT					\
-	PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_irq_enable_sysexit),	\
-		  CLBR_NONE,						\
-		  jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_irq_enable_sysexit))
-
-
 #else	/* !CONFIG_X86_32 */
 
 /*
diff --git a/arch/x86/include/asm/paravirt_types.h b/arch/x86/include/asm/paravirt_types.h
index 31247b5bff7c..608bbf361c50 100644
--- a/arch/x86/include/asm/paravirt_types.h
+++ b/arch/x86/include/asm/paravirt_types.h
@@ -157,15 +157,6 @@ struct pv_cpu_ops {
 
 	u64 (*read_pmc)(int counter);
 
-#ifdef CONFIG_X86_32
-	/*
-	 * Atomically enable interrupts and return to userspace.  This
-	 * is only used in 32-bit kernels.  64-bit kernels use
-	 * usergs_sysret32 instead.
-	 */
-	void (*irq_enable_sysexit)(void);
-#endif
-
 	/*
 	 * Switch to usermode gs and return to 64-bit usermode using
 	 * sysret.  Only used in 64-bit kernels to return to 64-bit
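
Deleting a member from the middle of pv_cpu_ops shifts the offsets of every later member, which is harmless here for two reasons: all C initializers of the struct use designated initializers, and assembly consumers get their offsets from the constants generated by asm-offsets.c (updated in the next hunk) rather than hard-coding them. A compilable illustration of the first point, with hypothetical names rather than kernel code:

	/* Initialization by field name is unaffected by removed members. */
	struct ops {
		void (*iret)(void);
		/* void (*irq_enable_sysexit)(void);   <- removed */
		void (*usergs_sysret64)(void);
	};

	static void my_iret(void) { }
	static void my_sysret64(void) { }

	static struct ops example_ops = {
		.iret            = my_iret,	/* still binds to the right slot */
		.usergs_sysret64 = my_sysret64,
	};

	int main(void) { example_ops.iret(); return 0; }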
diff --git a/arch/x86/kernel/asm-offsets.c b/arch/x86/kernel/asm-offsets.c
index 439df975bc7a..84a7524b202c 100644
--- a/arch/x86/kernel/asm-offsets.c
+++ b/arch/x86/kernel/asm-offsets.c
@@ -65,9 +65,6 @@ void common(void) {
 	OFFSET(PV_IRQ_irq_disable, pv_irq_ops, irq_disable);
 	OFFSET(PV_IRQ_irq_enable, pv_irq_ops, irq_enable);
 	OFFSET(PV_CPU_iret, pv_cpu_ops, iret);
-#ifdef CONFIG_X86_32
-	OFFSET(PV_CPU_irq_enable_sysexit, pv_cpu_ops, irq_enable_sysexit);
-#endif
 	OFFSET(PV_CPU_read_cr0, pv_cpu_ops, read_cr0);
 	OFFSET(PV_MMU_read_cr2, pv_mmu_ops, read_cr2);
 #endif
diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c
index c2130aef3f9d..c55f4371a43d 100644
--- a/arch/x86/kernel/paravirt.c
+++ b/arch/x86/kernel/paravirt.c
@@ -162,9 +162,6 @@ unsigned paravirt_patch_default(u8 type, u16 clobbers, void *insnbuf,
 		ret = paravirt_patch_ident_64(insnbuf, len);
 
 	else if (type == PARAVIRT_PATCH(pv_cpu_ops.iret) ||
-#ifdef CONFIG_X86_32
-		 type == PARAVIRT_PATCH(pv_cpu_ops.irq_enable_sysexit) ||
-#endif
 		 type == PARAVIRT_PATCH(pv_cpu_ops.usergs_sysret32) ||
 		 type == PARAVIRT_PATCH(pv_cpu_ops.usergs_sysret64))
 		/* If operation requires a jmp, then jmp */
@@ -220,7 +217,6 @@ static u64 native_steal_clock(int cpu)
 
 /* These are in entry.S */
 extern void native_iret(void);
-extern void native_irq_enable_sysexit(void);
 extern void native_usergs_sysret32(void);
 extern void native_usergs_sysret64(void);
 
@@ -379,9 +375,6 @@ __visible struct pv_cpu_ops pv_cpu_ops = {
 
 	.load_sp0 = native_load_sp0,
 
-#if defined(CONFIG_X86_32)
-	.irq_enable_sysexit = native_irq_enable_sysexit,
-#endif
 #ifdef CONFIG_X86_64
 #ifdef CONFIG_IA32_EMULATION
 	.usergs_sysret32 = native_usergs_sysret32,
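
The first hunk above edits the patcher's "jmp list": ops that never return to their caller (iret and the sysret family, formerly also irq_enable_sysexit) must be reached with a jmp rather than a call, because the target leaves for user mode and would never consume a pushed return address. A sketch of that decision in illustrative C (the real code compares PARAVIRT_PATCH() offsets, not an enum):

	#include <stdbool.h>

	enum pv_op { PV_IRET, PV_USERGS_SYSRET32, PV_USERGS_SYSRET64, PV_READ_CR0 };

	static bool op_requires_jmp(enum pv_op op)
	{
		switch (op) {
		case PV_IRET:
		case PV_USERGS_SYSRET32:
		case PV_USERGS_SYSRET64:
			/* PV_IRQ_ENABLE_SYSEXIT belonged here before this patch */
			return true;	/* never returns: the site gets a jmp */
		default:
			return false;	/* ordinary op: the site gets a call */
		}
	}

	int main(void) { return op_requires_jmp(PV_READ_CR0) ? 1 : 0; }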
diff --git a/arch/x86/kernel/paravirt_patch_32.c b/arch/x86/kernel/paravirt_patch_32.c
index c89f50a76e97..158dc0650d5d 100644
--- a/arch/x86/kernel/paravirt_patch_32.c
+++ b/arch/x86/kernel/paravirt_patch_32.c
@@ -5,7 +5,6 @@ DEF_NATIVE(pv_irq_ops, irq_enable, "sti");
 DEF_NATIVE(pv_irq_ops, restore_fl, "push %eax; popf");
 DEF_NATIVE(pv_irq_ops, save_fl, "pushf; pop %eax");
 DEF_NATIVE(pv_cpu_ops, iret, "iret");
-DEF_NATIVE(pv_cpu_ops, irq_enable_sysexit, "sti; sysexit");
 DEF_NATIVE(pv_mmu_ops, read_cr2, "mov %cr2, %eax");
 DEF_NATIVE(pv_mmu_ops, write_cr3, "mov %eax, %cr3");
 DEF_NATIVE(pv_mmu_ops, read_cr3, "mov %cr3, %eax");
@@ -46,7 +45,6 @@ unsigned native_patch(u8 type, u16 clobbers, void *ibuf,
 		PATCH_SITE(pv_irq_ops, restore_fl);
 		PATCH_SITE(pv_irq_ops, save_fl);
 		PATCH_SITE(pv_cpu_ops, iret);
-		PATCH_SITE(pv_cpu_ops, irq_enable_sysexit);
 		PATCH_SITE(pv_mmu_ops, read_cr2);
 		PATCH_SITE(pv_mmu_ops, read_cr3);
 		PATCH_SITE(pv_mmu_ops, write_cr3);
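
For context on the two macros used in this file: DEF_NATIVE(ops, name, code) records the literal native instructions for an op by assembling `code` between start_/end_ symbols, and PATCH_SITE arranges for native_patch() to copy exactly those bytes over the matching patch sites at boot. Roughly, as a paraphrase from memory (consult the kernel's paravirt headers for the authoritative definition):

	/* Emit the native instruction bytes between two symbols so the
	 * patcher knows which byte range to copy into each call site. */
	#define DEF_NATIVE(ops, name, code)					  \
		extern const char start_##ops##_##name[], end_##ops##_##name[];  \
		asm("start_" #ops "_" #name ": " code "; end_" #ops "_" #name ":")

Dropping the irq_enable_sysexit pair means a 32-bit native kernel no longer patches that site, which is fine: entry_32.S now carries the same "sti; sysexit" bytes directly.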
diff --git a/arch/x86/kernel/paravirt_patch_64.c b/arch/x86/kernel/paravirt_patch_64.c
index 8aa05583bc42..17c00f80108c 100644
--- a/arch/x86/kernel/paravirt_patch_64.c
+++ b/arch/x86/kernel/paravirt_patch_64.c
@@ -13,7 +13,6 @@ DEF_NATIVE(pv_mmu_ops, flush_tlb_single, "invlpg (%rdi)");
 DEF_NATIVE(pv_cpu_ops, clts, "clts");
 DEF_NATIVE(pv_cpu_ops, wbinvd, "wbinvd");
 
-DEF_NATIVE(pv_cpu_ops, irq_enable_sysexit, "swapgs; sti; sysexit");
 DEF_NATIVE(pv_cpu_ops, usergs_sysret64, "swapgs; sysretq");
 DEF_NATIVE(pv_cpu_ops, usergs_sysret32, "swapgs; sysretl");
 DEF_NATIVE(pv_cpu_ops, swapgs, "swapgs");
diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
index d315151411e5..a068e36382b7 100644
--- a/arch/x86/xen/enlighten.c
+++ b/arch/x86/xen/enlighten.c
@@ -1229,10 +1229,7 @@ static const struct pv_cpu_ops xen_cpu_ops __initconst = {
 
 	.iret = xen_iret,
 #ifdef CONFIG_X86_64
-	.usergs_sysret32 = xen_sysret32,
 	.usergs_sysret64 = xen_sysret64,
-#else
-	.irq_enable_sysexit = xen_sysexit,
 #endif
 
 	.load_tr_desc = paravirt_nop,
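
Note that this hunk drops Xen's .usergs_sysret32 override as well as the sysexit one: after the preceding commit steered Xen PV guests away from the fast syscall paths, neither the 32-bit sysexit return nor the compat sysret32 return is ever taken under Xen, so neither override is installed anymore. The usergs_sysret32 op itself, and the xen_sysret32 declaration in xen-ops.h below, survive this patch.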
diff --git a/arch/x86/xen/xen-asm_32.S b/arch/x86/xen/xen-asm_32.S
index fd92a64d748e..feb6d40a0860 100644
--- a/arch/x86/xen/xen-asm_32.S
+++ b/arch/x86/xen/xen-asm_32.S
@@ -35,20 +35,6 @@ check_events:
 	ret
 
 /*
- * We can't use sysexit directly, because we're not running in ring0.
- * But we can easily fake it up using iret.  Assuming xen_sysexit is
- * jumped to with a standard stack frame, we can just strip it back to
- * a standard iret frame and use iret.
- */
-ENTRY(xen_sysexit)
-	movl PT_EAX(%esp), %eax			/* Shouldn't be necessary? */
-	orl $X86_EFLAGS_IF, PT_EFLAGS(%esp)
-	lea PT_EIP(%esp), %esp
-
-	jmp xen_iret
-ENDPROC(xen_sysexit)
-
-/*
  * This is run where a normal iret would be run, with the same stack setup:
  *	  8: eflags
  *	  4: cs
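
For the record, what the deleted xen_sysexit stub did: a 32-bit Xen PV kernel runs in ring 1, where sysexit is not usable, so the stub emulated "enable interrupts and return" with iret. It forced IF on in the saved eflags (so interrupt enabling happens atomically with the return, performed by iret itself), then dropped everything below the iret frame. A sketch of the stack transformation, with offsets being the PT_* constants from asm-offsets (stack grows downward):

	/* entry:   %esp -> full pt_regs (gs ... eax, then eip/cs/eflags/...)
	 * orl  $X86_EFLAGS_IF, PT_EFLAGS(%esp)   IF will be set on return
	 * lea  PT_EIP(%esp), %esp                discard saved registers;
	 *                                        %esp now -> eip/cs/eflags,
	 *                                        i.e. a plain iret frame
	 * jmp  xen_iret                          Xen-aware iret finishes it
	 */

With no caller left after the fast-path change, both the stub and its declaration in xen-ops.h (last hunk) go away.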
diff --git a/arch/x86/xen/xen-ops.h b/arch/x86/xen/xen-ops.h
index 1399423f3418..4140b070f2e9 100644
--- a/arch/x86/xen/xen-ops.h
+++ b/arch/x86/xen/xen-ops.h
@@ -139,9 +139,6 @@ DECL_ASM(void, xen_restore_fl_direct, unsigned long);
 
 /* These are not functions, and cannot be called normally */
 __visible void xen_iret(void);
-#ifdef CONFIG_X86_32
-__visible void xen_sysexit(void);
-#endif
 __visible void xen_sysret32(void);
 __visible void xen_sysret64(void);
 __visible void xen_adjust_exception_frame(void);