author    Ingo Molnar <mingo@kernel.org>  2015-06-08 14:48:01 -0400
committer Ingo Molnar <mingo@kernel.org>  2015-06-08 14:48:20 -0400
commit    9dda1658a9bd450d65da5153a2427955785d17c2
tree      c563b728d879c2b446a8f1c33ce351ffb1f1d34e
parent    b72e7464e4cf80117938e6adb8c22fdc1ca46d42
parent    a49976d14f780942dafafbbf16f891c27d385ea0

Merge branch 'x86/asm' into x86/core, to prepare for new patch
Collect all changes to arch/x86/entry/entry_64.S, before applying patch that
changes most of the file.

Signed-off-by: Ingo Molnar <mingo@kernel.org>
-rw-r--r--  Documentation/x86/entry_64.txt | 4
-rw-r--r--  MAINTAINERS | 2
-rw-r--r--  arch/x86/Kbuild | 5
-rw-r--r--  arch/x86/Makefile | 14
-rw-r--r--  arch/x86/entry/Makefile | 10
-rw-r--r--  arch/x86/entry/calling.h (renamed from arch/x86/include/asm/calling.h) | 98
-rw-r--r--  arch/x86/entry/entry_32.S | 1248
-rw-r--r--  arch/x86/entry/entry_64.S (renamed from arch/x86/kernel/entry_64.S) | 316
-rw-r--r--  arch/x86/entry/entry_64_compat.S | 547
-rw-r--r--  arch/x86/entry/syscall_32.c (renamed from arch/x86/kernel/syscall_32.c) | 6
-rw-r--r--  arch/x86/entry/syscall_64.c (renamed from arch/x86/kernel/syscall_64.c) | 0
-rw-r--r--  arch/x86/entry/syscalls/Makefile (renamed from arch/x86/syscalls/Makefile) | 4
-rw-r--r--  arch/x86/entry/syscalls/syscall_32.tbl (renamed from arch/x86/syscalls/syscall_32.tbl) | 0
-rw-r--r--  arch/x86/entry/syscalls/syscall_64.tbl (renamed from arch/x86/syscalls/syscall_64.tbl) | 0
-rw-r--r--  arch/x86/entry/syscalls/syscallhdr.sh (renamed from arch/x86/syscalls/syscallhdr.sh) | 0
-rw-r--r--  arch/x86/entry/syscalls/syscalltbl.sh (renamed from arch/x86/syscalls/syscalltbl.sh) | 0
-rw-r--r--  arch/x86/entry/thunk_32.S (renamed from arch/x86/lib/thunk_32.S) | 15
-rw-r--r--  arch/x86/entry/thunk_64.S (renamed from arch/x86/lib/thunk_64.S) | 46
-rw-r--r--  arch/x86/entry/vdso/.gitignore (renamed from arch/x86/vdso/.gitignore) | 0
-rw-r--r--  arch/x86/entry/vdso/Makefile (renamed from arch/x86/vdso/Makefile) | 0
-rwxr-xr-x  arch/x86/entry/vdso/checkundef.sh (renamed from arch/x86/vdso/checkundef.sh) | 0
-rw-r--r--  arch/x86/entry/vdso/vclock_gettime.c (renamed from arch/x86/vdso/vclock_gettime.c) | 0
-rw-r--r--  arch/x86/entry/vdso/vdso-layout.lds.S (renamed from arch/x86/vdso/vdso-layout.lds.S) | 0
-rw-r--r--  arch/x86/entry/vdso/vdso-note.S (renamed from arch/x86/vdso/vdso-note.S) | 0
-rw-r--r--  arch/x86/entry/vdso/vdso.lds.S (renamed from arch/x86/vdso/vdso.lds.S) | 0
-rw-r--r--  arch/x86/entry/vdso/vdso2c.c (renamed from arch/x86/vdso/vdso2c.c) | 0
-rw-r--r--  arch/x86/entry/vdso/vdso2c.h (renamed from arch/x86/vdso/vdso2c.h) | 0
-rw-r--r--  arch/x86/entry/vdso/vdso32-setup.c (renamed from arch/x86/vdso/vdso32-setup.c) | 0
-rw-r--r--  arch/x86/entry/vdso/vdso32/.gitignore (renamed from arch/x86/vdso/vdso32/.gitignore) | 0
-rw-r--r--  arch/x86/entry/vdso/vdso32/int80.S (renamed from arch/x86/vdso/vdso32/int80.S) | 0
-rw-r--r--  arch/x86/entry/vdso/vdso32/note.S (renamed from arch/x86/vdso/vdso32/note.S) | 0
-rw-r--r--  arch/x86/entry/vdso/vdso32/sigreturn.S (renamed from arch/x86/vdso/vdso32/sigreturn.S) | 0
-rw-r--r--  arch/x86/entry/vdso/vdso32/syscall.S (renamed from arch/x86/vdso/vdso32/syscall.S) | 0
-rw-r--r--  arch/x86/entry/vdso/vdso32/sysenter.S (renamed from arch/x86/vdso/vdso32/sysenter.S) | 0
-rw-r--r--  arch/x86/entry/vdso/vdso32/vclock_gettime.c (renamed from arch/x86/vdso/vdso32/vclock_gettime.c) | 0
-rw-r--r--  arch/x86/entry/vdso/vdso32/vdso-fakesections.c (renamed from arch/x86/vdso/vdso32/vdso-fakesections.c) | 0
-rw-r--r--  arch/x86/entry/vdso/vdso32/vdso32.lds.S (renamed from arch/x86/vdso/vdso32/vdso32.lds.S) | 0
-rw-r--r--  arch/x86/entry/vdso/vdsox32.lds.S (renamed from arch/x86/vdso/vdsox32.lds.S) | 0
-rw-r--r--  arch/x86/entry/vdso/vgetcpu.c (renamed from arch/x86/vdso/vgetcpu.c) | 0
-rw-r--r--  arch/x86/entry/vdso/vma.c (renamed from arch/x86/vdso/vma.c) | 0
-rw-r--r--  arch/x86/entry/vsyscall/Makefile | 7
-rw-r--r--  arch/x86/entry/vsyscall/vsyscall_64.c (renamed from arch/x86/kernel/vsyscall_64.c) | 0
-rw-r--r--  arch/x86/entry/vsyscall/vsyscall_emu_64.S (renamed from arch/x86/kernel/vsyscall_emu_64.S) | 0
-rw-r--r--  arch/x86/entry/vsyscall/vsyscall_gtod.c (renamed from arch/x86/kernel/vsyscall_gtod.c) | 0
-rw-r--r--  arch/x86/entry/vsyscall/vsyscall_trace.h (renamed from arch/x86/kernel/vsyscall_trace.h) | 2
-rw-r--r--  arch/x86/ia32/Makefile | 2
-rw-r--r--  arch/x86/ia32/ia32entry.S | 591
-rw-r--r--  arch/x86/include/asm/dwarf2.h | 170
-rw-r--r--  arch/x86/include/asm/frame.h | 7
-rw-r--r--  arch/x86/include/asm/msr.h | 9
-rw-r--r--  arch/x86/include/asm/proto.h | 10
-rw-r--r--  arch/x86/include/asm/segment.h | 14
-rw-r--r--  arch/x86/kernel/Makefile | 5
-rw-r--r--  arch/x86/kernel/asm-offsets_64.c | 2
-rw-r--r--  arch/x86/kernel/cpu/common.c | 8
-rw-r--r--  arch/x86/kernel/entry_32.S | 1401
-rw-r--r--  arch/x86/kernel/head64.c | 2
-rw-r--r--  arch/x86/kernel/head_32.S | 33
-rw-r--r--  arch/x86/kernel/head_64.S | 20
-rw-r--r--  arch/x86/kernel/traps.c | 7
-rw-r--r--  arch/x86/lib/Makefile | 1
-rw-r--r--  arch/x86/lib/atomic64_386_32.S | 7
-rw-r--r--  arch/x86/lib/atomic64_cx8_32.S | 61
-rw-r--r--  arch/x86/lib/checksum_32.S | 52
-rw-r--r--  arch/x86/lib/clear_page_64.S | 7
-rw-r--r--  arch/x86/lib/cmpxchg16b_emu.S | 12
-rw-r--r--  arch/x86/lib/cmpxchg8b_emu.S | 11
-rw-r--r--  arch/x86/lib/copy_page_64.S | 11
-rw-r--r--  arch/x86/lib/copy_user_64.S | 15
-rw-r--r--  arch/x86/lib/csum-copy_64.S | 17
-rw-r--r--  arch/x86/lib/getuser.S | 13
-rw-r--r--  arch/x86/lib/iomap_copy_64.S | 3
-rw-r--r--  arch/x86/lib/memcpy_64.S | 3
-rw-r--r--  arch/x86/lib/memmove_64.S | 3
-rw-r--r--  arch/x86/lib/memset_64.S | 5
-rw-r--r--  arch/x86/lib/msr-reg.S | 44
-rw-r--r--  arch/x86/lib/putuser.S | 8
-rw-r--r--  arch/x86/lib/rwsem.S | 49
-rw-r--r--  arch/x86/net/bpf_jit.S | 1
-rw-r--r--  arch/x86/um/Makefile | 2
-rw-r--r--  arch/x86/xen/xen-asm_64.S | 6
-rwxr-xr-x  scripts/checksyscalls.sh | 2
82 files changed, 2120 insertions(+), 2818 deletions(-)
diff --git a/Documentation/x86/entry_64.txt b/Documentation/x86/entry_64.txt
index 9132b86176a3..33884d156125 100644
--- a/Documentation/x86/entry_64.txt
+++ b/Documentation/x86/entry_64.txt
@@ -18,10 +18,10 @@ Some of these entries are:
 
  - system_call: syscall instruction from 64-bit code.
 
- - ia32_syscall: int 0x80 from 32-bit or 64-bit code; compat syscall
+ - entry_INT80_compat: int 0x80 from 32-bit or 64-bit code; compat syscall
    either way.
 
- - ia32_syscall, ia32_sysenter: syscall and sysenter from 32-bit
+ - entry_INT80_compat, ia32_sysenter: syscall and sysenter from 32-bit
    code
 
  - interrupt: An array of entries. Every IDT vector that doesn't
diff --git a/MAINTAINERS b/MAINTAINERS
index e30871880fdb..2987968e235c 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -10893,7 +10893,7 @@ M:	Andy Lutomirski <luto@amacapital.net>
 L:	linux-kernel@vger.kernel.org
 T:	git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git x86/vdso
 S:	Maintained
-F:	arch/x86/vdso/
+F:	arch/x86/entry/vdso/
 
 XC2028/3028 TUNER DRIVER
 M:	Mauro Carvalho Chehab <mchehab@osg.samsung.com>
diff --git a/arch/x86/Kbuild b/arch/x86/Kbuild
index 3942f74c92d7..1538562cc720 100644
--- a/arch/x86/Kbuild
+++ b/arch/x86/Kbuild
@@ -1,3 +1,6 @@
+
+obj-y += entry/
+
 obj-$(CONFIG_KVM) += kvm/
 
 # Xen paravirtualization support
@@ -11,7 +14,7 @@ obj-y += kernel/
 obj-y += mm/
 
 obj-y += crypto/
-obj-y += vdso/
+
 obj-$(CONFIG_IA32_EMULATION) += ia32/
 
 obj-y += platform/
diff --git a/arch/x86/Makefile b/arch/x86/Makefile
index 57996ee840dd..118e6debc483 100644
--- a/arch/x86/Makefile
+++ b/arch/x86/Makefile
@@ -149,12 +149,6 @@ endif
 sp-$(CONFIG_X86_32) := esp
 sp-$(CONFIG_X86_64) := rsp
 
-# do binutils support CFI?
-cfi := $(call as-instr,.cfi_startproc\n.cfi_rel_offset $(sp-y)$(comma)0\n.cfi_endproc,-DCONFIG_AS_CFI=1)
-# is .cfi_signal_frame supported too?
-cfi-sigframe := $(call as-instr,.cfi_startproc\n.cfi_signal_frame\n.cfi_endproc,-DCONFIG_AS_CFI_SIGNAL_FRAME=1)
-cfi-sections := $(call as-instr,.cfi_sections .debug_frame,-DCONFIG_AS_CFI_SECTIONS=1)
-
 # does binutils support specific instructions?
 asinstr := $(call as-instr,fxsaveq (%rax),-DCONFIG_AS_FXSAVEQ=1)
 asinstr += $(call as-instr,pshufb %xmm0$(comma)%xmm0,-DCONFIG_AS_SSSE3=1)
@@ -162,8 +156,8 @@ asinstr += $(call as-instr,crc32l %eax$(comma)%eax,-DCONFIG_AS_CRC32=1)
 avx_instr := $(call as-instr,vxorps %ymm0$(comma)%ymm1$(comma)%ymm2,-DCONFIG_AS_AVX=1)
 avx2_instr :=$(call as-instr,vpbroadcastb %xmm0$(comma)%ymm1,-DCONFIG_AS_AVX2=1)
 
-KBUILD_AFLAGS += $(cfi) $(cfi-sigframe) $(cfi-sections) $(asinstr) $(avx_instr) $(avx2_instr)
-KBUILD_CFLAGS += $(cfi) $(cfi-sigframe) $(cfi-sections) $(asinstr) $(avx_instr) $(avx2_instr)
+KBUILD_AFLAGS += $(asinstr) $(avx_instr) $(avx2_instr)
+KBUILD_CFLAGS += $(asinstr) $(avx_instr) $(avx2_instr)
 
 LDFLAGS := -m elf_$(UTS_MACHINE)
 
@@ -187,7 +181,7 @@ archscripts: scripts_basic
 # Syscall table generation
 
 archheaders:
-	$(Q)$(MAKE) $(build)=arch/x86/syscalls all
+	$(Q)$(MAKE) $(build)=arch/x86/entry/syscalls all
 
 archprepare:
 ifeq ($(CONFIG_KEXEC_FILE),y)
@@ -250,7 +244,7 @@ install:
 
 PHONY += vdso_install
 vdso_install:
-	$(Q)$(MAKE) $(build)=arch/x86/vdso $@
+	$(Q)$(MAKE) $(build)=arch/x86/entry/vdso $@
 
 archclean:
 	$(Q)rm -rf $(objtree)/arch/i386
diff --git a/arch/x86/entry/Makefile b/arch/x86/entry/Makefile
new file mode 100644
index 000000000000..7a144971db79
--- /dev/null
+++ b/arch/x86/entry/Makefile
@@ -0,0 +1,10 @@
+#
+# Makefile for the x86 low level entry code
+#
+obj-y := entry_$(BITS).o thunk_$(BITS).o syscall_$(BITS).o
+
+obj-y += vdso/
+obj-y += vsyscall/
+
+obj-$(CONFIG_IA32_EMULATION) += entry_64_compat.o syscall_32.o
+
diff --git a/arch/x86/include/asm/calling.h b/arch/x86/entry/calling.h
index 1c8b50edb2db..f4e6308c4200 100644
--- a/arch/x86/include/asm/calling.h
+++ b/arch/x86/entry/calling.h
@@ -46,8 +46,6 @@ For 32-bit we have the following conventions - kernel is built with
 
 */
 
-#include <asm/dwarf2.h>
-
 #ifdef CONFIG_X86_64
 
 /*
@@ -91,28 +89,27 @@ For 32-bit we have the following conventions - kernel is built with
 #define SIZEOF_PTREGS	21*8
 
 	.macro ALLOC_PT_GPREGS_ON_STACK addskip=0
-	subq	$15*8+\addskip, %rsp
-	CFI_ADJUST_CFA_OFFSET 15*8+\addskip
+	addq	$-(15*8+\addskip), %rsp
 	.endm
 
 	.macro SAVE_C_REGS_HELPER offset=0 rax=1 rcx=1 r8910=1 r11=1
 	.if \r11
-	movq_cfi r11, 6*8+\offset
+	movq %r11, 6*8+\offset(%rsp)
 	.endif
 	.if \r8910
-	movq_cfi r10, 7*8+\offset
-	movq_cfi r9, 8*8+\offset
-	movq_cfi r8, 9*8+\offset
+	movq %r10, 7*8+\offset(%rsp)
+	movq %r9, 8*8+\offset(%rsp)
+	movq %r8, 9*8+\offset(%rsp)
 	.endif
 	.if \rax
-	movq_cfi rax, 10*8+\offset
+	movq %rax, 10*8+\offset(%rsp)
 	.endif
 	.if \rcx
-	movq_cfi rcx, 11*8+\offset
+	movq %rcx, 11*8+\offset(%rsp)
 	.endif
-	movq_cfi rdx, 12*8+\offset
-	movq_cfi rsi, 13*8+\offset
-	movq_cfi rdi, 14*8+\offset
+	movq %rdx, 12*8+\offset(%rsp)
+	movq %rsi, 13*8+\offset(%rsp)
+	movq %rdi, 14*8+\offset(%rsp)
 	.endm
 	.macro SAVE_C_REGS offset=0
 	SAVE_C_REGS_HELPER \offset, 1, 1, 1, 1
@@ -131,24 +128,24 @@ For 32-bit we have the following conventions - kernel is built with
 	.endm
 
 	.macro SAVE_EXTRA_REGS offset=0
-	movq_cfi r15, 0*8+\offset
-	movq_cfi r14, 1*8+\offset
-	movq_cfi r13, 2*8+\offset
-	movq_cfi r12, 3*8+\offset
-	movq_cfi rbp, 4*8+\offset
-	movq_cfi rbx, 5*8+\offset
+	movq %r15, 0*8+\offset(%rsp)
+	movq %r14, 1*8+\offset(%rsp)
+	movq %r13, 2*8+\offset(%rsp)
+	movq %r12, 3*8+\offset(%rsp)
+	movq %rbp, 4*8+\offset(%rsp)
+	movq %rbx, 5*8+\offset(%rsp)
 	.endm
 	.macro SAVE_EXTRA_REGS_RBP offset=0
-	movq_cfi rbp, 4*8+\offset
+	movq %rbp, 4*8+\offset(%rsp)
 	.endm
 
 	.macro RESTORE_EXTRA_REGS offset=0
-	movq_cfi_restore 0*8+\offset, r15
-	movq_cfi_restore 1*8+\offset, r14
-	movq_cfi_restore 2*8+\offset, r13
-	movq_cfi_restore 3*8+\offset, r12
-	movq_cfi_restore 4*8+\offset, rbp
-	movq_cfi_restore 5*8+\offset, rbx
+	movq 0*8+\offset(%rsp), %r15
+	movq 1*8+\offset(%rsp), %r14
+	movq 2*8+\offset(%rsp), %r13
+	movq 3*8+\offset(%rsp), %r12
+	movq 4*8+\offset(%rsp), %rbp
+	movq 5*8+\offset(%rsp), %rbx
 	.endm
 
 	.macro ZERO_EXTRA_REGS
@@ -162,24 +159,24 @@ For 32-bit we have the following conventions - kernel is built with
 
 	.macro RESTORE_C_REGS_HELPER rstor_rax=1, rstor_rcx=1, rstor_r11=1, rstor_r8910=1, rstor_rdx=1
 	.if \rstor_r11
-	movq_cfi_restore 6*8, r11
+	movq 6*8(%rsp), %r11
 	.endif
 	.if \rstor_r8910
-	movq_cfi_restore 7*8, r10
-	movq_cfi_restore 8*8, r9
-	movq_cfi_restore 9*8, r8
+	movq 7*8(%rsp), %r10
+	movq 8*8(%rsp), %r9
+	movq 9*8(%rsp), %r8
 	.endif
 	.if \rstor_rax
-	movq_cfi_restore 10*8, rax
+	movq 10*8(%rsp), %rax
 	.endif
 	.if \rstor_rcx
-	movq_cfi_restore 11*8, rcx
+	movq 11*8(%rsp), %rcx
 	.endif
 	.if \rstor_rdx
-	movq_cfi_restore 12*8, rdx
+	movq 12*8(%rsp), %rdx
 	.endif
-	movq_cfi_restore 13*8, rsi
-	movq_cfi_restore 14*8, rdi
+	movq 13*8(%rsp), %rsi
+	movq 14*8(%rsp), %rdi
 	.endm
 	.macro RESTORE_C_REGS
 	RESTORE_C_REGS_HELPER 1,1,1,1,1
@@ -204,8 +201,7 @@ For 32-bit we have the following conventions - kernel is built with
 	.endm
 
 	.macro REMOVE_PT_GPREGS_FROM_STACK addskip=0
-	addq $15*8+\addskip, %rsp
-	CFI_ADJUST_CFA_OFFSET -(15*8+\addskip)
+	subq $-(15*8+\addskip), %rsp
 	.endm
 
 	.macro icebp
@@ -224,23 +220,23 @@ For 32-bit we have the following conventions - kernel is built with
 */
 
 	.macro SAVE_ALL
-	pushl_cfi_reg eax
-	pushl_cfi_reg ebp
-	pushl_cfi_reg edi
-	pushl_cfi_reg esi
-	pushl_cfi_reg edx
-	pushl_cfi_reg ecx
-	pushl_cfi_reg ebx
+	pushl %eax
+	pushl %ebp
+	pushl %edi
+	pushl %esi
+	pushl %edx
+	pushl %ecx
+	pushl %ebx
 	.endm
 
 	.macro RESTORE_ALL
-	popl_cfi_reg ebx
-	popl_cfi_reg ecx
-	popl_cfi_reg edx
-	popl_cfi_reg esi
-	popl_cfi_reg edi
-	popl_cfi_reg ebp
-	popl_cfi_reg eax
+	popl %ebx
+	popl %ecx
+	popl %edx
+	popl %esi
+	popl %edi
+	popl %ebp
+	popl %eax
 	.endm
 
 #endif /* CONFIG_X86_64 */
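
To see what the calling.h conversion above amounts to in practice, here is a rough, hand-expanded sketch of the 64-bit register-save path with default macro arguments (offset=0, all registers enabled). It is purely illustrative, not part of the patch; the offsets are simply read off the macro bodies above:

	/* ALLOC_PT_GPREGS_ON_STACK + SAVE_C_REGS, expanded by hand (illustrative only) */
	addq	$-(15*8), %rsp		/* reserve the pt_regs area */
	movq	%r11,  6*8(%rsp)
	movq	%r10,  7*8(%rsp)
	movq	%r9,   8*8(%rsp)
	movq	%r8,   9*8(%rsp)
	movq	%rax, 10*8(%rsp)
	movq	%rcx, 11*8(%rsp)
	movq	%rdx, 12*8(%rsp)
	movq	%rsi, 13*8(%rsp)
	movq	%rdi, 14*8(%rsp)
	/* ... and the matching teardown, REMOVE_PT_GPREGS_FROM_STACK */
	subq	$-(15*8), %rsp

The point of the conversion is visible here: the saves and restores become plain movq/pushl/popl instructions, since the hand-written DWARF annotations (movq_cfi, pushl_cfi_reg, CFI_ADJUST_CFA_OFFSET) are dropped together with the asm/dwarf2.h include.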
diff --git a/arch/x86/entry/entry_32.S b/arch/x86/entry/entry_32.S
new file mode 100644
index 000000000000..edd7aadfacfa
--- /dev/null
+++ b/arch/x86/entry/entry_32.S
@@ -0,0 +1,1248 @@
1/*
2 * Copyright (C) 1991,1992 Linus Torvalds
3 *
4 * entry_32.S contains the system-call and low-level fault and trap handling routines.
5 *
6 * Stack layout in 'syscall_exit':
7 * ptrace needs to have all registers on the stack.
8 * If the order here is changed, it needs to be
9 * updated in fork.c:copy_process(), signal.c:do_signal(),
10 * ptrace.c and ptrace.h
11 *
12 * 0(%esp) - %ebx
13 * 4(%esp) - %ecx
14 * 8(%esp) - %edx
15 * C(%esp) - %esi
16 * 10(%esp) - %edi
17 * 14(%esp) - %ebp
18 * 18(%esp) - %eax
19 * 1C(%esp) - %ds
20 * 20(%esp) - %es
21 * 24(%esp) - %fs
22 * 28(%esp) - %gs saved iff !CONFIG_X86_32_LAZY_GS
23 * 2C(%esp) - orig_eax
24 * 30(%esp) - %eip
25 * 34(%esp) - %cs
26 * 38(%esp) - %eflags
27 * 3C(%esp) - %oldesp
28 * 40(%esp) - %oldss
29 */
30
31#include <linux/linkage.h>
32#include <linux/err.h>
33#include <asm/thread_info.h>
34#include <asm/irqflags.h>
35#include <asm/errno.h>
36#include <asm/segment.h>
37#include <asm/smp.h>
38#include <asm/page_types.h>
39#include <asm/percpu.h>
40#include <asm/processor-flags.h>
41#include <asm/ftrace.h>
42#include <asm/irq_vectors.h>
43#include <asm/cpufeature.h>
44#include <asm/alternative-asm.h>
45#include <asm/asm.h>
46#include <asm/smap.h>
47
48/* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */
49#include <linux/elf-em.h>
50#define AUDIT_ARCH_I386 (EM_386|__AUDIT_ARCH_LE)
51#define __AUDIT_ARCH_LE 0x40000000
52
53#ifndef CONFIG_AUDITSYSCALL
54# define sysenter_audit syscall_trace_entry
55# define sysexit_audit syscall_exit_work
56#endif
57
58 .section .entry.text, "ax"
59
60/*
61 * We use macros for low-level operations which need to be overridden
62 * for paravirtualization. The following will never clobber any registers:
63 * INTERRUPT_RETURN (aka. "iret")
64 * GET_CR0_INTO_EAX (aka. "movl %cr0, %eax")
65 * ENABLE_INTERRUPTS_SYSEXIT (aka "sti; sysexit").
66 *
67 * For DISABLE_INTERRUPTS/ENABLE_INTERRUPTS (aka "cli"/"sti"), you must
68 * specify what registers can be overwritten (CLBR_NONE, CLBR_EAX/EDX/ECX/ANY).
69 * Allowing a register to be clobbered can shrink the paravirt replacement
70 * enough to patch inline, increasing performance.
71 */
72
73#ifdef CONFIG_PREEMPT
74# define preempt_stop(clobbers) DISABLE_INTERRUPTS(clobbers); TRACE_IRQS_OFF
75#else
76# define preempt_stop(clobbers)
77# define resume_kernel restore_all
78#endif
79
80.macro TRACE_IRQS_IRET
81#ifdef CONFIG_TRACE_IRQFLAGS
82 testl $X86_EFLAGS_IF, PT_EFLAGS(%esp) # interrupts off?
83 jz 1f
84 TRACE_IRQS_ON
851:
86#endif
87.endm
88
89/*
90 * User gs save/restore
91 *
92 * %gs is used for userland TLS and kernel only uses it for stack
93 * canary which is required to be at %gs:20 by gcc. Read the comment
94 * at the top of stackprotector.h for more info.
95 *
96 * Local labels 98 and 99 are used.
97 */
98#ifdef CONFIG_X86_32_LAZY_GS
99
100 /* unfortunately push/pop can't be no-op */
101.macro PUSH_GS
102 pushl $0
103.endm
104.macro POP_GS pop=0
105 addl $(4 + \pop), %esp
106.endm
107.macro POP_GS_EX
108.endm
109
110 /* all the rest are no-op */
111.macro PTGS_TO_GS
112.endm
113.macro PTGS_TO_GS_EX
114.endm
115.macro GS_TO_REG reg
116.endm
117.macro REG_TO_PTGS reg
118.endm
119.macro SET_KERNEL_GS reg
120.endm
121
122#else /* CONFIG_X86_32_LAZY_GS */
123
124.macro PUSH_GS
125 pushl %gs
126.endm
127
128.macro POP_GS pop=0
12998: popl %gs
130 .if \pop <> 0
131 add $\pop, %esp
132 .endif
133.endm
134.macro POP_GS_EX
135.pushsection .fixup, "ax"
13699: movl $0, (%esp)
137 jmp 98b
138.popsection
139 _ASM_EXTABLE(98b, 99b)
140.endm
141
142.macro PTGS_TO_GS
14398: mov PT_GS(%esp), %gs
144.endm
145.macro PTGS_TO_GS_EX
146.pushsection .fixup, "ax"
14799: movl $0, PT_GS(%esp)
148 jmp 98b
149.popsection
150 _ASM_EXTABLE(98b, 99b)
151.endm
152
153.macro GS_TO_REG reg
154 movl %gs, \reg
155.endm
156.macro REG_TO_PTGS reg
157 movl \reg, PT_GS(%esp)
158.endm
159.macro SET_KERNEL_GS reg
160 movl $(__KERNEL_STACK_CANARY), \reg
161 movl \reg, %gs
162.endm
163
164#endif /* CONFIG_X86_32_LAZY_GS */
165
166.macro SAVE_ALL
167 cld
168 PUSH_GS
169 pushl %fs
170 pushl %es
171 pushl %ds
172 pushl %eax
173 pushl %ebp
174 pushl %edi
175 pushl %esi
176 pushl %edx
177 pushl %ecx
178 pushl %ebx
179 movl $(__USER_DS), %edx
180 movl %edx, %ds
181 movl %edx, %es
182 movl $(__KERNEL_PERCPU), %edx
183 movl %edx, %fs
184 SET_KERNEL_GS %edx
185.endm
186
187.macro RESTORE_INT_REGS
188 popl %ebx
189 popl %ecx
190 popl %edx
191 popl %esi
192 popl %edi
193 popl %ebp
194 popl %eax
195.endm
196
197.macro RESTORE_REGS pop=0
198 RESTORE_INT_REGS
1991: popl %ds
2002: popl %es
2013: popl %fs
202 POP_GS \pop
203.pushsection .fixup, "ax"
2044: movl $0, (%esp)
205 jmp 1b
2065: movl $0, (%esp)
207 jmp 2b
2086: movl $0, (%esp)
209 jmp 3b
210.popsection
211 _ASM_EXTABLE(1b, 4b)
212 _ASM_EXTABLE(2b, 5b)
213 _ASM_EXTABLE(3b, 6b)
214 POP_GS_EX
215.endm
216
217ENTRY(ret_from_fork)
218 pushl %eax
219 call schedule_tail
220 GET_THREAD_INFO(%ebp)
221 popl %eax
222 pushl $0x0202 # Reset kernel eflags
223 popfl
224 jmp syscall_exit
225END(ret_from_fork)
226
227ENTRY(ret_from_kernel_thread)
228 pushl %eax
229 call schedule_tail
230 GET_THREAD_INFO(%ebp)
231 popl %eax
232 pushl $0x0202 # Reset kernel eflags
233 popfl
234 movl PT_EBP(%esp), %eax
235 call *PT_EBX(%esp)
236 movl $0, PT_EAX(%esp)
237 jmp syscall_exit
238ENDPROC(ret_from_kernel_thread)
239
240/*
241 * Return to user mode is not as complex as all this looks,
242 * but we want the default path for a system call return to
243 * go as quickly as possible which is why some of this is
244 * less clear than it otherwise should be.
245 */
246
247 # userspace resumption stub bypassing syscall exit tracing
248 ALIGN
249ret_from_exception:
250 preempt_stop(CLBR_ANY)
251ret_from_intr:
252 GET_THREAD_INFO(%ebp)
253#ifdef CONFIG_VM86
254 movl PT_EFLAGS(%esp), %eax # mix EFLAGS and CS
255 movb PT_CS(%esp), %al
256 andl $(X86_EFLAGS_VM | SEGMENT_RPL_MASK), %eax
257#else
258 /*
259 * We can be coming here from child spawned by kernel_thread().
260 */
261 movl PT_CS(%esp), %eax
262 andl $SEGMENT_RPL_MASK, %eax
263#endif
264 cmpl $USER_RPL, %eax
265 jb resume_kernel # not returning to v8086 or userspace
266
267ENTRY(resume_userspace)
268 LOCKDEP_SYS_EXIT
269 DISABLE_INTERRUPTS(CLBR_ANY) # make sure we don't miss an interrupt
270 # setting need_resched or sigpending
271 # between sampling and the iret
272 TRACE_IRQS_OFF
273 movl TI_flags(%ebp), %ecx
274 andl $_TIF_WORK_MASK, %ecx # is there any work to be done on
275 # int/exception return?
276 jne work_pending
277 jmp restore_all
278END(ret_from_exception)
279
280#ifdef CONFIG_PREEMPT
281ENTRY(resume_kernel)
282 DISABLE_INTERRUPTS(CLBR_ANY)
283need_resched:
284 cmpl $0, PER_CPU_VAR(__preempt_count)
285 jnz restore_all
286 testl $X86_EFLAGS_IF, PT_EFLAGS(%esp) # interrupts off (exception path) ?
287 jz restore_all
288 call preempt_schedule_irq
289 jmp need_resched
290END(resume_kernel)
291#endif
292
293/*
294 * SYSENTER_RETURN points to after the SYSENTER instruction
295 * in the vsyscall page. See vsyscall-sysentry.S, which defines
296 * the symbol.
297 */
298
299 # SYSENTER call handler stub
300ENTRY(entry_SYSENTER_32)
301 movl TSS_sysenter_sp0(%esp), %esp
302sysenter_past_esp:
303 /*
304 * Interrupts are disabled here, but we can't trace it until
305 * enough kernel state to call TRACE_IRQS_OFF can be called - but
306 * we immediately enable interrupts at that point anyway.
307 */
308 pushl $__USER_DS
309 pushl %ebp
310 pushfl
311 orl $X86_EFLAGS_IF, (%esp)
312 pushl $__USER_CS
313 /*
314 * Push current_thread_info()->sysenter_return to the stack.
315 * A tiny bit of offset fixup is necessary: TI_sysenter_return
316 * is relative to thread_info, which is at the bottom of the
317 * kernel stack page. 4*4 means the 4 words pushed above;
318 * TOP_OF_KERNEL_STACK_PADDING takes us to the top of the stack;
319 * and THREAD_SIZE takes us to the bottom.
320 */
321 pushl ((TI_sysenter_return) - THREAD_SIZE + TOP_OF_KERNEL_STACK_PADDING + 4*4)(%esp)
322
323 pushl %eax
324 SAVE_ALL
325 ENABLE_INTERRUPTS(CLBR_NONE)
326
327/*
328 * Load the potential sixth argument from user stack.
329 * Careful about security.
330 */
331 cmpl $__PAGE_OFFSET-3, %ebp
332 jae syscall_fault
333 ASM_STAC
3341: movl (%ebp), %ebp
335 ASM_CLAC
336 movl %ebp, PT_EBP(%esp)
337 _ASM_EXTABLE(1b, syscall_fault)
338
339 GET_THREAD_INFO(%ebp)
340
341 testl $_TIF_WORK_SYSCALL_ENTRY, TI_flags(%ebp)
342 jnz sysenter_audit
343sysenter_do_call:
344 cmpl $(NR_syscalls), %eax
345 jae sysenter_badsys
346 call *sys_call_table(, %eax, 4)
347sysenter_after_call:
348 movl %eax, PT_EAX(%esp)
349 LOCKDEP_SYS_EXIT
350 DISABLE_INTERRUPTS(CLBR_ANY)
351 TRACE_IRQS_OFF
352 movl TI_flags(%ebp), %ecx
353 testl $_TIF_ALLWORK_MASK, %ecx
354 jnz sysexit_audit
355sysenter_exit:
356/* if something modifies registers it must also disable sysexit */
357 movl PT_EIP(%esp), %edx
358 movl PT_OLDESP(%esp), %ecx
359 xorl %ebp, %ebp
360 TRACE_IRQS_ON
3611: mov PT_FS(%esp), %fs
362 PTGS_TO_GS
363 ENABLE_INTERRUPTS_SYSEXIT
364
365#ifdef CONFIG_AUDITSYSCALL
366sysenter_audit:
367 testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT), TI_flags(%ebp)
368 jnz syscall_trace_entry
369 /* movl PT_EAX(%esp), %eax already set, syscall number: 1st arg to audit */
370 movl PT_EBX(%esp), %edx /* ebx/a0: 2nd arg to audit */
371 /* movl PT_ECX(%esp), %ecx already set, a1: 3nd arg to audit */
372 pushl PT_ESI(%esp) /* a3: 5th arg */
373 pushl PT_EDX+4(%esp) /* a2: 4th arg */
374 call __audit_syscall_entry
375 popl %ecx /* get that remapped edx off the stack */
376 popl %ecx /* get that remapped esi off the stack */
377 movl PT_EAX(%esp), %eax /* reload syscall number */
378 jmp sysenter_do_call
379
380sysexit_audit:
381 testl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT), %ecx
382 jnz syscall_exit_work
383 TRACE_IRQS_ON
384 ENABLE_INTERRUPTS(CLBR_ANY)
385 movl %eax, %edx /* second arg, syscall return value */
386 cmpl $-MAX_ERRNO, %eax /* is it an error ? */
387 setbe %al /* 1 if so, 0 if not */
388 movzbl %al, %eax /* zero-extend that */
389 call __audit_syscall_exit
390 DISABLE_INTERRUPTS(CLBR_ANY)
391 TRACE_IRQS_OFF
392 movl TI_flags(%ebp), %ecx
393 testl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT), %ecx
394 jnz syscall_exit_work
395 movl PT_EAX(%esp), %eax /* reload syscall return value */
396 jmp sysenter_exit
397#endif
398
399.pushsection .fixup, "ax"
4002: movl $0, PT_FS(%esp)
401 jmp 1b
402.popsection
403 _ASM_EXTABLE(1b, 2b)
404 PTGS_TO_GS_EX
405ENDPROC(entry_SYSENTER_32)
406
407 # system call handler stub
408ENTRY(entry_INT80_32)
409 ASM_CLAC
410 pushl %eax # save orig_eax
411 SAVE_ALL
412 GET_THREAD_INFO(%ebp)
413 # system call tracing in operation / emulation
414 testl $_TIF_WORK_SYSCALL_ENTRY, TI_flags(%ebp)
415 jnz syscall_trace_entry
416 cmpl $(NR_syscalls), %eax
417 jae syscall_badsys
418syscall_call:
419 call *sys_call_table(, %eax, 4)
420syscall_after_call:
421 movl %eax, PT_EAX(%esp) # store the return value
422syscall_exit:
423 LOCKDEP_SYS_EXIT
424 DISABLE_INTERRUPTS(CLBR_ANY) # make sure we don't miss an interrupt
425 # setting need_resched or sigpending
426 # between sampling and the iret
427 TRACE_IRQS_OFF
428 movl TI_flags(%ebp), %ecx
429 testl $_TIF_ALLWORK_MASK, %ecx # current->work
430 jnz syscall_exit_work
431
432restore_all:
433 TRACE_IRQS_IRET
434restore_all_notrace:
435#ifdef CONFIG_X86_ESPFIX32
436 movl PT_EFLAGS(%esp), %eax # mix EFLAGS, SS and CS
437 /*
438 * Warning: PT_OLDSS(%esp) contains the wrong/random values if we
439 * are returning to the kernel.
440 * See comments in process.c:copy_thread() for details.
441 */
442 movb PT_OLDSS(%esp), %ah
443 movb PT_CS(%esp), %al
444 andl $(X86_EFLAGS_VM | (SEGMENT_TI_MASK << 8) | SEGMENT_RPL_MASK), %eax
445 cmpl $((SEGMENT_LDT << 8) | USER_RPL), %eax
446 je ldt_ss # returning to user-space with LDT SS
447#endif
448restore_nocheck:
449 RESTORE_REGS 4 # skip orig_eax/error_code
450irq_return:
451 INTERRUPT_RETURN
452.section .fixup, "ax"
453ENTRY(iret_exc )
454 pushl $0 # no error code
455 pushl $do_iret_error
456 jmp error_code
457.previous
458 _ASM_EXTABLE(irq_return, iret_exc)
459
460#ifdef CONFIG_X86_ESPFIX32
461ldt_ss:
462#ifdef CONFIG_PARAVIRT
463 /*
464 * The kernel can't run on a non-flat stack if paravirt mode
465 * is active. Rather than try to fixup the high bits of
466 * ESP, bypass this code entirely. This may break DOSemu
467 * and/or Wine support in a paravirt VM, although the option
468 * is still available to implement the setting of the high
469 * 16-bits in the INTERRUPT_RETURN paravirt-op.
470 */
471 cmpl $0, pv_info+PARAVIRT_enabled
472 jne restore_nocheck
473#endif
474
475/*
476 * Setup and switch to ESPFIX stack
477 *
478 * We're returning to userspace with a 16 bit stack. The CPU will not
479 * restore the high word of ESP for us on executing iret... This is an
480 * "official" bug of all the x86-compatible CPUs, which we can work
481 * around to make dosemu and wine happy. We do this by preloading the
482 * high word of ESP with the high word of the userspace ESP while
483 * compensating for the offset by changing to the ESPFIX segment with
484 * a base address that matches for the difference.
485 */
486#define GDT_ESPFIX_SS PER_CPU_VAR(gdt_page) + (GDT_ENTRY_ESPFIX_SS * 8)
487 mov %esp, %edx /* load kernel esp */
488 mov PT_OLDESP(%esp), %eax /* load userspace esp */
489 mov %dx, %ax /* eax: new kernel esp */
490 sub %eax, %edx /* offset (low word is 0) */
491 shr $16, %edx
492 mov %dl, GDT_ESPFIX_SS + 4 /* bits 16..23 */
493 mov %dh, GDT_ESPFIX_SS + 7 /* bits 24..31 */
494 pushl $__ESPFIX_SS
495 pushl %eax /* new kernel esp */
496 /*
497 * Disable interrupts, but do not irqtrace this section: we
498 * will soon execute iret and the tracer was already set to
499 * the irqstate after the IRET:
500 */
501 DISABLE_INTERRUPTS(CLBR_EAX)
502 lss (%esp), %esp /* switch to espfix segment */
503 jmp restore_nocheck
504#endif
505ENDPROC(entry_INT80_32)
506
507 # perform work that needs to be done immediately before resumption
508 ALIGN
509work_pending:
510 testb $_TIF_NEED_RESCHED, %cl
511 jz work_notifysig
512work_resched:
513 call schedule
514 LOCKDEP_SYS_EXIT
515 DISABLE_INTERRUPTS(CLBR_ANY) # make sure we don't miss an interrupt
516 # setting need_resched or sigpending
517 # between sampling and the iret
518 TRACE_IRQS_OFF
519 movl TI_flags(%ebp), %ecx
520 andl $_TIF_WORK_MASK, %ecx # is there any work to be done other
521 # than syscall tracing?
522 jz restore_all
523 testb $_TIF_NEED_RESCHED, %cl
524 jnz work_resched
525
526work_notifysig: # deal with pending signals and
527 # notify-resume requests
528#ifdef CONFIG_VM86
529 testl $X86_EFLAGS_VM, PT_EFLAGS(%esp)
530 movl %esp, %eax
531 jnz work_notifysig_v86 # returning to kernel-space or
532 # vm86-space
5331:
534#else
535 movl %esp, %eax
536#endif
537 TRACE_IRQS_ON
538 ENABLE_INTERRUPTS(CLBR_NONE)
539 movb PT_CS(%esp), %bl
540 andb $SEGMENT_RPL_MASK, %bl
541 cmpb $USER_RPL, %bl
542 jb resume_kernel
543 xorl %edx, %edx
544 call do_notify_resume
545 jmp resume_userspace
546
547#ifdef CONFIG_VM86
548 ALIGN
549work_notifysig_v86:
550 pushl %ecx # save ti_flags for do_notify_resume
551 call save_v86_state # %eax contains pt_regs pointer
552 popl %ecx
553 movl %eax, %esp
554 jmp 1b
555#endif
556END(work_pending)
557
558 # perform syscall exit tracing
559 ALIGN
560syscall_trace_entry:
561 movl $-ENOSYS, PT_EAX(%esp)
562 movl %esp, %eax
563 call syscall_trace_enter
564 /* What it returned is what we'll actually use. */
565 cmpl $(NR_syscalls), %eax
566 jnae syscall_call
567 jmp syscall_exit
568END(syscall_trace_entry)
569
570 # perform syscall exit tracing
571 ALIGN
572syscall_exit_work:
573 testl $_TIF_WORK_SYSCALL_EXIT, %ecx
574 jz work_pending
575 TRACE_IRQS_ON
576 ENABLE_INTERRUPTS(CLBR_ANY) # could let syscall_trace_leave() call
577 # schedule() instead
578 movl %esp, %eax
579 call syscall_trace_leave
580 jmp resume_userspace
581END(syscall_exit_work)
582
583syscall_fault:
584 ASM_CLAC
585 GET_THREAD_INFO(%ebp)
586 movl $-EFAULT, PT_EAX(%esp)
587 jmp resume_userspace
588END(syscall_fault)
589
590syscall_badsys:
591 movl $-ENOSYS, %eax
592 jmp syscall_after_call
593END(syscall_badsys)
594
595sysenter_badsys:
596 movl $-ENOSYS, %eax
597 jmp sysenter_after_call
598END(sysenter_badsys)
599
600.macro FIXUP_ESPFIX_STACK
601/*
602 * Switch back for ESPFIX stack to the normal zerobased stack
603 *
604 * We can't call C functions using the ESPFIX stack. This code reads
605 * the high word of the segment base from the GDT and swiches to the
606 * normal stack and adjusts ESP with the matching offset.
607 */
608#ifdef CONFIG_X86_ESPFIX32
609 /* fixup the stack */
610 mov GDT_ESPFIX_SS + 4, %al /* bits 16..23 */
611 mov GDT_ESPFIX_SS + 7, %ah /* bits 24..31 */
612 shl $16, %eax
613 addl %esp, %eax /* the adjusted stack pointer */
614 pushl $__KERNEL_DS
615 pushl %eax
616 lss (%esp), %esp /* switch to the normal stack segment */
617#endif
618.endm
619.macro UNWIND_ESPFIX_STACK
620#ifdef CONFIG_X86_ESPFIX32
621 movl %ss, %eax
622 /* see if on espfix stack */
623 cmpw $__ESPFIX_SS, %ax
624 jne 27f
625 movl $__KERNEL_DS, %eax
626 movl %eax, %ds
627 movl %eax, %es
628 /* switch to normal stack */
629 FIXUP_ESPFIX_STACK
63027:
631#endif
632.endm
633
634/*
635 * Build the entry stubs with some assembler magic.
636 * We pack 1 stub into every 8-byte block.
637 */
638 .align 8
639ENTRY(irq_entries_start)
640 vector=FIRST_EXTERNAL_VECTOR
641 .rept (FIRST_SYSTEM_VECTOR - FIRST_EXTERNAL_VECTOR)
642 pushl $(~vector+0x80) /* Note: always in signed byte range */
643 vector=vector+1
644 jmp common_interrupt
645 .align 8
646 .endr
647END(irq_entries_start)
648
649/*
650 * the CPU automatically disables interrupts when executing an IRQ vector,
651 * so IRQ-flags tracing has to follow that:
652 */
653 .p2align CONFIG_X86_L1_CACHE_SHIFT
654common_interrupt:
655 ASM_CLAC
656 addl $-0x80, (%esp) /* Adjust vector into the [-256, -1] range */
657 SAVE_ALL
658 TRACE_IRQS_OFF
659 movl %esp, %eax
660 call do_IRQ
661 jmp ret_from_intr
662ENDPROC(common_interrupt)
663
664#define BUILD_INTERRUPT3(name, nr, fn) \
665ENTRY(name) \
666 ASM_CLAC; \
667 pushl $~(nr); \
668 SAVE_ALL; \
669 TRACE_IRQS_OFF \
670 movl %esp, %eax; \
671 call fn; \
672 jmp ret_from_intr; \
673ENDPROC(name)
674
675
676#ifdef CONFIG_TRACING
677# define TRACE_BUILD_INTERRUPT(name, nr) BUILD_INTERRUPT3(trace_##name, nr, smp_trace_##name)
678#else
679# define TRACE_BUILD_INTERRUPT(name, nr)
680#endif
681
682#define BUILD_INTERRUPT(name, nr) \
683 BUILD_INTERRUPT3(name, nr, smp_##name); \
684 TRACE_BUILD_INTERRUPT(name, nr)
685
686/* The include is where all of the SMP etc. interrupts come from */
687#include <asm/entry_arch.h>
688
689ENTRY(coprocessor_error)
690 ASM_CLAC
691 pushl $0
692 pushl $do_coprocessor_error
693 jmp error_code
694END(coprocessor_error)
695
696ENTRY(simd_coprocessor_error)
697 ASM_CLAC
698 pushl $0
699#ifdef CONFIG_X86_INVD_BUG
700 /* AMD 486 bug: invd from userspace calls exception 19 instead of #GP */
701 ALTERNATIVE "pushl $do_general_protection", \
702 "pushl $do_simd_coprocessor_error", \
703 X86_FEATURE_XMM
704#else
705 pushl $do_simd_coprocessor_error
706#endif
707 jmp error_code
708END(simd_coprocessor_error)
709
710ENTRY(device_not_available)
711 ASM_CLAC
712 pushl $-1 # mark this as an int
713 pushl $do_device_not_available
714 jmp error_code
715END(device_not_available)
716
717#ifdef CONFIG_PARAVIRT
718ENTRY(native_iret)
719 iret
720 _ASM_EXTABLE(native_iret, iret_exc)
721END(native_iret)
722
723ENTRY(native_irq_enable_sysexit)
724 sti
725 sysexit
726END(native_irq_enable_sysexit)
727#endif
728
729ENTRY(overflow)
730 ASM_CLAC
731 pushl $0
732 pushl $do_overflow
733 jmp error_code
734END(overflow)
735
736ENTRY(bounds)
737 ASM_CLAC
738 pushl $0
739 pushl $do_bounds
740 jmp error_code
741END(bounds)
742
743ENTRY(invalid_op)
744 ASM_CLAC
745 pushl $0
746 pushl $do_invalid_op
747 jmp error_code
748END(invalid_op)
749
750ENTRY(coprocessor_segment_overrun)
751 ASM_CLAC
752 pushl $0
753 pushl $do_coprocessor_segment_overrun
754 jmp error_code
755END(coprocessor_segment_overrun)
756
757ENTRY(invalid_TSS)
758 ASM_CLAC
759 pushl $do_invalid_TSS
760 jmp error_code
761END(invalid_TSS)
762
763ENTRY(segment_not_present)
764 ASM_CLAC
765 pushl $do_segment_not_present
766 jmp error_code
767END(segment_not_present)
768
769ENTRY(stack_segment)
770 ASM_CLAC
771 pushl $do_stack_segment
772 jmp error_code
773END(stack_segment)
774
775ENTRY(alignment_check)
776 ASM_CLAC
777 pushl $do_alignment_check
778 jmp error_code
779END(alignment_check)
780
781ENTRY(divide_error)
782 ASM_CLAC
783 pushl $0 # no error code
784 pushl $do_divide_error
785 jmp error_code
786END(divide_error)
787
788#ifdef CONFIG_X86_MCE
789ENTRY(machine_check)
790 ASM_CLAC
791 pushl $0
792 pushl machine_check_vector
793 jmp error_code
794END(machine_check)
795#endif
796
797ENTRY(spurious_interrupt_bug)
798 ASM_CLAC
799 pushl $0
800 pushl $do_spurious_interrupt_bug
801 jmp error_code
802END(spurious_interrupt_bug)
803
804#ifdef CONFIG_XEN
805/*
806 * Xen doesn't set %esp to be precisely what the normal SYSENTER
807 * entry point expects, so fix it up before using the normal path.
808 */
809ENTRY(xen_sysenter_target)
810 addl $5*4, %esp /* remove xen-provided frame */
811 jmp sysenter_past_esp
812
813ENTRY(xen_hypervisor_callback)
814 pushl $-1 /* orig_ax = -1 => not a system call */
815 SAVE_ALL
816 TRACE_IRQS_OFF
817
818 /*
819 * Check to see if we got the event in the critical
820 * region in xen_iret_direct, after we've reenabled
821 * events and checked for pending events. This simulates
822 * iret instruction's behaviour where it delivers a
823 * pending interrupt when enabling interrupts:
824 */
825 movl PT_EIP(%esp), %eax
826 cmpl $xen_iret_start_crit, %eax
827 jb 1f
828 cmpl $xen_iret_end_crit, %eax
829 jae 1f
830
831 jmp xen_iret_crit_fixup
832
833ENTRY(xen_do_upcall)
8341: mov %esp, %eax
835 call xen_evtchn_do_upcall
836#ifndef CONFIG_PREEMPT
837 call xen_maybe_preempt_hcall
838#endif
839 jmp ret_from_intr
840ENDPROC(xen_hypervisor_callback)
841
842/*
843 * Hypervisor uses this for application faults while it executes.
844 * We get here for two reasons:
845 * 1. Fault while reloading DS, ES, FS or GS
846 * 2. Fault while executing IRET
847 * Category 1 we fix up by reattempting the load, and zeroing the segment
848 * register if the load fails.
849 * Category 2 we fix up by jumping to do_iret_error. We cannot use the
850 * normal Linux return path in this case because if we use the IRET hypercall
851 * to pop the stack frame we end up in an infinite loop of failsafe callbacks.
852 * We distinguish between categories by maintaining a status value in EAX.
853 */
854ENTRY(xen_failsafe_callback)
855 pushl %eax
856 movl $1, %eax
8571: mov 4(%esp), %ds
8582: mov 8(%esp), %es
8593: mov 12(%esp), %fs
8604: mov 16(%esp), %gs
861 /* EAX == 0 => Category 1 (Bad segment)
862 EAX != 0 => Category 2 (Bad IRET) */
863 testl %eax, %eax
864 popl %eax
865 lea 16(%esp), %esp
866 jz 5f
867 jmp iret_exc
8685: pushl $-1 /* orig_ax = -1 => not a system call */
869 SAVE_ALL
870 jmp ret_from_exception
871
872.section .fixup, "ax"
8736: xorl %eax, %eax
874 movl %eax, 4(%esp)
875 jmp 1b
8767: xorl %eax, %eax
877 movl %eax, 8(%esp)
878 jmp 2b
8798: xorl %eax, %eax
880 movl %eax, 12(%esp)
881 jmp 3b
8829: xorl %eax, %eax
883 movl %eax, 16(%esp)
884 jmp 4b
885.previous
886 _ASM_EXTABLE(1b, 6b)
887 _ASM_EXTABLE(2b, 7b)
888 _ASM_EXTABLE(3b, 8b)
889 _ASM_EXTABLE(4b, 9b)
890ENDPROC(xen_failsafe_callback)
891
892BUILD_INTERRUPT3(xen_hvm_callback_vector, HYPERVISOR_CALLBACK_VECTOR,
893 xen_evtchn_do_upcall)
894
895#endif /* CONFIG_XEN */
896
897#if IS_ENABLED(CONFIG_HYPERV)
898
899BUILD_INTERRUPT3(hyperv_callback_vector, HYPERVISOR_CALLBACK_VECTOR,
900 hyperv_vector_handler)
901
902#endif /* CONFIG_HYPERV */
903
904#ifdef CONFIG_FUNCTION_TRACER
905#ifdef CONFIG_DYNAMIC_FTRACE
906
907ENTRY(mcount)
908 ret
909END(mcount)
910
911ENTRY(ftrace_caller)
912 pushl %eax
913 pushl %ecx
914 pushl %edx
915 pushl $0 /* Pass NULL as regs pointer */
916 movl 4*4(%esp), %eax
917 movl 0x4(%ebp), %edx
918 movl function_trace_op, %ecx
919 subl $MCOUNT_INSN_SIZE, %eax
920
921.globl ftrace_call
922ftrace_call:
923 call ftrace_stub
924
925 addl $4, %esp /* skip NULL pointer */
926 popl %edx
927 popl %ecx
928 popl %eax
929ftrace_ret:
930#ifdef CONFIG_FUNCTION_GRAPH_TRACER
931.globl ftrace_graph_call
932ftrace_graph_call:
933 jmp ftrace_stub
934#endif
935
936.globl ftrace_stub
937ftrace_stub:
938 ret
939END(ftrace_caller)
940
941ENTRY(ftrace_regs_caller)
942 pushf /* push flags before compare (in cs location) */
943
944 /*
945 * i386 does not save SS and ESP when coming from kernel.
946 * Instead, to get sp, &regs->sp is used (see ptrace.h).
947 * Unfortunately, that means eflags must be at the same location
948 * as the current return ip is. We move the return ip into the
949 * ip location, and move flags into the return ip location.
950 */
951 pushl 4(%esp) /* save return ip into ip slot */
952
953 pushl $0 /* Load 0 into orig_ax */
954 pushl %gs
955 pushl %fs
956 pushl %es
957 pushl %ds
958 pushl %eax
959 pushl %ebp
960 pushl %edi
961 pushl %esi
962 pushl %edx
963 pushl %ecx
964 pushl %ebx
965
966 movl 13*4(%esp), %eax /* Get the saved flags */
967 movl %eax, 14*4(%esp) /* Move saved flags into regs->flags location */
968 /* clobbering return ip */
969 movl $__KERNEL_CS, 13*4(%esp)
970
971 movl 12*4(%esp), %eax /* Load ip (1st parameter) */
972 subl $MCOUNT_INSN_SIZE, %eax /* Adjust ip */
973 movl 0x4(%ebp), %edx /* Load parent ip (2nd parameter) */
974 movl function_trace_op, %ecx /* Save ftrace_pos in 3rd parameter */
975 pushl %esp /* Save pt_regs as 4th parameter */
976
977GLOBAL(ftrace_regs_call)
978 call ftrace_stub
979
980 addl $4, %esp /* Skip pt_regs */
981 movl 14*4(%esp), %eax /* Move flags back into cs */
982 movl %eax, 13*4(%esp) /* Needed to keep addl from modifying flags */
983 movl 12*4(%esp), %eax /* Get return ip from regs->ip */
984 movl %eax, 14*4(%esp) /* Put return ip back for ret */
985
986 popl %ebx
987 popl %ecx
988 popl %edx
989 popl %esi
990 popl %edi
991 popl %ebp
992 popl %eax
993 popl %ds
994 popl %es
995 popl %fs
996 popl %gs
997 addl $8, %esp /* Skip orig_ax and ip */
998 popf /* Pop flags at end (no addl to corrupt flags) */
999 jmp ftrace_ret
1000
1001 popf
1002 jmp ftrace_stub
1003#else /* ! CONFIG_DYNAMIC_FTRACE */
1004
1005ENTRY(mcount)
1006 cmpl $__PAGE_OFFSET, %esp
1007 jb ftrace_stub /* Paging not enabled yet? */
1008
1009 cmpl $ftrace_stub, ftrace_trace_function
1010 jnz trace
1011#ifdef CONFIG_FUNCTION_GRAPH_TRACER
1012 cmpl $ftrace_stub, ftrace_graph_return
1013 jnz ftrace_graph_caller
1014
1015 cmpl $ftrace_graph_entry_stub, ftrace_graph_entry
1016 jnz ftrace_graph_caller
1017#endif
1018.globl ftrace_stub
1019ftrace_stub:
1020 ret
1021
1022 /* taken from glibc */
1023trace:
1024 pushl %eax
1025 pushl %ecx
1026 pushl %edx
1027 movl 0xc(%esp), %eax
1028 movl 0x4(%ebp), %edx
1029 subl $MCOUNT_INSN_SIZE, %eax
1030
1031 call *ftrace_trace_function
1032
1033 popl %edx
1034 popl %ecx
1035 popl %eax
1036 jmp ftrace_stub
1037END(mcount)
1038#endif /* CONFIG_DYNAMIC_FTRACE */
1039#endif /* CONFIG_FUNCTION_TRACER */
1040
1041#ifdef CONFIG_FUNCTION_GRAPH_TRACER
1042ENTRY(ftrace_graph_caller)
1043 pushl %eax
1044 pushl %ecx
1045 pushl %edx
1046 movl 0xc(%esp), %eax
1047 lea 0x4(%ebp), %edx
1048 movl (%ebp), %ecx
1049 subl $MCOUNT_INSN_SIZE, %eax
1050 call prepare_ftrace_return
1051 popl %edx
1052 popl %ecx
1053 popl %eax
1054 ret
1055END(ftrace_graph_caller)
1056
1057.globl return_to_handler
1058return_to_handler:
1059 pushl %eax
1060 pushl %edx
1061 movl %ebp, %eax
1062 call ftrace_return_to_handler
1063 movl %eax, %ecx
1064 popl %edx
1065 popl %eax
1066 jmp *%ecx
1067#endif
1068
1069#ifdef CONFIG_TRACING
1070ENTRY(trace_page_fault)
1071 ASM_CLAC
1072 pushl $trace_do_page_fault
1073 jmp error_code
1074END(trace_page_fault)
1075#endif
1076
1077ENTRY(page_fault)
1078 ASM_CLAC
1079 pushl $do_page_fault
1080 ALIGN
1081error_code:
1082 /* the function address is in %gs's slot on the stack */
1083 pushl %fs
1084 pushl %es
1085 pushl %ds
1086 pushl %eax
1087 pushl %ebp
1088 pushl %edi
1089 pushl %esi
1090 pushl %edx
1091 pushl %ecx
1092 pushl %ebx
1093 cld
1094 movl $(__KERNEL_PERCPU), %ecx
1095 movl %ecx, %fs
1096 UNWIND_ESPFIX_STACK
1097 GS_TO_REG %ecx
1098 movl PT_GS(%esp), %edi # get the function address
1099 movl PT_ORIG_EAX(%esp), %edx # get the error code
1100 movl $-1, PT_ORIG_EAX(%esp) # no syscall to restart
1101 REG_TO_PTGS %ecx
1102 SET_KERNEL_GS %ecx
1103 movl $(__USER_DS), %ecx
1104 movl %ecx, %ds
1105 movl %ecx, %es
1106 TRACE_IRQS_OFF
1107 movl %esp, %eax # pt_regs pointer
1108 call *%edi
1109 jmp ret_from_exception
1110END(page_fault)
1111
1112/*
1113 * Debug traps and NMI can happen at the one SYSENTER instruction
1114 * that sets up the real kernel stack. Check here, since we can't
1115 * allow the wrong stack to be used.
1116 *
1117 * "TSS_sysenter_sp0+12" is because the NMI/debug handler will have
1118 * already pushed 3 words if it hits on the sysenter instruction:
1119 * eflags, cs and eip.
1120 *
1121 * We just load the right stack, and push the three (known) values
1122 * by hand onto the new stack - while updating the return eip past
1123 * the instruction that would have done it for sysenter.
1124 */
1125.macro FIX_STACK offset ok label
1126 cmpw $__KERNEL_CS, 4(%esp)
1127 jne \ok
1128\label:
1129 movl TSS_sysenter_sp0 + \offset(%esp), %esp
1130 pushfl
1131 pushl $__KERNEL_CS
1132 pushl $sysenter_past_esp
1133.endm
1134
1135ENTRY(debug)
1136 ASM_CLAC
1137 cmpl $entry_SYSENTER_32, (%esp)
1138 jne debug_stack_correct
1139 FIX_STACK 12, debug_stack_correct, debug_esp_fix_insn
1140debug_stack_correct:
1141 pushl $-1 # mark this as an int
1142 SAVE_ALL
1143 TRACE_IRQS_OFF
1144 xorl %edx, %edx # error code 0
1145 movl %esp, %eax # pt_regs pointer
1146 call do_debug
1147 jmp ret_from_exception
1148END(debug)
1149
1150/*
1151 * NMI is doubly nasty. It can happen _while_ we're handling
1152 * a debug fault, and the debug fault hasn't yet been able to
1153 * clear up the stack. So we first check whether we got an
1154 * NMI on the sysenter entry path, but after that we need to
1155 * check whether we got an NMI on the debug path where the debug
1156 * fault happened on the sysenter path.
1157 */
1158ENTRY(nmi)
1159 ASM_CLAC
1160#ifdef CONFIG_X86_ESPFIX32
1161 pushl %eax
1162 movl %ss, %eax
1163 cmpw $__ESPFIX_SS, %ax
1164 popl %eax
1165 je nmi_espfix_stack
1166#endif
1167 cmpl $entry_SYSENTER_32, (%esp)
1168 je nmi_stack_fixup
1169 pushl %eax
1170 movl %esp, %eax
1171 /*
1172 * Do not access memory above the end of our stack page,
1173 * it might not exist.
1174 */
1175 andl $(THREAD_SIZE-1), %eax
1176 cmpl $(THREAD_SIZE-20), %eax
1177 popl %eax
1178 jae nmi_stack_correct
1179 cmpl $entry_SYSENTER_32, 12(%esp)
1180 je nmi_debug_stack_check
1181nmi_stack_correct:
1182 pushl %eax
1183 SAVE_ALL
1184 xorl %edx, %edx # zero error code
1185 movl %esp, %eax # pt_regs pointer
1186 call do_nmi
1187 jmp restore_all_notrace
1188
1189nmi_stack_fixup:
1190 FIX_STACK 12, nmi_stack_correct, 1
1191 jmp nmi_stack_correct
1192
1193nmi_debug_stack_check:
1194 cmpw $__KERNEL_CS, 16(%esp)
1195 jne nmi_stack_correct
1196 cmpl $debug, (%esp)
1197 jb nmi_stack_correct
1198 cmpl $debug_esp_fix_insn, (%esp)
1199 ja nmi_stack_correct
1200 FIX_STACK 24, nmi_stack_correct, 1
1201 jmp nmi_stack_correct
1202
1203#ifdef CONFIG_X86_ESPFIX32
1204nmi_espfix_stack:
1205 /*
1206 * create the pointer to lss back
1207 */
1208 pushl %ss
1209 pushl %esp
1210 addl $4, (%esp)
1211 /* copy the iret frame of 12 bytes */
1212 .rept 3
1213 pushl 16(%esp)
1214 .endr
1215 pushl %eax
1216 SAVE_ALL
1217 FIXUP_ESPFIX_STACK # %eax == %esp
1218 xorl %edx, %edx # zero error code
1219 call do_nmi
1220 RESTORE_REGS
1221 lss 12+4(%esp), %esp # back to espfix stack
1222 jmp irq_return
1223#endif
1224END(nmi)
1225
1226ENTRY(int3)
1227 ASM_CLAC
1228 pushl $-1 # mark this as an int
1229 SAVE_ALL
1230 TRACE_IRQS_OFF
1231 xorl %edx, %edx # zero error code
1232 movl %esp, %eax # pt_regs pointer
1233 call do_int3
1234 jmp ret_from_exception
1235END(int3)
1236
1237ENTRY(general_protection)
1238 pushl $do_general_protection
1239 jmp error_code
1240END(general_protection)
1241
1242#ifdef CONFIG_KVM_GUEST
1243ENTRY(async_page_fault)
1244 ASM_CLAC
1245 pushl $do_async_page_fault
1246 jmp error_code
1247END(async_page_fault)
1248#endif
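
One detail worth unpacking from the irq_entries_start/common_interrupt code in the listing above: each 8-byte stub pushes $(~vector+0x80) so the immediate fits in a signed byte, and common_interrupt then adds -0x80 to turn the stored value back into ~vector before calling do_IRQ. A hand-worked example for one arbitrary vector, shown only for illustration and not part of the patch:

	/* illustrative stub for vector 0x31 */
	pushl	$(~0x31 + 0x80)		/* pushes 0x4e, a signed-byte immediate */
	jmp	common_interrupt	/* which does: addl $-0x80, (%esp) */
					/* 0x4e - 0x80 = 0xffffffce = ~0x31, so the */
					/* C handler can recover the vector by complementing orig_eax */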
diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/entry/entry_64.S
index 0395a59f67c4..d2a0ed211bed 100644
--- a/arch/x86/kernel/entry_64.S
+++ b/arch/x86/entry/entry_64.S
@@ -19,8 +19,6 @@
19 * at the top of the kernel process stack. 19 * at the top of the kernel process stack.
20 * 20 *
21 * Some macro usage: 21 * Some macro usage:
22 * - CFI macros are used to generate dwarf2 unwind information for better
23 * backtraces. They don't change any code.
24 * - ENTRY/END Define functions in the symbol table. 22 * - ENTRY/END Define functions in the symbol table.
25 * - TRACE_IRQ_* - Trace hard interrupt state for lock debugging. 23 * - TRACE_IRQ_* - Trace hard interrupt state for lock debugging.
26 * - idtentry - Define exception entry points. 24 * - idtentry - Define exception entry points.
@@ -30,8 +28,7 @@
30#include <asm/segment.h> 28#include <asm/segment.h>
31#include <asm/cache.h> 29#include <asm/cache.h>
32#include <asm/errno.h> 30#include <asm/errno.h>
33#include <asm/dwarf2.h> 31#include "calling.h"
34#include <asm/calling.h>
35#include <asm/asm-offsets.h> 32#include <asm/asm-offsets.h>
36#include <asm/msr.h> 33#include <asm/msr.h>
37#include <asm/unistd.h> 34#include <asm/unistd.h>
@@ -113,61 +110,6 @@ ENDPROC(native_usergs_sysret64)
113#endif 110#endif
114 111
115/* 112/*
116 * empty frame
117 */
118 .macro EMPTY_FRAME start=1 offset=0
119 .if \start
120 CFI_STARTPROC simple
121 CFI_SIGNAL_FRAME
122 CFI_DEF_CFA rsp,8+\offset
123 .else
124 CFI_DEF_CFA_OFFSET 8+\offset
125 .endif
126 .endm
127
128/*
129 * initial frame state for interrupts (and exceptions without error code)
130 */
131 .macro INTR_FRAME start=1 offset=0
132 EMPTY_FRAME \start, 5*8+\offset
133 /*CFI_REL_OFFSET ss, 4*8+\offset*/
134 CFI_REL_OFFSET rsp, 3*8+\offset
135 /*CFI_REL_OFFSET rflags, 2*8+\offset*/
136 /*CFI_REL_OFFSET cs, 1*8+\offset*/
137 CFI_REL_OFFSET rip, 0*8+\offset
138 .endm
139
140/*
141 * initial frame state for exceptions with error code (and interrupts
142 * with vector already pushed)
143 */
144 .macro XCPT_FRAME start=1 offset=0
145 INTR_FRAME \start, 1*8+\offset
146 .endm
147
148/*
149 * frame that enables passing a complete pt_regs to a C function.
150 */
151 .macro DEFAULT_FRAME start=1 offset=0
152 XCPT_FRAME \start, ORIG_RAX+\offset
153 CFI_REL_OFFSET rdi, RDI+\offset
154 CFI_REL_OFFSET rsi, RSI+\offset
155 CFI_REL_OFFSET rdx, RDX+\offset
156 CFI_REL_OFFSET rcx, RCX+\offset
157 CFI_REL_OFFSET rax, RAX+\offset
158 CFI_REL_OFFSET r8, R8+\offset
159 CFI_REL_OFFSET r9, R9+\offset
160 CFI_REL_OFFSET r10, R10+\offset
161 CFI_REL_OFFSET r11, R11+\offset
162 CFI_REL_OFFSET rbx, RBX+\offset
163 CFI_REL_OFFSET rbp, RBP+\offset
164 CFI_REL_OFFSET r12, R12+\offset
165 CFI_REL_OFFSET r13, R13+\offset
166 CFI_REL_OFFSET r14, R14+\offset
167 CFI_REL_OFFSET r15, R15+\offset
168 .endm
169
170/*
171 * 64bit SYSCALL instruction entry. Up to 6 arguments in registers. 113 * 64bit SYSCALL instruction entry. Up to 6 arguments in registers.
172 * 114 *
173 * 64bit SYSCALL saves rip to rcx, clears rflags.RF, then saves rflags to r11, 115 * 64bit SYSCALL saves rip to rcx, clears rflags.RF, then saves rflags to r11,
@@ -195,13 +137,7 @@ ENDPROC(native_usergs_sysret64)
195 * with them due to bugs in both AMD and Intel CPUs. 137 * with them due to bugs in both AMD and Intel CPUs.
196 */ 138 */
197 139
198ENTRY(system_call) 140ENTRY(entry_SYSCALL_64)
199 CFI_STARTPROC simple
200 CFI_SIGNAL_FRAME
201 CFI_DEF_CFA rsp,0
202 CFI_REGISTER rip,rcx
203 /*CFI_REGISTER rflags,r11*/
204
205 /* 141 /*
206 * Interrupts are off on entry. 142 * Interrupts are off on entry.
207 * We do not frame this tiny irq-off block with TRACE_IRQS_OFF/ON, 143 * We do not frame this tiny irq-off block with TRACE_IRQS_OFF/ON,
@@ -213,14 +149,14 @@ ENTRY(system_call)
213 * after the swapgs, so that it can do the swapgs 149 * after the swapgs, so that it can do the swapgs
214 * for the guest and jump here on syscall. 150 * for the guest and jump here on syscall.
215 */ 151 */
216GLOBAL(system_call_after_swapgs) 152GLOBAL(entry_SYSCALL_64_after_swapgs)
217 153
218 movq %rsp,PER_CPU_VAR(rsp_scratch) 154 movq %rsp,PER_CPU_VAR(rsp_scratch)
219 movq PER_CPU_VAR(cpu_current_top_of_stack),%rsp 155 movq PER_CPU_VAR(cpu_current_top_of_stack),%rsp
220 156
221 /* Construct struct pt_regs on stack */ 157 /* Construct struct pt_regs on stack */
222 pushq_cfi $__USER_DS /* pt_regs->ss */ 158 pushq $__USER_DS /* pt_regs->ss */
223 pushq_cfi PER_CPU_VAR(rsp_scratch) /* pt_regs->sp */ 159 pushq PER_CPU_VAR(rsp_scratch) /* pt_regs->sp */
224 /* 160 /*
225 * Re-enable interrupts. 161 * Re-enable interrupts.
226 * We use 'rsp_scratch' as a scratch space, hence irq-off block above 162 * We use 'rsp_scratch' as a scratch space, hence irq-off block above
@@ -229,26 +165,24 @@ GLOBAL(system_call_after_swapgs)
229 * with using rsp_scratch: 165 * with using rsp_scratch:
230 */ 166 */
231 ENABLE_INTERRUPTS(CLBR_NONE) 167 ENABLE_INTERRUPTS(CLBR_NONE)
232 pushq_cfi %r11 /* pt_regs->flags */ 168 pushq %r11 /* pt_regs->flags */
233 pushq_cfi $__USER_CS /* pt_regs->cs */ 169 pushq $__USER_CS /* pt_regs->cs */
234 pushq_cfi %rcx /* pt_regs->ip */ 170 pushq %rcx /* pt_regs->ip */
235 CFI_REL_OFFSET rip,0 171 pushq %rax /* pt_regs->orig_ax */
236 pushq_cfi_reg rax /* pt_regs->orig_ax */ 172 pushq %rdi /* pt_regs->di */
237 pushq_cfi_reg rdi /* pt_regs->di */ 173 pushq %rsi /* pt_regs->si */
238 pushq_cfi_reg rsi /* pt_regs->si */ 174 pushq %rdx /* pt_regs->dx */
239 pushq_cfi_reg rdx /* pt_regs->dx */ 175 pushq %rcx /* pt_regs->cx */
240 pushq_cfi_reg rcx /* pt_regs->cx */ 176 pushq $-ENOSYS /* pt_regs->ax */
241 pushq_cfi $-ENOSYS /* pt_regs->ax */ 177 pushq %r8 /* pt_regs->r8 */
242 pushq_cfi_reg r8 /* pt_regs->r8 */ 178 pushq %r9 /* pt_regs->r9 */
243 pushq_cfi_reg r9 /* pt_regs->r9 */ 179 pushq %r10 /* pt_regs->r10 */
244 pushq_cfi_reg r10 /* pt_regs->r10 */ 180 pushq %r11 /* pt_regs->r11 */
245 pushq_cfi_reg r11 /* pt_regs->r11 */
246 sub $(6*8),%rsp /* pt_regs->bp,bx,r12-15 not saved */ 181 sub $(6*8),%rsp /* pt_regs->bp,bx,r12-15 not saved */
247 CFI_ADJUST_CFA_OFFSET 6*8
248 182
249 testl $_TIF_WORK_SYSCALL_ENTRY, ASM_THREAD_INFO(TI_flags, %rsp, SIZEOF_PTREGS) 183 testl $_TIF_WORK_SYSCALL_ENTRY, ASM_THREAD_INFO(TI_flags, %rsp, SIZEOF_PTREGS)
250 jnz tracesys 184 jnz tracesys
251system_call_fastpath: 185entry_SYSCALL_64_fastpath:
252#if __SYSCALL_MASK == ~0 186#if __SYSCALL_MASK == ~0
253 cmpq $__NR_syscall_max,%rax 187 cmpq $__NR_syscall_max,%rax
254#else 188#else
@@ -282,13 +216,9 @@ system_call_fastpath:
282 testl $_TIF_ALLWORK_MASK, ASM_THREAD_INFO(TI_flags, %rsp, SIZEOF_PTREGS) 216 testl $_TIF_ALLWORK_MASK, ASM_THREAD_INFO(TI_flags, %rsp, SIZEOF_PTREGS)
283 jnz int_ret_from_sys_call_irqs_off /* Go to the slow path */ 217 jnz int_ret_from_sys_call_irqs_off /* Go to the slow path */
284 218
285 CFI_REMEMBER_STATE
286
287 RESTORE_C_REGS_EXCEPT_RCX_R11 219 RESTORE_C_REGS_EXCEPT_RCX_R11
288 movq RIP(%rsp),%rcx 220 movq RIP(%rsp),%rcx
289 CFI_REGISTER rip,rcx
290 movq EFLAGS(%rsp),%r11 221 movq EFLAGS(%rsp),%r11
291 /*CFI_REGISTER rflags,r11*/
292 movq RSP(%rsp),%rsp 222 movq RSP(%rsp),%rsp
293 /* 223 /*
294 * 64bit SYSRET restores rip from rcx, 224 * 64bit SYSRET restores rip from rcx,
@@ -307,8 +237,6 @@ system_call_fastpath:
307 */ 237 */
308 USERGS_SYSRET64 238 USERGS_SYSRET64
309 239
310 CFI_RESTORE_STATE
311
312 /* Do syscall entry tracing */ 240 /* Do syscall entry tracing */
313tracesys: 241tracesys:
314 movq %rsp, %rdi 242 movq %rsp, %rdi
@@ -318,7 +246,7 @@ tracesys:
318 jnz tracesys_phase2 /* if needed, run the slow path */ 246 jnz tracesys_phase2 /* if needed, run the slow path */
319 RESTORE_C_REGS_EXCEPT_RAX /* else restore clobbered regs */ 247 RESTORE_C_REGS_EXCEPT_RAX /* else restore clobbered regs */
320 movq ORIG_RAX(%rsp), %rax 248 movq ORIG_RAX(%rsp), %rax
321 jmp system_call_fastpath /* and return to the fast path */ 249 jmp entry_SYSCALL_64_fastpath /* and return to the fast path */
322 250
323tracesys_phase2: 251tracesys_phase2:
324 SAVE_EXTRA_REGS 252 SAVE_EXTRA_REGS
@@ -374,9 +302,9 @@ int_careful:
374 jnc int_very_careful 302 jnc int_very_careful
375 TRACE_IRQS_ON 303 TRACE_IRQS_ON
376 ENABLE_INTERRUPTS(CLBR_NONE) 304 ENABLE_INTERRUPTS(CLBR_NONE)
377 pushq_cfi %rdi 305 pushq %rdi
378 SCHEDULE_USER 306 SCHEDULE_USER
379 popq_cfi %rdi 307 popq %rdi
380 DISABLE_INTERRUPTS(CLBR_NONE) 308 DISABLE_INTERRUPTS(CLBR_NONE)
381 TRACE_IRQS_OFF 309 TRACE_IRQS_OFF
382 jmp int_with_check 310 jmp int_with_check
@@ -389,10 +317,10 @@ int_very_careful:
389 /* Check for syscall exit trace */ 317 /* Check for syscall exit trace */
390 testl $_TIF_WORK_SYSCALL_EXIT,%edx 318 testl $_TIF_WORK_SYSCALL_EXIT,%edx
391 jz int_signal 319 jz int_signal
392 pushq_cfi %rdi 320 pushq %rdi
393 leaq 8(%rsp),%rdi # &ptregs -> arg1 321 leaq 8(%rsp),%rdi # &ptregs -> arg1
394 call syscall_trace_leave 322 call syscall_trace_leave
395 popq_cfi %rdi 323 popq %rdi
396 andl $~(_TIF_WORK_SYSCALL_EXIT|_TIF_SYSCALL_EMU),%edi 324 andl $~(_TIF_WORK_SYSCALL_EXIT|_TIF_SYSCALL_EMU),%edi
397 jmp int_restore_rest 325 jmp int_restore_rest
398 326
@@ -475,27 +403,21 @@ syscall_return:
475 * perf profiles. Nothing jumps here. 403 * perf profiles. Nothing jumps here.
476 */ 404 */
477syscall_return_via_sysret: 405syscall_return_via_sysret:
478 CFI_REMEMBER_STATE
479 /* rcx and r11 are already restored (see code above) */ 406 /* rcx and r11 are already restored (see code above) */
480 RESTORE_C_REGS_EXCEPT_RCX_R11 407 RESTORE_C_REGS_EXCEPT_RCX_R11
481 movq RSP(%rsp),%rsp 408 movq RSP(%rsp),%rsp
482 USERGS_SYSRET64 409 USERGS_SYSRET64
483 CFI_RESTORE_STATE
484 410
485opportunistic_sysret_failed: 411opportunistic_sysret_failed:
486 SWAPGS 412 SWAPGS
487 jmp restore_c_regs_and_iret 413 jmp restore_c_regs_and_iret
488 CFI_ENDPROC 414END(entry_SYSCALL_64)
489END(system_call)
490 415
491 416
492 .macro FORK_LIKE func 417 .macro FORK_LIKE func
493ENTRY(stub_\func) 418ENTRY(stub_\func)
494 CFI_STARTPROC
495 DEFAULT_FRAME 0, 8 /* offset 8: return address */
496 SAVE_EXTRA_REGS 8 419 SAVE_EXTRA_REGS 8
497 jmp sys_\func 420 jmp sys_\func
498 CFI_ENDPROC
499END(stub_\func) 421END(stub_\func)
500 .endm 422 .endm
501 423
@@ -504,8 +426,6 @@ END(stub_\func)
504 FORK_LIKE vfork 426 FORK_LIKE vfork
505 427
506ENTRY(stub_execve) 428ENTRY(stub_execve)
507 CFI_STARTPROC
508 DEFAULT_FRAME 0, 8
509 call sys_execve 429 call sys_execve
510return_from_execve: 430return_from_execve:
511 testl %eax, %eax 431 testl %eax, %eax
@@ -515,11 +435,9 @@ return_from_execve:
5151: 4351:
516 /* must use IRET code path (pt_regs->cs may have changed) */ 436 /* must use IRET code path (pt_regs->cs may have changed) */
517 addq $8, %rsp 437 addq $8, %rsp
518 CFI_ADJUST_CFA_OFFSET -8
519 ZERO_EXTRA_REGS 438 ZERO_EXTRA_REGS
520 movq %rax,RAX(%rsp) 439 movq %rax,RAX(%rsp)
521 jmp int_ret_from_sys_call 440 jmp int_ret_from_sys_call
522 CFI_ENDPROC
523END(stub_execve) 441END(stub_execve)
524/* 442/*
525 * Remaining execve stubs are only 7 bytes long. 443 * Remaining execve stubs are only 7 bytes long.
@@ -527,32 +445,23 @@ END(stub_execve)
527 */ 445 */
528 .align 8 446 .align 8
529GLOBAL(stub_execveat) 447GLOBAL(stub_execveat)
530 CFI_STARTPROC
531 DEFAULT_FRAME 0, 8
532 call sys_execveat 448 call sys_execveat
533 jmp return_from_execve 449 jmp return_from_execve
534 CFI_ENDPROC
535END(stub_execveat) 450END(stub_execveat)
536 451
537#if defined(CONFIG_X86_X32_ABI) || defined(CONFIG_IA32_EMULATION) 452#if defined(CONFIG_X86_X32_ABI) || defined(CONFIG_IA32_EMULATION)
538 .align 8 453 .align 8
539GLOBAL(stub_x32_execve) 454GLOBAL(stub_x32_execve)
540GLOBAL(stub32_execve) 455GLOBAL(stub32_execve)
541 CFI_STARTPROC
542 DEFAULT_FRAME 0, 8
543 call compat_sys_execve 456 call compat_sys_execve
544 jmp return_from_execve 457 jmp return_from_execve
545 CFI_ENDPROC
546END(stub32_execve) 458END(stub32_execve)
547END(stub_x32_execve) 459END(stub_x32_execve)
548 .align 8 460 .align 8
549GLOBAL(stub_x32_execveat) 461GLOBAL(stub_x32_execveat)
550GLOBAL(stub32_execveat) 462GLOBAL(stub32_execveat)
551 CFI_STARTPROC
552 DEFAULT_FRAME 0, 8
553 call compat_sys_execveat 463 call compat_sys_execveat
554 jmp return_from_execve 464 jmp return_from_execve
555 CFI_ENDPROC
556END(stub32_execveat) 465END(stub32_execveat)
557END(stub_x32_execveat) 466END(stub_x32_execveat)
558#endif 467#endif
@@ -562,8 +471,6 @@ END(stub_x32_execveat)
562 * This cannot be done with SYSRET, so use the IRET return path instead. 471 * This cannot be done with SYSRET, so use the IRET return path instead.
563 */ 472 */
564ENTRY(stub_rt_sigreturn) 473ENTRY(stub_rt_sigreturn)
565 CFI_STARTPROC
566 DEFAULT_FRAME 0, 8
567 /* 474 /*
568 * SAVE_EXTRA_REGS result is not normally needed: 475 * SAVE_EXTRA_REGS result is not normally needed:
569 * sigreturn overwrites all pt_regs->GPREGS. 476 * sigreturn overwrites all pt_regs->GPREGS.
@@ -575,21 +482,16 @@ ENTRY(stub_rt_sigreturn)
575 call sys_rt_sigreturn 482 call sys_rt_sigreturn
576return_from_stub: 483return_from_stub:
577 addq $8, %rsp 484 addq $8, %rsp
578 CFI_ADJUST_CFA_OFFSET -8
579 RESTORE_EXTRA_REGS 485 RESTORE_EXTRA_REGS
580 movq %rax,RAX(%rsp) 486 movq %rax,RAX(%rsp)
581 jmp int_ret_from_sys_call 487 jmp int_ret_from_sys_call
582 CFI_ENDPROC
583END(stub_rt_sigreturn) 488END(stub_rt_sigreturn)
584 489
585#ifdef CONFIG_X86_X32_ABI 490#ifdef CONFIG_X86_X32_ABI
586ENTRY(stub_x32_rt_sigreturn) 491ENTRY(stub_x32_rt_sigreturn)
587 CFI_STARTPROC
588 DEFAULT_FRAME 0, 8
589 SAVE_EXTRA_REGS 8 492 SAVE_EXTRA_REGS 8
590 call sys32_x32_rt_sigreturn 493 call sys32_x32_rt_sigreturn
591 jmp return_from_stub 494 jmp return_from_stub
592 CFI_ENDPROC
593END(stub_x32_rt_sigreturn) 495END(stub_x32_rt_sigreturn)
594#endif 496#endif
595 497
@@ -599,12 +501,11 @@ END(stub_x32_rt_sigreturn)
599 * rdi: prev task we switched from 501 * rdi: prev task we switched from
600 */ 502 */
601ENTRY(ret_from_fork) 503ENTRY(ret_from_fork)
602 DEFAULT_FRAME
603 504
604 LOCK ; btr $TIF_FORK,TI_flags(%r8) 505 LOCK ; btr $TIF_FORK,TI_flags(%r8)
605 506
606 pushq_cfi $0x0002 507 pushq $0x0002
607 popfq_cfi # reset kernel eflags 508 popfq # reset kernel eflags
608 509
609 call schedule_tail # rdi: 'prev' task parameter 510 call schedule_tail # rdi: 'prev' task parameter
610 511
@@ -615,7 +516,7 @@ ENTRY(ret_from_fork)
615 /* 516 /*
616 * By the time we get here, we have no idea whether our pt_regs, 517 * By the time we get here, we have no idea whether our pt_regs,
617 * ti flags, and ti status came from the 64-bit SYSCALL fast path, 518 * ti flags, and ti status came from the 64-bit SYSCALL fast path,
618 * the slow path, or one of the ia32entry paths. 519 * the slow path, or one of the 32-bit compat paths.
619 * Use IRET code path to return, since it can safely handle 520 * Use IRET code path to return, since it can safely handle
620 * all of the above. 521 * all of the above.
621 */ 522 */
@@ -628,7 +529,6 @@ ENTRY(ret_from_fork)
628 movl $0, RAX(%rsp) 529 movl $0, RAX(%rsp)
629 RESTORE_EXTRA_REGS 530 RESTORE_EXTRA_REGS
630 jmp int_ret_from_sys_call 531 jmp int_ret_from_sys_call
631 CFI_ENDPROC
632END(ret_from_fork) 532END(ret_from_fork)
633 533
634/* 534/*
@@ -637,16 +537,13 @@ END(ret_from_fork)
637 */ 537 */
638 .align 8 538 .align 8
639ENTRY(irq_entries_start) 539ENTRY(irq_entries_start)
640 INTR_FRAME
641 vector=FIRST_EXTERNAL_VECTOR 540 vector=FIRST_EXTERNAL_VECTOR
642 .rept (FIRST_SYSTEM_VECTOR - FIRST_EXTERNAL_VECTOR) 541 .rept (FIRST_SYSTEM_VECTOR - FIRST_EXTERNAL_VECTOR)
643 pushq_cfi $(~vector+0x80) /* Note: always in signed byte range */ 542 pushq $(~vector+0x80) /* Note: always in signed byte range */
644 vector=vector+1 543 vector=vector+1
645 jmp common_interrupt 544 jmp common_interrupt
646 CFI_ADJUST_CFA_OFFSET -8
647 .align 8 545 .align 8
648 .endr 546 .endr
649 CFI_ENDPROC
650END(irq_entries_start) 547END(irq_entries_start)
651 548
652/* 549/*
@@ -688,17 +585,7 @@ END(irq_entries_start)
688 movq %rsp, %rsi 585 movq %rsp, %rsi
689 incl PER_CPU_VAR(irq_count) 586 incl PER_CPU_VAR(irq_count)
690 cmovzq PER_CPU_VAR(irq_stack_ptr),%rsp 587 cmovzq PER_CPU_VAR(irq_stack_ptr),%rsp
691 CFI_DEF_CFA_REGISTER rsi
692 pushq %rsi 588 pushq %rsi
693 /*
694 * For debugger:
695 * "CFA (Current Frame Address) is the value on stack + offset"
696 */
697 CFI_ESCAPE 0x0f /* DW_CFA_def_cfa_expression */, 6, \
698 0x77 /* DW_OP_breg7 (rsp) */, 0, \
699 0x06 /* DW_OP_deref */, \
700 0x08 /* DW_OP_const1u */, SIZEOF_PTREGS-RBP, \
701 0x22 /* DW_OP_plus */
702 /* We entered an interrupt context - irqs are off: */ 589 /* We entered an interrupt context - irqs are off: */
703 TRACE_IRQS_OFF 590 TRACE_IRQS_OFF
704 591
@@ -711,7 +598,6 @@ END(irq_entries_start)
711 */ 598 */
712 .p2align CONFIG_X86_L1_CACHE_SHIFT 599 .p2align CONFIG_X86_L1_CACHE_SHIFT
713common_interrupt: 600common_interrupt:
714 XCPT_FRAME
715 ASM_CLAC 601 ASM_CLAC
716 addq $-0x80,(%rsp) /* Adjust vector to [-256,-1] range */ 602 addq $-0x80,(%rsp) /* Adjust vector to [-256,-1] range */
717 interrupt do_IRQ 603 interrupt do_IRQ
@@ -723,16 +609,13 @@ ret_from_intr:
723 609
724 /* Restore saved previous stack */ 610 /* Restore saved previous stack */
725 popq %rsi 611 popq %rsi
726 CFI_DEF_CFA rsi,SIZEOF_PTREGS-RBP /* reg/off reset after def_cfa_expr */
727 /* return code expects complete pt_regs - adjust rsp accordingly: */ 612 /* return code expects complete pt_regs - adjust rsp accordingly: */
728 leaq -RBP(%rsi),%rsp 613 leaq -RBP(%rsi),%rsp
729 CFI_DEF_CFA_REGISTER rsp
730 CFI_ADJUST_CFA_OFFSET RBP
731 614
732 testb $3, CS(%rsp) 615 testb $3, CS(%rsp)
733 jz retint_kernel 616 jz retint_kernel
734 /* Interrupt came from user space */ 617 /* Interrupt came from user space */
735 618retint_user:
736 GET_THREAD_INFO(%rcx) 619 GET_THREAD_INFO(%rcx)
737 /* 620 /*
738 * %rcx: thread info. Interrupts off. 621 * %rcx: thread info. Interrupts off.
@@ -743,7 +626,6 @@ retint_check:
743 LOCKDEP_SYS_EXIT_IRQ 626 LOCKDEP_SYS_EXIT_IRQ
744 movl TI_flags(%rcx),%edx 627 movl TI_flags(%rcx),%edx
745 andl %edi,%edx 628 andl %edi,%edx
746 CFI_REMEMBER_STATE
747 jnz retint_careful 629 jnz retint_careful
748 630
749retint_swapgs: /* return to user-space */ 631retint_swapgs: /* return to user-space */
@@ -781,8 +663,6 @@ retint_kernel:
781restore_c_regs_and_iret: 663restore_c_regs_and_iret:
782 RESTORE_C_REGS 664 RESTORE_C_REGS
783 REMOVE_PT_GPREGS_FROM_STACK 8 665 REMOVE_PT_GPREGS_FROM_STACK 8
784
785irq_return:
786 INTERRUPT_RETURN 666 INTERRUPT_RETURN
787 667
788ENTRY(native_iret) 668ENTRY(native_iret)
@@ -807,8 +687,8 @@ native_irq_return_iret:
807 687
808#ifdef CONFIG_X86_ESPFIX64 688#ifdef CONFIG_X86_ESPFIX64
809native_irq_return_ldt: 689native_irq_return_ldt:
810 pushq_cfi %rax 690 pushq %rax
811 pushq_cfi %rdi 691 pushq %rdi
812 SWAPGS 692 SWAPGS
813 movq PER_CPU_VAR(espfix_waddr),%rdi 693 movq PER_CPU_VAR(espfix_waddr),%rdi
814 movq %rax,(0*8)(%rdi) /* RAX */ 694 movq %rax,(0*8)(%rdi) /* RAX */
@@ -823,24 +703,23 @@ native_irq_return_ldt:
823 movq (5*8)(%rsp),%rax /* RSP */ 703 movq (5*8)(%rsp),%rax /* RSP */
824 movq %rax,(4*8)(%rdi) 704 movq %rax,(4*8)(%rdi)
825 andl $0xffff0000,%eax 705 andl $0xffff0000,%eax
826 popq_cfi %rdi 706 popq %rdi
827 orq PER_CPU_VAR(espfix_stack),%rax 707 orq PER_CPU_VAR(espfix_stack),%rax
828 SWAPGS 708 SWAPGS
829 movq %rax,%rsp 709 movq %rax,%rsp
830 popq_cfi %rax 710 popq %rax
831 jmp native_irq_return_iret 711 jmp native_irq_return_iret
832#endif 712#endif
833 713
834 /* edi: workmask, edx: work */ 714 /* edi: workmask, edx: work */
835retint_careful: 715retint_careful:
836 CFI_RESTORE_STATE
837 bt $TIF_NEED_RESCHED,%edx 716 bt $TIF_NEED_RESCHED,%edx
838 jnc retint_signal 717 jnc retint_signal
839 TRACE_IRQS_ON 718 TRACE_IRQS_ON
840 ENABLE_INTERRUPTS(CLBR_NONE) 719 ENABLE_INTERRUPTS(CLBR_NONE)
841 pushq_cfi %rdi 720 pushq %rdi
842 SCHEDULE_USER 721 SCHEDULE_USER
843 popq_cfi %rdi 722 popq %rdi
844 GET_THREAD_INFO(%rcx) 723 GET_THREAD_INFO(%rcx)
845 DISABLE_INTERRUPTS(CLBR_NONE) 724 DISABLE_INTERRUPTS(CLBR_NONE)
846 TRACE_IRQS_OFF 725 TRACE_IRQS_OFF
@@ -862,7 +741,6 @@ retint_signal:
862 GET_THREAD_INFO(%rcx) 741 GET_THREAD_INFO(%rcx)
863 jmp retint_with_reschedule 742 jmp retint_with_reschedule
864 743
865 CFI_ENDPROC
866END(common_interrupt) 744END(common_interrupt)
867 745
868/* 746/*
@@ -870,13 +748,11 @@ END(common_interrupt)
870 */ 748 */
871.macro apicinterrupt3 num sym do_sym 749.macro apicinterrupt3 num sym do_sym
872ENTRY(\sym) 750ENTRY(\sym)
873 INTR_FRAME
874 ASM_CLAC 751 ASM_CLAC
875 pushq_cfi $~(\num) 752 pushq $~(\num)
876.Lcommon_\sym: 753.Lcommon_\sym:
877 interrupt \do_sym 754 interrupt \do_sym
878 jmp ret_from_intr 755 jmp ret_from_intr
879 CFI_ENDPROC
880END(\sym) 756END(\sym)
881.endm 757.endm
882 758
@@ -966,24 +842,17 @@ ENTRY(\sym)
966 .error "using shift_ist requires paranoid=1" 842 .error "using shift_ist requires paranoid=1"
967 .endif 843 .endif
968 844
969 .if \has_error_code
970 XCPT_FRAME
971 .else
972 INTR_FRAME
973 .endif
974
975 ASM_CLAC 845 ASM_CLAC
976 PARAVIRT_ADJUST_EXCEPTION_FRAME 846 PARAVIRT_ADJUST_EXCEPTION_FRAME
977 847
978 .ifeq \has_error_code 848 .ifeq \has_error_code
979 pushq_cfi $-1 /* ORIG_RAX: no syscall to restart */ 849 pushq $-1 /* ORIG_RAX: no syscall to restart */
980 .endif 850 .endif
981 851
982 ALLOC_PT_GPREGS_ON_STACK 852 ALLOC_PT_GPREGS_ON_STACK
983 853
984 .if \paranoid 854 .if \paranoid
985 .if \paranoid == 1 855 .if \paranoid == 1
986 CFI_REMEMBER_STATE
987 testb $3, CS(%rsp) /* If coming from userspace, switch */ 856 testb $3, CS(%rsp) /* If coming from userspace, switch */
988 jnz 1f /* stacks. */ 857 jnz 1f /* stacks. */
989 .endif 858 .endif
@@ -993,8 +862,6 @@ ENTRY(\sym)
993 .endif 862 .endif
994 /* returned flag: ebx=0: need swapgs on exit, ebx=1: don't need it */ 863 /* returned flag: ebx=0: need swapgs on exit, ebx=1: don't need it */
995 864
996 DEFAULT_FRAME 0
997
998 .if \paranoid 865 .if \paranoid
999 .if \shift_ist != -1 866 .if \shift_ist != -1
1000 TRACE_IRQS_OFF_DEBUG /* reload IDT in case of recursion */ 867 TRACE_IRQS_OFF_DEBUG /* reload IDT in case of recursion */
@@ -1030,7 +897,6 @@ ENTRY(\sym)
1030 .endif 897 .endif
1031 898
1032 .if \paranoid == 1 899 .if \paranoid == 1
1033 CFI_RESTORE_STATE
1034 /* 900 /*
1035 * Paranoid entry from userspace. Switch stacks and treat it 901 * Paranoid entry from userspace. Switch stacks and treat it
1036 * as a normal entry. This means that paranoid handlers 902 * as a normal entry. This means that paranoid handlers
@@ -1039,7 +905,6 @@ ENTRY(\sym)
10391: 9051:
1040 call error_entry 906 call error_entry
1041 907
1042 DEFAULT_FRAME 0
1043 908
1044 movq %rsp,%rdi /* pt_regs pointer */ 909 movq %rsp,%rdi /* pt_regs pointer */
1045 call sync_regs 910 call sync_regs
@@ -1058,8 +923,6 @@ ENTRY(\sym)
1058 923
1059 jmp error_exit /* %ebx: no swapgs flag */ 924 jmp error_exit /* %ebx: no swapgs flag */
1060 .endif 925 .endif
1061
1062 CFI_ENDPROC
1063END(\sym) 926END(\sym)
1064.endm 927.endm
1065 928
@@ -1092,17 +955,15 @@ idtentry simd_coprocessor_error do_simd_coprocessor_error has_error_code=0
1092 /* Reload gs selector with exception handling */ 955 /* Reload gs selector with exception handling */
1093 /* edi: new selector */ 956 /* edi: new selector */
1094ENTRY(native_load_gs_index) 957ENTRY(native_load_gs_index)
1095 CFI_STARTPROC 958 pushfq
1096 pushfq_cfi
1097 DISABLE_INTERRUPTS(CLBR_ANY & ~CLBR_RDI) 959 DISABLE_INTERRUPTS(CLBR_ANY & ~CLBR_RDI)
1098 SWAPGS 960 SWAPGS
1099gs_change: 961gs_change:
1100 movl %edi,%gs 962 movl %edi,%gs
11012: mfence /* workaround */ 9632: mfence /* workaround */
1102 SWAPGS 964 SWAPGS
1103 popfq_cfi 965 popfq
1104 ret 966 ret
1105 CFI_ENDPROC
1106END(native_load_gs_index) 967END(native_load_gs_index)
1107 968
1108 _ASM_EXTABLE(gs_change,bad_gs) 969 _ASM_EXTABLE(gs_change,bad_gs)
@@ -1117,22 +978,15 @@ bad_gs:
1117 978
1118/* Call softirq on interrupt stack. Interrupts are off. */ 979/* Call softirq on interrupt stack. Interrupts are off. */
1119ENTRY(do_softirq_own_stack) 980ENTRY(do_softirq_own_stack)
1120 CFI_STARTPROC 981 pushq %rbp
1121 pushq_cfi %rbp
1122 CFI_REL_OFFSET rbp,0
1123 mov %rsp,%rbp 982 mov %rsp,%rbp
1124 CFI_DEF_CFA_REGISTER rbp
1125 incl PER_CPU_VAR(irq_count) 983 incl PER_CPU_VAR(irq_count)
1126 cmove PER_CPU_VAR(irq_stack_ptr),%rsp 984 cmove PER_CPU_VAR(irq_stack_ptr),%rsp
1127 push %rbp # backlink for old unwinder 985 push %rbp # backlink for old unwinder
1128 call __do_softirq 986 call __do_softirq
1129 leaveq 987 leaveq
1130 CFI_RESTORE rbp
1131 CFI_DEF_CFA_REGISTER rsp
1132 CFI_ADJUST_CFA_OFFSET -8
1133 decl PER_CPU_VAR(irq_count) 988 decl PER_CPU_VAR(irq_count)
1134 ret 989 ret
1135 CFI_ENDPROC
1136END(do_softirq_own_stack) 990END(do_softirq_own_stack)
1137 991
1138#ifdef CONFIG_XEN 992#ifdef CONFIG_XEN
@@ -1152,28 +1006,22 @@ idtentry xen_hypervisor_callback xen_do_hypervisor_callback has_error_code=0
1152 * activation and restart the handler using the previous one. 1006 * activation and restart the handler using the previous one.
1153 */ 1007 */
1154ENTRY(xen_do_hypervisor_callback) # do_hypervisor_callback(struct *pt_regs) 1008ENTRY(xen_do_hypervisor_callback) # do_hypervisor_callback(struct *pt_regs)
1155 CFI_STARTPROC
1156/* 1009/*
1157 * Since we don't modify %rdi, evtchn_do_upcall(struct *pt_regs) will 1010
1158 * see the correct pointer to the pt_regs 1011 * see the correct pointer to the pt_regs
1159 */ 1012 */
1160 movq %rdi, %rsp # we don't return, adjust the stack frame 1013 movq %rdi, %rsp # we don't return, adjust the stack frame
1161 CFI_ENDPROC
1162 DEFAULT_FRAME
116311: incl PER_CPU_VAR(irq_count) 101411: incl PER_CPU_VAR(irq_count)
1164 movq %rsp,%rbp 1015 movq %rsp,%rbp
1165 CFI_DEF_CFA_REGISTER rbp
1166 cmovzq PER_CPU_VAR(irq_stack_ptr),%rsp 1016 cmovzq PER_CPU_VAR(irq_stack_ptr),%rsp
1167 pushq %rbp # backlink for old unwinder 1017 pushq %rbp # backlink for old unwinder
1168 call xen_evtchn_do_upcall 1018 call xen_evtchn_do_upcall
1169 popq %rsp 1019 popq %rsp
1170 CFI_DEF_CFA_REGISTER rsp
1171 decl PER_CPU_VAR(irq_count) 1020 decl PER_CPU_VAR(irq_count)
1172#ifndef CONFIG_PREEMPT 1021#ifndef CONFIG_PREEMPT
1173 call xen_maybe_preempt_hcall 1022 call xen_maybe_preempt_hcall
1174#endif 1023#endif
1175 jmp error_exit 1024 jmp error_exit
1176 CFI_ENDPROC
1177END(xen_do_hypervisor_callback) 1025END(xen_do_hypervisor_callback)
1178 1026
1179/* 1027/*
@@ -1190,16 +1038,8 @@ END(xen_do_hypervisor_callback)
1190 * with its current contents: any discrepancy means we are in category 1. 1038
1191 */ 1039 */
1192ENTRY(xen_failsafe_callback) 1040ENTRY(xen_failsafe_callback)
1193 INTR_FRAME 1 (6*8)
1194 /*CFI_REL_OFFSET gs,GS*/
1195 /*CFI_REL_OFFSET fs,FS*/
1196 /*CFI_REL_OFFSET es,ES*/
1197 /*CFI_REL_OFFSET ds,DS*/
1198 CFI_REL_OFFSET r11,8
1199 CFI_REL_OFFSET rcx,0
1200 movl %ds,%ecx 1041 movl %ds,%ecx
1201 cmpw %cx,0x10(%rsp) 1042 cmpw %cx,0x10(%rsp)
1202 CFI_REMEMBER_STATE
1203 jne 1f 1043 jne 1f
1204 movl %es,%ecx 1044 movl %es,%ecx
1205 cmpw %cx,0x18(%rsp) 1045 cmpw %cx,0x18(%rsp)
@@ -1212,29 +1052,21 @@ ENTRY(xen_failsafe_callback)
1212 jne 1f 1052 jne 1f
1213 /* All segments match their saved values => Category 2 (Bad IRET). */ 1053 /* All segments match their saved values => Category 2 (Bad IRET). */
1214 movq (%rsp),%rcx 1054 movq (%rsp),%rcx
1215 CFI_RESTORE rcx
1216 movq 8(%rsp),%r11 1055 movq 8(%rsp),%r11
1217 CFI_RESTORE r11
1218 addq $0x30,%rsp 1056 addq $0x30,%rsp
1219 CFI_ADJUST_CFA_OFFSET -0x30 1057 pushq $0 /* RIP */
1220 pushq_cfi $0 /* RIP */ 1058 pushq %r11
1221 pushq_cfi %r11 1059 pushq %rcx
1222 pushq_cfi %rcx
1223 jmp general_protection 1060 jmp general_protection
1224 CFI_RESTORE_STATE
12251: /* Segment mismatch => Category 1 (Bad segment). Retry the IRET. */ 10611: /* Segment mismatch => Category 1 (Bad segment). Retry the IRET. */
1226 movq (%rsp),%rcx 1062 movq (%rsp),%rcx
1227 CFI_RESTORE rcx
1228 movq 8(%rsp),%r11 1063 movq 8(%rsp),%r11
1229 CFI_RESTORE r11
1230 addq $0x30,%rsp 1064 addq $0x30,%rsp
1231 CFI_ADJUST_CFA_OFFSET -0x30 1065 pushq $-1 /* orig_ax = -1 => not a system call */
1232 pushq_cfi $-1 /* orig_ax = -1 => not a system call */
1233 ALLOC_PT_GPREGS_ON_STACK 1066 ALLOC_PT_GPREGS_ON_STACK
1234 SAVE_C_REGS 1067 SAVE_C_REGS
1235 SAVE_EXTRA_REGS 1068 SAVE_EXTRA_REGS
1236 jmp error_exit 1069 jmp error_exit
1237 CFI_ENDPROC
1238END(xen_failsafe_callback) 1070END(xen_failsafe_callback)
1239 1071
1240apicinterrupt3 HYPERVISOR_CALLBACK_VECTOR \ 1072apicinterrupt3 HYPERVISOR_CALLBACK_VECTOR \
@@ -1270,7 +1102,6 @@ idtentry machine_check has_error_code=0 paranoid=1 do_sym=*machine_check_vector(
1270 * Return: ebx=0: need swapgs on exit, ebx=1: otherwise 1102 * Return: ebx=0: need swapgs on exit, ebx=1: otherwise
1271 */ 1103 */
1272ENTRY(paranoid_entry) 1104ENTRY(paranoid_entry)
1273 XCPT_FRAME 1 15*8
1274 cld 1105 cld
1275 SAVE_C_REGS 8 1106 SAVE_C_REGS 8
1276 SAVE_EXTRA_REGS 8 1107 SAVE_EXTRA_REGS 8
@@ -1282,7 +1113,6 @@ ENTRY(paranoid_entry)
1282 SWAPGS 1113 SWAPGS
1283 xorl %ebx,%ebx 1114 xorl %ebx,%ebx
12841: ret 11151: ret
1285 CFI_ENDPROC
1286END(paranoid_entry) 1116END(paranoid_entry)
1287 1117
1288/* 1118/*
@@ -1297,7 +1127,6 @@ END(paranoid_entry)
1297 */ 1127 */
1298/* On entry, ebx is "no swapgs" flag (1: don't need swapgs, 0: need it) */ 1128/* On entry, ebx is "no swapgs" flag (1: don't need swapgs, 0: need it) */
1299ENTRY(paranoid_exit) 1129ENTRY(paranoid_exit)
1300 DEFAULT_FRAME
1301 DISABLE_INTERRUPTS(CLBR_NONE) 1130 DISABLE_INTERRUPTS(CLBR_NONE)
1302 TRACE_IRQS_OFF_DEBUG 1131 TRACE_IRQS_OFF_DEBUG
1303 testl %ebx,%ebx /* swapgs needed? */ 1132 testl %ebx,%ebx /* swapgs needed? */
@@ -1312,7 +1141,6 @@ paranoid_exit_restore:
1312 RESTORE_C_REGS 1141 RESTORE_C_REGS
1313 REMOVE_PT_GPREGS_FROM_STACK 8 1142 REMOVE_PT_GPREGS_FROM_STACK 8
1314 INTERRUPT_RETURN 1143 INTERRUPT_RETURN
1315 CFI_ENDPROC
1316END(paranoid_exit) 1144END(paranoid_exit)
1317 1145
1318/* 1146/*
@@ -1320,7 +1148,6 @@ END(paranoid_exit)
1320 * Return: ebx=0: need swapgs on exit, ebx=1: otherwise 1148 * Return: ebx=0: need swapgs on exit, ebx=1: otherwise
1321 */ 1149 */
1322ENTRY(error_entry) 1150ENTRY(error_entry)
1323 XCPT_FRAME 1 15*8
1324 cld 1151 cld
1325 SAVE_C_REGS 8 1152 SAVE_C_REGS 8
1326 SAVE_EXTRA_REGS 8 1153 SAVE_EXTRA_REGS 8
@@ -1340,7 +1167,6 @@ error_sti:
1340 * for these here too. 1167 * for these here too.
1341 */ 1168 */
1342error_kernelspace: 1169error_kernelspace:
1343 CFI_REL_OFFSET rcx, RCX+8
1344 incl %ebx 1170 incl %ebx
1345 leaq native_irq_return_iret(%rip),%rcx 1171 leaq native_irq_return_iret(%rip),%rcx
1346 cmpq %rcx,RIP+8(%rsp) 1172 cmpq %rcx,RIP+8(%rsp)
@@ -1364,32 +1190,22 @@ error_bad_iret:
1364 mov %rax,%rsp 1190 mov %rax,%rsp
1365 decl %ebx /* Return to usergs */ 1191 decl %ebx /* Return to usergs */
1366 jmp error_sti 1192 jmp error_sti
1367 CFI_ENDPROC
1368END(error_entry) 1193END(error_entry)
1369 1194
1370 1195
1371/* On entry, ebx is "no swapgs" flag (1: don't need swapgs, 0: need it) */ 1196/* On entry, ebx is "no swapgs" flag (1: don't need swapgs, 0: need it) */
1372ENTRY(error_exit) 1197ENTRY(error_exit)
1373 DEFAULT_FRAME
1374 movl %ebx,%eax 1198 movl %ebx,%eax
1375 RESTORE_EXTRA_REGS 1199 RESTORE_EXTRA_REGS
1376 DISABLE_INTERRUPTS(CLBR_NONE) 1200 DISABLE_INTERRUPTS(CLBR_NONE)
1377 TRACE_IRQS_OFF 1201 TRACE_IRQS_OFF
1378 GET_THREAD_INFO(%rcx)
1379 testl %eax,%eax 1202 testl %eax,%eax
1380 jnz retint_kernel 1203 jnz retint_kernel
1381 LOCKDEP_SYS_EXIT_IRQ 1204 jmp retint_user
1382 movl TI_flags(%rcx),%edx
1383 movl $_TIF_WORK_MASK,%edi
1384 andl %edi,%edx
1385 jnz retint_careful
1386 jmp retint_swapgs
1387 CFI_ENDPROC
1388END(error_exit) 1205END(error_exit)
1389 1206
1390/* Runs on exception stack */ 1207/* Runs on exception stack */
1391ENTRY(nmi) 1208ENTRY(nmi)
1392 INTR_FRAME
1393 PARAVIRT_ADJUST_EXCEPTION_FRAME 1209 PARAVIRT_ADJUST_EXCEPTION_FRAME
1394 /* 1210 /*
1395 * We allow breakpoints in NMIs. If a breakpoint occurs, then 1211 * We allow breakpoints in NMIs. If a breakpoint occurs, then
@@ -1424,8 +1240,7 @@ ENTRY(nmi)
1424 */ 1240 */
1425 1241
1426 /* Use %rdx as our temp variable throughout */ 1242 /* Use %rdx as our temp variable throughout */
1427 pushq_cfi %rdx 1243 pushq %rdx
1428 CFI_REL_OFFSET rdx, 0
1429 1244
1430 /* 1245 /*
1431 * If %cs was not the kernel segment, then the NMI triggered in user 1246 * If %cs was not the kernel segment, then the NMI triggered in user
@@ -1459,8 +1274,6 @@ ENTRY(nmi)
1459 jb first_nmi 1274 jb first_nmi
1460 /* Ah, it is within the NMI stack, treat it as nested */ 1275 /* Ah, it is within the NMI stack, treat it as nested */
1461 1276
1462 CFI_REMEMBER_STATE
1463
1464nested_nmi: 1277nested_nmi:
1465 /* 1278 /*
1466 * Do nothing if we interrupted the fixup in repeat_nmi. 1279 * Do nothing if we interrupted the fixup in repeat_nmi.
@@ -1478,26 +1291,22 @@ nested_nmi:
1478 /* Set up the interrupted NMI's stack to jump to repeat_nmi */ 1291
1479 leaq -1*8(%rsp), %rdx 1292 leaq -1*8(%rsp), %rdx
1480 movq %rdx, %rsp 1293 movq %rdx, %rsp
1481 CFI_ADJUST_CFA_OFFSET 1*8
1482 leaq -10*8(%rsp), %rdx 1294 leaq -10*8(%rsp), %rdx
1483 pushq_cfi $__KERNEL_DS 1295 pushq $__KERNEL_DS
1484 pushq_cfi %rdx 1296 pushq %rdx
1485 pushfq_cfi 1297 pushfq
1486 pushq_cfi $__KERNEL_CS 1298 pushq $__KERNEL_CS
1487 pushq_cfi $repeat_nmi 1299 pushq $repeat_nmi
1488 1300
1489 /* Put stack back */ 1301 /* Put stack back */
1490 addq $(6*8), %rsp 1302 addq $(6*8), %rsp
1491 CFI_ADJUST_CFA_OFFSET -6*8
1492 1303
1493nested_nmi_out: 1304nested_nmi_out:
1494 popq_cfi %rdx 1305 popq %rdx
1495 CFI_RESTORE rdx
1496 1306
1497 /* No need to check faults here */ 1307 /* No need to check faults here */
1498 INTERRUPT_RETURN 1308 INTERRUPT_RETURN
1499 1309
1500 CFI_RESTORE_STATE
1501first_nmi: 1310first_nmi:
1502 /* 1311 /*
1503 * Because nested NMIs will use the pushed location that we 1312 * Because nested NMIs will use the pushed location that we
@@ -1536,22 +1345,19 @@ first_nmi:
1536 */ 1345 */
1537 /* Do not pop rdx, nested NMIs will corrupt that part of the stack */ 1346 /* Do not pop rdx, nested NMIs will corrupt that part of the stack */
1538 movq (%rsp), %rdx 1347 movq (%rsp), %rdx
1539 CFI_RESTORE rdx
1540 1348
1541 /* Set the NMI executing variable on the stack. */ 1349 /* Set the NMI executing variable on the stack. */
1542 pushq_cfi $1 1350 pushq $1
1543 1351
1544 /* 1352 /*
1545 * Leave room for the "copied" frame 1353 * Leave room for the "copied" frame
1546 */ 1354 */
1547 subq $(5*8), %rsp 1355 subq $(5*8), %rsp
1548 CFI_ADJUST_CFA_OFFSET 5*8
1549 1356
1550 /* Copy the stack frame to the Saved frame */ 1357 /* Copy the stack frame to the Saved frame */
1551 .rept 5 1358 .rept 5
1552 pushq_cfi 11*8(%rsp) 1359 pushq 11*8(%rsp)
1553 .endr 1360 .endr
1554 CFI_DEF_CFA_OFFSET 5*8
1555 1361
1556 /* Everything up to here is safe from nested NMIs */ 1362 /* Everything up to here is safe from nested NMIs */
1557 1363
@@ -1574,12 +1380,10 @@ repeat_nmi:
1574 1380
1575 /* Make another copy, this one may be modified by nested NMIs */ 1381 /* Make another copy, this one may be modified by nested NMIs */
1576 addq $(10*8), %rsp 1382 addq $(10*8), %rsp
1577 CFI_ADJUST_CFA_OFFSET -10*8
1578 .rept 5 1383 .rept 5
1579 pushq_cfi -6*8(%rsp) 1384 pushq -6*8(%rsp)
1580 .endr 1385 .endr
1581 subq $(5*8), %rsp 1386 subq $(5*8), %rsp
1582 CFI_DEF_CFA_OFFSET 5*8
1583end_repeat_nmi: 1387end_repeat_nmi:
1584 1388
1585 /* 1389 /*
@@ -1587,7 +1391,7 @@ end_repeat_nmi:
1587 * NMI if the first NMI took an exception and reset our iret stack 1391 * NMI if the first NMI took an exception and reset our iret stack
1588 * so that we repeat another NMI. 1392 * so that we repeat another NMI.
1589 */ 1393 */
1590 pushq_cfi $-1 /* ORIG_RAX: no syscall to restart */ 1394 pushq $-1 /* ORIG_RAX: no syscall to restart */
1591 ALLOC_PT_GPREGS_ON_STACK 1395 ALLOC_PT_GPREGS_ON_STACK
1592 1396
1593 /* 1397 /*
@@ -1598,7 +1402,6 @@ end_repeat_nmi:
1598 * exceptions might do. 1402 * exceptions might do.
1599 */ 1403 */
1600 call paranoid_entry 1404 call paranoid_entry
1601 DEFAULT_FRAME 0
1602 1405
1603 /* 1406 /*
1604 * Save off the CR2 register. If we take a page fault in the NMI then 1407 * Save off the CR2 register. If we take a page fault in the NMI then
@@ -1634,14 +1437,11 @@ nmi_restore:
1634 1437
1635 /* Clear the NMI executing stack variable */ 1438 /* Clear the NMI executing stack variable */
1636 movq $0, 5*8(%rsp) 1439 movq $0, 5*8(%rsp)
1637 jmp irq_return 1440 INTERRUPT_RETURN
1638 CFI_ENDPROC
1639END(nmi) 1441END(nmi)
1640 1442
1641ENTRY(ignore_sysret) 1443ENTRY(ignore_sysret)
1642 CFI_STARTPROC
1643 mov $-ENOSYS,%eax 1444 mov $-ENOSYS,%eax
1644 sysret 1445 sysret
1645 CFI_ENDPROC
1646END(ignore_sysret) 1446END(ignore_sysret)
1647 1447
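Most of the entry_64.S churn above is mechanical: the DWARF CFI wrapper macros (pushq_cfi, pushq_cfi_reg, popq_cfi, CFI_*) are dropped in favour of plain push/pop. For reference, the removed wrappers from <asm/dwarf2.h> expanded to approximately the following (a sketch, not part of this commit):

	.macro pushq_cfi reg
	pushq	\reg
	CFI_ADJUST_CFA_OFFSET 8		/* the CFA is now one 8-byte slot further away */
	.endm

	.macro pushq_cfi_reg reg
	pushq	%\reg
	CFI_ADJUST_CFA_OFFSET 8
	CFI_REL_OFFSET \reg, 0		/* record where \reg was saved */
	.endm

	.macro popq_cfi reg
	popq	\reg
	CFI_ADJUST_CFA_OFFSET -8
	.endm

The CFI_* helpers in turn emitted .cfi_adjust_cfa_offset / .cfi_rel_offset when the assembler supported them and nothing otherwise, so removing the wrappers does not change the generated instructions; it only drops the DWARF unwind annotations.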
diff --git a/arch/x86/entry/entry_64_compat.S b/arch/x86/entry/entry_64_compat.S
new file mode 100644
index 000000000000..59840e33d203
--- /dev/null
+++ b/arch/x86/entry/entry_64_compat.S
@@ -0,0 +1,547 @@
1/*
2 * Compatibility mode system call entry point for x86-64.
3 *
4 * Copyright 2000-2002 Andi Kleen, SuSE Labs.
5 */
6#include "calling.h"
7#include <asm/asm-offsets.h>
8#include <asm/current.h>
9#include <asm/errno.h>
10#include <asm/ia32_unistd.h>
11#include <asm/thread_info.h>
12#include <asm/segment.h>
13#include <asm/irqflags.h>
14#include <asm/asm.h>
15#include <asm/smap.h>
16#include <linux/linkage.h>
17#include <linux/err.h>
18
19/* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */
20#include <linux/elf-em.h>
21#define AUDIT_ARCH_I386 (EM_386|__AUDIT_ARCH_LE)
22#define __AUDIT_ARCH_LE 0x40000000
23
24#ifndef CONFIG_AUDITSYSCALL
25# define sysexit_audit ia32_ret_from_sys_call
26# define sysretl_audit ia32_ret_from_sys_call
27#endif
28
29 .section .entry.text, "ax"
30
31#ifdef CONFIG_PARAVIRT
32ENTRY(native_usergs_sysret32)
33 swapgs
34 sysretl
35ENDPROC(native_usergs_sysret32)
36#endif
37
38/*
39 * 32-bit SYSENTER instruction entry.
40 *
41 * SYSENTER loads ss, rsp, cs, and rip from previously programmed MSRs.
42 * IF and VM in rflags are cleared (IOW: interrupts are off).
43 * SYSENTER does not save anything on the stack,
44 * and does not save old rip (!!!) and rflags.
45 *
46 * Arguments:
47 * eax system call number
48 * ebx arg1
49 * ecx arg2
50 * edx arg3
51 * esi arg4
52 * edi arg5
53 * ebp user stack
54 * 0(%ebp) arg6
55 *
56 * This is purely a fast path. For anything complicated we use the int 0x80
57 * path below. We set up a complete hardware stack frame to share code
58 * with the int 0x80 path.
59 */
60ENTRY(entry_SYSENTER_compat)
61 /*
62 * Interrupts are off on entry.
63 * We do not frame this tiny irq-off block with TRACE_IRQS_OFF/ON,
64 * it is too small to ever cause noticeable irq latency.
65 */
66 SWAPGS_UNSAFE_STACK
67 movq PER_CPU_VAR(cpu_current_top_of_stack), %rsp
68 ENABLE_INTERRUPTS(CLBR_NONE)
69
70 /* Zero-extending 32-bit regs, do not remove */
71 movl %ebp, %ebp
72 movl %eax, %eax
73
74 movl ASM_THREAD_INFO(TI_sysenter_return, %rsp, 0), %r10d
75
76 /* Construct struct pt_regs on stack */
77 pushq $__USER32_DS /* pt_regs->ss */
78 pushq %rbp /* pt_regs->sp */
79 pushfq /* pt_regs->flags */
80 pushq $__USER32_CS /* pt_regs->cs */
81 pushq %r10 /* pt_regs->ip = thread_info->sysenter_return */
82 pushq %rax /* pt_regs->orig_ax */
83 pushq %rdi /* pt_regs->di */
84 pushq %rsi /* pt_regs->si */
85 pushq %rdx /* pt_regs->dx */
86 pushq %rcx /* pt_regs->cx */
87 pushq $-ENOSYS /* pt_regs->ax */
88 cld
89 sub $(10*8), %rsp /* pt_regs->r8-11, bp, bx, r12-15 not saved */
90
91 /*
92 * no need to do an access_ok check here because rbp has been
93 * 32-bit zero extended
94 */
95 ASM_STAC
961: movl (%rbp), %ebp
97 _ASM_EXTABLE(1b, ia32_badarg)
98 ASM_CLAC
99
100 /*
101 * Sysenter doesn't filter flags, so we need to clear NT
102 * ourselves. To save a few cycles, we can check whether
103 * NT was set instead of doing an unconditional popfq.
104 */
105 testl $X86_EFLAGS_NT, EFLAGS(%rsp)
106 jnz sysenter_fix_flags
107sysenter_flags_fixed:
108
109 orl $TS_COMPAT, ASM_THREAD_INFO(TI_status, %rsp, SIZEOF_PTREGS)
110 testl $_TIF_WORK_SYSCALL_ENTRY, ASM_THREAD_INFO(TI_flags, %rsp, SIZEOF_PTREGS)
111 jnz sysenter_tracesys
112
113sysenter_do_call:
114 /* 32-bit syscall -> 64-bit C ABI argument conversion */
115 movl %edi, %r8d /* arg5 */
116 movl %ebp, %r9d /* arg6 */
117 xchg %ecx, %esi /* rsi:arg2, rcx:arg4 */
118 movl %ebx, %edi /* arg1 */
119 movl %edx, %edx /* arg3 (zero extension) */
120sysenter_dispatch:
121 cmpq $(IA32_NR_syscalls-1), %rax
122 ja 1f
123 call *ia32_sys_call_table(, %rax, 8)
124 movq %rax, RAX(%rsp)
1251:
126 DISABLE_INTERRUPTS(CLBR_NONE)
127 TRACE_IRQS_OFF
128 testl $_TIF_ALLWORK_MASK, ASM_THREAD_INFO(TI_flags, %rsp, SIZEOF_PTREGS)
129 jnz sysexit_audit
130sysexit_from_sys_call:
131 /*
132 * NB: SYSEXIT is not obviously safe for 64-bit kernels -- an
133 * NMI between STI and SYSEXIT has poorly specified behavior,
134 * and an NMI followed by an IRQ with usergs is fatal. So
135 * we just pretend we're using SYSEXIT but we really use
136 * SYSRETL instead.
137 *
138 * This code path is still called 'sysexit' because it pairs
139 * with 'sysenter' and it uses the SYSENTER calling convention.
140 */
141 andl $~TS_COMPAT, ASM_THREAD_INFO(TI_status, %rsp, SIZEOF_PTREGS)
142 movl RIP(%rsp), %ecx /* User %eip */
143 RESTORE_RSI_RDI
144 xorl %edx, %edx /* Do not leak kernel information */
145 xorq %r8, %r8
146 xorq %r9, %r9
147 xorq %r10, %r10
148 movl EFLAGS(%rsp), %r11d /* User eflags */
149 TRACE_IRQS_ON
150
151 /*
152 * SYSRETL works even on Intel CPUs. Use it in preference to SYSEXIT,
153 * since it avoids a dicey window with interrupts enabled.
154 */
155 movl RSP(%rsp), %esp
156
157 /*
158 * USERGS_SYSRET32 does:
159 * gsbase = user's gs base
160 * eip = ecx
161 * rflags = r11
162 * cs = __USER32_CS
163 * ss = __USER_DS
164 *
165 * The prologue set RIP(%rsp) to VDSO32_SYSENTER_RETURN, which does:
166 *
167 * pop %ebp
168 * pop %edx
169 * pop %ecx
170 *
171 * Therefore, we invoke SYSRETL with EDX and R8-R10 zeroed to
172 * avoid info leaks. R11 ends up with VDSO32_SYSENTER_RETURN's
173 * address (already known to user code), and R12-R15 are
174 * callee-saved and therefore don't contain any interesting
175 * kernel data.
176 */
177 USERGS_SYSRET32
178
179#ifdef CONFIG_AUDITSYSCALL
180 .macro auditsys_entry_common
181 movl %esi, %r8d /* 5th arg: 4th syscall arg */
182 movl %ecx, %r9d /* swap with edx */
183 movl %edx, %ecx /* 4th arg: 3rd syscall arg */
184 movl %r9d, %edx /* 3rd arg: 2nd syscall arg */
185 movl %ebx, %esi /* 2nd arg: 1st syscall arg */
186 movl %eax, %edi /* 1st arg: syscall number */
187 call __audit_syscall_entry
188 movl ORIG_RAX(%rsp), %eax /* reload syscall number */
189 movl %ebx, %edi /* reload 1st syscall arg */
190 movl RCX(%rsp), %esi /* reload 2nd syscall arg */
191 movl RDX(%rsp), %edx /* reload 3rd syscall arg */
192 movl RSI(%rsp), %ecx /* reload 4th syscall arg */
193 movl RDI(%rsp), %r8d /* reload 5th syscall arg */
194 .endm
195
196 .macro auditsys_exit exit
197 testl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT), ASM_THREAD_INFO(TI_flags, %rsp, SIZEOF_PTREGS)
198 jnz ia32_ret_from_sys_call
199 TRACE_IRQS_ON
200 ENABLE_INTERRUPTS(CLBR_NONE)
201 movl %eax, %esi /* second arg, syscall return value */
202 cmpl $-MAX_ERRNO, %eax /* is it an error ? */
203 jbe 1f
204 movslq %eax, %rsi /* if error sign extend to 64 bits */
2051: setbe %al /* 1 if error, 0 if not */
206 movzbl %al, %edi /* zero-extend that into %edi */
207 call __audit_syscall_exit
208 movq RAX(%rsp), %rax /* reload syscall return value */
209 movl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT), %edi
210 DISABLE_INTERRUPTS(CLBR_NONE)
211 TRACE_IRQS_OFF
212 testl %edi, ASM_THREAD_INFO(TI_flags, %rsp, SIZEOF_PTREGS)
213 jz \exit
214 xorl %eax, %eax /* Do not leak kernel information */
215 movq %rax, R11(%rsp)
216 movq %rax, R10(%rsp)
217 movq %rax, R9(%rsp)
218 movq %rax, R8(%rsp)
219 jmp int_with_check
220 .endm
221
222sysenter_auditsys:
223 auditsys_entry_common
224 movl %ebp, %r9d /* reload 6th syscall arg */
225 jmp sysenter_dispatch
226
227sysexit_audit:
228 auditsys_exit sysexit_from_sys_call
229#endif
230
231sysenter_fix_flags:
232 pushq $(X86_EFLAGS_IF|X86_EFLAGS_FIXED)
233 popfq
234 jmp sysenter_flags_fixed
235
236sysenter_tracesys:
237#ifdef CONFIG_AUDITSYSCALL
238 testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT), ASM_THREAD_INFO(TI_flags, %rsp, SIZEOF_PTREGS)
239 jz sysenter_auditsys
240#endif
241 SAVE_EXTRA_REGS
242 xorl %eax, %eax /* Do not leak kernel information */
243 movq %rax, R11(%rsp)
244 movq %rax, R10(%rsp)
245 movq %rax, R9(%rsp)
246 movq %rax, R8(%rsp)
247 movq %rsp, %rdi /* &pt_regs -> arg1 */
248 call syscall_trace_enter
249
250 /* Reload arg registers from stack. (see sysenter_tracesys) */
251 movl RCX(%rsp), %ecx
252 movl RDX(%rsp), %edx
253 movl RSI(%rsp), %esi
254 movl RDI(%rsp), %edi
255 movl %eax, %eax /* zero extension */
256
257 RESTORE_EXTRA_REGS
258 jmp sysenter_do_call
259ENDPROC(entry_SYSENTER_compat)
260
261/*
262 * 32-bit SYSCALL instruction entry.
263 *
264 * 32-bit SYSCALL saves rip to rcx, clears rflags.RF, then saves rflags to r11,
265 * then loads new ss, cs, and rip from previously programmed MSRs.
266 * rflags gets masked by a value from another MSR (so CLD and CLAC
267 * are not needed). SYSCALL does not save anything on the stack
268 * and does not change rsp.
269 *
270 * Note: rflags saving+masking-with-MSR happens only in Long mode
271 * (in legacy 32-bit mode, IF, RF and VM bits are cleared and that's it).
272 * Don't get confused: rflags saving+masking depends on Long Mode Active bit
273 * (EFER.LMA=1), NOT on bitness of userspace where SYSCALL executes
274 * or target CS descriptor's L bit (SYSCALL does not read segment descriptors).
275 *
276 * Arguments:
277 * eax system call number
278 * ecx return address
279 * ebx arg1
280 * ebp arg2 (note: not saved in the stack frame, should not be touched)
281 * edx arg3
282 * esi arg4
283 * edi arg5
284 * esp user stack
285 * 0(%esp) arg6
286 *
287 * This is purely a fast path. For anything complicated we use the int 0x80
288 * path below. We set up a complete hardware stack frame to share code
289 * with the int 0x80 path.
290 */
291ENTRY(entry_SYSCALL_compat)
292 /*
293 * Interrupts are off on entry.
294 * We do not frame this tiny irq-off block with TRACE_IRQS_OFF/ON,
295 * it is too small to ever cause noticeable irq latency.
296 */
297 SWAPGS_UNSAFE_STACK
298 movl %esp, %r8d
299 movq PER_CPU_VAR(cpu_current_top_of_stack), %rsp
300 ENABLE_INTERRUPTS(CLBR_NONE)
301
302 /* Zero-extending 32-bit regs, do not remove */
303 movl %eax, %eax
304
305 /* Construct struct pt_regs on stack */
306 pushq $__USER32_DS /* pt_regs->ss */
307 pushq %r8 /* pt_regs->sp */
308 pushq %r11 /* pt_regs->flags */
309 pushq $__USER32_CS /* pt_regs->cs */
310 pushq %rcx /* pt_regs->ip */
311 pushq %rax /* pt_regs->orig_ax */
312 pushq %rdi /* pt_regs->di */
313 pushq %rsi /* pt_regs->si */
314 pushq %rdx /* pt_regs->dx */
315 pushq %rbp /* pt_regs->cx */
316 movl %ebp, %ecx
317 pushq $-ENOSYS /* pt_regs->ax */
318 sub $(10*8), %rsp /* pt_regs->r8-11, bp, bx, r12-15 not saved */
319
320 /*
321 * No need to do an access_ok check here because r8 has been
322 * 32-bit zero extended:
323 */
324 ASM_STAC
3251: movl (%r8), %ebp
326 _ASM_EXTABLE(1b, ia32_badarg)
327 ASM_CLAC
328 orl $TS_COMPAT, ASM_THREAD_INFO(TI_status, %rsp, SIZEOF_PTREGS)
329 testl $_TIF_WORK_SYSCALL_ENTRY, ASM_THREAD_INFO(TI_flags, %rsp, SIZEOF_PTREGS)
330 jnz cstar_tracesys
331
332cstar_do_call:
333 /* 32-bit syscall -> 64-bit C ABI argument conversion */
334 movl %edi, %r8d /* arg5 */
335 movl %ebp, %r9d /* arg6 */
336 xchg %ecx, %esi /* rsi:arg2, rcx:arg4 */
337 movl %ebx, %edi /* arg1 */
338 movl %edx, %edx /* arg3 (zero extension) */
339
340cstar_dispatch:
341 cmpq $(IA32_NR_syscalls-1), %rax
342 ja 1f
343
344 call *ia32_sys_call_table(, %rax, 8)
345 movq %rax, RAX(%rsp)
3461:
347 DISABLE_INTERRUPTS(CLBR_NONE)
348 TRACE_IRQS_OFF
349 testl $_TIF_ALLWORK_MASK, ASM_THREAD_INFO(TI_flags, %rsp, SIZEOF_PTREGS)
350 jnz sysretl_audit
351
352sysretl_from_sys_call:
353 andl $~TS_COMPAT, ASM_THREAD_INFO(TI_status, %rsp, SIZEOF_PTREGS)
354 movl RCX(%rsp), %ebp
355 RESTORE_RSI_RDI_RDX
356 movl RIP(%rsp), %ecx
357 movl EFLAGS(%rsp), %r11d
358 xorq %r10, %r10
359 xorq %r9, %r9
360 xorq %r8, %r8
361 TRACE_IRQS_ON
362 movl RSP(%rsp), %esp
363 /*
364 * 64-bit->32-bit SYSRET restores eip from ecx,
365 * eflags from r11 (but RF and VM bits are forced to 0),
366 * cs and ss are loaded from MSRs.
367 * (Note: 32-bit->32-bit SYSRET is different: since r11
368 * does not exist, it merely sets eflags.IF=1).
369 *
370 * NB: On AMD CPUs with the X86_BUG_SYSRET_SS_ATTRS bug, the ss
371 * descriptor is not reinitialized. This means that we must
372 * avoid SYSRET with SS == NULL, which could happen if we schedule,
373 * exit the kernel, and re-enter using an interrupt vector. (All
374 * interrupt entries on x86_64 set SS to NULL.) We prevent that
375 * from happening by reloading SS in __switch_to.
376 */
377 USERGS_SYSRET32
378
379#ifdef CONFIG_AUDITSYSCALL
380cstar_auditsys:
381 auditsys_entry_common
382 movl %ebp, %r9d /* reload 6th syscall arg */
383 jmp cstar_dispatch
384
385sysretl_audit:
386 auditsys_exit sysretl_from_sys_call
387#endif
388
389cstar_tracesys:
390#ifdef CONFIG_AUDITSYSCALL
391 testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT), ASM_THREAD_INFO(TI_flags, %rsp, SIZEOF_PTREGS)
392 jz cstar_auditsys
393#endif
394 SAVE_EXTRA_REGS
395 xorl %eax, %eax /* Do not leak kernel information */
396 movq %rax, R11(%rsp)
397 movq %rax, R10(%rsp)
398 movq %rax, R9(%rsp)
399 movq %rax, R8(%rsp)
400 movq %rsp, %rdi /* &pt_regs -> arg1 */
401 call syscall_trace_enter
402
403 /* Reload arg registers from stack. (see sysenter_tracesys) */
404 movl RCX(%rsp), %ecx
405 movl RDX(%rsp), %edx
406 movl RSI(%rsp), %esi
407 movl RDI(%rsp), %edi
408 movl %eax, %eax /* zero extension */
409
410 RESTORE_EXTRA_REGS
411 jmp cstar_do_call
412END(entry_SYSCALL_compat)
413
414ia32_badarg:
415 ASM_CLAC
416 movq $-EFAULT, %rax
417 jmp ia32_sysret
418
419ia32_ret_from_sys_call:
420 xorl %eax, %eax /* Do not leak kernel information */
421 movq %rax, R11(%rsp)
422 movq %rax, R10(%rsp)
423 movq %rax, R9(%rsp)
424 movq %rax, R8(%rsp)
425 jmp int_ret_from_sys_call
426
427/*
428 * Emulated IA32 system calls via int 0x80.
429 *
430 * Arguments:
431 * eax system call number
432 * ebx arg1
433 * ecx arg2
434 * edx arg3
435 * esi arg4
436 * edi arg5
437 * ebp arg6 (note: not saved in the stack frame, should not be touched)
438 *
439 * Notes:
440 * Uses the same stack frame as the x86-64 version.
441 * All registers except eax must be saved (but ptrace may violate that).
442 * Arguments are zero extended. For system calls that want sign extension and
443 * take long arguments a wrapper is needed. Most calls can just be called
444 * directly.
445 * Assumes it is only called from user space and entered with interrupts off.
446 */
447
448ENTRY(entry_INT80_compat)
449 /*
450 * Interrupts are off on entry.
451 * We do not frame this tiny irq-off block with TRACE_IRQS_OFF/ON,
452 * it is too small to ever cause noticeable irq latency.
453 */
454 PARAVIRT_ADJUST_EXCEPTION_FRAME
455 SWAPGS
456 ENABLE_INTERRUPTS(CLBR_NONE)
457
458 /* Zero-extending 32-bit regs, do not remove */
459 movl %eax, %eax
460
461 /* Construct struct pt_regs on stack (iret frame is already on stack) */
462 pushq %rax /* pt_regs->orig_ax */
463 pushq %rdi /* pt_regs->di */
464 pushq %rsi /* pt_regs->si */
465 pushq %rdx /* pt_regs->dx */
466 pushq %rcx /* pt_regs->cx */
467 pushq $-ENOSYS /* pt_regs->ax */
468 pushq $0 /* pt_regs->r8 */
469 pushq $0 /* pt_regs->r9 */
470 pushq $0 /* pt_regs->r10 */
471 pushq $0 /* pt_regs->r11 */
472 cld
473 sub $(6*8), %rsp /* pt_regs->bp, bx, r12-15 not saved */
474
475 orl $TS_COMPAT, ASM_THREAD_INFO(TI_status, %rsp, SIZEOF_PTREGS)
476 testl $_TIF_WORK_SYSCALL_ENTRY, ASM_THREAD_INFO(TI_flags, %rsp, SIZEOF_PTREGS)
477 jnz ia32_tracesys
478
479ia32_do_call:
480 /* 32-bit syscall -> 64-bit C ABI argument conversion */
481 movl %edi, %r8d /* arg5 */
482 movl %ebp, %r9d /* arg6 */
483 xchg %ecx, %esi /* rsi:arg2, rcx:arg4 */
484 movl %ebx, %edi /* arg1 */
485 movl %edx, %edx /* arg3 (zero extension) */
486 cmpq $(IA32_NR_syscalls-1), %rax
487 ja 1f
488
489 call *ia32_sys_call_table(, %rax, 8) /* RIP relative */
490
491ia32_sysret:
492 movq %rax, RAX(%rsp)
4931:
494 jmp int_ret_from_sys_call
495
496ia32_tracesys:
497 SAVE_EXTRA_REGS
498 movq %rsp, %rdi /* &pt_regs -> arg1 */
499 call syscall_trace_enter
500 /*
501 * Reload arg registers from stack in case ptrace changed them.
502 * Don't reload %eax because syscall_trace_enter() returned
503 * the %rax value we should see. But do truncate it to 32 bits.
504 * If it's -1 to make us punt the syscall, then (u32)-1 is still
505 * an appropriately invalid value.
506 */
507 movl RCX(%rsp), %ecx
508 movl RDX(%rsp), %edx
509 movl RSI(%rsp), %esi
510 movl RDI(%rsp), %edi
511 movl %eax, %eax /* zero extension */
512 RESTORE_EXTRA_REGS
513 jmp ia32_do_call
514END(entry_INT80_compat)
515
516 .macro PTREGSCALL label, func
517 ALIGN
518GLOBAL(\label)
519 leaq \func(%rip), %rax
520 jmp ia32_ptregs_common
521 .endm
522
523 PTREGSCALL stub32_rt_sigreturn, sys32_rt_sigreturn
524 PTREGSCALL stub32_sigreturn, sys32_sigreturn
525 PTREGSCALL stub32_fork, sys_fork
526 PTREGSCALL stub32_vfork, sys_vfork
527
528 ALIGN
529GLOBAL(stub32_clone)
530 leaq sys_clone(%rip), %rax
531 /*
532 * The 32-bit clone ABI is: clone(..., int tls_val, int *child_tidptr).
533 * The 64-bit clone ABI is: clone(..., int *child_tidptr, int tls_val).
534 *
535 * The native 64-bit kernel's sys_clone() implements the latter,
536 * so we need to swap arguments here before calling it:
537 */
538 xchg %r8, %rcx
539 jmp ia32_ptregs_common
540
541 ALIGN
542ia32_ptregs_common:
543 SAVE_EXTRA_REGS 8
544 call *%rax
545 RESTORE_EXTRA_REGS 8
546 ret
547END(ia32_ptregs_common)
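For reference (a summary, not part of the patch): the three dispatch paths in the new file -- sysenter_do_call, cstar_do_call and ia32_do_call -- share the same five-instruction shuffle that moves the i386 syscall arguments into the registers the 64-bit C handlers expect:

	/*
	 * i386 syscall ABI          x86-64 C ABI (sys_*() arguments)
	 *   eax  syscall nr     ->    rax (only used to index the table)
	 *   ebx  arg1           ->    rdi
	 *   ecx  arg2           ->    rsi
	 *   edx  arg3           ->    rdx
	 *   esi  arg4           ->    rcx
	 *   edi  arg5           ->    r8
	 *   ebp  arg6           ->    r9
	 */
	movl	%edi, %r8d		/* arg5 */
	movl	%ebp, %r9d		/* arg6 */
	xchg	%ecx, %esi		/* rsi := arg2, rcx := arg4 */
	movl	%ebx, %edi		/* arg1 */
	movl	%edx, %edx		/* arg3, zero-extending the upper half */

Because writing a 32-bit register zero-extends it to 64 bits, each movl (and the xchg) also clears the upper halves, which is why no explicit masking is needed before the call *ia32_sys_call_table(, %rax, 8) dispatch.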
diff --git a/arch/x86/kernel/syscall_32.c b/arch/x86/entry/syscall_32.c
index 3777189c4a19..e398d033673f 100644
--- a/arch/x86/kernel/syscall_32.c
+++ b/arch/x86/entry/syscall_32.c
@@ -10,7 +10,7 @@
10#else 10#else
11#define SYM(sym, compat) sym 11#define SYM(sym, compat) sym
12#define ia32_sys_call_table sys_call_table 12#define ia32_sys_call_table sys_call_table
13#define __NR_ia32_syscall_max __NR_syscall_max 13#define __NR_entry_INT80_compat_max __NR_syscall_max
14#endif 14#endif
15 15
16#define __SYSCALL_I386(nr, sym, compat) extern asmlinkage void SYM(sym, compat)(void) ; 16#define __SYSCALL_I386(nr, sym, compat) extern asmlinkage void SYM(sym, compat)(void) ;
@@ -23,11 +23,11 @@ typedef asmlinkage void (*sys_call_ptr_t)(void);
23 23
24extern asmlinkage void sys_ni_syscall(void); 24extern asmlinkage void sys_ni_syscall(void);
25 25
26__visible const sys_call_ptr_t ia32_sys_call_table[__NR_ia32_syscall_max+1] = { 26__visible const sys_call_ptr_t ia32_sys_call_table[__NR_entry_INT80_compat_max+1] = {
27 /* 27 /*
28 * Smells like a compiler bug -- it doesn't work 28 * Smells like a compiler bug -- it doesn't work
29 * when the & below is removed. 29 * when the & below is removed.
30 */ 30 */
31 [0 ... __NR_ia32_syscall_max] = &sys_ni_syscall, 31 [0 ... __NR_entry_INT80_compat_max] = &sys_ni_syscall,
32#include <asm/syscalls_32.h> 32#include <asm/syscalls_32.h>
33}; 33};
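The table initialization above relies on a GNU C range designator so that every slot starts out as sys_ni_syscall and <asm/syscalls_32.h> then overrides the populated entries. A minimal standalone sketch of the same pattern (hypothetical names, builds with GCC or Clang):

	#include <stdio.h>

	typedef void (*sys_call_ptr_t)(void);

	static void sys_ni_syscall(void)   { puts("ENOSYS"); }
	static void sys_example_call(void) { puts("example"); }	/* stand-in entry */

	#define NR_MAX 7

	/* Every slot defaults to &sys_ni_syscall; later designators win. */
	static const sys_call_ptr_t table[NR_MAX + 1] = {
		[0 ... NR_MAX] = &sys_ni_syscall,
		[3]            = &sys_example_call,
	};

	int main(void)
	{
		table[3]();	/* prints "example" */
		table[5]();	/* prints "ENOSYS" */
		return 0;
	}

GCC accepts the duplicate initialization of slot 3 (the later designator takes precedence, with at most a -Woverride-init warning), which is exactly how the real table lets the generated header override the catch-all default.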
diff --git a/arch/x86/kernel/syscall_64.c b/arch/x86/entry/syscall_64.c
index 4ac730b37f0b..4ac730b37f0b 100644
--- a/arch/x86/kernel/syscall_64.c
+++ b/arch/x86/entry/syscall_64.c
diff --git a/arch/x86/syscalls/Makefile b/arch/x86/entry/syscalls/Makefile
index a55abb9f6c5e..57aa59fd140c 100644
--- a/arch/x86/syscalls/Makefile
+++ b/arch/x86/entry/syscalls/Makefile
@@ -1,5 +1,5 @@
1out := $(obj)/../include/generated/asm 1out := $(obj)/../../include/generated/asm
2uapi := $(obj)/../include/generated/uapi/asm 2uapi := $(obj)/../../include/generated/uapi/asm
3 3
4# Create output directory if not already present 4# Create output directory if not already present
5_dummy := $(shell [ -d '$(out)' ] || mkdir -p '$(out)') \ 5_dummy := $(shell [ -d '$(out)' ] || mkdir -p '$(out)') \
diff --git a/arch/x86/syscalls/syscall_32.tbl b/arch/x86/entry/syscalls/syscall_32.tbl
index ef8187f9d28d..ef8187f9d28d 100644
--- a/arch/x86/syscalls/syscall_32.tbl
+++ b/arch/x86/entry/syscalls/syscall_32.tbl
diff --git a/arch/x86/syscalls/syscall_64.tbl b/arch/x86/entry/syscalls/syscall_64.tbl
index 9ef32d5f1b19..9ef32d5f1b19 100644
--- a/arch/x86/syscalls/syscall_64.tbl
+++ b/arch/x86/entry/syscalls/syscall_64.tbl
diff --git a/arch/x86/syscalls/syscallhdr.sh b/arch/x86/entry/syscalls/syscallhdr.sh
index 31fd5f1f38f7..31fd5f1f38f7 100644
--- a/arch/x86/syscalls/syscallhdr.sh
+++ b/arch/x86/entry/syscalls/syscallhdr.sh
diff --git a/arch/x86/syscalls/syscalltbl.sh b/arch/x86/entry/syscalls/syscalltbl.sh
index 0e7f8ec071e7..0e7f8ec071e7 100644
--- a/arch/x86/syscalls/syscalltbl.sh
+++ b/arch/x86/entry/syscalls/syscalltbl.sh
diff --git a/arch/x86/lib/thunk_32.S b/arch/x86/entry/thunk_32.S
index 5eb715087b80..e9acf5f4fc92 100644
--- a/arch/x86/lib/thunk_32.S
+++ b/arch/x86/entry/thunk_32.S
@@ -6,16 +6,14 @@
6 */ 6 */
7 #include <linux/linkage.h> 7 #include <linux/linkage.h>
8 #include <asm/asm.h> 8 #include <asm/asm.h>
9 #include <asm/dwarf2.h>
10 9
11 /* put return address in eax (arg1) */ 10 /* put return address in eax (arg1) */
12 .macro THUNK name, func, put_ret_addr_in_eax=0 11 .macro THUNK name, func, put_ret_addr_in_eax=0
13 .globl \name 12 .globl \name
14\name: 13\name:
15 CFI_STARTPROC 14 pushl %eax
16 pushl_cfi_reg eax 15 pushl %ecx
17 pushl_cfi_reg ecx 16 pushl %edx
18 pushl_cfi_reg edx
19 17
20 .if \put_ret_addr_in_eax 18 .if \put_ret_addr_in_eax
21 /* Place EIP in the arg1 */ 19 /* Place EIP in the arg1 */
@@ -23,11 +21,10 @@
23 .endif 21 .endif
24 22
25 call \func 23 call \func
26 popl_cfi_reg edx 24 popl %edx
27 popl_cfi_reg ecx 25 popl %ecx
28 popl_cfi_reg eax 26 popl %eax
29 ret 27 ret
30 CFI_ENDPROC
31 _ASM_NOKPROBE(\name) 28 _ASM_NOKPROBE(\name)
32 .endm 29 .endm
33 30
diff --git a/arch/x86/lib/thunk_64.S b/arch/x86/entry/thunk_64.S
index f89ba4e93025..3e95681b4e2d 100644
--- a/arch/x86/lib/thunk_64.S
+++ b/arch/x86/entry/thunk_64.S
@@ -6,35 +6,32 @@
6 * Subject to the GNU public license, v.2. No warranty of any kind. 6 * Subject to the GNU public license, v.2. No warranty of any kind.
7 */ 7 */
8#include <linux/linkage.h> 8#include <linux/linkage.h>
9#include <asm/dwarf2.h> 9#include "calling.h"
10#include <asm/calling.h>
11#include <asm/asm.h> 10#include <asm/asm.h>
12 11
13 /* rdi: arg1 ... normal C conventions. rax is saved/restored. */ 12 /* rdi: arg1 ... normal C conventions. rax is saved/restored. */
14 .macro THUNK name, func, put_ret_addr_in_rdi=0 13 .macro THUNK name, func, put_ret_addr_in_rdi=0
15 .globl \name 14 .globl \name
16\name: 15\name:
17 CFI_STARTPROC
18 16
19 /* this one pushes 9 elems, the next one would be %rIP */ 17 /* this one pushes 9 elems, the next one would be %rIP */
20 pushq_cfi_reg rdi 18 pushq %rdi
21 pushq_cfi_reg rsi 19 pushq %rsi
22 pushq_cfi_reg rdx 20 pushq %rdx
23 pushq_cfi_reg rcx 21 pushq %rcx
24 pushq_cfi_reg rax 22 pushq %rax
25 pushq_cfi_reg r8 23 pushq %r8
26 pushq_cfi_reg r9 24 pushq %r9
27 pushq_cfi_reg r10 25 pushq %r10
28 pushq_cfi_reg r11 26 pushq %r11
29 27
30 .if \put_ret_addr_in_rdi 28 .if \put_ret_addr_in_rdi
31 /* 9*8(%rsp) is return addr on stack */ 29 /* 9*8(%rsp) is return addr on stack */
32 movq_cfi_restore 9*8, rdi 30 movq 9*8(%rsp), %rdi
33 .endif 31 .endif
34 32
35 call \func 33 call \func
36 jmp restore 34 jmp restore
37 CFI_ENDPROC
38 _ASM_NOKPROBE(\name) 35 _ASM_NOKPROBE(\name)
39 .endm 36 .endm
40 37
@@ -57,19 +54,16 @@
57#if defined(CONFIG_TRACE_IRQFLAGS) \ 54#if defined(CONFIG_TRACE_IRQFLAGS) \
58 || defined(CONFIG_DEBUG_LOCK_ALLOC) \ 55 || defined(CONFIG_DEBUG_LOCK_ALLOC) \
59 || defined(CONFIG_PREEMPT) 56 || defined(CONFIG_PREEMPT)
60 CFI_STARTPROC
61 CFI_ADJUST_CFA_OFFSET 9*8
62restore: 57restore:
63 popq_cfi_reg r11 58 popq %r11
64 popq_cfi_reg r10 59 popq %r10
65 popq_cfi_reg r9 60 popq %r9
66 popq_cfi_reg r8 61 popq %r8
67 popq_cfi_reg rax 62 popq %rax
68 popq_cfi_reg rcx 63 popq %rcx
69 popq_cfi_reg rdx 64 popq %rdx
70 popq_cfi_reg rsi 65 popq %rsi
71 popq_cfi_reg rdi 66 popq %rdi
72 ret 67 ret
73 CFI_ENDPROC
74 _ASM_NOKPROBE(restore) 68 _ASM_NOKPROBE(restore)
75#endif 69#endif
diff --git a/arch/x86/vdso/.gitignore b/arch/x86/entry/vdso/.gitignore
index aae8ffdd5880..aae8ffdd5880 100644
--- a/arch/x86/vdso/.gitignore
+++ b/arch/x86/entry/vdso/.gitignore
diff --git a/arch/x86/vdso/Makefile b/arch/x86/entry/vdso/Makefile
index e97032069f88..e97032069f88 100644
--- a/arch/x86/vdso/Makefile
+++ b/arch/x86/entry/vdso/Makefile
diff --git a/arch/x86/vdso/checkundef.sh b/arch/x86/entry/vdso/checkundef.sh
index 7ee90a9b549d..7ee90a9b549d 100755
--- a/arch/x86/vdso/checkundef.sh
+++ b/arch/x86/entry/vdso/checkundef.sh
diff --git a/arch/x86/vdso/vclock_gettime.c b/arch/x86/entry/vdso/vclock_gettime.c
index 9793322751e0..9793322751e0 100644
--- a/arch/x86/vdso/vclock_gettime.c
+++ b/arch/x86/entry/vdso/vclock_gettime.c
diff --git a/arch/x86/vdso/vdso-layout.lds.S b/arch/x86/entry/vdso/vdso-layout.lds.S
index de2c921025f5..de2c921025f5 100644
--- a/arch/x86/vdso/vdso-layout.lds.S
+++ b/arch/x86/entry/vdso/vdso-layout.lds.S
diff --git a/arch/x86/vdso/vdso-note.S b/arch/x86/entry/vdso/vdso-note.S
index 79a071e4357e..79a071e4357e 100644
--- a/arch/x86/vdso/vdso-note.S
+++ b/arch/x86/entry/vdso/vdso-note.S
diff --git a/arch/x86/vdso/vdso.lds.S b/arch/x86/entry/vdso/vdso.lds.S
index 6807932643c2..6807932643c2 100644
--- a/arch/x86/vdso/vdso.lds.S
+++ b/arch/x86/entry/vdso/vdso.lds.S
diff --git a/arch/x86/vdso/vdso2c.c b/arch/x86/entry/vdso/vdso2c.c
index 8627db24a7f6..8627db24a7f6 100644
--- a/arch/x86/vdso/vdso2c.c
+++ b/arch/x86/entry/vdso/vdso2c.c
diff --git a/arch/x86/vdso/vdso2c.h b/arch/x86/entry/vdso/vdso2c.h
index 0224987556ce..0224987556ce 100644
--- a/arch/x86/vdso/vdso2c.h
+++ b/arch/x86/entry/vdso/vdso2c.h
diff --git a/arch/x86/vdso/vdso32-setup.c b/arch/x86/entry/vdso/vdso32-setup.c
index e904c270573b..e904c270573b 100644
--- a/arch/x86/vdso/vdso32-setup.c
+++ b/arch/x86/entry/vdso/vdso32-setup.c
diff --git a/arch/x86/vdso/vdso32/.gitignore b/arch/x86/entry/vdso/vdso32/.gitignore
index e45fba9d0ced..e45fba9d0ced 100644
--- a/arch/x86/vdso/vdso32/.gitignore
+++ b/arch/x86/entry/vdso/vdso32/.gitignore
diff --git a/arch/x86/vdso/vdso32/int80.S b/arch/x86/entry/vdso/vdso32/int80.S
index b15b7c01aedb..b15b7c01aedb 100644
--- a/arch/x86/vdso/vdso32/int80.S
+++ b/arch/x86/entry/vdso/vdso32/int80.S
diff --git a/arch/x86/vdso/vdso32/note.S b/arch/x86/entry/vdso/vdso32/note.S
index c83f25734696..c83f25734696 100644
--- a/arch/x86/vdso/vdso32/note.S
+++ b/arch/x86/entry/vdso/vdso32/note.S
diff --git a/arch/x86/vdso/vdso32/sigreturn.S b/arch/x86/entry/vdso/vdso32/sigreturn.S
index d7ec4e251c0a..d7ec4e251c0a 100644
--- a/arch/x86/vdso/vdso32/sigreturn.S
+++ b/arch/x86/entry/vdso/vdso32/sigreturn.S
diff --git a/arch/x86/vdso/vdso32/syscall.S b/arch/x86/entry/vdso/vdso32/syscall.S
index 6b286bb5251c..6b286bb5251c 100644
--- a/arch/x86/vdso/vdso32/syscall.S
+++ b/arch/x86/entry/vdso/vdso32/syscall.S
diff --git a/arch/x86/vdso/vdso32/sysenter.S b/arch/x86/entry/vdso/vdso32/sysenter.S
index e354bceee0e0..e354bceee0e0 100644
--- a/arch/x86/vdso/vdso32/sysenter.S
+++ b/arch/x86/entry/vdso/vdso32/sysenter.S
diff --git a/arch/x86/vdso/vdso32/vclock_gettime.c b/arch/x86/entry/vdso/vdso32/vclock_gettime.c
index 175cc72c0f68..175cc72c0f68 100644
--- a/arch/x86/vdso/vdso32/vclock_gettime.c
+++ b/arch/x86/entry/vdso/vdso32/vclock_gettime.c
diff --git a/arch/x86/vdso/vdso32/vdso-fakesections.c b/arch/x86/entry/vdso/vdso32/vdso-fakesections.c
index 541468e25265..541468e25265 100644
--- a/arch/x86/vdso/vdso32/vdso-fakesections.c
+++ b/arch/x86/entry/vdso/vdso32/vdso-fakesections.c
diff --git a/arch/x86/vdso/vdso32/vdso32.lds.S b/arch/x86/entry/vdso/vdso32/vdso32.lds.S
index 31056cf294bf..31056cf294bf 100644
--- a/arch/x86/vdso/vdso32/vdso32.lds.S
+++ b/arch/x86/entry/vdso/vdso32/vdso32.lds.S
diff --git a/arch/x86/vdso/vdsox32.lds.S b/arch/x86/entry/vdso/vdsox32.lds.S
index 697c11ece90c..697c11ece90c 100644
--- a/arch/x86/vdso/vdsox32.lds.S
+++ b/arch/x86/entry/vdso/vdsox32.lds.S
diff --git a/arch/x86/vdso/vgetcpu.c b/arch/x86/entry/vdso/vgetcpu.c
index 8ec3d1f4ce9a..8ec3d1f4ce9a 100644
--- a/arch/x86/vdso/vgetcpu.c
+++ b/arch/x86/entry/vdso/vgetcpu.c
diff --git a/arch/x86/vdso/vma.c b/arch/x86/entry/vdso/vma.c
index 1c9f750c3859..1c9f750c3859 100644
--- a/arch/x86/vdso/vma.c
+++ b/arch/x86/entry/vdso/vma.c
diff --git a/arch/x86/entry/vsyscall/Makefile b/arch/x86/entry/vsyscall/Makefile
new file mode 100644
index 000000000000..a9f4856f622a
--- /dev/null
+++ b/arch/x86/entry/vsyscall/Makefile
@@ -0,0 +1,7 @@
1#
2# Makefile for the x86 low level vsyscall code
3#
4obj-y := vsyscall_gtod.o
5
6obj-$(CONFIG_X86_VSYSCALL_EMULATION) += vsyscall_64.o vsyscall_emu_64.o
7
diff --git a/arch/x86/kernel/vsyscall_64.c b/arch/x86/entry/vsyscall/vsyscall_64.c
index 2dcc6ff6fdcc..2dcc6ff6fdcc 100644
--- a/arch/x86/kernel/vsyscall_64.c
+++ b/arch/x86/entry/vsyscall/vsyscall_64.c
diff --git a/arch/x86/kernel/vsyscall_emu_64.S b/arch/x86/entry/vsyscall/vsyscall_emu_64.S
index c9596a9af159..c9596a9af159 100644
--- a/arch/x86/kernel/vsyscall_emu_64.S
+++ b/arch/x86/entry/vsyscall/vsyscall_emu_64.S
diff --git a/arch/x86/kernel/vsyscall_gtod.c b/arch/x86/entry/vsyscall/vsyscall_gtod.c
index 51e330416995..51e330416995 100644
--- a/arch/x86/kernel/vsyscall_gtod.c
+++ b/arch/x86/entry/vsyscall/vsyscall_gtod.c
diff --git a/arch/x86/kernel/vsyscall_trace.h b/arch/x86/entry/vsyscall/vsyscall_trace.h
index a8b2edec54fe..9dd7359a38a8 100644
--- a/arch/x86/kernel/vsyscall_trace.h
+++ b/arch/x86/entry/vsyscall/vsyscall_trace.h
@@ -24,6 +24,6 @@ TRACE_EVENT(emulate_vsyscall,
 #endif
 
 #undef TRACE_INCLUDE_PATH
-#define TRACE_INCLUDE_PATH ../../arch/x86/kernel
+#define TRACE_INCLUDE_PATH ../../arch/x86/entry/vsyscall/
 #define TRACE_INCLUDE_FILE vsyscall_trace
 #include <trace/define_trace.h>
diff --git a/arch/x86/ia32/Makefile b/arch/x86/ia32/Makefile
index bb635c641869..cd4339bae066 100644
--- a/arch/x86/ia32/Makefile
+++ b/arch/x86/ia32/Makefile
@@ -2,7 +2,7 @@
 # Makefile for the ia32 kernel emulation subsystem.
 #
 
-obj-$(CONFIG_IA32_EMULATION) := ia32entry.o sys_ia32.o ia32_signal.o
+obj-$(CONFIG_IA32_EMULATION) := sys_ia32.o ia32_signal.o
 
 obj-$(CONFIG_IA32_AOUT) += ia32_aout.o
 
diff --git a/arch/x86/ia32/ia32entry.S b/arch/x86/ia32/ia32entry.S
deleted file mode 100644
index 63450a596800..000000000000
--- a/arch/x86/ia32/ia32entry.S
+++ /dev/null
@@ -1,591 +0,0 @@
1/*
2 * Compatibility mode system call entry point for x86-64.
3 *
4 * Copyright 2000-2002 Andi Kleen, SuSE Labs.
5 */
6
7#include <asm/dwarf2.h>
8#include <asm/calling.h>
9#include <asm/asm-offsets.h>
10#include <asm/current.h>
11#include <asm/errno.h>
12#include <asm/ia32_unistd.h>
13#include <asm/thread_info.h>
14#include <asm/segment.h>
15#include <asm/irqflags.h>
16#include <asm/asm.h>
17#include <asm/smap.h>
18#include <linux/linkage.h>
19#include <linux/err.h>
20
21/* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */
22#include <linux/elf-em.h>
23#define AUDIT_ARCH_I386 (EM_386|__AUDIT_ARCH_LE)
24#define __AUDIT_ARCH_LE 0x40000000
25
26#ifndef CONFIG_AUDITSYSCALL
27#define sysexit_audit ia32_ret_from_sys_call
28#define sysretl_audit ia32_ret_from_sys_call
29#endif
30
31 .section .entry.text, "ax"
32
33 /* clobbers %rax */
34 .macro CLEAR_RREGS _r9=rax
35 xorl %eax,%eax
36 movq %rax,R11(%rsp)
37 movq %rax,R10(%rsp)
38 movq %\_r9,R9(%rsp)
39 movq %rax,R8(%rsp)
40 .endm
41
42 /*
43 * Reload arg registers from stack in case ptrace changed them.
44 * We don't reload %eax because syscall_trace_enter() returned
45 * the %rax value we should see. Instead, we just truncate that
46 * value to 32 bits again as we did on entry from user mode.
47 * If it's a new value set by user_regset during entry tracing,
48 * this matches the normal truncation of the user-mode value.
49 * If it's -1 to make us punt the syscall, then (u32)-1 is still
50 * an appropriately invalid value.
51 */
52 .macro LOAD_ARGS32 _r9=0
53 .if \_r9
54 movl R9(%rsp),%r9d
55 .endif
56 movl RCX(%rsp),%ecx
57 movl RDX(%rsp),%edx
58 movl RSI(%rsp),%esi
59 movl RDI(%rsp),%edi
60 movl %eax,%eax /* zero extension */
61 .endm
62
63 .macro CFI_STARTPROC32 simple
64 CFI_STARTPROC \simple
65 CFI_UNDEFINED r8
66 CFI_UNDEFINED r9
67 CFI_UNDEFINED r10
68 CFI_UNDEFINED r11
69 CFI_UNDEFINED r12
70 CFI_UNDEFINED r13
71 CFI_UNDEFINED r14
72 CFI_UNDEFINED r15
73 .endm
74
75#ifdef CONFIG_PARAVIRT
76ENTRY(native_usergs_sysret32)
77 swapgs
78 sysretl
79ENDPROC(native_usergs_sysret32)
80#endif
81
82/*
83 * 32bit SYSENTER instruction entry.
84 *
85 * SYSENTER loads ss, rsp, cs, and rip from previously programmed MSRs.
86 * IF and VM in rflags are cleared (IOW: interrupts are off).
87 * SYSENTER does not save anything on the stack,
88 * and does not save old rip (!!!) and rflags.
89 *
90 * Arguments:
91 * eax system call number
92 * ebx arg1
93 * ecx arg2
94 * edx arg3
95 * esi arg4
96 * edi arg5
97 * ebp user stack
98 * 0(%ebp) arg6
99 *
100 * This is purely a fast path. For anything complicated we use the int 0x80
101 * path below. We set up a complete hardware stack frame to share code
102 * with the int 0x80 path.
103 */
104ENTRY(ia32_sysenter_target)
105 CFI_STARTPROC32 simple
106 CFI_SIGNAL_FRAME
107 CFI_DEF_CFA rsp,0
108 CFI_REGISTER rsp,rbp
109
110 /*
111 * Interrupts are off on entry.
112 * We do not frame this tiny irq-off block with TRACE_IRQS_OFF/ON,
113 * it is too small to ever cause noticeable irq latency.
114 */
115 SWAPGS_UNSAFE_STACK
116 movq PER_CPU_VAR(cpu_current_top_of_stack), %rsp
117 ENABLE_INTERRUPTS(CLBR_NONE)
118
119 /* Zero-extending 32-bit regs, do not remove */
120 movl %ebp, %ebp
121 movl %eax, %eax
122
123 movl ASM_THREAD_INFO(TI_sysenter_return, %rsp, 0), %r10d
124 CFI_REGISTER rip,r10
125
126 /* Construct struct pt_regs on stack */
127 pushq_cfi $__USER32_DS /* pt_regs->ss */
128 pushq_cfi %rbp /* pt_regs->sp */
129 CFI_REL_OFFSET rsp,0
130 pushfq_cfi /* pt_regs->flags */
131 pushq_cfi $__USER32_CS /* pt_regs->cs */
132 pushq_cfi %r10 /* pt_regs->ip = thread_info->sysenter_return */
133 CFI_REL_OFFSET rip,0
134 pushq_cfi_reg rax /* pt_regs->orig_ax */
135 pushq_cfi_reg rdi /* pt_regs->di */
136 pushq_cfi_reg rsi /* pt_regs->si */
137 pushq_cfi_reg rdx /* pt_regs->dx */
138 pushq_cfi_reg rcx /* pt_regs->cx */
139 pushq_cfi $-ENOSYS /* pt_regs->ax */
140 cld
141 sub $(10*8),%rsp /* pt_regs->r8-11,bp,bx,r12-15 not saved */
142 CFI_ADJUST_CFA_OFFSET 10*8
143
144 /*
145 * no need to do an access_ok check here because rbp has been
146 * 32bit zero extended
147 */
148 ASM_STAC
1491: movl (%rbp),%ebp
150 _ASM_EXTABLE(1b,ia32_badarg)
151 ASM_CLAC
152
153 /*
154 * Sysenter doesn't filter flags, so we need to clear NT
155 * ourselves. To save a few cycles, we can check whether
156 * NT was set instead of doing an unconditional popfq.
157 */
158 testl $X86_EFLAGS_NT,EFLAGS(%rsp)
159 jnz sysenter_fix_flags
160sysenter_flags_fixed:
161
162 orl $TS_COMPAT, ASM_THREAD_INFO(TI_status, %rsp, SIZEOF_PTREGS)
163 testl $_TIF_WORK_SYSCALL_ENTRY, ASM_THREAD_INFO(TI_flags, %rsp, SIZEOF_PTREGS)
164 CFI_REMEMBER_STATE
165 jnz sysenter_tracesys
166sysenter_do_call:
167 /* 32bit syscall -> 64bit C ABI argument conversion */
168 movl %edi,%r8d /* arg5 */
169 movl %ebp,%r9d /* arg6 */
170 xchg %ecx,%esi /* rsi:arg2, rcx:arg4 */
171 movl %ebx,%edi /* arg1 */
172 movl %edx,%edx /* arg3 (zero extension) */
173sysenter_dispatch:
174 cmpq $(IA32_NR_syscalls-1),%rax
175 ja 1f
176 call *ia32_sys_call_table(,%rax,8)
177 movq %rax,RAX(%rsp)
1781:
179 DISABLE_INTERRUPTS(CLBR_NONE)
180 TRACE_IRQS_OFF
181 testl $_TIF_ALLWORK_MASK, ASM_THREAD_INFO(TI_flags, %rsp, SIZEOF_PTREGS)
182 jnz sysexit_audit
183sysexit_from_sys_call:
184 /*
185 * NB: SYSEXIT is not obviously safe for 64-bit kernels -- an
186 * NMI between STI and SYSEXIT has poorly specified behavior,
187	 * and an NMI followed by an IRQ with usergs is fatal. So
188 * we just pretend we're using SYSEXIT but we really use
189 * SYSRETL instead.
190 *
191 * This code path is still called 'sysexit' because it pairs
192 * with 'sysenter' and it uses the SYSENTER calling convention.
193 */
194 andl $~TS_COMPAT,ASM_THREAD_INFO(TI_status, %rsp, SIZEOF_PTREGS)
195 movl RIP(%rsp),%ecx /* User %eip */
196 CFI_REGISTER rip,rcx
197 RESTORE_RSI_RDI
198 xorl %edx,%edx /* avoid info leaks */
199 xorq %r8,%r8
200 xorq %r9,%r9
201 xorq %r10,%r10
202 movl EFLAGS(%rsp),%r11d /* User eflags */
203 /*CFI_RESTORE rflags*/
204 TRACE_IRQS_ON
205
206 /*
207 * SYSRETL works even on Intel CPUs. Use it in preference to SYSEXIT,
208 * since it avoids a dicey window with interrupts enabled.
209 */
210 movl RSP(%rsp),%esp
211
212 /*
213 * USERGS_SYSRET32 does:
214 * gsbase = user's gs base
215 * eip = ecx
216 * rflags = r11
217 * cs = __USER32_CS
218 * ss = __USER_DS
219 *
220 * The prologue set RIP(%rsp) to VDSO32_SYSENTER_RETURN, which does:
221 *
222 * pop %ebp
223 * pop %edx
224 * pop %ecx
225 *
226 * Therefore, we invoke SYSRETL with EDX and R8-R10 zeroed to
227 * avoid info leaks. R11 ends up with VDSO32_SYSENTER_RETURN's
228 * address (already known to user code), and R12-R15 are
229 * callee-saved and therefore don't contain any interesting
230 * kernel data.
231 */
232 USERGS_SYSRET32
233
234 CFI_RESTORE_STATE
235
236#ifdef CONFIG_AUDITSYSCALL
237 .macro auditsys_entry_common
238 movl %esi,%r8d /* 5th arg: 4th syscall arg */
239 movl %ecx,%r9d /*swap with edx*/
240 movl %edx,%ecx /* 4th arg: 3rd syscall arg */
241 movl %r9d,%edx /* 3rd arg: 2nd syscall arg */
242 movl %ebx,%esi /* 2nd arg: 1st syscall arg */
243 movl %eax,%edi /* 1st arg: syscall number */
244 call __audit_syscall_entry
245 movl ORIG_RAX(%rsp),%eax /* reload syscall number */
246 movl %ebx,%edi /* reload 1st syscall arg */
247 movl RCX(%rsp),%esi /* reload 2nd syscall arg */
248 movl RDX(%rsp),%edx /* reload 3rd syscall arg */
249 movl RSI(%rsp),%ecx /* reload 4th syscall arg */
250 movl RDI(%rsp),%r8d /* reload 5th syscall arg */
251 .endm
252
253 .macro auditsys_exit exit
254 testl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT), ASM_THREAD_INFO(TI_flags, %rsp, SIZEOF_PTREGS)
255 jnz ia32_ret_from_sys_call
256 TRACE_IRQS_ON
257 ENABLE_INTERRUPTS(CLBR_NONE)
258 movl %eax,%esi /* second arg, syscall return value */
259 cmpl $-MAX_ERRNO,%eax /* is it an error ? */
260 jbe 1f
261 movslq %eax, %rsi /* if error sign extend to 64 bits */
2621: setbe %al /* 1 if error, 0 if not */
263 movzbl %al,%edi /* zero-extend that into %edi */
264 call __audit_syscall_exit
265 movq RAX(%rsp),%rax /* reload syscall return value */
266 movl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),%edi
267 DISABLE_INTERRUPTS(CLBR_NONE)
268 TRACE_IRQS_OFF
269 testl %edi, ASM_THREAD_INFO(TI_flags, %rsp, SIZEOF_PTREGS)
270 jz \exit
271 CLEAR_RREGS
272 jmp int_with_check
273 .endm
274
275sysenter_auditsys:
276 auditsys_entry_common
277 movl %ebp,%r9d /* reload 6th syscall arg */
278 jmp sysenter_dispatch
279
280sysexit_audit:
281 auditsys_exit sysexit_from_sys_call
282#endif
283
284sysenter_fix_flags:
285 pushq_cfi $(X86_EFLAGS_IF|X86_EFLAGS_FIXED)
286 popfq_cfi
287 jmp sysenter_flags_fixed
288
289sysenter_tracesys:
290#ifdef CONFIG_AUDITSYSCALL
291 testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT), ASM_THREAD_INFO(TI_flags, %rsp, SIZEOF_PTREGS)
292 jz sysenter_auditsys
293#endif
294 SAVE_EXTRA_REGS
295 CLEAR_RREGS
296 movq %rsp,%rdi /* &pt_regs -> arg1 */
297 call syscall_trace_enter
298 LOAD_ARGS32 /* reload args from stack in case ptrace changed it */
299 RESTORE_EXTRA_REGS
300 jmp sysenter_do_call
301 CFI_ENDPROC
302ENDPROC(ia32_sysenter_target)
303
304/*
305 * 32bit SYSCALL instruction entry.
306 *
307 * 32bit SYSCALL saves rip to rcx, clears rflags.RF, then saves rflags to r11,
308 * then loads new ss, cs, and rip from previously programmed MSRs.
309 * rflags gets masked by a value from another MSR (so CLD and CLAC
310 * are not needed). SYSCALL does not save anything on the stack
311 * and does not change rsp.
312 *
313 * Note: rflags saving+masking-with-MSR happens only in Long mode
314 * (in legacy 32bit mode, IF, RF and VM bits are cleared and that's it).
315 * Don't get confused: rflags saving+masking depends on Long Mode Active bit
316 * (EFER.LMA=1), NOT on bitness of userspace where SYSCALL executes
317 * or target CS descriptor's L bit (SYSCALL does not read segment descriptors).
318 *
319 * Arguments:
320 * eax system call number
321 * ecx return address
322 * ebx arg1
323 * ebp arg2 (note: not saved in the stack frame, should not be touched)
324 * edx arg3
325 * esi arg4
326 * edi arg5
327 * esp user stack
328 * 0(%esp) arg6
329 *
330 * This is purely a fast path. For anything complicated we use the int 0x80
331 * path below. We set up a complete hardware stack frame to share code
332 * with the int 0x80 path.
333 */
334ENTRY(ia32_cstar_target)
335 CFI_STARTPROC32 simple
336 CFI_SIGNAL_FRAME
337 CFI_DEF_CFA rsp,0
338 CFI_REGISTER rip,rcx
339 /*CFI_REGISTER rflags,r11*/
340
341 /*
342 * Interrupts are off on entry.
343 * We do not frame this tiny irq-off block with TRACE_IRQS_OFF/ON,
344 * it is too small to ever cause noticeable irq latency.
345 */
346 SWAPGS_UNSAFE_STACK
347 movl %esp,%r8d
348 CFI_REGISTER rsp,r8
349 movq PER_CPU_VAR(cpu_current_top_of_stack),%rsp
350 ENABLE_INTERRUPTS(CLBR_NONE)
351
352 /* Zero-extending 32-bit regs, do not remove */
353 movl %eax,%eax
354
355 /* Construct struct pt_regs on stack */
356 pushq_cfi $__USER32_DS /* pt_regs->ss */
357 pushq_cfi %r8 /* pt_regs->sp */
358 CFI_REL_OFFSET rsp,0
359 pushq_cfi %r11 /* pt_regs->flags */
360 pushq_cfi $__USER32_CS /* pt_regs->cs */
361 pushq_cfi %rcx /* pt_regs->ip */
362 CFI_REL_OFFSET rip,0
363 pushq_cfi_reg rax /* pt_regs->orig_ax */
364 pushq_cfi_reg rdi /* pt_regs->di */
365 pushq_cfi_reg rsi /* pt_regs->si */
366 pushq_cfi_reg rdx /* pt_regs->dx */
367 pushq_cfi_reg rbp /* pt_regs->cx */
368 movl %ebp,%ecx
369 pushq_cfi $-ENOSYS /* pt_regs->ax */
370 sub $(10*8),%rsp /* pt_regs->r8-11,bp,bx,r12-15 not saved */
371 CFI_ADJUST_CFA_OFFSET 10*8
372
373 /*
374 * no need to do an access_ok check here because r8 has been
375 * 32bit zero extended
376 */
377 ASM_STAC
3781: movl (%r8),%r9d
379 _ASM_EXTABLE(1b,ia32_badarg)
380 ASM_CLAC
381 orl $TS_COMPAT, ASM_THREAD_INFO(TI_status, %rsp, SIZEOF_PTREGS)
382 testl $_TIF_WORK_SYSCALL_ENTRY, ASM_THREAD_INFO(TI_flags, %rsp, SIZEOF_PTREGS)
383 CFI_REMEMBER_STATE
384 jnz cstar_tracesys
385cstar_do_call:
386 /* 32bit syscall -> 64bit C ABI argument conversion */
387 movl %edi,%r8d /* arg5 */
388 /* r9 already loaded */ /* arg6 */
389 xchg %ecx,%esi /* rsi:arg2, rcx:arg4 */
390 movl %ebx,%edi /* arg1 */
391 movl %edx,%edx /* arg3 (zero extension) */
392cstar_dispatch:
393 cmpq $(IA32_NR_syscalls-1),%rax
394 ja 1f
395 call *ia32_sys_call_table(,%rax,8)
396 movq %rax,RAX(%rsp)
3971:
398 DISABLE_INTERRUPTS(CLBR_NONE)
399 TRACE_IRQS_OFF
400 testl $_TIF_ALLWORK_MASK, ASM_THREAD_INFO(TI_flags, %rsp, SIZEOF_PTREGS)
401 jnz sysretl_audit
402sysretl_from_sys_call:
403 andl $~TS_COMPAT, ASM_THREAD_INFO(TI_status, %rsp, SIZEOF_PTREGS)
404 RESTORE_RSI_RDI_RDX
405 movl RIP(%rsp),%ecx
406 CFI_REGISTER rip,rcx
407 movl EFLAGS(%rsp),%r11d
408 /*CFI_REGISTER rflags,r11*/
409 xorq %r10,%r10
410 xorq %r9,%r9
411 xorq %r8,%r8
412 TRACE_IRQS_ON
413 movl RSP(%rsp),%esp
414 CFI_RESTORE rsp
415 /*
416 * 64bit->32bit SYSRET restores eip from ecx,
417 * eflags from r11 (but RF and VM bits are forced to 0),
418 * cs and ss are loaded from MSRs.
419 * (Note: 32bit->32bit SYSRET is different: since r11
420 * does not exist, it merely sets eflags.IF=1).
421 *
422 * NB: On AMD CPUs with the X86_BUG_SYSRET_SS_ATTRS bug, the ss
423 * descriptor is not reinitialized. This means that we must
424 * avoid SYSRET with SS == NULL, which could happen if we schedule,
425 * exit the kernel, and re-enter using an interrupt vector. (All
426 * interrupt entries on x86_64 set SS to NULL.) We prevent that
427 * from happening by reloading SS in __switch_to.
428 */
429 USERGS_SYSRET32
430
431#ifdef CONFIG_AUDITSYSCALL
432cstar_auditsys:
433 CFI_RESTORE_STATE
434 movl %r9d,R9(%rsp) /* register to be clobbered by call */
435 auditsys_entry_common
436 movl R9(%rsp),%r9d /* reload 6th syscall arg */
437 jmp cstar_dispatch
438
439sysretl_audit:
440 auditsys_exit sysretl_from_sys_call
441#endif
442
443cstar_tracesys:
444#ifdef CONFIG_AUDITSYSCALL
445 testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT), ASM_THREAD_INFO(TI_flags, %rsp, SIZEOF_PTREGS)
446 jz cstar_auditsys
447#endif
448 xchgl %r9d,%ebp
449 SAVE_EXTRA_REGS
450 CLEAR_RREGS r9
451 movq %rsp,%rdi /* &pt_regs -> arg1 */
452 call syscall_trace_enter
453 LOAD_ARGS32 1 /* reload args from stack in case ptrace changed it */
454 RESTORE_EXTRA_REGS
455 xchgl %ebp,%r9d
456 jmp cstar_do_call
457END(ia32_cstar_target)
458
459ia32_badarg:
460 ASM_CLAC
461 movq $-EFAULT,%rax
462 jmp ia32_sysret
463 CFI_ENDPROC
464
465/*
466 * Emulated IA32 system calls via int 0x80.
467 *
468 * Arguments:
469 * eax system call number
470 * ebx arg1
471 * ecx arg2
472 * edx arg3
473 * esi arg4
474 * edi arg5
475 * ebp arg6 (note: not saved in the stack frame, should not be touched)
476 *
477 * Notes:
478 * Uses the same stack frame as the x86-64 version.
479 * All registers except eax must be saved (but ptrace may violate that).
480 * Arguments are zero extended. For system calls that want sign extension and
481 * take long arguments a wrapper is needed. Most calls can just be called
482 * directly.
483 * Assumes it is only called from user space and entered with interrupts off.
484 */
485
486ENTRY(ia32_syscall)
487 CFI_STARTPROC32 simple
488 CFI_SIGNAL_FRAME
489 CFI_DEF_CFA rsp,5*8
490 /*CFI_REL_OFFSET ss,4*8 */
491 CFI_REL_OFFSET rsp,3*8
492 /*CFI_REL_OFFSET rflags,2*8 */
493 /*CFI_REL_OFFSET cs,1*8 */
494 CFI_REL_OFFSET rip,0*8
495
496 /*
497 * Interrupts are off on entry.
498 * We do not frame this tiny irq-off block with TRACE_IRQS_OFF/ON,
499 * it is too small to ever cause noticeable irq latency.
500 */
501 PARAVIRT_ADJUST_EXCEPTION_FRAME
502 SWAPGS
503 ENABLE_INTERRUPTS(CLBR_NONE)
504
505 /* Zero-extending 32-bit regs, do not remove */
506 movl %eax,%eax
507
508 /* Construct struct pt_regs on stack (iret frame is already on stack) */
509 pushq_cfi_reg rax /* pt_regs->orig_ax */
510 pushq_cfi_reg rdi /* pt_regs->di */
511 pushq_cfi_reg rsi /* pt_regs->si */
512 pushq_cfi_reg rdx /* pt_regs->dx */
513 pushq_cfi_reg rcx /* pt_regs->cx */
514 pushq_cfi $-ENOSYS /* pt_regs->ax */
515 cld
516 sub $(10*8),%rsp /* pt_regs->r8-11,bp,bx,r12-15 not saved */
517 CFI_ADJUST_CFA_OFFSET 10*8
518
519 orl $TS_COMPAT, ASM_THREAD_INFO(TI_status, %rsp, SIZEOF_PTREGS)
520 testl $_TIF_WORK_SYSCALL_ENTRY, ASM_THREAD_INFO(TI_flags, %rsp, SIZEOF_PTREGS)
521 jnz ia32_tracesys
522ia32_do_call:
523 /* 32bit syscall -> 64bit C ABI argument conversion */
524 movl %edi,%r8d /* arg5 */
525 movl %ebp,%r9d /* arg6 */
526 xchg %ecx,%esi /* rsi:arg2, rcx:arg4 */
527 movl %ebx,%edi /* arg1 */
528 movl %edx,%edx /* arg3 (zero extension) */
529 cmpq $(IA32_NR_syscalls-1),%rax
530 ja 1f
531 call *ia32_sys_call_table(,%rax,8) # xxx: rip relative
532ia32_sysret:
533 movq %rax,RAX(%rsp)
5341:
535ia32_ret_from_sys_call:
536 CLEAR_RREGS
537 jmp int_ret_from_sys_call
538
539ia32_tracesys:
540 SAVE_EXTRA_REGS
541 CLEAR_RREGS
542 movq %rsp,%rdi /* &pt_regs -> arg1 */
543 call syscall_trace_enter
544 LOAD_ARGS32 /* reload args from stack in case ptrace changed it */
545 RESTORE_EXTRA_REGS
546 jmp ia32_do_call
547 CFI_ENDPROC
548END(ia32_syscall)
549
550 .macro PTREGSCALL label, func
551 ALIGN
552GLOBAL(\label)
553 leaq \func(%rip),%rax
554 jmp ia32_ptregs_common
555 .endm
556
557 CFI_STARTPROC32
558
559 PTREGSCALL stub32_rt_sigreturn, sys32_rt_sigreturn
560 PTREGSCALL stub32_sigreturn, sys32_sigreturn
561 PTREGSCALL stub32_fork, sys_fork
562 PTREGSCALL stub32_vfork, sys_vfork
563
564 ALIGN
565GLOBAL(stub32_clone)
566 leaq sys_clone(%rip),%rax
567 mov %r8, %rcx
568 jmp ia32_ptregs_common
569
570 ALIGN
571ia32_ptregs_common:
572 CFI_ENDPROC
573 CFI_STARTPROC32 simple
574 CFI_SIGNAL_FRAME
575 CFI_DEF_CFA rsp,SIZEOF_PTREGS
576 CFI_REL_OFFSET rax,RAX
577 CFI_REL_OFFSET rcx,RCX
578 CFI_REL_OFFSET rdx,RDX
579 CFI_REL_OFFSET rsi,RSI
580 CFI_REL_OFFSET rdi,RDI
581 CFI_REL_OFFSET rip,RIP
582/* CFI_REL_OFFSET cs,CS*/
583/* CFI_REL_OFFSET rflags,EFLAGS*/
584 CFI_REL_OFFSET rsp,RSP
585/* CFI_REL_OFFSET ss,SS*/
586 SAVE_EXTRA_REGS 8
587 call *%rax
588 RESTORE_EXTRA_REGS 8
589 ret
590 CFI_ENDPROC
591END(ia32_ptregs_common)
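
The LOAD_ARGS32 comment in the removed ia32entry.S above explains why the syscall number is only truncated back to 32 bits after syscall_trace_enter() may have rewritten it. A minimal user-space sketch of that reasoning, not kernel code (IA32_NR_syscalls below is an illustrative stand-in value):

#include <stdint.h>
#include <stdio.h>

#define IA32_NR_syscalls 512u            /* stand-in for the real limit */

int main(void)
{
	int64_t traced = -1;             /* value a tracer might store to punt the syscall */
	uint32_t nr = (uint32_t)traced;  /* same truncation as 'movl %eax,%eax' on entry */

	/* 0xffffffff is far above any table index, so the
	 * 'cmpq $(IA32_NR_syscalls-1),%rax; ja 1f' check still rejects it. */
	printf("nr=%#x dispatched=%d\n", nr, nr <= IA32_NR_syscalls - 1);
	return 0;
}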
diff --git a/arch/x86/include/asm/dwarf2.h b/arch/x86/include/asm/dwarf2.h
deleted file mode 100644
index de1cdaf4d743..000000000000
--- a/arch/x86/include/asm/dwarf2.h
+++ /dev/null
@@ -1,170 +0,0 @@
1#ifndef _ASM_X86_DWARF2_H
2#define _ASM_X86_DWARF2_H
3
4#ifndef __ASSEMBLY__
5#warning "asm/dwarf2.h should be only included in pure assembly files"
6#endif
7
8/*
9 * Macros for dwarf2 CFI unwind table entries.
10 * See "as.info" for details on these pseudo ops. Unfortunately
11 * they are only supported in very new binutils, so define them
12 * away for older versions.
13 */
14
15#ifdef CONFIG_AS_CFI
16
17#define CFI_STARTPROC .cfi_startproc
18#define CFI_ENDPROC .cfi_endproc
19#define CFI_DEF_CFA .cfi_def_cfa
20#define CFI_DEF_CFA_REGISTER .cfi_def_cfa_register
21#define CFI_DEF_CFA_OFFSET .cfi_def_cfa_offset
22#define CFI_ADJUST_CFA_OFFSET .cfi_adjust_cfa_offset
23#define CFI_OFFSET .cfi_offset
24#define CFI_REL_OFFSET .cfi_rel_offset
25#define CFI_REGISTER .cfi_register
26#define CFI_RESTORE .cfi_restore
27#define CFI_REMEMBER_STATE .cfi_remember_state
28#define CFI_RESTORE_STATE .cfi_restore_state
29#define CFI_UNDEFINED .cfi_undefined
30#define CFI_ESCAPE .cfi_escape
31
32#ifdef CONFIG_AS_CFI_SIGNAL_FRAME
33#define CFI_SIGNAL_FRAME .cfi_signal_frame
34#else
35#define CFI_SIGNAL_FRAME
36#endif
37
38#if defined(CONFIG_AS_CFI_SECTIONS) && defined(__ASSEMBLY__)
39 /*
40 * Emit CFI data in .debug_frame sections, not .eh_frame sections.
41 * The latter we currently just discard since we don't do DWARF
42 * unwinding at runtime. So only the offline DWARF information is
43 * useful to anyone. Note we should not use this directive if this
44 * file is used in the vDSO assembly, or if vmlinux.lds.S gets
45 * changed so it doesn't discard .eh_frame.
46 */
47 .cfi_sections .debug_frame
48#endif
49
50#else
51
52/*
53 * Due to the structure of pre-existing code, don't use assembler line
54 * comment character # to ignore the arguments. Instead, use a dummy macro.
55 */
56.macro cfi_ignore a=0, b=0, c=0, d=0
57.endm
58
59#define CFI_STARTPROC cfi_ignore
60#define CFI_ENDPROC cfi_ignore
61#define CFI_DEF_CFA cfi_ignore
62#define CFI_DEF_CFA_REGISTER cfi_ignore
63#define CFI_DEF_CFA_OFFSET cfi_ignore
64#define CFI_ADJUST_CFA_OFFSET cfi_ignore
65#define CFI_OFFSET cfi_ignore
66#define CFI_REL_OFFSET cfi_ignore
67#define CFI_REGISTER cfi_ignore
68#define CFI_RESTORE cfi_ignore
69#define CFI_REMEMBER_STATE cfi_ignore
70#define CFI_RESTORE_STATE cfi_ignore
71#define CFI_UNDEFINED cfi_ignore
72#define CFI_ESCAPE cfi_ignore
73#define CFI_SIGNAL_FRAME cfi_ignore
74
75#endif
76
77/*
78 * An attempt to make CFI annotations more or less
79 * correct and shorter. It is implied that you know
80 * what you're doing if you use them.
81 */
82#ifdef __ASSEMBLY__
83#ifdef CONFIG_X86_64
84 .macro pushq_cfi reg
85 pushq \reg
86 CFI_ADJUST_CFA_OFFSET 8
87 .endm
88
89 .macro pushq_cfi_reg reg
90 pushq %\reg
91 CFI_ADJUST_CFA_OFFSET 8
92 CFI_REL_OFFSET \reg, 0
93 .endm
94
95 .macro popq_cfi reg
96 popq \reg
97 CFI_ADJUST_CFA_OFFSET -8
98 .endm
99
100 .macro popq_cfi_reg reg
101 popq %\reg
102 CFI_ADJUST_CFA_OFFSET -8
103 CFI_RESTORE \reg
104 .endm
105
106 .macro pushfq_cfi
107 pushfq
108 CFI_ADJUST_CFA_OFFSET 8
109 .endm
110
111 .macro popfq_cfi
112 popfq
113 CFI_ADJUST_CFA_OFFSET -8
114 .endm
115
116 .macro movq_cfi reg offset=0
117 movq %\reg, \offset(%rsp)
118 CFI_REL_OFFSET \reg, \offset
119 .endm
120
121 .macro movq_cfi_restore offset reg
122 movq \offset(%rsp), %\reg
123 CFI_RESTORE \reg
124 .endm
125#else /*!CONFIG_X86_64*/
126 .macro pushl_cfi reg
127 pushl \reg
128 CFI_ADJUST_CFA_OFFSET 4
129 .endm
130
131 .macro pushl_cfi_reg reg
132 pushl %\reg
133 CFI_ADJUST_CFA_OFFSET 4
134 CFI_REL_OFFSET \reg, 0
135 .endm
136
137 .macro popl_cfi reg
138 popl \reg
139 CFI_ADJUST_CFA_OFFSET -4
140 .endm
141
142 .macro popl_cfi_reg reg
143 popl %\reg
144 CFI_ADJUST_CFA_OFFSET -4
145 CFI_RESTORE \reg
146 .endm
147
148 .macro pushfl_cfi
149 pushfl
150 CFI_ADJUST_CFA_OFFSET 4
151 .endm
152
153 .macro popfl_cfi
154 popfl
155 CFI_ADJUST_CFA_OFFSET -4
156 .endm
157
158 .macro movl_cfi reg offset=0
159 movl %\reg, \offset(%esp)
160 CFI_REL_OFFSET \reg, \offset
161 .endm
162
163 .macro movl_cfi_restore offset reg
164 movl \offset(%esp), %\reg
165 CFI_RESTORE \reg
166 .endm
167#endif /*!CONFIG_X86_64*/
168#endif /*__ASSEMBLY__*/
169
170#endif /* _ASM_X86_DWARF2_H */
diff --git a/arch/x86/include/asm/frame.h b/arch/x86/include/asm/frame.h
index 3b629f47eb65..793179cf8e21 100644
--- a/arch/x86/include/asm/frame.h
+++ b/arch/x86/include/asm/frame.h
@@ -1,20 +1,17 @@
 #ifdef __ASSEMBLY__
 
 #include <asm/asm.h>
-#include <asm/dwarf2.h>
 
 /* The annotation hides the frame from the unwinder and makes it look
    like a ordinary ebp save/restore. This avoids some special cases for
    frame pointer later */
 #ifdef CONFIG_FRAME_POINTER
 	.macro FRAME
-	__ASM_SIZE(push,_cfi)	%__ASM_REG(bp)
-	CFI_REL_OFFSET		__ASM_REG(bp), 0
+	__ASM_SIZE(push,)	%__ASM_REG(bp)
 	__ASM_SIZE(mov)	%__ASM_REG(sp), %__ASM_REG(bp)
 	.endm
 	.macro ENDFRAME
-	__ASM_SIZE(pop,_cfi)	%__ASM_REG(bp)
-	CFI_RESTORE		__ASM_REG(bp)
+	__ASM_SIZE(pop,)	%__ASM_REG(bp)
 	.endm
 #else
 	.macro FRAME
diff --git a/arch/x86/include/asm/msr.h b/arch/x86/include/asm/msr.h
index 441ecf83d81a..e6a707eb5081 100644
--- a/arch/x86/include/asm/msr.h
+++ b/arch/x86/include/asm/msr.h
@@ -206,8 +206,13 @@ do { \
 
 #endif	/* !CONFIG_PARAVIRT */
 
-#define wrmsrl_safe(msr, val) wrmsr_safe((msr), (u32)(val), \
-					     (u32)((val) >> 32))
+/*
+ * 64-bit version of wrmsr_safe():
+ */
+static inline int wrmsrl_safe(u32 msr, u64 val)
+{
+	return wrmsr_safe(msr, (u32)val, (u32)(val >> 32));
+}
 
 #define write_tsc(low, high) wrmsr(MSR_IA32_TSC, (low), (high))
 
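
For reference, a stand-alone sketch of the low/high split the new wrmsrl_safe() inline performs; the WRMSR instruction consumes the value as an EDX:EAX pair (the value below is arbitrary, and this is plain user-space C, not kernel code):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t val = 0xdeadbeef01234567ull;
	uint32_t lo = (uint32_t)val;          /* would go in EAX */
	uint32_t hi = (uint32_t)(val >> 32);  /* would go in EDX */

	printf("lo=%#x hi=%#x\n", lo, hi);
	return 0;
}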
diff --git a/arch/x86/include/asm/proto.h b/arch/x86/include/asm/proto.h
index a90f8972dad5..a4a77286cb1d 100644
--- a/arch/x86/include/asm/proto.h
+++ b/arch/x86/include/asm/proto.h
@@ -5,12 +5,14 @@
 
 /* misc architecture specific prototypes */
 
-void system_call(void);
 void syscall_init(void);
 
-void ia32_syscall(void);
-void ia32_cstar_target(void);
-void ia32_sysenter_target(void);
+void entry_SYSCALL_64(void);
+void entry_SYSCALL_compat(void);
+void entry_INT80_32(void);
+void entry_INT80_compat(void);
+void entry_SYSENTER_32(void);
+void entry_SYSENTER_compat(void);
 
 void x86_configure_nx(void);
 void x86_report_nx(void);
diff --git a/arch/x86/include/asm/segment.h b/arch/x86/include/asm/segment.h
index 5a9856eb12ba..7d5a1929d76b 100644
--- a/arch/x86/include/asm/segment.h
+++ b/arch/x86/include/asm/segment.h
@@ -231,11 +231,21 @@
 #define TLS_SIZE (GDT_ENTRY_TLS_ENTRIES* 8)
 
 #ifdef __KERNEL__
+
+/*
+ * early_idt_handler_array is an array of entry points referenced in the
+ * early IDT.  For simplicity, it's a real array with one entry point
+ * every nine bytes.  That leaves room for an optional 'push $0' if the
+ * vector has no error code (two bytes), a 'push $vector_number' (two
+ * bytes), and a jump to the common entry code (up to five bytes).
+ */
+#define EARLY_IDT_HANDLER_SIZE 9
+
 #ifndef __ASSEMBLY__
 
-extern const char early_idt_handlers[NUM_EXCEPTION_VECTORS][2+2+5];
+extern const char early_idt_handler_array[NUM_EXCEPTION_VECTORS][EARLY_IDT_HANDLER_SIZE];
 #ifdef CONFIG_TRACING
-# define trace_early_idt_handlers early_idt_handlers
+# define trace_early_idt_handler_array early_idt_handler_array
 #endif
 
 /*
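
The nine bytes in the new comment are simply the sum of the three worst-case instruction sizes it lists; a throwaway C check of that arithmetic (the constants only restate the comment):

#include <stdio.h>

#define PUSH_ZERO_BYTES    2   /* optional 'push $0' when the vector has no error code */
#define PUSH_VECTOR_BYTES  2   /* 'push $vector_number' */
#define JMP_BYTES          5   /* jump to the common entry code, worst case */

int main(void)
{
	printf("%d\n", PUSH_ZERO_BYTES + PUSH_VECTOR_BYTES + JMP_BYTES);  /* 9 */
	return 0;
}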
diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile
index 9bcd0b56ca17..01663ee5f1b7 100644
--- a/arch/x86/kernel/Makefile
+++ b/arch/x86/kernel/Makefile
@@ -22,7 +22,7 @@ KASAN_SANITIZE_dumpstack_$(BITS).o := n
 
 CFLAGS_irq.o := -I$(src)/../include/asm/trace
 
-obj-y			:= process_$(BITS).o signal.o entry_$(BITS).o
+obj-y			:= process_$(BITS).o signal.o
 obj-y			+= traps.o irq.o irq_$(BITS).o dumpstack_$(BITS).o
 obj-y			+= time.o ioport.o ldt.o dumpstack.o nmi.o
 obj-y			+= setup.o x86_init.o i8259.o irqinit.o jump_label.o
@@ -31,9 +31,6 @@ obj-y += probe_roms.o
 obj-$(CONFIG_X86_32)	+= i386_ksyms_32.o
 obj-$(CONFIG_X86_64)	+= sys_x86_64.o x8664_ksyms_64.o
 obj-$(CONFIG_X86_64)	+= mcount_64.o
-obj-y			+= syscall_$(BITS).o vsyscall_gtod.o
-obj-$(CONFIG_IA32_EMULATION)	+= syscall_32.o
-obj-$(CONFIG_X86_VSYSCALL_EMULATION)	+= vsyscall_64.o vsyscall_emu_64.o
 obj-$(CONFIG_X86_ESPFIX64)	+= espfix_64.o
 obj-$(CONFIG_SYSFS)	+= ksysfs.o
 obj-y			+= bootflag.o e820.o
diff --git a/arch/x86/kernel/asm-offsets_64.c b/arch/x86/kernel/asm-offsets_64.c
index dcaab87da629..599afcf0005f 100644
--- a/arch/x86/kernel/asm-offsets_64.c
+++ b/arch/x86/kernel/asm-offsets_64.c
@@ -66,7 +66,7 @@ int main(void)
 	DEFINE(__NR_syscall_max, sizeof(syscalls_64) - 1);
 	DEFINE(NR_syscalls, sizeof(syscalls_64));
 
-	DEFINE(__NR_ia32_syscall_max, sizeof(syscalls_ia32) - 1);
+	DEFINE(__NR_entry_INT80_compat_max, sizeof(syscalls_ia32) - 1);
 	DEFINE(IA32_NR_syscalls, sizeof(syscalls_ia32));
 
 	return 0;
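
The renamed DEFINE() still relies on the sizeof trick asm-offsets_64.c uses: the syscalls_64/syscalls_ia32 char arrays get a designated initializer at every syscall number, so sizeof() evaluates to (highest number + 1) at compile time. A stand-alone illustration with made-up numbers:

#include <stdio.h>

static char syscalls_demo[] = {
	[0]  = 1,
	[5]  = 1,
	[41] = 1,   /* pretend 41 is the highest syscall number */
};

int main(void)
{
	printf("NR_syscalls=%zu, __NR_syscall_max=%zu\n",
	       sizeof(syscalls_demo), sizeof(syscalls_demo) - 1);
	return 0;
}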
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index b6fe2e47f7f1..cc7f753e571d 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -1026,7 +1026,7 @@ void enable_sep_cpu(void)
 		    (unsigned long)tss + offsetofend(struct tss_struct, SYSENTER_stack),
 		    0);
 
-	wrmsr(MSR_IA32_SYSENTER_EIP, (unsigned long)ia32_sysenter_target, 0);
+	wrmsr(MSR_IA32_SYSENTER_EIP, (unsigned long)entry_SYSENTER_32, 0);
 
 out:
 	put_cpu();
@@ -1204,10 +1204,10 @@ void syscall_init(void)
 	 * set CS/DS but only a 32bit target. LSTAR sets the 64bit rip.
 	 */
 	wrmsrl(MSR_STAR, ((u64)__USER32_CS)<<48  | ((u64)__KERNEL_CS)<<32);
-	wrmsrl(MSR_LSTAR, system_call);
+	wrmsrl(MSR_LSTAR, entry_SYSCALL_64);
 
 #ifdef CONFIG_IA32_EMULATION
-	wrmsrl(MSR_CSTAR, ia32_cstar_target);
+	wrmsrl(MSR_CSTAR, entry_SYSCALL_compat);
 	/*
 	 * This only works on Intel CPUs.
 	 * On AMD CPUs these MSRs are 32-bit, CPU truncates MSR_IA32_SYSENTER_EIP.
@@ -1216,7 +1216,7 @@ void syscall_init(void)
 	 */
 	wrmsrl_safe(MSR_IA32_SYSENTER_CS, (u64)__KERNEL_CS);
 	wrmsrl_safe(MSR_IA32_SYSENTER_ESP, 0ULL);
-	wrmsrl_safe(MSR_IA32_SYSENTER_EIP, (u64)ia32_sysenter_target);
+	wrmsrl_safe(MSR_IA32_SYSENTER_EIP, (u64)entry_SYSENTER_compat);
 #else
 	wrmsrl(MSR_CSTAR, ignore_sysret);
 	wrmsrl_safe(MSR_IA32_SYSENTER_CS, (u64)GDT_ENTRY_INVALID_SEG);
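
The MSR_STAR write a few lines up packs the compat user and kernel code-segment selectors into the top two 16-bit fields of the MSR. A stand-alone sketch of that packing; the selector values below are the conventional x86-64 GDT values and are assumed here purely for illustration:

#include <stdint.h>
#include <stdio.h>

#define DEMO_KERNEL_CS  0x10   /* assumed value of __KERNEL_CS */
#define DEMO_USER32_CS  0x23   /* assumed value of __USER32_CS */

int main(void)
{
	uint64_t star = ((uint64_t)DEMO_USER32_CS << 48) |
	                ((uint64_t)DEMO_KERNEL_CS << 32);

	printf("MSR_STAR = %#llx\n", (unsigned long long)star);  /* 0x23001000000000 */
	return 0;
}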
diff --git a/arch/x86/kernel/entry_32.S b/arch/x86/kernel/entry_32.S
deleted file mode 100644
index 1c309763e321..000000000000
--- a/arch/x86/kernel/entry_32.S
+++ /dev/null
@@ -1,1401 +0,0 @@
1/*
2 *
3 * Copyright (C) 1991, 1992 Linus Torvalds
4 */
5
6/*
7 * entry.S contains the system-call and fault low-level handling routines.
8 * This also contains the timer-interrupt handler, as well as all interrupts
9 * and faults that can result in a task-switch.
10 *
11 * NOTE: This code handles signal-recognition, which happens every time
12 * after a timer-interrupt and after each system call.
13 *
14 * I changed all the .align's to 4 (16 byte alignment), as that's faster
15 * on a 486.
16 *
17 * Stack layout in 'syscall_exit':
18 * ptrace needs to have all regs on the stack.
19 * if the order here is changed, it needs to be
20 * updated in fork.c:copy_process, signal.c:do_signal,
21 * ptrace.c and ptrace.h
22 *
23 * 0(%esp) - %ebx
24 * 4(%esp) - %ecx
25 * 8(%esp) - %edx
26 * C(%esp) - %esi
27 * 10(%esp) - %edi
28 * 14(%esp) - %ebp
29 * 18(%esp) - %eax
30 * 1C(%esp) - %ds
31 * 20(%esp) - %es
32 * 24(%esp) - %fs
33 * 28(%esp) - %gs saved iff !CONFIG_X86_32_LAZY_GS
34 * 2C(%esp) - orig_eax
35 * 30(%esp) - %eip
36 * 34(%esp) - %cs
37 * 38(%esp) - %eflags
38 * 3C(%esp) - %oldesp
39 * 40(%esp) - %oldss
40 *
41 * "current" is in register %ebx during any slow entries.
42 */
43
44#include <linux/linkage.h>
45#include <linux/err.h>
46#include <asm/thread_info.h>
47#include <asm/irqflags.h>
48#include <asm/errno.h>
49#include <asm/segment.h>
50#include <asm/smp.h>
51#include <asm/page_types.h>
52#include <asm/percpu.h>
53#include <asm/dwarf2.h>
54#include <asm/processor-flags.h>
55#include <asm/ftrace.h>
56#include <asm/irq_vectors.h>
57#include <asm/cpufeature.h>
58#include <asm/alternative-asm.h>
59#include <asm/asm.h>
60#include <asm/smap.h>
61
62/* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */
63#include <linux/elf-em.h>
64#define AUDIT_ARCH_I386 (EM_386|__AUDIT_ARCH_LE)
65#define __AUDIT_ARCH_LE 0x40000000
66
67#ifndef CONFIG_AUDITSYSCALL
68#define sysenter_audit syscall_trace_entry
69#define sysexit_audit syscall_exit_work
70#endif
71
72 .section .entry.text, "ax"
73
74/*
75 * We use macros for low-level operations which need to be overridden
76 * for paravirtualization. The following will never clobber any registers:
77 * INTERRUPT_RETURN (aka. "iret")
78 * GET_CR0_INTO_EAX (aka. "movl %cr0, %eax")
79 * ENABLE_INTERRUPTS_SYSEXIT (aka "sti; sysexit").
80 *
81 * For DISABLE_INTERRUPTS/ENABLE_INTERRUPTS (aka "cli"/"sti"), you must
82 * specify what registers can be overwritten (CLBR_NONE, CLBR_EAX/EDX/ECX/ANY).
83 * Allowing a register to be clobbered can shrink the paravirt replacement
84 * enough to patch inline, increasing performance.
85 */
86
87#ifdef CONFIG_PREEMPT
88#define preempt_stop(clobbers) DISABLE_INTERRUPTS(clobbers); TRACE_IRQS_OFF
89#else
90#define preempt_stop(clobbers)
91#define resume_kernel restore_all
92#endif
93
94.macro TRACE_IRQS_IRET
95#ifdef CONFIG_TRACE_IRQFLAGS
96 testl $X86_EFLAGS_IF,PT_EFLAGS(%esp) # interrupts off?
97 jz 1f
98 TRACE_IRQS_ON
991:
100#endif
101.endm
102
103/*
104 * User gs save/restore
105 *
106 * %gs is used for userland TLS and kernel only uses it for stack
107 * canary which is required to be at %gs:20 by gcc. Read the comment
108 * at the top of stackprotector.h for more info.
109 *
110 * Local labels 98 and 99 are used.
111 */
112#ifdef CONFIG_X86_32_LAZY_GS
113
114 /* unfortunately push/pop can't be no-op */
115.macro PUSH_GS
116 pushl_cfi $0
117.endm
118.macro POP_GS pop=0
119 addl $(4 + \pop), %esp
120 CFI_ADJUST_CFA_OFFSET -(4 + \pop)
121.endm
122.macro POP_GS_EX
123.endm
124
125 /* all the rest are no-op */
126.macro PTGS_TO_GS
127.endm
128.macro PTGS_TO_GS_EX
129.endm
130.macro GS_TO_REG reg
131.endm
132.macro REG_TO_PTGS reg
133.endm
134.macro SET_KERNEL_GS reg
135.endm
136
137#else /* CONFIG_X86_32_LAZY_GS */
138
139.macro PUSH_GS
140 pushl_cfi %gs
141 /*CFI_REL_OFFSET gs, 0*/
142.endm
143
144.macro POP_GS pop=0
14598: popl_cfi %gs
146 /*CFI_RESTORE gs*/
147 .if \pop <> 0
148 add $\pop, %esp
149 CFI_ADJUST_CFA_OFFSET -\pop
150 .endif
151.endm
152.macro POP_GS_EX
153.pushsection .fixup, "ax"
15499: movl $0, (%esp)
155 jmp 98b
156.popsection
157 _ASM_EXTABLE(98b,99b)
158.endm
159
160.macro PTGS_TO_GS
16198: mov PT_GS(%esp), %gs
162.endm
163.macro PTGS_TO_GS_EX
164.pushsection .fixup, "ax"
16599: movl $0, PT_GS(%esp)
166 jmp 98b
167.popsection
168 _ASM_EXTABLE(98b,99b)
169.endm
170
171.macro GS_TO_REG reg
172 movl %gs, \reg
173 /*CFI_REGISTER gs, \reg*/
174.endm
175.macro REG_TO_PTGS reg
176 movl \reg, PT_GS(%esp)
177 /*CFI_REL_OFFSET gs, PT_GS*/
178.endm
179.macro SET_KERNEL_GS reg
180 movl $(__KERNEL_STACK_CANARY), \reg
181 movl \reg, %gs
182.endm
183
184#endif /* CONFIG_X86_32_LAZY_GS */
185
186.macro SAVE_ALL
187 cld
188 PUSH_GS
189 pushl_cfi %fs
190 /*CFI_REL_OFFSET fs, 0;*/
191 pushl_cfi %es
192 /*CFI_REL_OFFSET es, 0;*/
193 pushl_cfi %ds
194 /*CFI_REL_OFFSET ds, 0;*/
195 pushl_cfi %eax
196 CFI_REL_OFFSET eax, 0
197 pushl_cfi %ebp
198 CFI_REL_OFFSET ebp, 0
199 pushl_cfi %edi
200 CFI_REL_OFFSET edi, 0
201 pushl_cfi %esi
202 CFI_REL_OFFSET esi, 0
203 pushl_cfi %edx
204 CFI_REL_OFFSET edx, 0
205 pushl_cfi %ecx
206 CFI_REL_OFFSET ecx, 0
207 pushl_cfi %ebx
208 CFI_REL_OFFSET ebx, 0
209 movl $(__USER_DS), %edx
210 movl %edx, %ds
211 movl %edx, %es
212 movl $(__KERNEL_PERCPU), %edx
213 movl %edx, %fs
214 SET_KERNEL_GS %edx
215.endm
216
217.macro RESTORE_INT_REGS
218 popl_cfi %ebx
219 CFI_RESTORE ebx
220 popl_cfi %ecx
221 CFI_RESTORE ecx
222 popl_cfi %edx
223 CFI_RESTORE edx
224 popl_cfi %esi
225 CFI_RESTORE esi
226 popl_cfi %edi
227 CFI_RESTORE edi
228 popl_cfi %ebp
229 CFI_RESTORE ebp
230 popl_cfi %eax
231 CFI_RESTORE eax
232.endm
233
234.macro RESTORE_REGS pop=0
235 RESTORE_INT_REGS
2361: popl_cfi %ds
237 /*CFI_RESTORE ds;*/
2382: popl_cfi %es
239 /*CFI_RESTORE es;*/
2403: popl_cfi %fs
241 /*CFI_RESTORE fs;*/
242 POP_GS \pop
243.pushsection .fixup, "ax"
2444: movl $0, (%esp)
245 jmp 1b
2465: movl $0, (%esp)
247 jmp 2b
2486: movl $0, (%esp)
249 jmp 3b
250.popsection
251 _ASM_EXTABLE(1b,4b)
252 _ASM_EXTABLE(2b,5b)
253 _ASM_EXTABLE(3b,6b)
254 POP_GS_EX
255.endm
256
257.macro RING0_INT_FRAME
258 CFI_STARTPROC simple
259 CFI_SIGNAL_FRAME
260 CFI_DEF_CFA esp, 3*4
261 /*CFI_OFFSET cs, -2*4;*/
262 CFI_OFFSET eip, -3*4
263.endm
264
265.macro RING0_EC_FRAME
266 CFI_STARTPROC simple
267 CFI_SIGNAL_FRAME
268 CFI_DEF_CFA esp, 4*4
269 /*CFI_OFFSET cs, -2*4;*/
270 CFI_OFFSET eip, -3*4
271.endm
272
273.macro RING0_PTREGS_FRAME
274 CFI_STARTPROC simple
275 CFI_SIGNAL_FRAME
276 CFI_DEF_CFA esp, PT_OLDESP-PT_EBX
277 /*CFI_OFFSET cs, PT_CS-PT_OLDESP;*/
278 CFI_OFFSET eip, PT_EIP-PT_OLDESP
279 /*CFI_OFFSET es, PT_ES-PT_OLDESP;*/
280 /*CFI_OFFSET ds, PT_DS-PT_OLDESP;*/
281 CFI_OFFSET eax, PT_EAX-PT_OLDESP
282 CFI_OFFSET ebp, PT_EBP-PT_OLDESP
283 CFI_OFFSET edi, PT_EDI-PT_OLDESP
284 CFI_OFFSET esi, PT_ESI-PT_OLDESP
285 CFI_OFFSET edx, PT_EDX-PT_OLDESP
286 CFI_OFFSET ecx, PT_ECX-PT_OLDESP
287 CFI_OFFSET ebx, PT_EBX-PT_OLDESP
288.endm
289
290ENTRY(ret_from_fork)
291 CFI_STARTPROC
292 pushl_cfi %eax
293 call schedule_tail
294 GET_THREAD_INFO(%ebp)
295 popl_cfi %eax
296 pushl_cfi $0x0202 # Reset kernel eflags
297 popfl_cfi
298 jmp syscall_exit
299 CFI_ENDPROC
300END(ret_from_fork)
301
302ENTRY(ret_from_kernel_thread)
303 CFI_STARTPROC
304 pushl_cfi %eax
305 call schedule_tail
306 GET_THREAD_INFO(%ebp)
307 popl_cfi %eax
308 pushl_cfi $0x0202 # Reset kernel eflags
309 popfl_cfi
310 movl PT_EBP(%esp),%eax
311 call *PT_EBX(%esp)
312 movl $0,PT_EAX(%esp)
313 jmp syscall_exit
314 CFI_ENDPROC
315ENDPROC(ret_from_kernel_thread)
316
317/*
318 * Return to user mode is not as complex as all this looks,
319 * but we want the default path for a system call return to
320 * go as quickly as possible which is why some of this is
321 * less clear than it otherwise should be.
322 */
323
324 # userspace resumption stub bypassing syscall exit tracing
325 ALIGN
326 RING0_PTREGS_FRAME
327ret_from_exception:
328 preempt_stop(CLBR_ANY)
329ret_from_intr:
330 GET_THREAD_INFO(%ebp)
331#ifdef CONFIG_VM86
332 movl PT_EFLAGS(%esp), %eax # mix EFLAGS and CS
333 movb PT_CS(%esp), %al
334 andl $(X86_EFLAGS_VM | SEGMENT_RPL_MASK), %eax
335#else
336 /*
337 * We can be coming here from child spawned by kernel_thread().
338 */
339 movl PT_CS(%esp), %eax
340 andl $SEGMENT_RPL_MASK, %eax
341#endif
342 cmpl $USER_RPL, %eax
343 jb resume_kernel # not returning to v8086 or userspace
344
345ENTRY(resume_userspace)
346 LOCKDEP_SYS_EXIT
347 DISABLE_INTERRUPTS(CLBR_ANY) # make sure we don't miss an interrupt
348 # setting need_resched or sigpending
349 # between sampling and the iret
350 TRACE_IRQS_OFF
351 movl TI_flags(%ebp), %ecx
352 andl $_TIF_WORK_MASK, %ecx # is there any work to be done on
353 # int/exception return?
354 jne work_pending
355 jmp restore_all
356END(ret_from_exception)
357
358#ifdef CONFIG_PREEMPT
359ENTRY(resume_kernel)
360 DISABLE_INTERRUPTS(CLBR_ANY)
361need_resched:
362 cmpl $0,PER_CPU_VAR(__preempt_count)
363 jnz restore_all
364 testl $X86_EFLAGS_IF,PT_EFLAGS(%esp) # interrupts off (exception path) ?
365 jz restore_all
366 call preempt_schedule_irq
367 jmp need_resched
368END(resume_kernel)
369#endif
370 CFI_ENDPROC
371
372/* SYSENTER_RETURN points to after the "sysenter" instruction in
373 the vsyscall page. See vsyscall-sysentry.S, which defines the symbol. */
374
375 # sysenter call handler stub
376ENTRY(ia32_sysenter_target)
377 CFI_STARTPROC simple
378 CFI_SIGNAL_FRAME
379 CFI_DEF_CFA esp, 0
380 CFI_REGISTER esp, ebp
381 movl TSS_sysenter_sp0(%esp),%esp
382sysenter_past_esp:
383 /*
384 * Interrupts are disabled here, but we can't trace it until
385 * enough kernel state to call TRACE_IRQS_OFF can be called - but
386 * we immediately enable interrupts at that point anyway.
387 */
388 pushl_cfi $__USER_DS
389 /*CFI_REL_OFFSET ss, 0*/
390 pushl_cfi %ebp
391 CFI_REL_OFFSET esp, 0
392 pushfl_cfi
393 orl $X86_EFLAGS_IF, (%esp)
394 pushl_cfi $__USER_CS
395 /*CFI_REL_OFFSET cs, 0*/
396 /*
397 * Push current_thread_info()->sysenter_return to the stack.
398 * A tiny bit of offset fixup is necessary: TI_sysenter_return
399 * is relative to thread_info, which is at the bottom of the
400 * kernel stack page. 4*4 means the 4 words pushed above;
401 * TOP_OF_KERNEL_STACK_PADDING takes us to the top of the stack;
402 * and THREAD_SIZE takes us to the bottom.
403 */
404 pushl_cfi ((TI_sysenter_return) - THREAD_SIZE + TOP_OF_KERNEL_STACK_PADDING + 4*4)(%esp)
405 CFI_REL_OFFSET eip, 0
406
407 pushl_cfi %eax
408 SAVE_ALL
409 ENABLE_INTERRUPTS(CLBR_NONE)
410
411/*
412 * Load the potential sixth argument from user stack.
413 * Careful about security.
414 */
415 cmpl $__PAGE_OFFSET-3,%ebp
416 jae syscall_fault
417 ASM_STAC
4181: movl (%ebp),%ebp
419 ASM_CLAC
420 movl %ebp,PT_EBP(%esp)
421 _ASM_EXTABLE(1b,syscall_fault)
422
423 GET_THREAD_INFO(%ebp)
424
425 testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%ebp)
426 jnz sysenter_audit
427sysenter_do_call:
428 cmpl $(NR_syscalls), %eax
429 jae sysenter_badsys
430 call *sys_call_table(,%eax,4)
431sysenter_after_call:
432 movl %eax,PT_EAX(%esp)
433 LOCKDEP_SYS_EXIT
434 DISABLE_INTERRUPTS(CLBR_ANY)
435 TRACE_IRQS_OFF
436 movl TI_flags(%ebp), %ecx
437 testl $_TIF_ALLWORK_MASK, %ecx
438 jnz sysexit_audit
439sysenter_exit:
440/* if something modifies registers it must also disable sysexit */
441 movl PT_EIP(%esp), %edx
442 movl PT_OLDESP(%esp), %ecx
443 xorl %ebp,%ebp
444 TRACE_IRQS_ON
4451: mov PT_FS(%esp), %fs
446 PTGS_TO_GS
447 ENABLE_INTERRUPTS_SYSEXIT
448
449#ifdef CONFIG_AUDITSYSCALL
450sysenter_audit:
451 testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%ebp)
452 jnz syscall_trace_entry
453 /* movl PT_EAX(%esp), %eax already set, syscall number: 1st arg to audit */
454 movl PT_EBX(%esp), %edx /* ebx/a0: 2nd arg to audit */
455	/* movl PT_ECX(%esp), %ecx already set, a1: 3rd arg to audit */
456 pushl_cfi PT_ESI(%esp) /* a3: 5th arg */
457 pushl_cfi PT_EDX+4(%esp) /* a2: 4th arg */
458 call __audit_syscall_entry
459 popl_cfi %ecx /* get that remapped edx off the stack */
460 popl_cfi %ecx /* get that remapped esi off the stack */
461 movl PT_EAX(%esp),%eax /* reload syscall number */
462 jmp sysenter_do_call
463
464sysexit_audit:
465 testl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT), %ecx
466 jnz syscall_exit_work
467 TRACE_IRQS_ON
468 ENABLE_INTERRUPTS(CLBR_ANY)
469 movl %eax,%edx /* second arg, syscall return value */
470 cmpl $-MAX_ERRNO,%eax /* is it an error ? */
471 setbe %al /* 1 if so, 0 if not */
472 movzbl %al,%eax /* zero-extend that */
473 call __audit_syscall_exit
474 DISABLE_INTERRUPTS(CLBR_ANY)
475 TRACE_IRQS_OFF
476 movl TI_flags(%ebp), %ecx
477 testl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT), %ecx
478 jnz syscall_exit_work
479 movl PT_EAX(%esp),%eax /* reload syscall return value */
480 jmp sysenter_exit
481#endif
482
483 CFI_ENDPROC
484.pushsection .fixup,"ax"
4852: movl $0,PT_FS(%esp)
486 jmp 1b
487.popsection
488 _ASM_EXTABLE(1b,2b)
489 PTGS_TO_GS_EX
490ENDPROC(ia32_sysenter_target)
491
492 # system call handler stub
493ENTRY(system_call)
494 RING0_INT_FRAME # can't unwind into user space anyway
495 ASM_CLAC
496 pushl_cfi %eax # save orig_eax
497 SAVE_ALL
498 GET_THREAD_INFO(%ebp)
499 # system call tracing in operation / emulation
500 testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%ebp)
501 jnz syscall_trace_entry
502 cmpl $(NR_syscalls), %eax
503 jae syscall_badsys
504syscall_call:
505 call *sys_call_table(,%eax,4)
506syscall_after_call:
507 movl %eax,PT_EAX(%esp) # store the return value
508syscall_exit:
509 LOCKDEP_SYS_EXIT
510 DISABLE_INTERRUPTS(CLBR_ANY) # make sure we don't miss an interrupt
511 # setting need_resched or sigpending
512 # between sampling and the iret
513 TRACE_IRQS_OFF
514 movl TI_flags(%ebp), %ecx
515 testl $_TIF_ALLWORK_MASK, %ecx # current->work
516 jnz syscall_exit_work
517
518restore_all:
519 TRACE_IRQS_IRET
520restore_all_notrace:
521#ifdef CONFIG_X86_ESPFIX32
522 movl PT_EFLAGS(%esp), %eax # mix EFLAGS, SS and CS
523 # Warning: PT_OLDSS(%esp) contains the wrong/random values if we
524 # are returning to the kernel.
525 # See comments in process.c:copy_thread() for details.
526 movb PT_OLDSS(%esp), %ah
527 movb PT_CS(%esp), %al
528 andl $(X86_EFLAGS_VM | (SEGMENT_TI_MASK << 8) | SEGMENT_RPL_MASK), %eax
529 cmpl $((SEGMENT_LDT << 8) | USER_RPL), %eax
530 CFI_REMEMBER_STATE
531 je ldt_ss # returning to user-space with LDT SS
532#endif
533restore_nocheck:
534 RESTORE_REGS 4 # skip orig_eax/error_code
535irq_return:
536 INTERRUPT_RETURN
537.section .fixup,"ax"
538ENTRY(iret_exc)
539 pushl $0 # no error code
540 pushl $do_iret_error
541 jmp error_code
542.previous
543 _ASM_EXTABLE(irq_return,iret_exc)
544
545#ifdef CONFIG_X86_ESPFIX32
546 CFI_RESTORE_STATE
547ldt_ss:
548#ifdef CONFIG_PARAVIRT
549 /*
550 * The kernel can't run on a non-flat stack if paravirt mode
551 * is active. Rather than try to fixup the high bits of
552 * ESP, bypass this code entirely. This may break DOSemu
553 * and/or Wine support in a paravirt VM, although the option
554 * is still available to implement the setting of the high
555 * 16-bits in the INTERRUPT_RETURN paravirt-op.
556 */
557 cmpl $0, pv_info+PARAVIRT_enabled
558 jne restore_nocheck
559#endif
560
561/*
562 * Setup and switch to ESPFIX stack
563 *
564 * We're returning to userspace with a 16 bit stack. The CPU will not
565 * restore the high word of ESP for us on executing iret... This is an
566 * "official" bug of all the x86-compatible CPUs, which we can work
567 * around to make dosemu and wine happy. We do this by preloading the
568 * high word of ESP with the high word of the userspace ESP while
569 * compensating for the offset by changing to the ESPFIX segment with
570 * a base address that matches for the difference.
571 */
572#define GDT_ESPFIX_SS PER_CPU_VAR(gdt_page) + (GDT_ENTRY_ESPFIX_SS * 8)
573 mov %esp, %edx /* load kernel esp */
574 mov PT_OLDESP(%esp), %eax /* load userspace esp */
575 mov %dx, %ax /* eax: new kernel esp */
576 sub %eax, %edx /* offset (low word is 0) */
577 shr $16, %edx
578 mov %dl, GDT_ESPFIX_SS + 4 /* bits 16..23 */
579 mov %dh, GDT_ESPFIX_SS + 7 /* bits 24..31 */
580 pushl_cfi $__ESPFIX_SS
581 pushl_cfi %eax /* new kernel esp */
582 /* Disable interrupts, but do not irqtrace this section: we
583 * will soon execute iret and the tracer was already set to
584 * the irqstate after the iret */
585 DISABLE_INTERRUPTS(CLBR_EAX)
586 lss (%esp), %esp /* switch to espfix segment */
587 CFI_ADJUST_CFA_OFFSET -8
588 jmp restore_nocheck
589#endif
590 CFI_ENDPROC
591ENDPROC(system_call)
592
593 # perform work that needs to be done immediately before resumption
594 ALIGN
595 RING0_PTREGS_FRAME # can't unwind into user space anyway
596work_pending:
597 testb $_TIF_NEED_RESCHED, %cl
598 jz work_notifysig
599work_resched:
600 call schedule
601 LOCKDEP_SYS_EXIT
602 DISABLE_INTERRUPTS(CLBR_ANY) # make sure we don't miss an interrupt
603 # setting need_resched or sigpending
604 # between sampling and the iret
605 TRACE_IRQS_OFF
606 movl TI_flags(%ebp), %ecx
607 andl $_TIF_WORK_MASK, %ecx # is there any work to be done other
608 # than syscall tracing?
609 jz restore_all
610 testb $_TIF_NEED_RESCHED, %cl
611 jnz work_resched
612
613work_notifysig: # deal with pending signals and
614 # notify-resume requests
615#ifdef CONFIG_VM86
616 testl $X86_EFLAGS_VM, PT_EFLAGS(%esp)
617 movl %esp, %eax
618 jnz work_notifysig_v86 # returning to kernel-space or
619 # vm86-space
6201:
621#else
622 movl %esp, %eax
623#endif
624 TRACE_IRQS_ON
625 ENABLE_INTERRUPTS(CLBR_NONE)
626 movb PT_CS(%esp), %bl
627 andb $SEGMENT_RPL_MASK, %bl
628 cmpb $USER_RPL, %bl
629 jb resume_kernel
630 xorl %edx, %edx
631 call do_notify_resume
632 jmp resume_userspace
633
634#ifdef CONFIG_VM86
635 ALIGN
636work_notifysig_v86:
637 pushl_cfi %ecx # save ti_flags for do_notify_resume
638 call save_v86_state # %eax contains pt_regs pointer
639 popl_cfi %ecx
640 movl %eax, %esp
641 jmp 1b
642#endif
643END(work_pending)
644
645 # perform syscall exit tracing
646 ALIGN
647syscall_trace_entry:
648 movl $-ENOSYS,PT_EAX(%esp)
649 movl %esp, %eax
650 call syscall_trace_enter
651 /* What it returned is what we'll actually use. */
652 cmpl $(NR_syscalls), %eax
653 jnae syscall_call
654 jmp syscall_exit
655END(syscall_trace_entry)
656
657 # perform syscall exit tracing
658 ALIGN
659syscall_exit_work:
660 testl $_TIF_WORK_SYSCALL_EXIT, %ecx
661 jz work_pending
662 TRACE_IRQS_ON
663 ENABLE_INTERRUPTS(CLBR_ANY) # could let syscall_trace_leave() call
664 # schedule() instead
665 movl %esp, %eax
666 call syscall_trace_leave
667 jmp resume_userspace
668END(syscall_exit_work)
669 CFI_ENDPROC
670
671 RING0_INT_FRAME # can't unwind into user space anyway
672syscall_fault:
673 ASM_CLAC
674 GET_THREAD_INFO(%ebp)
675 movl $-EFAULT,PT_EAX(%esp)
676 jmp resume_userspace
677END(syscall_fault)
678
679syscall_badsys:
680 movl $-ENOSYS,%eax
681 jmp syscall_after_call
682END(syscall_badsys)
683
684sysenter_badsys:
685 movl $-ENOSYS,%eax
686 jmp sysenter_after_call
687END(sysenter_badsys)
688 CFI_ENDPROC
689
690.macro FIXUP_ESPFIX_STACK
691/*
692 * Switch back from the ESPFIX stack to the normal zerobased stack
693 *
694 * We can't call C functions using the ESPFIX stack. This code reads
695 * the high word of the segment base from the GDT and switches to the
696 * normal stack and adjusts ESP with the matching offset.
697 */
698#ifdef CONFIG_X86_ESPFIX32
699 /* fixup the stack */
700 mov GDT_ESPFIX_SS + 4, %al /* bits 16..23 */
701 mov GDT_ESPFIX_SS + 7, %ah /* bits 24..31 */
702 shl $16, %eax
703 addl %esp, %eax /* the adjusted stack pointer */
704 pushl_cfi $__KERNEL_DS
705 pushl_cfi %eax
706 lss (%esp), %esp /* switch to the normal stack segment */
707 CFI_ADJUST_CFA_OFFSET -8
708#endif
709.endm
710.macro UNWIND_ESPFIX_STACK
711#ifdef CONFIG_X86_ESPFIX32
712 movl %ss, %eax
713 /* see if on espfix stack */
714 cmpw $__ESPFIX_SS, %ax
715 jne 27f
716 movl $__KERNEL_DS, %eax
717 movl %eax, %ds
718 movl %eax, %es
719 /* switch to normal stack */
720 FIXUP_ESPFIX_STACK
72127:
722#endif
723.endm
724
725/*
726 * Build the entry stubs with some assembler magic.
727 * We pack 1 stub into every 8-byte block.
728 */
729 .align 8
730ENTRY(irq_entries_start)
731 RING0_INT_FRAME
732 vector=FIRST_EXTERNAL_VECTOR
733 .rept (FIRST_SYSTEM_VECTOR - FIRST_EXTERNAL_VECTOR)
734 pushl_cfi $(~vector+0x80) /* Note: always in signed byte range */
735 vector=vector+1
736 jmp common_interrupt
737 CFI_ADJUST_CFA_OFFSET -4
738 .align 8
739 .endr
740END(irq_entries_start)
741
742/*
743 * the CPU automatically disables interrupts when executing an IRQ vector,
744 * so IRQ-flags tracing has to follow that:
745 */
746 .p2align CONFIG_X86_L1_CACHE_SHIFT
747common_interrupt:
748 ASM_CLAC
749 addl $-0x80,(%esp) /* Adjust vector into the [-256,-1] range */
750 SAVE_ALL
751 TRACE_IRQS_OFF
752 movl %esp,%eax
753 call do_IRQ
754 jmp ret_from_intr
755ENDPROC(common_interrupt)
756 CFI_ENDPROC
757
758#define BUILD_INTERRUPT3(name, nr, fn) \
759ENTRY(name) \
760 RING0_INT_FRAME; \
761 ASM_CLAC; \
762 pushl_cfi $~(nr); \
763 SAVE_ALL; \
764 TRACE_IRQS_OFF \
765 movl %esp,%eax; \
766 call fn; \
767 jmp ret_from_intr; \
768 CFI_ENDPROC; \
769ENDPROC(name)
770
771
772#ifdef CONFIG_TRACING
773#define TRACE_BUILD_INTERRUPT(name, nr) \
774 BUILD_INTERRUPT3(trace_##name, nr, smp_trace_##name)
775#else
776#define TRACE_BUILD_INTERRUPT(name, nr)
777#endif
778
779#define BUILD_INTERRUPT(name, nr) \
780 BUILD_INTERRUPT3(name, nr, smp_##name); \
781 TRACE_BUILD_INTERRUPT(name, nr)
782
783/* The include is where all of the SMP etc. interrupts come from */
784#include <asm/entry_arch.h>
785
786ENTRY(coprocessor_error)
787 RING0_INT_FRAME
788 ASM_CLAC
789 pushl_cfi $0
790 pushl_cfi $do_coprocessor_error
791 jmp error_code
792 CFI_ENDPROC
793END(coprocessor_error)
794
795ENTRY(simd_coprocessor_error)
796 RING0_INT_FRAME
797 ASM_CLAC
798 pushl_cfi $0
799#ifdef CONFIG_X86_INVD_BUG
800 /* AMD 486 bug: invd from userspace calls exception 19 instead of #GP */
801 ALTERNATIVE "pushl_cfi $do_general_protection", \
802 "pushl $do_simd_coprocessor_error", \
803 X86_FEATURE_XMM
804#else
805 pushl_cfi $do_simd_coprocessor_error
806#endif
807 jmp error_code
808 CFI_ENDPROC
809END(simd_coprocessor_error)
810
811ENTRY(device_not_available)
812 RING0_INT_FRAME
813 ASM_CLAC
814 pushl_cfi $-1 # mark this as an int
815 pushl_cfi $do_device_not_available
816 jmp error_code
817 CFI_ENDPROC
818END(device_not_available)
819
820#ifdef CONFIG_PARAVIRT
821ENTRY(native_iret)
822 iret
823 _ASM_EXTABLE(native_iret, iret_exc)
824END(native_iret)
825
826ENTRY(native_irq_enable_sysexit)
827 sti
828 sysexit
829END(native_irq_enable_sysexit)
830#endif
831
832ENTRY(overflow)
833 RING0_INT_FRAME
834 ASM_CLAC
835 pushl_cfi $0
836 pushl_cfi $do_overflow
837 jmp error_code
838 CFI_ENDPROC
839END(overflow)
840
841ENTRY(bounds)
842 RING0_INT_FRAME
843 ASM_CLAC
844 pushl_cfi $0
845 pushl_cfi $do_bounds
846 jmp error_code
847 CFI_ENDPROC
848END(bounds)
849
850ENTRY(invalid_op)
851 RING0_INT_FRAME
852 ASM_CLAC
853 pushl_cfi $0
854 pushl_cfi $do_invalid_op
855 jmp error_code
856 CFI_ENDPROC
857END(invalid_op)
858
859ENTRY(coprocessor_segment_overrun)
860 RING0_INT_FRAME
861 ASM_CLAC
862 pushl_cfi $0
863 pushl_cfi $do_coprocessor_segment_overrun
864 jmp error_code
865 CFI_ENDPROC
866END(coprocessor_segment_overrun)
867
868ENTRY(invalid_TSS)
869 RING0_EC_FRAME
870 ASM_CLAC
871 pushl_cfi $do_invalid_TSS
872 jmp error_code
873 CFI_ENDPROC
874END(invalid_TSS)
875
876ENTRY(segment_not_present)
877 RING0_EC_FRAME
878 ASM_CLAC
879 pushl_cfi $do_segment_not_present
880 jmp error_code
881 CFI_ENDPROC
882END(segment_not_present)
883
884ENTRY(stack_segment)
885 RING0_EC_FRAME
886 ASM_CLAC
887 pushl_cfi $do_stack_segment
888 jmp error_code
889 CFI_ENDPROC
890END(stack_segment)
891
892ENTRY(alignment_check)
893 RING0_EC_FRAME
894 ASM_CLAC
895 pushl_cfi $do_alignment_check
896 jmp error_code
897 CFI_ENDPROC
898END(alignment_check)
899
900ENTRY(divide_error)
901 RING0_INT_FRAME
902 ASM_CLAC
903 pushl_cfi $0 # no error code
904 pushl_cfi $do_divide_error
905 jmp error_code
906 CFI_ENDPROC
907END(divide_error)
908
909#ifdef CONFIG_X86_MCE
910ENTRY(machine_check)
911 RING0_INT_FRAME
912 ASM_CLAC
913 pushl_cfi $0
914 pushl_cfi machine_check_vector
915 jmp error_code
916 CFI_ENDPROC
917END(machine_check)
918#endif
919
920ENTRY(spurious_interrupt_bug)
921 RING0_INT_FRAME
922 ASM_CLAC
923 pushl_cfi $0
924 pushl_cfi $do_spurious_interrupt_bug
925 jmp error_code
926 CFI_ENDPROC
927END(spurious_interrupt_bug)
928
929#ifdef CONFIG_XEN
930/* Xen doesn't set %esp to be precisely what the normal sysenter
931 entrypoint expects, so fix it up before using the normal path. */
932ENTRY(xen_sysenter_target)
933 RING0_INT_FRAME
934 addl $5*4, %esp /* remove xen-provided frame */
935 CFI_ADJUST_CFA_OFFSET -5*4
936 jmp sysenter_past_esp
937 CFI_ENDPROC
938
939ENTRY(xen_hypervisor_callback)
940 CFI_STARTPROC
941 pushl_cfi $-1 /* orig_ax = -1 => not a system call */
942 SAVE_ALL
943 TRACE_IRQS_OFF
944
945 /* Check to see if we got the event in the critical
946 region in xen_iret_direct, after we've reenabled
947 events and checked for pending events. This simulates
948 iret instruction's behaviour where it delivers a
949 pending interrupt when enabling interrupts. */
950 movl PT_EIP(%esp),%eax
951 cmpl $xen_iret_start_crit,%eax
952 jb 1f
953 cmpl $xen_iret_end_crit,%eax
954 jae 1f
955
956 jmp xen_iret_crit_fixup
957
958ENTRY(xen_do_upcall)
9591: mov %esp, %eax
960 call xen_evtchn_do_upcall
961#ifndef CONFIG_PREEMPT
962 call xen_maybe_preempt_hcall
963#endif
964 jmp ret_from_intr
965 CFI_ENDPROC
966ENDPROC(xen_hypervisor_callback)
967
968# Hypervisor uses this for application faults while it executes.
969# We get here for two reasons:
970# 1. Fault while reloading DS, ES, FS or GS
971# 2. Fault while executing IRET
972# Category 1 we fix up by reattempting the load, and zeroing the segment
973# register if the load fails.
974# Category 2 we fix up by jumping to do_iret_error. We cannot use the
975# normal Linux return path in this case because if we use the IRET hypercall
976# to pop the stack frame we end up in an infinite loop of failsafe callbacks.
977# We distinguish between categories by maintaining a status value in EAX.
978ENTRY(xen_failsafe_callback)
979 CFI_STARTPROC
980 pushl_cfi %eax
981 movl $1,%eax
9821: mov 4(%esp),%ds
9832: mov 8(%esp),%es
9843: mov 12(%esp),%fs
9854: mov 16(%esp),%gs
986 /* EAX == 0 => Category 1 (Bad segment)
987 EAX != 0 => Category 2 (Bad IRET) */
988 testl %eax,%eax
989 popl_cfi %eax
990 lea 16(%esp),%esp
991 CFI_ADJUST_CFA_OFFSET -16
992 jz 5f
993 jmp iret_exc
9945: pushl_cfi $-1 /* orig_ax = -1 => not a system call */
995 SAVE_ALL
996 jmp ret_from_exception
997 CFI_ENDPROC
998
999.section .fixup,"ax"
10006: xorl %eax,%eax
1001 movl %eax,4(%esp)
1002 jmp 1b
10037: xorl %eax,%eax
1004 movl %eax,8(%esp)
1005 jmp 2b
10068: xorl %eax,%eax
1007 movl %eax,12(%esp)
1008 jmp 3b
10099: xorl %eax,%eax
1010 movl %eax,16(%esp)
1011 jmp 4b
1012.previous
1013 _ASM_EXTABLE(1b,6b)
1014 _ASM_EXTABLE(2b,7b)
1015 _ASM_EXTABLE(3b,8b)
1016 _ASM_EXTABLE(4b,9b)
1017ENDPROC(xen_failsafe_callback)
1018
1019BUILD_INTERRUPT3(xen_hvm_callback_vector, HYPERVISOR_CALLBACK_VECTOR,
1020 xen_evtchn_do_upcall)
1021
1022#endif /* CONFIG_XEN */
1023
1024#if IS_ENABLED(CONFIG_HYPERV)
1025
1026BUILD_INTERRUPT3(hyperv_callback_vector, HYPERVISOR_CALLBACK_VECTOR,
1027 hyperv_vector_handler)
1028
1029#endif /* CONFIG_HYPERV */
1030
1031#ifdef CONFIG_FUNCTION_TRACER
1032#ifdef CONFIG_DYNAMIC_FTRACE
1033
1034ENTRY(mcount)
1035 ret
1036END(mcount)
1037
1038ENTRY(ftrace_caller)
1039 pushl %eax
1040 pushl %ecx
1041 pushl %edx
1042 pushl $0 /* Pass NULL as regs pointer */
1043 movl 4*4(%esp), %eax
1044 movl 0x4(%ebp), %edx
1045 movl function_trace_op, %ecx
1046 subl $MCOUNT_INSN_SIZE, %eax
1047
1048.globl ftrace_call
1049ftrace_call:
1050 call ftrace_stub
1051
1052 addl $4,%esp /* skip NULL pointer */
1053 popl %edx
1054 popl %ecx
1055 popl %eax
1056ftrace_ret:
1057#ifdef CONFIG_FUNCTION_GRAPH_TRACER
1058.globl ftrace_graph_call
1059ftrace_graph_call:
1060 jmp ftrace_stub
1061#endif
1062
1063.globl ftrace_stub
1064ftrace_stub:
1065 ret
1066END(ftrace_caller)
1067
1068ENTRY(ftrace_regs_caller)
1069 pushf /* push flags before compare (in cs location) */
1070
1071 /*
1072 * i386 does not save SS and ESP when coming from kernel.
1073 * Instead, to get sp, &regs->sp is used (see ptrace.h).
1074 * Unfortunately, that means eflags must be at the same location
1075 * as the current return ip is. We move the return ip into the
1076 * ip location, and move flags into the return ip location.
1077 */
1078 pushl 4(%esp) /* save return ip into ip slot */
1079
1080 pushl $0 /* Load 0 into orig_ax */
1081 pushl %gs
1082 pushl %fs
1083 pushl %es
1084 pushl %ds
1085 pushl %eax
1086 pushl %ebp
1087 pushl %edi
1088 pushl %esi
1089 pushl %edx
1090 pushl %ecx
1091 pushl %ebx
1092
1093 movl 13*4(%esp), %eax /* Get the saved flags */
1094 movl %eax, 14*4(%esp) /* Move saved flags into regs->flags location */
1095 /* clobbering return ip */
1096 movl $__KERNEL_CS,13*4(%esp)
1097
1098 movl 12*4(%esp), %eax /* Load ip (1st parameter) */
1099 subl $MCOUNT_INSN_SIZE, %eax /* Adjust ip */
1100 movl 0x4(%ebp), %edx /* Load parent ip (2nd parameter) */
1101 movl function_trace_op, %ecx /* Save ftrace_pos in 3rd parameter */
1102 pushl %esp /* Save pt_regs as 4th parameter */
1103
1104GLOBAL(ftrace_regs_call)
1105 call ftrace_stub
1106
1107 addl $4, %esp /* Skip pt_regs */
1108 movl 14*4(%esp), %eax /* Move flags back into cs */
1109 movl %eax, 13*4(%esp) /* Needed to keep addl from modifying flags */
1110 movl 12*4(%esp), %eax /* Get return ip from regs->ip */
1111 movl %eax, 14*4(%esp) /* Put return ip back for ret */
1112
1113 popl %ebx
1114 popl %ecx
1115 popl %edx
1116 popl %esi
1117 popl %edi
1118 popl %ebp
1119 popl %eax
1120 popl %ds
1121 popl %es
1122 popl %fs
1123 popl %gs
1124 addl $8, %esp /* Skip orig_ax and ip */
1125 popf /* Pop flags at end (no addl to corrupt flags) */
1126 jmp ftrace_ret
1127
1128 popf
1129 jmp ftrace_stub
1130#else /* ! CONFIG_DYNAMIC_FTRACE */
1131
1132ENTRY(mcount)
1133 cmpl $__PAGE_OFFSET, %esp
1134 jb ftrace_stub /* Paging not enabled yet? */
1135
1136 cmpl $ftrace_stub, ftrace_trace_function
1137 jnz trace
1138#ifdef CONFIG_FUNCTION_GRAPH_TRACER
1139 cmpl $ftrace_stub, ftrace_graph_return
1140 jnz ftrace_graph_caller
1141
1142 cmpl $ftrace_graph_entry_stub, ftrace_graph_entry
1143 jnz ftrace_graph_caller
1144#endif
1145.globl ftrace_stub
1146ftrace_stub:
1147 ret
1148
1149 /* taken from glibc */
1150trace:
1151 pushl %eax
1152 pushl %ecx
1153 pushl %edx
1154 movl 0xc(%esp), %eax
1155 movl 0x4(%ebp), %edx
1156 subl $MCOUNT_INSN_SIZE, %eax
1157
1158 call *ftrace_trace_function
1159
1160 popl %edx
1161 popl %ecx
1162 popl %eax
1163 jmp ftrace_stub
1164END(mcount)
1165#endif /* CONFIG_DYNAMIC_FTRACE */
1166#endif /* CONFIG_FUNCTION_TRACER */
1167
1168#ifdef CONFIG_FUNCTION_GRAPH_TRACER
1169ENTRY(ftrace_graph_caller)
1170 pushl %eax
1171 pushl %ecx
1172 pushl %edx
1173 movl 0xc(%esp), %eax
1174 lea 0x4(%ebp), %edx
1175 movl (%ebp), %ecx
1176 subl $MCOUNT_INSN_SIZE, %eax
1177 call prepare_ftrace_return
1178 popl %edx
1179 popl %ecx
1180 popl %eax
1181 ret
1182END(ftrace_graph_caller)
1183
1184.globl return_to_handler
1185return_to_handler:
1186 pushl %eax
1187 pushl %edx
1188 movl %ebp, %eax
1189 call ftrace_return_to_handler
1190 movl %eax, %ecx
1191 popl %edx
1192 popl %eax
1193 jmp *%ecx
1194#endif
1195
1196#ifdef CONFIG_TRACING
1197ENTRY(trace_page_fault)
1198 RING0_EC_FRAME
1199 ASM_CLAC
1200 pushl_cfi $trace_do_page_fault
1201 jmp error_code
1202 CFI_ENDPROC
1203END(trace_page_fault)
1204#endif
1205
1206ENTRY(page_fault)
1207 RING0_EC_FRAME
1208 ASM_CLAC
1209 pushl_cfi $do_page_fault
1210 ALIGN
1211error_code:
1212 /* the function address is in %gs's slot on the stack */
1213 pushl_cfi %fs
1214 /*CFI_REL_OFFSET fs, 0*/
1215 pushl_cfi %es
1216 /*CFI_REL_OFFSET es, 0*/
1217 pushl_cfi %ds
1218 /*CFI_REL_OFFSET ds, 0*/
1219 pushl_cfi_reg eax
1220 pushl_cfi_reg ebp
1221 pushl_cfi_reg edi
1222 pushl_cfi_reg esi
1223 pushl_cfi_reg edx
1224 pushl_cfi_reg ecx
1225 pushl_cfi_reg ebx
1226 cld
1227 movl $(__KERNEL_PERCPU), %ecx
1228 movl %ecx, %fs
1229 UNWIND_ESPFIX_STACK
1230 GS_TO_REG %ecx
1231 movl PT_GS(%esp), %edi # get the function address
1232 movl PT_ORIG_EAX(%esp), %edx # get the error code
1233 movl $-1, PT_ORIG_EAX(%esp) # no syscall to restart
1234 REG_TO_PTGS %ecx
1235 SET_KERNEL_GS %ecx
1236 movl $(__USER_DS), %ecx
1237 movl %ecx, %ds
1238 movl %ecx, %es
1239 TRACE_IRQS_OFF
1240 movl %esp,%eax # pt_regs pointer
1241 call *%edi
1242 jmp ret_from_exception
1243 CFI_ENDPROC
1244END(page_fault)
1245
1246/*
1247 * Debug traps and NMI can happen at the one SYSENTER instruction
1248 * that sets up the real kernel stack. Check here, since we can't
1249 * allow the wrong stack to be used.
1250 *
1251 * "TSS_sysenter_sp0+12" is because the NMI/debug handler will have
1252 * already pushed 3 words if it hits on the sysenter instruction:
1253 * eflags, cs and eip.
1254 *
1255 * We just load the right stack, and push the three (known) values
1256 * by hand onto the new stack - while updating the return eip past
1257 * the instruction that would have done it for sysenter.
1258 */
1259.macro FIX_STACK offset ok label
1260 cmpw $__KERNEL_CS, 4(%esp)
1261 jne \ok
1262\label:
1263 movl TSS_sysenter_sp0 + \offset(%esp), %esp
1264 CFI_DEF_CFA esp, 0
1265 CFI_UNDEFINED eip
1266 pushfl_cfi
1267 pushl_cfi $__KERNEL_CS
1268 pushl_cfi $sysenter_past_esp
1269 CFI_REL_OFFSET eip, 0
1270.endm
1271
1272ENTRY(debug)
1273 RING0_INT_FRAME
1274 ASM_CLAC
1275 cmpl $ia32_sysenter_target,(%esp)
1276 jne debug_stack_correct
1277 FIX_STACK 12, debug_stack_correct, debug_esp_fix_insn
1278debug_stack_correct:
1279 pushl_cfi $-1 # mark this as an int
1280 SAVE_ALL
1281 TRACE_IRQS_OFF
1282 xorl %edx,%edx # error code 0
1283 movl %esp,%eax # pt_regs pointer
1284 call do_debug
1285 jmp ret_from_exception
1286 CFI_ENDPROC
1287END(debug)
1288
1289/*
1290 * NMI is doubly nasty. It can happen _while_ we're handling
1291 * a debug fault, and the debug fault hasn't yet been able to
1292 * clear up the stack. So we first check whether we got an
1293 * NMI on the sysenter entry path, but after that we need to
1294 * check whether we got an NMI on the debug path where the debug
1295 * fault happened on the sysenter path.
1296 */
1297ENTRY(nmi)
1298 RING0_INT_FRAME
1299 ASM_CLAC
1300#ifdef CONFIG_X86_ESPFIX32
1301 pushl_cfi %eax
1302 movl %ss, %eax
1303 cmpw $__ESPFIX_SS, %ax
1304 popl_cfi %eax
1305 je nmi_espfix_stack
1306#endif
1307 cmpl $ia32_sysenter_target,(%esp)
1308 je nmi_stack_fixup
1309 pushl_cfi %eax
1310 movl %esp,%eax
1311 /* Do not access memory above the end of our stack page,
1312 * it might not exist.
1313 */
1314 andl $(THREAD_SIZE-1),%eax
1315 cmpl $(THREAD_SIZE-20),%eax
1316 popl_cfi %eax
1317 jae nmi_stack_correct
1318 cmpl $ia32_sysenter_target,12(%esp)
1319 je nmi_debug_stack_check
1320nmi_stack_correct:
1321 /* We have a RING0_INT_FRAME here */
1322 pushl_cfi %eax
1323 SAVE_ALL
1324 xorl %edx,%edx # zero error code
1325 movl %esp,%eax # pt_regs pointer
1326 call do_nmi
1327 jmp restore_all_notrace
1328 CFI_ENDPROC
1329
1330nmi_stack_fixup:
1331 RING0_INT_FRAME
1332 FIX_STACK 12, nmi_stack_correct, 1
1333 jmp nmi_stack_correct
1334
1335nmi_debug_stack_check:
1336 /* We have a RING0_INT_FRAME here */
1337 cmpw $__KERNEL_CS,16(%esp)
1338 jne nmi_stack_correct
1339 cmpl $debug,(%esp)
1340 jb nmi_stack_correct
1341 cmpl $debug_esp_fix_insn,(%esp)
1342 ja nmi_stack_correct
1343 FIX_STACK 24, nmi_stack_correct, 1
1344 jmp nmi_stack_correct
1345
1346#ifdef CONFIG_X86_ESPFIX32
1347nmi_espfix_stack:
1348 /* We have a RING0_INT_FRAME here.
1349 *
1350 * create the pointer to lss back
1351 */
1352 pushl_cfi %ss
1353 pushl_cfi %esp
1354 addl $4, (%esp)
1355 /* copy the iret frame of 12 bytes */
1356 .rept 3
1357 pushl_cfi 16(%esp)
1358 .endr
1359 pushl_cfi %eax
1360 SAVE_ALL
1361 FIXUP_ESPFIX_STACK # %eax == %esp
1362 xorl %edx,%edx # zero error code
1363 call do_nmi
1364 RESTORE_REGS
1365 lss 12+4(%esp), %esp # back to espfix stack
1366 CFI_ADJUST_CFA_OFFSET -24
1367 jmp irq_return
1368#endif
1369 CFI_ENDPROC
1370END(nmi)
1371
1372ENTRY(int3)
1373 RING0_INT_FRAME
1374 ASM_CLAC
1375 pushl_cfi $-1 # mark this as an int
1376 SAVE_ALL
1377 TRACE_IRQS_OFF
1378 xorl %edx,%edx # zero error code
1379 movl %esp,%eax # pt_regs pointer
1380 call do_int3
1381 jmp ret_from_exception
1382 CFI_ENDPROC
1383END(int3)
1384
1385ENTRY(general_protection)
1386 RING0_EC_FRAME
1387 pushl_cfi $do_general_protection
1388 jmp error_code
1389 CFI_ENDPROC
1390END(general_protection)
1391
1392#ifdef CONFIG_KVM_GUEST
1393ENTRY(async_page_fault)
1394 RING0_EC_FRAME
1395 ASM_CLAC
1396 pushl_cfi $do_async_page_fault
1397 jmp error_code
1398 CFI_ENDPROC
1399END(async_page_fault)
1400#endif
1401
diff --git a/arch/x86/kernel/head64.c b/arch/x86/kernel/head64.c
index 2b55ee6db053..5a4668136e98 100644
--- a/arch/x86/kernel/head64.c
+++ b/arch/x86/kernel/head64.c
@@ -167,7 +167,7 @@ asmlinkage __visible void __init x86_64_start_kernel(char * real_mode_data)
167 clear_bss(); 167 clear_bss();
168 168
169 for (i = 0; i < NUM_EXCEPTION_VECTORS; i++) 169 for (i = 0; i < NUM_EXCEPTION_VECTORS; i++)
170 set_intr_gate(i, early_idt_handlers[i]); 170 set_intr_gate(i, early_idt_handler_array[i]);
171 load_idt((const struct desc_ptr *)&idt_descr); 171 load_idt((const struct desc_ptr *)&idt_descr);
172 172
173 copy_bootdata(__va(real_mode_data)); 173 copy_bootdata(__va(real_mode_data));
diff --git a/arch/x86/kernel/head_32.S b/arch/x86/kernel/head_32.S
index 02d257256200..544dec4cc605 100644
--- a/arch/x86/kernel/head_32.S
+++ b/arch/x86/kernel/head_32.S
@@ -478,21 +478,22 @@ is486:
478__INIT 478__INIT
479setup_once: 479setup_once:
480 /* 480 /*
481 * Set up a idt with 256 entries pointing to ignore_int, 481 * Set up a idt with 256 interrupt gates that push zero if there
482 * interrupt gates. It doesn't actually load idt - that needs 482 * is no error code and then jump to early_idt_handler_common.
483 * to be done on each CPU. Interrupts are enabled elsewhere, 483 * It doesn't actually load the idt - that needs to be done on
484 * when we can be relatively sure everything is ok. 484 * each CPU. Interrupts are enabled elsewhere, when we can be
485 * relatively sure everything is ok.
485 */ 486 */
486 487
487 movl $idt_table,%edi 488 movl $idt_table,%edi
488 movl $early_idt_handlers,%eax 489 movl $early_idt_handler_array,%eax
489 movl $NUM_EXCEPTION_VECTORS,%ecx 490 movl $NUM_EXCEPTION_VECTORS,%ecx
4901: 4911:
491 movl %eax,(%edi) 492 movl %eax,(%edi)
492 movl %eax,4(%edi) 493 movl %eax,4(%edi)
493 /* interrupt gate, dpl=0, present */ 494 /* interrupt gate, dpl=0, present */
494 movl $(0x8E000000 + __KERNEL_CS),2(%edi) 495 movl $(0x8E000000 + __KERNEL_CS),2(%edi)
495 addl $9,%eax 496 addl $EARLY_IDT_HANDLER_SIZE,%eax
496 addl $8,%edi 497 addl $8,%edi
497 loop 1b 498 loop 1b
498 499
@@ -524,26 +525,28 @@ setup_once:
524 andl $0,setup_once_ref /* Once is enough, thanks */ 525 andl $0,setup_once_ref /* Once is enough, thanks */
525 ret 526 ret
526 527
527ENTRY(early_idt_handlers) 528ENTRY(early_idt_handler_array)
528 # 36(%esp) %eflags 529 # 36(%esp) %eflags
529 # 32(%esp) %cs 530 # 32(%esp) %cs
530 # 28(%esp) %eip 531 # 28(%esp) %eip
531 # 24(%rsp) error code 532 # 24(%rsp) error code
532 i = 0 533 i = 0
533 .rept NUM_EXCEPTION_VECTORS 534 .rept NUM_EXCEPTION_VECTORS
534 .if (EXCEPTION_ERRCODE_MASK >> i) & 1 535 .ifeq (EXCEPTION_ERRCODE_MASK >> i) & 1
535 ASM_NOP2
536 .else
537 pushl $0 # Dummy error code, to make stack frame uniform 536 pushl $0 # Dummy error code, to make stack frame uniform
538 .endif 537 .endif
539 pushl $i # 20(%esp) Vector number 538 pushl $i # 20(%esp) Vector number
540 jmp early_idt_handler 539 jmp early_idt_handler_common
541 i = i + 1 540 i = i + 1
541 .fill early_idt_handler_array + i*EARLY_IDT_HANDLER_SIZE - ., 1, 0xcc
542 .endr 542 .endr
543ENDPROC(early_idt_handlers) 543ENDPROC(early_idt_handler_array)
544 544
545 /* This is global to keep gas from relaxing the jumps */ 545early_idt_handler_common:
546ENTRY(early_idt_handler) 546 /*
547 * The stack is the hardware frame, an error code or zero, and the
548 * vector number.
549 */
547 cld 550 cld
548 551
549 cmpl $2,(%esp) # X86_TRAP_NMI 552 cmpl $2,(%esp) # X86_TRAP_NMI
@@ -603,7 +606,7 @@ ex_entry:
603.Lis_nmi: 606.Lis_nmi:
604 addl $8,%esp /* drop vector number and error code */ 607 addl $8,%esp /* drop vector number and error code */
605 iret 608 iret
606ENDPROC(early_idt_handler) 609ENDPROC(early_idt_handler_common)
607 610
608/* This is the default interrupt "handler" :-) */ 611/* This is the default interrupt "handler" :-) */
609 ALIGN 612 ALIGN
diff --git a/arch/x86/kernel/head_64.S b/arch/x86/kernel/head_64.S
index 43eafc8afb69..e5c27f729a38 100644
--- a/arch/x86/kernel/head_64.S
+++ b/arch/x86/kernel/head_64.S
@@ -321,26 +321,28 @@ bad_address:
321 jmp bad_address 321 jmp bad_address
322 322
323 __INIT 323 __INIT
324 .globl early_idt_handlers 324ENTRY(early_idt_handler_array)
325early_idt_handlers:
326 # 104(%rsp) %rflags 325 # 104(%rsp) %rflags
327 # 96(%rsp) %cs 326 # 96(%rsp) %cs
328 # 88(%rsp) %rip 327 # 88(%rsp) %rip
329 # 80(%rsp) error code 328 # 80(%rsp) error code
330 i = 0 329 i = 0
331 .rept NUM_EXCEPTION_VECTORS 330 .rept NUM_EXCEPTION_VECTORS
332 .if (EXCEPTION_ERRCODE_MASK >> i) & 1 331 .ifeq (EXCEPTION_ERRCODE_MASK >> i) & 1
333 ASM_NOP2
334 .else
335 pushq $0 # Dummy error code, to make stack frame uniform 332 pushq $0 # Dummy error code, to make stack frame uniform
336 .endif 333 .endif
337 pushq $i # 72(%rsp) Vector number 334 pushq $i # 72(%rsp) Vector number
338 jmp early_idt_handler 335 jmp early_idt_handler_common
339 i = i + 1 336 i = i + 1
337 .fill early_idt_handler_array + i*EARLY_IDT_HANDLER_SIZE - ., 1, 0xcc
340 .endr 338 .endr
339ENDPROC(early_idt_handler_array)
341 340
342/* This is global to keep gas from relaxing the jumps */ 341early_idt_handler_common:
343ENTRY(early_idt_handler) 342 /*
343 * The stack is the hardware frame, an error code or zero, and the
344 * vector number.
345 */
344 cld 346 cld
345 347
346 cmpl $2,(%rsp) # X86_TRAP_NMI 348 cmpl $2,(%rsp) # X86_TRAP_NMI
@@ -412,7 +414,7 @@ ENTRY(early_idt_handler)
412.Lis_nmi: 414.Lis_nmi:
413 addq $16,%rsp # drop vector number and error code 415 addq $16,%rsp # drop vector number and error code
414 INTERRUPT_RETURN 416 INTERRUPT_RETURN
415ENDPROC(early_idt_handler) 417ENDPROC(early_idt_handler_common)
416 418
417 __INITDATA 419 __INITDATA
418 420
diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c
index c30bbb5d56e2..de379366f6d1 100644
--- a/arch/x86/kernel/traps.c
+++ b/arch/x86/kernel/traps.c
@@ -72,8 +72,7 @@ gate_desc debug_idt_table[NR_VECTORS] __page_aligned_bss;
72#else 72#else
73#include <asm/processor-flags.h> 73#include <asm/processor-flags.h>
74#include <asm/setup.h> 74#include <asm/setup.h>
75 75#include <asm/proto.h>
76asmlinkage int system_call(void);
77#endif 76#endif
78 77
79/* Must be page-aligned because the real IDT is used in a fixmap. */ 78/* Must be page-aligned because the real IDT is used in a fixmap. */
@@ -980,12 +979,12 @@ void __init trap_init(void)
980 set_bit(i, used_vectors); 979 set_bit(i, used_vectors);
981 980
982#ifdef CONFIG_IA32_EMULATION 981#ifdef CONFIG_IA32_EMULATION
983 set_system_intr_gate(IA32_SYSCALL_VECTOR, ia32_syscall); 982 set_system_intr_gate(IA32_SYSCALL_VECTOR, entry_INT80_compat);
984 set_bit(IA32_SYSCALL_VECTOR, used_vectors); 983 set_bit(IA32_SYSCALL_VECTOR, used_vectors);
985#endif 984#endif
986 985
987#ifdef CONFIG_X86_32 986#ifdef CONFIG_X86_32
988 set_system_trap_gate(IA32_SYSCALL_VECTOR, &system_call); 987 set_system_trap_gate(IA32_SYSCALL_VECTOR, entry_INT80_32);
989 set_bit(IA32_SYSCALL_VECTOR, used_vectors); 988 set_bit(IA32_SYSCALL_VECTOR, used_vectors);
990#endif 989#endif
991 990
diff --git a/arch/x86/lib/Makefile b/arch/x86/lib/Makefile
index 982989d282ff..f2587888d987 100644
--- a/arch/x86/lib/Makefile
+++ b/arch/x86/lib/Makefile
@@ -17,7 +17,6 @@ clean-files := inat-tables.c
17obj-$(CONFIG_SMP) += msr-smp.o cache-smp.o 17obj-$(CONFIG_SMP) += msr-smp.o cache-smp.o
18 18
19lib-y := delay.o misc.o cmdline.o 19lib-y := delay.o misc.o cmdline.o
20lib-y += thunk_$(BITS).o
21lib-y += usercopy_$(BITS).o usercopy.o getuser.o putuser.o 20lib-y += usercopy_$(BITS).o usercopy.o getuser.o putuser.o
22lib-y += memcpy_$(BITS).o 21lib-y += memcpy_$(BITS).o
23lib-$(CONFIG_RWSEM_XCHGADD_ALGORITHM) += rwsem.o 22lib-$(CONFIG_RWSEM_XCHGADD_ALGORITHM) += rwsem.o
diff --git a/arch/x86/lib/atomic64_386_32.S b/arch/x86/lib/atomic64_386_32.S
index 00933d5e992f..9b0ca8fe80fc 100644
--- a/arch/x86/lib/atomic64_386_32.S
+++ b/arch/x86/lib/atomic64_386_32.S
@@ -11,26 +11,23 @@
11 11
12#include <linux/linkage.h> 12#include <linux/linkage.h>
13#include <asm/alternative-asm.h> 13#include <asm/alternative-asm.h>
14#include <asm/dwarf2.h>
15 14
16/* if you want SMP support, implement these with real spinlocks */ 15/* if you want SMP support, implement these with real spinlocks */
17.macro LOCK reg 16.macro LOCK reg
18 pushfl_cfi 17 pushfl
19 cli 18 cli
20.endm 19.endm
21 20
22.macro UNLOCK reg 21.macro UNLOCK reg
23 popfl_cfi 22 popfl
24.endm 23.endm
25 24
26#define BEGIN(op) \ 25#define BEGIN(op) \
27.macro endp; \ 26.macro endp; \
28 CFI_ENDPROC; \
29ENDPROC(atomic64_##op##_386); \ 27ENDPROC(atomic64_##op##_386); \
30.purgem endp; \ 28.purgem endp; \
31.endm; \ 29.endm; \
32ENTRY(atomic64_##op##_386); \ 30ENTRY(atomic64_##op##_386); \
33 CFI_STARTPROC; \
34 LOCK v; 31 LOCK v;
35 32
36#define ENDP endp 33#define ENDP endp
diff --git a/arch/x86/lib/atomic64_cx8_32.S b/arch/x86/lib/atomic64_cx8_32.S
index 082a85167a5b..db3ae85440ff 100644
--- a/arch/x86/lib/atomic64_cx8_32.S
+++ b/arch/x86/lib/atomic64_cx8_32.S
@@ -11,7 +11,6 @@
11 11
12#include <linux/linkage.h> 12#include <linux/linkage.h>
13#include <asm/alternative-asm.h> 13#include <asm/alternative-asm.h>
14#include <asm/dwarf2.h>
15 14
16.macro read64 reg 15.macro read64 reg
17 movl %ebx, %eax 16 movl %ebx, %eax
@@ -22,16 +21,11 @@
22.endm 21.endm
23 22
24ENTRY(atomic64_read_cx8) 23ENTRY(atomic64_read_cx8)
25 CFI_STARTPROC
26
27 read64 %ecx 24 read64 %ecx
28 ret 25 ret
29 CFI_ENDPROC
30ENDPROC(atomic64_read_cx8) 26ENDPROC(atomic64_read_cx8)
31 27
32ENTRY(atomic64_set_cx8) 28ENTRY(atomic64_set_cx8)
33 CFI_STARTPROC
34
351: 291:
36/* we don't need LOCK_PREFIX since aligned 64-bit writes 30/* we don't need LOCK_PREFIX since aligned 64-bit writes
37 * are atomic on 586 and newer */ 31 * are atomic on 586 and newer */
@@ -39,28 +33,23 @@ ENTRY(atomic64_set_cx8)
39 jne 1b 33 jne 1b
40 34
41 ret 35 ret
42 CFI_ENDPROC
43ENDPROC(atomic64_set_cx8) 36ENDPROC(atomic64_set_cx8)
44 37
45ENTRY(atomic64_xchg_cx8) 38ENTRY(atomic64_xchg_cx8)
46 CFI_STARTPROC
47
481: 391:
49 LOCK_PREFIX 40 LOCK_PREFIX
50 cmpxchg8b (%esi) 41 cmpxchg8b (%esi)
51 jne 1b 42 jne 1b
52 43
53 ret 44 ret
54 CFI_ENDPROC
55ENDPROC(atomic64_xchg_cx8) 45ENDPROC(atomic64_xchg_cx8)
56 46
57.macro addsub_return func ins insc 47.macro addsub_return func ins insc
58ENTRY(atomic64_\func\()_return_cx8) 48ENTRY(atomic64_\func\()_return_cx8)
59 CFI_STARTPROC 49 pushl %ebp
60 pushl_cfi_reg ebp 50 pushl %ebx
61 pushl_cfi_reg ebx 51 pushl %esi
62 pushl_cfi_reg esi 52 pushl %edi
63 pushl_cfi_reg edi
64 53
65 movl %eax, %esi 54 movl %eax, %esi
66 movl %edx, %edi 55 movl %edx, %edi
@@ -79,12 +68,11 @@ ENTRY(atomic64_\func\()_return_cx8)
7910: 6810:
80 movl %ebx, %eax 69 movl %ebx, %eax
81 movl %ecx, %edx 70 movl %ecx, %edx
82 popl_cfi_reg edi 71 popl %edi
83 popl_cfi_reg esi 72 popl %esi
84 popl_cfi_reg ebx 73 popl %ebx
85 popl_cfi_reg ebp 74 popl %ebp
86 ret 75 ret
87 CFI_ENDPROC
88ENDPROC(atomic64_\func\()_return_cx8) 76ENDPROC(atomic64_\func\()_return_cx8)
89.endm 77.endm
90 78
@@ -93,8 +81,7 @@ addsub_return sub sub sbb
93 81
94.macro incdec_return func ins insc 82.macro incdec_return func ins insc
95ENTRY(atomic64_\func\()_return_cx8) 83ENTRY(atomic64_\func\()_return_cx8)
96 CFI_STARTPROC 84 pushl %ebx
97 pushl_cfi_reg ebx
98 85
99 read64 %esi 86 read64 %esi
1001: 871:
@@ -109,9 +96,8 @@ ENTRY(atomic64_\func\()_return_cx8)
10910: 9610:
110 movl %ebx, %eax 97 movl %ebx, %eax
111 movl %ecx, %edx 98 movl %ecx, %edx
112 popl_cfi_reg ebx 99 popl %ebx
113 ret 100 ret
114 CFI_ENDPROC
115ENDPROC(atomic64_\func\()_return_cx8) 101ENDPROC(atomic64_\func\()_return_cx8)
116.endm 102.endm
117 103
@@ -119,8 +105,7 @@ incdec_return inc add adc
119incdec_return dec sub sbb 105incdec_return dec sub sbb
120 106
121ENTRY(atomic64_dec_if_positive_cx8) 107ENTRY(atomic64_dec_if_positive_cx8)
122 CFI_STARTPROC 108 pushl %ebx
123 pushl_cfi_reg ebx
124 109
125 read64 %esi 110 read64 %esi
1261: 1111:
@@ -136,18 +121,16 @@ ENTRY(atomic64_dec_if_positive_cx8)
1362: 1212:
137 movl %ebx, %eax 122 movl %ebx, %eax
138 movl %ecx, %edx 123 movl %ecx, %edx
139 popl_cfi_reg ebx 124 popl %ebx
140 ret 125 ret
141 CFI_ENDPROC
142ENDPROC(atomic64_dec_if_positive_cx8) 126ENDPROC(atomic64_dec_if_positive_cx8)
143 127
144ENTRY(atomic64_add_unless_cx8) 128ENTRY(atomic64_add_unless_cx8)
145 CFI_STARTPROC 129 pushl %ebp
146 pushl_cfi_reg ebp 130 pushl %ebx
147 pushl_cfi_reg ebx
148/* these just push these two parameters on the stack */ 131/* these just push these two parameters on the stack */
149 pushl_cfi_reg edi 132 pushl %edi
150 pushl_cfi_reg ecx 133 pushl %ecx
151 134
152 movl %eax, %ebp 135 movl %eax, %ebp
153 movl %edx, %edi 136 movl %edx, %edi
@@ -168,21 +151,18 @@ ENTRY(atomic64_add_unless_cx8)
168 movl $1, %eax 151 movl $1, %eax
1693: 1523:
170 addl $8, %esp 153 addl $8, %esp
171 CFI_ADJUST_CFA_OFFSET -8 154 popl %ebx
172 popl_cfi_reg ebx 155 popl %ebp
173 popl_cfi_reg ebp
174 ret 156 ret
1754: 1574:
176 cmpl %edx, 4(%esp) 158 cmpl %edx, 4(%esp)
177 jne 2b 159 jne 2b
178 xorl %eax, %eax 160 xorl %eax, %eax
179 jmp 3b 161 jmp 3b
180 CFI_ENDPROC
181ENDPROC(atomic64_add_unless_cx8) 162ENDPROC(atomic64_add_unless_cx8)
182 163
183ENTRY(atomic64_inc_not_zero_cx8) 164ENTRY(atomic64_inc_not_zero_cx8)
184 CFI_STARTPROC 165 pushl %ebx
185 pushl_cfi_reg ebx
186 166
187 read64 %esi 167 read64 %esi
1881: 1681:
@@ -199,7 +179,6 @@ ENTRY(atomic64_inc_not_zero_cx8)
199 179
200 movl $1, %eax 180 movl $1, %eax
2013: 1813:
202 popl_cfi_reg ebx 182 popl %ebx
203 ret 183 ret
204 CFI_ENDPROC
205ENDPROC(atomic64_inc_not_zero_cx8) 184ENDPROC(atomic64_inc_not_zero_cx8)
diff --git a/arch/x86/lib/checksum_32.S b/arch/x86/lib/checksum_32.S
index 9bc944a91274..c1e623209853 100644
--- a/arch/x86/lib/checksum_32.S
+++ b/arch/x86/lib/checksum_32.S
@@ -26,7 +26,6 @@
26 */ 26 */
27 27
28#include <linux/linkage.h> 28#include <linux/linkage.h>
29#include <asm/dwarf2.h>
30#include <asm/errno.h> 29#include <asm/errno.h>
31#include <asm/asm.h> 30#include <asm/asm.h>
32 31
@@ -50,9 +49,8 @@ unsigned int csum_partial(const unsigned char * buff, int len, unsigned int sum)
50 * alignment for the unrolled loop. 49 * alignment for the unrolled loop.
51 */ 50 */
52ENTRY(csum_partial) 51ENTRY(csum_partial)
53 CFI_STARTPROC 52 pushl %esi
54 pushl_cfi_reg esi 53 pushl %ebx
55 pushl_cfi_reg ebx
56 movl 20(%esp),%eax # Function arg: unsigned int sum 54 movl 20(%esp),%eax # Function arg: unsigned int sum
57 movl 16(%esp),%ecx # Function arg: int len 55 movl 16(%esp),%ecx # Function arg: int len
58 movl 12(%esp),%esi # Function arg: unsigned char *buff 56 movl 12(%esp),%esi # Function arg: unsigned char *buff
@@ -129,10 +127,9 @@ ENTRY(csum_partial)
129 jz 8f 127 jz 8f
130 roll $8, %eax 128 roll $8, %eax
1318: 1298:
132 popl_cfi_reg ebx 130 popl %ebx
133 popl_cfi_reg esi 131 popl %esi
134 ret 132 ret
135 CFI_ENDPROC
136ENDPROC(csum_partial) 133ENDPROC(csum_partial)
137 134
138#else 135#else
@@ -140,9 +137,8 @@ ENDPROC(csum_partial)
140/* Version for PentiumII/PPro */ 137/* Version for PentiumII/PPro */
141 138
142ENTRY(csum_partial) 139ENTRY(csum_partial)
143 CFI_STARTPROC 140 pushl %esi
144 pushl_cfi_reg esi 141 pushl %ebx
145 pushl_cfi_reg ebx
146 movl 20(%esp),%eax # Function arg: unsigned int sum 142 movl 20(%esp),%eax # Function arg: unsigned int sum
147 movl 16(%esp),%ecx # Function arg: int len 143 movl 16(%esp),%ecx # Function arg: int len
148 movl 12(%esp),%esi # Function arg: const unsigned char *buf 144 movl 12(%esp),%esi # Function arg: const unsigned char *buf
@@ -249,10 +245,9 @@ ENTRY(csum_partial)
249 jz 90f 245 jz 90f
250 roll $8, %eax 246 roll $8, %eax
25190: 24790:
252 popl_cfi_reg ebx 248 popl %ebx
253 popl_cfi_reg esi 249 popl %esi
254 ret 250 ret
255 CFI_ENDPROC
256ENDPROC(csum_partial) 251ENDPROC(csum_partial)
257 252
258#endif 253#endif
@@ -287,12 +282,10 @@ unsigned int csum_partial_copy_generic (const char *src, char *dst,
287#define FP 12 282#define FP 12
288 283
289ENTRY(csum_partial_copy_generic) 284ENTRY(csum_partial_copy_generic)
290 CFI_STARTPROC
291 subl $4,%esp 285 subl $4,%esp
292 CFI_ADJUST_CFA_OFFSET 4 286 pushl %edi
293 pushl_cfi_reg edi 287 pushl %esi
294 pushl_cfi_reg esi 288 pushl %ebx
295 pushl_cfi_reg ebx
296 movl ARGBASE+16(%esp),%eax # sum 289 movl ARGBASE+16(%esp),%eax # sum
297 movl ARGBASE+12(%esp),%ecx # len 290 movl ARGBASE+12(%esp),%ecx # len
298 movl ARGBASE+4(%esp),%esi # src 291 movl ARGBASE+4(%esp),%esi # src
@@ -401,12 +394,11 @@ DST( movb %cl, (%edi) )
401 394
402.previous 395.previous
403 396
404 popl_cfi_reg ebx 397 popl %ebx
405 popl_cfi_reg esi 398 popl %esi
406 popl_cfi_reg edi 399 popl %edi
407 popl_cfi %ecx # equivalent to addl $4,%esp 400 popl %ecx # equivalent to addl $4,%esp
408 ret 401 ret
409 CFI_ENDPROC
410ENDPROC(csum_partial_copy_generic) 402ENDPROC(csum_partial_copy_generic)
411 403
412#else 404#else
@@ -426,10 +418,9 @@ ENDPROC(csum_partial_copy_generic)
426#define ARGBASE 12 418#define ARGBASE 12
427 419
428ENTRY(csum_partial_copy_generic) 420ENTRY(csum_partial_copy_generic)
429 CFI_STARTPROC 421 pushl %ebx
430 pushl_cfi_reg ebx 422 pushl %edi
431 pushl_cfi_reg edi 423 pushl %esi
432 pushl_cfi_reg esi
433 movl ARGBASE+4(%esp),%esi #src 424 movl ARGBASE+4(%esp),%esi #src
434 movl ARGBASE+8(%esp),%edi #dst 425 movl ARGBASE+8(%esp),%edi #dst
435 movl ARGBASE+12(%esp),%ecx #len 426 movl ARGBASE+12(%esp),%ecx #len
@@ -489,11 +480,10 @@ DST( movb %dl, (%edi) )
489 jmp 7b 480 jmp 7b
490.previous 481.previous
491 482
492 popl_cfi_reg esi 483 popl %esi
493 popl_cfi_reg edi 484 popl %edi
494 popl_cfi_reg ebx 485 popl %ebx
495 ret 486 ret
496 CFI_ENDPROC
497ENDPROC(csum_partial_copy_generic) 487ENDPROC(csum_partial_copy_generic)
498 488
499#undef ROUND 489#undef ROUND
diff --git a/arch/x86/lib/clear_page_64.S b/arch/x86/lib/clear_page_64.S
index e67e579c93bd..a2fe51b00cce 100644
--- a/arch/x86/lib/clear_page_64.S
+++ b/arch/x86/lib/clear_page_64.S
@@ -1,5 +1,4 @@
1#include <linux/linkage.h> 1#include <linux/linkage.h>
2#include <asm/dwarf2.h>
3#include <asm/cpufeature.h> 2#include <asm/cpufeature.h>
4#include <asm/alternative-asm.h> 3#include <asm/alternative-asm.h>
5 4
@@ -15,7 +14,6 @@
15 * %rdi - page 14 * %rdi - page
16 */ 15 */
17ENTRY(clear_page) 16ENTRY(clear_page)
18 CFI_STARTPROC
19 17
20 ALTERNATIVE_2 "jmp clear_page_orig", "", X86_FEATURE_REP_GOOD, \ 18 ALTERNATIVE_2 "jmp clear_page_orig", "", X86_FEATURE_REP_GOOD, \
21 "jmp clear_page_c_e", X86_FEATURE_ERMS 19 "jmp clear_page_c_e", X86_FEATURE_ERMS
@@ -24,11 +22,9 @@ ENTRY(clear_page)
24 xorl %eax,%eax 22 xorl %eax,%eax
25 rep stosq 23 rep stosq
26 ret 24 ret
27 CFI_ENDPROC
28ENDPROC(clear_page) 25ENDPROC(clear_page)
29 26
30ENTRY(clear_page_orig) 27ENTRY(clear_page_orig)
31 CFI_STARTPROC
32 28
33 xorl %eax,%eax 29 xorl %eax,%eax
34 movl $4096/64,%ecx 30 movl $4096/64,%ecx
@@ -48,14 +44,11 @@ ENTRY(clear_page_orig)
48 jnz .Lloop 44 jnz .Lloop
49 nop 45 nop
50 ret 46 ret
51 CFI_ENDPROC
52ENDPROC(clear_page_orig) 47ENDPROC(clear_page_orig)
53 48
54ENTRY(clear_page_c_e) 49ENTRY(clear_page_c_e)
55 CFI_STARTPROC
56 movl $4096,%ecx 50 movl $4096,%ecx
57 xorl %eax,%eax 51 xorl %eax,%eax
58 rep stosb 52 rep stosb
59 ret 53 ret
60 CFI_ENDPROC
61ENDPROC(clear_page_c_e) 54ENDPROC(clear_page_c_e)
diff --git a/arch/x86/lib/cmpxchg16b_emu.S b/arch/x86/lib/cmpxchg16b_emu.S
index 40a172541ee2..9b330242e740 100644
--- a/arch/x86/lib/cmpxchg16b_emu.S
+++ b/arch/x86/lib/cmpxchg16b_emu.S
@@ -6,7 +6,6 @@
6 * 6 *
7 */ 7 */
8#include <linux/linkage.h> 8#include <linux/linkage.h>
9#include <asm/dwarf2.h>
10#include <asm/percpu.h> 9#include <asm/percpu.h>
11 10
12.text 11.text
@@ -21,7 +20,6 @@
21 * %al : Operation successful 20 * %al : Operation successful
22 */ 21 */
23ENTRY(this_cpu_cmpxchg16b_emu) 22ENTRY(this_cpu_cmpxchg16b_emu)
24CFI_STARTPROC
25 23
26# 24#
27# Emulate 'cmpxchg16b %gs:(%rsi)' except we return the result in %al not 25# Emulate 'cmpxchg16b %gs:(%rsi)' except we return the result in %al not
@@ -32,7 +30,7 @@ CFI_STARTPROC
32# *atomic* on a single cpu (as provided by the this_cpu_xx class of 30# *atomic* on a single cpu (as provided by the this_cpu_xx class of
33# macros). 31# macros).
34# 32#
35 pushfq_cfi 33 pushfq
36 cli 34 cli
37 35
38 cmpq PER_CPU_VAR((%rsi)), %rax 36 cmpq PER_CPU_VAR((%rsi)), %rax
@@ -43,17 +41,13 @@ CFI_STARTPROC
43 movq %rbx, PER_CPU_VAR((%rsi)) 41 movq %rbx, PER_CPU_VAR((%rsi))
44 movq %rcx, PER_CPU_VAR(8(%rsi)) 42 movq %rcx, PER_CPU_VAR(8(%rsi))
45 43
46 CFI_REMEMBER_STATE 44 popfq
47 popfq_cfi
48 mov $1, %al 45 mov $1, %al
49 ret 46 ret
50 47
51 CFI_RESTORE_STATE
52.Lnot_same: 48.Lnot_same:
53 popfq_cfi 49 popfq
54 xor %al,%al 50 xor %al,%al
55 ret 51 ret
56 52
57CFI_ENDPROC
58
59ENDPROC(this_cpu_cmpxchg16b_emu) 53ENDPROC(this_cpu_cmpxchg16b_emu)
diff --git a/arch/x86/lib/cmpxchg8b_emu.S b/arch/x86/lib/cmpxchg8b_emu.S
index b4807fce5177..ad5349778490 100644
--- a/arch/x86/lib/cmpxchg8b_emu.S
+++ b/arch/x86/lib/cmpxchg8b_emu.S
@@ -7,7 +7,6 @@
7 */ 7 */
8 8
9#include <linux/linkage.h> 9#include <linux/linkage.h>
10#include <asm/dwarf2.h>
11 10
12.text 11.text
13 12
@@ -20,14 +19,13 @@
20 * %ecx : high 32 bits of new value 19 * %ecx : high 32 bits of new value
21 */ 20 */
22ENTRY(cmpxchg8b_emu) 21ENTRY(cmpxchg8b_emu)
23CFI_STARTPROC
24 22
25# 23#
26# Emulate 'cmpxchg8b (%esi)' on UP except we don't 24# Emulate 'cmpxchg8b (%esi)' on UP except we don't
27# set the whole ZF thing (caller will just compare 25# set the whole ZF thing (caller will just compare
28# eax:edx with the expected value) 26# eax:edx with the expected value)
29# 27#
30 pushfl_cfi 28 pushfl
31 cli 29 cli
32 30
33 cmpl (%esi), %eax 31 cmpl (%esi), %eax
@@ -38,18 +36,15 @@ CFI_STARTPROC
38 movl %ebx, (%esi) 36 movl %ebx, (%esi)
39 movl %ecx, 4(%esi) 37 movl %ecx, 4(%esi)
40 38
41 CFI_REMEMBER_STATE 39 popfl
42 popfl_cfi
43 ret 40 ret
44 41
45 CFI_RESTORE_STATE
46.Lnot_same: 42.Lnot_same:
47 movl (%esi), %eax 43 movl (%esi), %eax
48.Lhalf_same: 44.Lhalf_same:
49 movl 4(%esi), %edx 45 movl 4(%esi), %edx
50 46
51 popfl_cfi 47 popfl
52 ret 48 ret
53 49
54CFI_ENDPROC
55ENDPROC(cmpxchg8b_emu) 50ENDPROC(cmpxchg8b_emu)
diff --git a/arch/x86/lib/copy_page_64.S b/arch/x86/lib/copy_page_64.S
index 8239dbcbf984..009f98216b7e 100644
--- a/arch/x86/lib/copy_page_64.S
+++ b/arch/x86/lib/copy_page_64.S
@@ -1,7 +1,6 @@
1/* Written 2003 by Andi Kleen, based on a kernel by Evandro Menezes */ 1/* Written 2003 by Andi Kleen, based on a kernel by Evandro Menezes */
2 2
3#include <linux/linkage.h> 3#include <linux/linkage.h>
4#include <asm/dwarf2.h>
5#include <asm/cpufeature.h> 4#include <asm/cpufeature.h>
6#include <asm/alternative-asm.h> 5#include <asm/alternative-asm.h>
7 6
@@ -13,22 +12,16 @@
13 */ 12 */
14 ALIGN 13 ALIGN
15ENTRY(copy_page) 14ENTRY(copy_page)
16 CFI_STARTPROC
17 ALTERNATIVE "jmp copy_page_regs", "", X86_FEATURE_REP_GOOD 15 ALTERNATIVE "jmp copy_page_regs", "", X86_FEATURE_REP_GOOD
18 movl $4096/8, %ecx 16 movl $4096/8, %ecx
19 rep movsq 17 rep movsq
20 ret 18 ret
21 CFI_ENDPROC
22ENDPROC(copy_page) 19ENDPROC(copy_page)
23 20
24ENTRY(copy_page_regs) 21ENTRY(copy_page_regs)
25 CFI_STARTPROC
26 subq $2*8, %rsp 22 subq $2*8, %rsp
27 CFI_ADJUST_CFA_OFFSET 2*8
28 movq %rbx, (%rsp) 23 movq %rbx, (%rsp)
29 CFI_REL_OFFSET rbx, 0
30 movq %r12, 1*8(%rsp) 24 movq %r12, 1*8(%rsp)
31 CFI_REL_OFFSET r12, 1*8
32 25
33 movl $(4096/64)-5, %ecx 26 movl $(4096/64)-5, %ecx
34 .p2align 4 27 .p2align 4
@@ -87,11 +80,7 @@ ENTRY(copy_page_regs)
87 jnz .Loop2 80 jnz .Loop2
88 81
89 movq (%rsp), %rbx 82 movq (%rsp), %rbx
90 CFI_RESTORE rbx
91 movq 1*8(%rsp), %r12 83 movq 1*8(%rsp), %r12
92 CFI_RESTORE r12
93 addq $2*8, %rsp 84 addq $2*8, %rsp
94 CFI_ADJUST_CFA_OFFSET -2*8
95 ret 85 ret
96 CFI_ENDPROC
97ENDPROC(copy_page_regs) 86ENDPROC(copy_page_regs)
diff --git a/arch/x86/lib/copy_user_64.S b/arch/x86/lib/copy_user_64.S
index e4b3beee83bd..982ce34f4a9b 100644
--- a/arch/x86/lib/copy_user_64.S
+++ b/arch/x86/lib/copy_user_64.S
@@ -7,7 +7,6 @@
7 */ 7 */
8 8
9#include <linux/linkage.h> 9#include <linux/linkage.h>
10#include <asm/dwarf2.h>
11#include <asm/current.h> 10#include <asm/current.h>
12#include <asm/asm-offsets.h> 11#include <asm/asm-offsets.h>
13#include <asm/thread_info.h> 12#include <asm/thread_info.h>
@@ -18,7 +17,6 @@
18 17
19/* Standard copy_to_user with segment limit checking */ 18/* Standard copy_to_user with segment limit checking */
20ENTRY(_copy_to_user) 19ENTRY(_copy_to_user)
21 CFI_STARTPROC
22 GET_THREAD_INFO(%rax) 20 GET_THREAD_INFO(%rax)
23 movq %rdi,%rcx 21 movq %rdi,%rcx
24 addq %rdx,%rcx 22 addq %rdx,%rcx
@@ -30,12 +28,10 @@ ENTRY(_copy_to_user)
30 X86_FEATURE_REP_GOOD, \ 28 X86_FEATURE_REP_GOOD, \
31 "jmp copy_user_enhanced_fast_string", \ 29 "jmp copy_user_enhanced_fast_string", \
32 X86_FEATURE_ERMS 30 X86_FEATURE_ERMS
33 CFI_ENDPROC
34ENDPROC(_copy_to_user) 31ENDPROC(_copy_to_user)
35 32
36/* Standard copy_from_user with segment limit checking */ 33/* Standard copy_from_user with segment limit checking */
37ENTRY(_copy_from_user) 34ENTRY(_copy_from_user)
38 CFI_STARTPROC
39 GET_THREAD_INFO(%rax) 35 GET_THREAD_INFO(%rax)
40 movq %rsi,%rcx 36 movq %rsi,%rcx
41 addq %rdx,%rcx 37 addq %rdx,%rcx
@@ -47,14 +43,12 @@ ENTRY(_copy_from_user)
47 X86_FEATURE_REP_GOOD, \ 43 X86_FEATURE_REP_GOOD, \
48 "jmp copy_user_enhanced_fast_string", \ 44 "jmp copy_user_enhanced_fast_string", \
49 X86_FEATURE_ERMS 45 X86_FEATURE_ERMS
50 CFI_ENDPROC
51ENDPROC(_copy_from_user) 46ENDPROC(_copy_from_user)
52 47
53 .section .fixup,"ax" 48 .section .fixup,"ax"
54 /* must zero dest */ 49 /* must zero dest */
55ENTRY(bad_from_user) 50ENTRY(bad_from_user)
56bad_from_user: 51bad_from_user:
57 CFI_STARTPROC
58 movl %edx,%ecx 52 movl %edx,%ecx
59 xorl %eax,%eax 53 xorl %eax,%eax
60 rep 54 rep
@@ -62,7 +56,6 @@ bad_from_user:
62bad_to_user: 56bad_to_user:
63 movl %edx,%eax 57 movl %edx,%eax
64 ret 58 ret
65 CFI_ENDPROC
66ENDPROC(bad_from_user) 59ENDPROC(bad_from_user)
67 .previous 60 .previous
68 61
@@ -80,7 +73,6 @@ ENDPROC(bad_from_user)
80 * eax uncopied bytes or 0 if successful. 73 * eax uncopied bytes or 0 if successful.
81 */ 74 */
82ENTRY(copy_user_generic_unrolled) 75ENTRY(copy_user_generic_unrolled)
83 CFI_STARTPROC
84 ASM_STAC 76 ASM_STAC
85 cmpl $8,%edx 77 cmpl $8,%edx
 86	jb 20f	/* less than 8 bytes, go to byte copy loop */ 78	jb 20f	/* less than 8 bytes, go to byte copy loop */
@@ -162,7 +154,6 @@ ENTRY(copy_user_generic_unrolled)
162 _ASM_EXTABLE(19b,40b) 154 _ASM_EXTABLE(19b,40b)
163 _ASM_EXTABLE(21b,50b) 155 _ASM_EXTABLE(21b,50b)
164 _ASM_EXTABLE(22b,50b) 156 _ASM_EXTABLE(22b,50b)
165 CFI_ENDPROC
166ENDPROC(copy_user_generic_unrolled) 157ENDPROC(copy_user_generic_unrolled)
167 158
168/* Some CPUs run faster using the string copy instructions. 159/* Some CPUs run faster using the string copy instructions.
@@ -184,7 +175,6 @@ ENDPROC(copy_user_generic_unrolled)
184 * eax uncopied bytes or 0 if successful. 175 * eax uncopied bytes or 0 if successful.
185 */ 176 */
186ENTRY(copy_user_generic_string) 177ENTRY(copy_user_generic_string)
187 CFI_STARTPROC
188 ASM_STAC 178 ASM_STAC
189 cmpl $8,%edx 179 cmpl $8,%edx
190 jb 2f /* less than 8 bytes, go to byte copy loop */ 180 jb 2f /* less than 8 bytes, go to byte copy loop */
@@ -209,7 +199,6 @@ ENTRY(copy_user_generic_string)
209 199
210 _ASM_EXTABLE(1b,11b) 200 _ASM_EXTABLE(1b,11b)
211 _ASM_EXTABLE(3b,12b) 201 _ASM_EXTABLE(3b,12b)
212 CFI_ENDPROC
213ENDPROC(copy_user_generic_string) 202ENDPROC(copy_user_generic_string)
214 203
215/* 204/*
@@ -225,7 +214,6 @@ ENDPROC(copy_user_generic_string)
225 * eax uncopied bytes or 0 if successful. 214 * eax uncopied bytes or 0 if successful.
226 */ 215 */
227ENTRY(copy_user_enhanced_fast_string) 216ENTRY(copy_user_enhanced_fast_string)
228 CFI_STARTPROC
229 ASM_STAC 217 ASM_STAC
230 movl %edx,%ecx 218 movl %edx,%ecx
2311: rep 2191: rep
@@ -240,7 +228,6 @@ ENTRY(copy_user_enhanced_fast_string)
240 .previous 228 .previous
241 229
242 _ASM_EXTABLE(1b,12b) 230 _ASM_EXTABLE(1b,12b)
243 CFI_ENDPROC
244ENDPROC(copy_user_enhanced_fast_string) 231ENDPROC(copy_user_enhanced_fast_string)
245 232
246/* 233/*
@@ -248,7 +235,6 @@ ENDPROC(copy_user_enhanced_fast_string)
248 * This will force destination/source out of cache for more performance. 235 * This will force destination/source out of cache for more performance.
249 */ 236 */
250ENTRY(__copy_user_nocache) 237ENTRY(__copy_user_nocache)
251 CFI_STARTPROC
252 ASM_STAC 238 ASM_STAC
253 cmpl $8,%edx 239 cmpl $8,%edx
254	jb 20f	/* less than 8 bytes, go to byte copy loop */ 240	jb 20f	/* less than 8 bytes, go to byte copy loop */
@@ -332,5 +318,4 @@ ENTRY(__copy_user_nocache)
332 _ASM_EXTABLE(19b,40b) 318 _ASM_EXTABLE(19b,40b)
333 _ASM_EXTABLE(21b,50b) 319 _ASM_EXTABLE(21b,50b)
334 _ASM_EXTABLE(22b,50b) 320 _ASM_EXTABLE(22b,50b)
335 CFI_ENDPROC
336ENDPROC(__copy_user_nocache) 321ENDPROC(__copy_user_nocache)
diff --git a/arch/x86/lib/csum-copy_64.S b/arch/x86/lib/csum-copy_64.S
index 9734182966f3..7e48807b2fa1 100644
--- a/arch/x86/lib/csum-copy_64.S
+++ b/arch/x86/lib/csum-copy_64.S
@@ -6,7 +6,6 @@
6 * for more details. No warranty for anything given at all. 6 * for more details. No warranty for anything given at all.
7 */ 7 */
8#include <linux/linkage.h> 8#include <linux/linkage.h>
9#include <asm/dwarf2.h>
10#include <asm/errno.h> 9#include <asm/errno.h>
11#include <asm/asm.h> 10#include <asm/asm.h>
12 11
@@ -47,23 +46,16 @@
47 46
48 47
49ENTRY(csum_partial_copy_generic) 48ENTRY(csum_partial_copy_generic)
50 CFI_STARTPROC
51 cmpl $3*64, %edx 49 cmpl $3*64, %edx
52 jle .Lignore 50 jle .Lignore
53 51
54.Lignore: 52.Lignore:
55 subq $7*8, %rsp 53 subq $7*8, %rsp
56 CFI_ADJUST_CFA_OFFSET 7*8
57 movq %rbx, 2*8(%rsp) 54 movq %rbx, 2*8(%rsp)
58 CFI_REL_OFFSET rbx, 2*8
59 movq %r12, 3*8(%rsp) 55 movq %r12, 3*8(%rsp)
60 CFI_REL_OFFSET r12, 3*8
61 movq %r14, 4*8(%rsp) 56 movq %r14, 4*8(%rsp)
62 CFI_REL_OFFSET r14, 4*8
63 movq %r13, 5*8(%rsp) 57 movq %r13, 5*8(%rsp)
64 CFI_REL_OFFSET r13, 5*8
65 movq %rbp, 6*8(%rsp) 58 movq %rbp, 6*8(%rsp)
66 CFI_REL_OFFSET rbp, 6*8
67 59
68 movq %r8, (%rsp) 60 movq %r8, (%rsp)
69 movq %r9, 1*8(%rsp) 61 movq %r9, 1*8(%rsp)
@@ -206,22 +198,14 @@ ENTRY(csum_partial_copy_generic)
206 addl %ebx, %eax 198 addl %ebx, %eax
207 adcl %r9d, %eax /* carry */ 199 adcl %r9d, %eax /* carry */
208 200
209 CFI_REMEMBER_STATE
210.Lende: 201.Lende:
211 movq 2*8(%rsp), %rbx 202 movq 2*8(%rsp), %rbx
212 CFI_RESTORE rbx
213 movq 3*8(%rsp), %r12 203 movq 3*8(%rsp), %r12
214 CFI_RESTORE r12
215 movq 4*8(%rsp), %r14 204 movq 4*8(%rsp), %r14
216 CFI_RESTORE r14
217 movq 5*8(%rsp), %r13 205 movq 5*8(%rsp), %r13
218 CFI_RESTORE r13
219 movq 6*8(%rsp), %rbp 206 movq 6*8(%rsp), %rbp
220 CFI_RESTORE rbp
221 addq $7*8, %rsp 207 addq $7*8, %rsp
222 CFI_ADJUST_CFA_OFFSET -7*8
223 ret 208 ret
224 CFI_RESTORE_STATE
225 209
226 /* Exception handlers. Very simple, zeroing is done in the wrappers */ 210 /* Exception handlers. Very simple, zeroing is done in the wrappers */
227.Lbad_source: 211.Lbad_source:
@@ -237,5 +221,4 @@ ENTRY(csum_partial_copy_generic)
237 jz .Lende 221 jz .Lende
238 movl $-EFAULT, (%rax) 222 movl $-EFAULT, (%rax)
239 jmp .Lende 223 jmp .Lende
240 CFI_ENDPROC
241ENDPROC(csum_partial_copy_generic) 224ENDPROC(csum_partial_copy_generic)
diff --git a/arch/x86/lib/getuser.S b/arch/x86/lib/getuser.S
index a4512359656a..46668cda4ffd 100644
--- a/arch/x86/lib/getuser.S
+++ b/arch/x86/lib/getuser.S
@@ -26,7 +26,6 @@
26 */ 26 */
27 27
28#include <linux/linkage.h> 28#include <linux/linkage.h>
29#include <asm/dwarf2.h>
30#include <asm/page_types.h> 29#include <asm/page_types.h>
31#include <asm/errno.h> 30#include <asm/errno.h>
32#include <asm/asm-offsets.h> 31#include <asm/asm-offsets.h>
@@ -36,7 +35,6 @@
36 35
37 .text 36 .text
38ENTRY(__get_user_1) 37ENTRY(__get_user_1)
39 CFI_STARTPROC
40 GET_THREAD_INFO(%_ASM_DX) 38 GET_THREAD_INFO(%_ASM_DX)
41 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX 39 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
42 jae bad_get_user 40 jae bad_get_user
@@ -45,11 +43,9 @@ ENTRY(__get_user_1)
45 xor %eax,%eax 43 xor %eax,%eax
46 ASM_CLAC 44 ASM_CLAC
47 ret 45 ret
48 CFI_ENDPROC
49ENDPROC(__get_user_1) 46ENDPROC(__get_user_1)
50 47
51ENTRY(__get_user_2) 48ENTRY(__get_user_2)
52 CFI_STARTPROC
53 add $1,%_ASM_AX 49 add $1,%_ASM_AX
54 jc bad_get_user 50 jc bad_get_user
55 GET_THREAD_INFO(%_ASM_DX) 51 GET_THREAD_INFO(%_ASM_DX)
@@ -60,11 +56,9 @@ ENTRY(__get_user_2)
60 xor %eax,%eax 56 xor %eax,%eax
61 ASM_CLAC 57 ASM_CLAC
62 ret 58 ret
63 CFI_ENDPROC
64ENDPROC(__get_user_2) 59ENDPROC(__get_user_2)
65 60
66ENTRY(__get_user_4) 61ENTRY(__get_user_4)
67 CFI_STARTPROC
68 add $3,%_ASM_AX 62 add $3,%_ASM_AX
69 jc bad_get_user 63 jc bad_get_user
70 GET_THREAD_INFO(%_ASM_DX) 64 GET_THREAD_INFO(%_ASM_DX)
@@ -75,11 +69,9 @@ ENTRY(__get_user_4)
75 xor %eax,%eax 69 xor %eax,%eax
76 ASM_CLAC 70 ASM_CLAC
77 ret 71 ret
78 CFI_ENDPROC
79ENDPROC(__get_user_4) 72ENDPROC(__get_user_4)
80 73
81ENTRY(__get_user_8) 74ENTRY(__get_user_8)
82 CFI_STARTPROC
83#ifdef CONFIG_X86_64 75#ifdef CONFIG_X86_64
84 add $7,%_ASM_AX 76 add $7,%_ASM_AX
85 jc bad_get_user 77 jc bad_get_user
@@ -104,28 +96,23 @@ ENTRY(__get_user_8)
104 ASM_CLAC 96 ASM_CLAC
105 ret 97 ret
106#endif 98#endif
107 CFI_ENDPROC
108ENDPROC(__get_user_8) 99ENDPROC(__get_user_8)
109 100
110 101
111bad_get_user: 102bad_get_user:
112 CFI_STARTPROC
113 xor %edx,%edx 103 xor %edx,%edx
114 mov $(-EFAULT),%_ASM_AX 104 mov $(-EFAULT),%_ASM_AX
115 ASM_CLAC 105 ASM_CLAC
116 ret 106 ret
117 CFI_ENDPROC
118END(bad_get_user) 107END(bad_get_user)
119 108
120#ifdef CONFIG_X86_32 109#ifdef CONFIG_X86_32
121bad_get_user_8: 110bad_get_user_8:
122 CFI_STARTPROC
123 xor %edx,%edx 111 xor %edx,%edx
124 xor %ecx,%ecx 112 xor %ecx,%ecx
125 mov $(-EFAULT),%_ASM_AX 113 mov $(-EFAULT),%_ASM_AX
126 ASM_CLAC 114 ASM_CLAC
127 ret 115 ret
128 CFI_ENDPROC
129END(bad_get_user_8) 116END(bad_get_user_8)
130#endif 117#endif
131 118
diff --git a/arch/x86/lib/iomap_copy_64.S b/arch/x86/lib/iomap_copy_64.S
index 05a95e713da8..33147fef3452 100644
--- a/arch/x86/lib/iomap_copy_64.S
+++ b/arch/x86/lib/iomap_copy_64.S
@@ -16,15 +16,12 @@
16 */ 16 */
17 17
18#include <linux/linkage.h> 18#include <linux/linkage.h>
19#include <asm/dwarf2.h>
20 19
21/* 20/*
22 * override generic version in lib/iomap_copy.c 21 * override generic version in lib/iomap_copy.c
23 */ 22 */
24ENTRY(__iowrite32_copy) 23ENTRY(__iowrite32_copy)
25 CFI_STARTPROC
26 movl %edx,%ecx 24 movl %edx,%ecx
27 rep movsd 25 rep movsd
28 ret 26 ret
29 CFI_ENDPROC
30ENDPROC(__iowrite32_copy) 27ENDPROC(__iowrite32_copy)
diff --git a/arch/x86/lib/memcpy_64.S b/arch/x86/lib/memcpy_64.S
index b046664f5a1c..16698bba87de 100644
--- a/arch/x86/lib/memcpy_64.S
+++ b/arch/x86/lib/memcpy_64.S
@@ -2,7 +2,6 @@
2 2
3#include <linux/linkage.h> 3#include <linux/linkage.h>
4#include <asm/cpufeature.h> 4#include <asm/cpufeature.h>
5#include <asm/dwarf2.h>
6#include <asm/alternative-asm.h> 5#include <asm/alternative-asm.h>
7 6
8/* 7/*
@@ -53,7 +52,6 @@ ENTRY(memcpy_erms)
53ENDPROC(memcpy_erms) 52ENDPROC(memcpy_erms)
54 53
55ENTRY(memcpy_orig) 54ENTRY(memcpy_orig)
56 CFI_STARTPROC
57 movq %rdi, %rax 55 movq %rdi, %rax
58 56
59 cmpq $0x20, %rdx 57 cmpq $0x20, %rdx
@@ -178,5 +176,4 @@ ENTRY(memcpy_orig)
178 176
179.Lend: 177.Lend:
180 retq 178 retq
181 CFI_ENDPROC
182ENDPROC(memcpy_orig) 179ENDPROC(memcpy_orig)
diff --git a/arch/x86/lib/memmove_64.S b/arch/x86/lib/memmove_64.S
index 0f8a0d0331b9..ca2afdd6d98e 100644
--- a/arch/x86/lib/memmove_64.S
+++ b/arch/x86/lib/memmove_64.S
@@ -6,7 +6,6 @@
6 * - Copyright 2011 Fenghua Yu <fenghua.yu@intel.com> 6 * - Copyright 2011 Fenghua Yu <fenghua.yu@intel.com>
7 */ 7 */
8#include <linux/linkage.h> 8#include <linux/linkage.h>
9#include <asm/dwarf2.h>
10#include <asm/cpufeature.h> 9#include <asm/cpufeature.h>
11#include <asm/alternative-asm.h> 10#include <asm/alternative-asm.h>
12 11
@@ -27,7 +26,6 @@
27 26
28ENTRY(memmove) 27ENTRY(memmove)
29ENTRY(__memmove) 28ENTRY(__memmove)
30 CFI_STARTPROC
31 29
32 /* Handle more 32 bytes in loop */ 30 /* Handle more 32 bytes in loop */
33 mov %rdi, %rax 31 mov %rdi, %rax
@@ -207,6 +205,5 @@ ENTRY(__memmove)
207 movb %r11b, (%rdi) 205 movb %r11b, (%rdi)
20813: 20613:
209 retq 207 retq
210 CFI_ENDPROC
211ENDPROC(__memmove) 208ENDPROC(__memmove)
212ENDPROC(memmove) 209ENDPROC(memmove)
diff --git a/arch/x86/lib/memset_64.S b/arch/x86/lib/memset_64.S
index 93118fb23976..2661fad05827 100644
--- a/arch/x86/lib/memset_64.S
+++ b/arch/x86/lib/memset_64.S
@@ -1,7 +1,6 @@
1/* Copyright 2002 Andi Kleen, SuSE Labs */ 1/* Copyright 2002 Andi Kleen, SuSE Labs */
2 2
3#include <linux/linkage.h> 3#include <linux/linkage.h>
4#include <asm/dwarf2.h>
5#include <asm/cpufeature.h> 4#include <asm/cpufeature.h>
6#include <asm/alternative-asm.h> 5#include <asm/alternative-asm.h>
7 6
@@ -66,7 +65,6 @@ ENTRY(memset_erms)
66ENDPROC(memset_erms) 65ENDPROC(memset_erms)
67 66
68ENTRY(memset_orig) 67ENTRY(memset_orig)
69 CFI_STARTPROC
70 movq %rdi,%r10 68 movq %rdi,%r10
71 69
72 /* expand byte value */ 70 /* expand byte value */
@@ -78,7 +76,6 @@ ENTRY(memset_orig)
78 movl %edi,%r9d 76 movl %edi,%r9d
79 andl $7,%r9d 77 andl $7,%r9d
80 jnz .Lbad_alignment 78 jnz .Lbad_alignment
81 CFI_REMEMBER_STATE
82.Lafter_bad_alignment: 79.Lafter_bad_alignment:
83 80
84 movq %rdx,%rcx 81 movq %rdx,%rcx
@@ -128,7 +125,6 @@ ENTRY(memset_orig)
128 movq %r10,%rax 125 movq %r10,%rax
129 ret 126 ret
130 127
131 CFI_RESTORE_STATE
132.Lbad_alignment: 128.Lbad_alignment:
133 cmpq $7,%rdx 129 cmpq $7,%rdx
134 jbe .Lhandle_7 130 jbe .Lhandle_7
@@ -139,5 +135,4 @@ ENTRY(memset_orig)
139 subq %r8,%rdx 135 subq %r8,%rdx
140 jmp .Lafter_bad_alignment 136 jmp .Lafter_bad_alignment
141.Lfinal: 137.Lfinal:
142 CFI_ENDPROC
143ENDPROC(memset_orig) 138ENDPROC(memset_orig)
diff --git a/arch/x86/lib/msr-reg.S b/arch/x86/lib/msr-reg.S
index 3ca5218fbece..c81556409bbb 100644
--- a/arch/x86/lib/msr-reg.S
+++ b/arch/x86/lib/msr-reg.S
@@ -1,6 +1,5 @@
1#include <linux/linkage.h> 1#include <linux/linkage.h>
2#include <linux/errno.h> 2#include <linux/errno.h>
3#include <asm/dwarf2.h>
4#include <asm/asm.h> 3#include <asm/asm.h>
5#include <asm/msr.h> 4#include <asm/msr.h>
6 5
@@ -13,9 +12,8 @@
13 */ 12 */
14.macro op_safe_regs op 13.macro op_safe_regs op
15ENTRY(\op\()_safe_regs) 14ENTRY(\op\()_safe_regs)
16 CFI_STARTPROC 15 pushq %rbx
17 pushq_cfi_reg rbx 16 pushq %rbp
18 pushq_cfi_reg rbp
19 movq %rdi, %r10 /* Save pointer */ 17 movq %rdi, %r10 /* Save pointer */
20 xorl %r11d, %r11d /* Return value */ 18 xorl %r11d, %r11d /* Return value */
21 movl (%rdi), %eax 19 movl (%rdi), %eax
@@ -25,7 +23,6 @@ ENTRY(\op\()_safe_regs)
25 movl 20(%rdi), %ebp 23 movl 20(%rdi), %ebp
26 movl 24(%rdi), %esi 24 movl 24(%rdi), %esi
27 movl 28(%rdi), %edi 25 movl 28(%rdi), %edi
28 CFI_REMEMBER_STATE
291: \op 261: \op
302: movl %eax, (%r10) 272: movl %eax, (%r10)
31 movl %r11d, %eax /* Return value */ 28 movl %r11d, %eax /* Return value */
@@ -35,16 +32,14 @@ ENTRY(\op\()_safe_regs)
35 movl %ebp, 20(%r10) 32 movl %ebp, 20(%r10)
36 movl %esi, 24(%r10) 33 movl %esi, 24(%r10)
37 movl %edi, 28(%r10) 34 movl %edi, 28(%r10)
38 popq_cfi_reg rbp 35 popq %rbp
39 popq_cfi_reg rbx 36 popq %rbx
40 ret 37 ret
413: 383:
42 CFI_RESTORE_STATE
43 movl $-EIO, %r11d 39 movl $-EIO, %r11d
44 jmp 2b 40 jmp 2b
45 41
46 _ASM_EXTABLE(1b, 3b) 42 _ASM_EXTABLE(1b, 3b)
47 CFI_ENDPROC
48ENDPROC(\op\()_safe_regs) 43ENDPROC(\op\()_safe_regs)
49.endm 44.endm
50 45
@@ -52,13 +47,12 @@ ENDPROC(\op\()_safe_regs)
52 47
53.macro op_safe_regs op 48.macro op_safe_regs op
54ENTRY(\op\()_safe_regs) 49ENTRY(\op\()_safe_regs)
55 CFI_STARTPROC 50 pushl %ebx
56 pushl_cfi_reg ebx 51 pushl %ebp
57 pushl_cfi_reg ebp 52 pushl %esi
58 pushl_cfi_reg esi 53 pushl %edi
59 pushl_cfi_reg edi 54 pushl $0 /* Return value */
60 pushl_cfi $0 /* Return value */ 55 pushl %eax
61 pushl_cfi %eax
62 movl 4(%eax), %ecx 56 movl 4(%eax), %ecx
63 movl 8(%eax), %edx 57 movl 8(%eax), %edx
64 movl 12(%eax), %ebx 58 movl 12(%eax), %ebx
@@ -66,32 +60,28 @@ ENTRY(\op\()_safe_regs)
66 movl 24(%eax), %esi 60 movl 24(%eax), %esi
67 movl 28(%eax), %edi 61 movl 28(%eax), %edi
68 movl (%eax), %eax 62 movl (%eax), %eax
69 CFI_REMEMBER_STATE
701: \op 631: \op
712: pushl_cfi %eax 642: pushl %eax
72 movl 4(%esp), %eax 65 movl 4(%esp), %eax
73 popl_cfi (%eax) 66 popl (%eax)
74 addl $4, %esp 67 addl $4, %esp
75 CFI_ADJUST_CFA_OFFSET -4
76 movl %ecx, 4(%eax) 68 movl %ecx, 4(%eax)
77 movl %edx, 8(%eax) 69 movl %edx, 8(%eax)
78 movl %ebx, 12(%eax) 70 movl %ebx, 12(%eax)
79 movl %ebp, 20(%eax) 71 movl %ebp, 20(%eax)
80 movl %esi, 24(%eax) 72 movl %esi, 24(%eax)
81 movl %edi, 28(%eax) 73 movl %edi, 28(%eax)
82 popl_cfi %eax 74 popl %eax
83 popl_cfi_reg edi 75 popl %edi
84 popl_cfi_reg esi 76 popl %esi
85 popl_cfi_reg ebp 77 popl %ebp
86 popl_cfi_reg ebx 78 popl %ebx
87 ret 79 ret
883: 803:
89 CFI_RESTORE_STATE
90 movl $-EIO, 4(%esp) 81 movl $-EIO, 4(%esp)
91 jmp 2b 82 jmp 2b
92 83
93 _ASM_EXTABLE(1b, 3b) 84 _ASM_EXTABLE(1b, 3b)
94 CFI_ENDPROC
95ENDPROC(\op\()_safe_regs) 85ENDPROC(\op\()_safe_regs)
96.endm 86.endm
97 87
diff --git a/arch/x86/lib/putuser.S b/arch/x86/lib/putuser.S
index fc6ba17a7eec..e0817a12d323 100644
--- a/arch/x86/lib/putuser.S
+++ b/arch/x86/lib/putuser.S
@@ -11,7 +11,6 @@
11 * return value. 11 * return value.
12 */ 12 */
13#include <linux/linkage.h> 13#include <linux/linkage.h>
14#include <asm/dwarf2.h>
15#include <asm/thread_info.h> 14#include <asm/thread_info.h>
16#include <asm/errno.h> 15#include <asm/errno.h>
17#include <asm/asm.h> 16#include <asm/asm.h>
@@ -30,11 +29,9 @@
30 * as they get called from within inline assembly. 29 * as they get called from within inline assembly.
31 */ 30 */
32 31
33#define ENTER CFI_STARTPROC ; \ 32#define ENTER GET_THREAD_INFO(%_ASM_BX)
34 GET_THREAD_INFO(%_ASM_BX)
35#define EXIT ASM_CLAC ; \ 33#define EXIT ASM_CLAC ; \
36 ret ; \ 34 ret
37 CFI_ENDPROC
38 35
39.text 36.text
40ENTRY(__put_user_1) 37ENTRY(__put_user_1)
@@ -87,7 +84,6 @@ ENTRY(__put_user_8)
87ENDPROC(__put_user_8) 84ENDPROC(__put_user_8)
88 85
89bad_put_user: 86bad_put_user:
90 CFI_STARTPROC
91 movl $-EFAULT,%eax 87 movl $-EFAULT,%eax
92 EXIT 88 EXIT
93END(bad_put_user) 89END(bad_put_user)
diff --git a/arch/x86/lib/rwsem.S b/arch/x86/lib/rwsem.S
index 2322abe4da3b..40027db99140 100644
--- a/arch/x86/lib/rwsem.S
+++ b/arch/x86/lib/rwsem.S
@@ -15,7 +15,6 @@
15 15
16#include <linux/linkage.h> 16#include <linux/linkage.h>
17#include <asm/alternative-asm.h> 17#include <asm/alternative-asm.h>
18#include <asm/dwarf2.h>
19 18
20#define __ASM_HALF_REG(reg) __ASM_SEL(reg, e##reg) 19#define __ASM_HALF_REG(reg) __ASM_SEL(reg, e##reg)
21#define __ASM_HALF_SIZE(inst) __ASM_SEL(inst##w, inst##l) 20#define __ASM_HALF_SIZE(inst) __ASM_SEL(inst##w, inst##l)
@@ -34,10 +33,10 @@
34 */ 33 */
35 34
36#define save_common_regs \ 35#define save_common_regs \
37 pushl_cfi_reg ecx 36 pushl %ecx
38 37
39#define restore_common_regs \ 38#define restore_common_regs \
40 popl_cfi_reg ecx 39 popl %ecx
41 40
42 /* Avoid uglifying the argument copying x86-64 needs to do. */ 41 /* Avoid uglifying the argument copying x86-64 needs to do. */
43 .macro movq src, dst 42 .macro movq src, dst
@@ -64,50 +63,45 @@
64 */ 63 */
65 64
66#define save_common_regs \ 65#define save_common_regs \
67 pushq_cfi_reg rdi; \ 66 pushq %rdi; \
68 pushq_cfi_reg rsi; \ 67 pushq %rsi; \
69 pushq_cfi_reg rcx; \ 68 pushq %rcx; \
70 pushq_cfi_reg r8; \ 69 pushq %r8; \
71 pushq_cfi_reg r9; \ 70 pushq %r9; \
72 pushq_cfi_reg r10; \ 71 pushq %r10; \
73 pushq_cfi_reg r11 72 pushq %r11
74 73
75#define restore_common_regs \ 74#define restore_common_regs \
76 popq_cfi_reg r11; \ 75 popq %r11; \
77 popq_cfi_reg r10; \ 76 popq %r10; \
78 popq_cfi_reg r9; \ 77 popq %r9; \
79 popq_cfi_reg r8; \ 78 popq %r8; \
80 popq_cfi_reg rcx; \ 79 popq %rcx; \
81 popq_cfi_reg rsi; \ 80 popq %rsi; \
82 popq_cfi_reg rdi 81 popq %rdi
83 82
84#endif 83#endif
85 84
86/* Fix up special calling conventions */ 85/* Fix up special calling conventions */
87ENTRY(call_rwsem_down_read_failed) 86ENTRY(call_rwsem_down_read_failed)
88 CFI_STARTPROC
89 save_common_regs 87 save_common_regs
90 __ASM_SIZE(push,_cfi_reg) __ASM_REG(dx) 88 __ASM_SIZE(push,) %__ASM_REG(dx)
91 movq %rax,%rdi 89 movq %rax,%rdi
92 call rwsem_down_read_failed 90 call rwsem_down_read_failed
93 __ASM_SIZE(pop,_cfi_reg) __ASM_REG(dx) 91 __ASM_SIZE(pop,) %__ASM_REG(dx)
94 restore_common_regs 92 restore_common_regs
95 ret 93 ret
96 CFI_ENDPROC
97ENDPROC(call_rwsem_down_read_failed) 94ENDPROC(call_rwsem_down_read_failed)
98 95
99ENTRY(call_rwsem_down_write_failed) 96ENTRY(call_rwsem_down_write_failed)
100 CFI_STARTPROC
101 save_common_regs 97 save_common_regs
102 movq %rax,%rdi 98 movq %rax,%rdi
103 call rwsem_down_write_failed 99 call rwsem_down_write_failed
104 restore_common_regs 100 restore_common_regs
105 ret 101 ret
106 CFI_ENDPROC
107ENDPROC(call_rwsem_down_write_failed) 102ENDPROC(call_rwsem_down_write_failed)
108 103
109ENTRY(call_rwsem_wake) 104ENTRY(call_rwsem_wake)
110 CFI_STARTPROC
111 /* do nothing if still outstanding active readers */ 105 /* do nothing if still outstanding active readers */
112 __ASM_HALF_SIZE(dec) %__ASM_HALF_REG(dx) 106 __ASM_HALF_SIZE(dec) %__ASM_HALF_REG(dx)
113 jnz 1f 107 jnz 1f
@@ -116,17 +110,14 @@ ENTRY(call_rwsem_wake)
116 call rwsem_wake 110 call rwsem_wake
117 restore_common_regs 111 restore_common_regs
1181: ret 1121: ret
119 CFI_ENDPROC
120ENDPROC(call_rwsem_wake) 113ENDPROC(call_rwsem_wake)
121 114
122ENTRY(call_rwsem_downgrade_wake) 115ENTRY(call_rwsem_downgrade_wake)
123 CFI_STARTPROC
124 save_common_regs 116 save_common_regs
125 __ASM_SIZE(push,_cfi_reg) __ASM_REG(dx) 117 __ASM_SIZE(push,) %__ASM_REG(dx)
126 movq %rax,%rdi 118 movq %rax,%rdi
127 call rwsem_downgrade_wake 119 call rwsem_downgrade_wake
128 __ASM_SIZE(pop,_cfi_reg) __ASM_REG(dx) 120 __ASM_SIZE(pop,) %__ASM_REG(dx)
129 restore_common_regs 121 restore_common_regs
130 ret 122 ret
131 CFI_ENDPROC
132ENDPROC(call_rwsem_downgrade_wake) 123ENDPROC(call_rwsem_downgrade_wake)
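The call_rwsem_* thunks keep the __ASM_SIZE()/__ASM_REG() construct and only drop the ,_cfi_reg suffix, so a single source line still assembles to the right push/pop for either word size. A rough sketch of how the line is assumed to resolve, given the usual __ASM_SEL() selection in <asm/asm.h> (illustration only):

	/*
	 * Assumed expansions:
	 *   __ASM_SIZE(push,) -> "pushl" on 32-bit kernels, "pushq" on 64-bit
	 *   __ASM_REG(dx)     -> "edx"   on 32-bit kernels, "rdx"   on 64-bit
	 */
	__ASM_SIZE(push,) %__ASM_REG(dx)	/* i.e. pushl %edx  or  pushq %rdx */
	movq %rax,%rdi				/* first C argument taken from %rax */
	call rwsem_down_read_failed
	__ASM_SIZE(pop,)  %__ASM_REG(dx)	/* i.e. popl %edx   or  popq %rdx */

On 32-bit builds the local movq compatibility macro earlier in the file maps the movq to movl, and as in the previous hunks the old ,_cfi_reg spelling merely appended DWARF annotations to the same push/pop, so behaviour is unchanged.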
diff --git a/arch/x86/net/bpf_jit.S b/arch/x86/net/bpf_jit.S
index 6440221ced0d..4093216b3791 100644
--- a/arch/x86/net/bpf_jit.S
+++ b/arch/x86/net/bpf_jit.S
@@ -8,7 +8,6 @@
8 * of the License. 8 * of the License.
9 */ 9 */
10#include <linux/linkage.h> 10#include <linux/linkage.h>
11#include <asm/dwarf2.h>
12 11
13/* 12/*
14 * Calling convention : 13 * Calling convention :
diff --git a/arch/x86/um/Makefile b/arch/x86/um/Makefile
index acb384d24669..a8fecc226946 100644
--- a/arch/x86/um/Makefile
+++ b/arch/x86/um/Makefile
@@ -26,7 +26,7 @@ else
26 26
27obj-y += syscalls_64.o vdso/ 27obj-y += syscalls_64.o vdso/
28 28
29subarch-y = ../lib/csum-partial_64.o ../lib/memcpy_64.o ../lib/thunk_64.o \ 29subarch-y = ../lib/csum-partial_64.o ../lib/memcpy_64.o ../entry/thunk_64.o \
30 ../lib/rwsem.o 30 ../lib/rwsem.o
31 31
32endif 32endif
diff --git a/arch/x86/xen/xen-asm_64.S b/arch/x86/xen/xen-asm_64.S
index 04529e620559..f22667abf7b9 100644
--- a/arch/x86/xen/xen-asm_64.S
+++ b/arch/x86/xen/xen-asm_64.S
@@ -114,7 +114,7 @@ RELOC(xen_sysret32, 1b+1)
114/* Normal 64-bit system call target */ 114/* Normal 64-bit system call target */
115ENTRY(xen_syscall_target) 115ENTRY(xen_syscall_target)
116 undo_xen_syscall 116 undo_xen_syscall
117 jmp system_call_after_swapgs 117 jmp entry_SYSCALL_64_after_swapgs
118ENDPROC(xen_syscall_target) 118ENDPROC(xen_syscall_target)
119 119
120#ifdef CONFIG_IA32_EMULATION 120#ifdef CONFIG_IA32_EMULATION
@@ -122,13 +122,13 @@ ENDPROC(xen_syscall_target)
122/* 32-bit compat syscall target */ 122/* 32-bit compat syscall target */
123ENTRY(xen_syscall32_target) 123ENTRY(xen_syscall32_target)
124 undo_xen_syscall 124 undo_xen_syscall
125 jmp ia32_cstar_target 125 jmp entry_SYSCALL_compat
126ENDPROC(xen_syscall32_target) 126ENDPROC(xen_syscall32_target)
127 127
128/* 32-bit compat sysenter target */ 128/* 32-bit compat sysenter target */
129ENTRY(xen_sysenter_target) 129ENTRY(xen_sysenter_target)
130 undo_xen_syscall 130 undo_xen_syscall
131 jmp ia32_sysenter_target 131 jmp entry_SYSENTER_compat
132ENDPROC(xen_sysenter_target) 132ENDPROC(xen_sysenter_target)
133 133
134#else /* !CONFIG_IA32_EMULATION */ 134#else /* !CONFIG_IA32_EMULATION */
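The xen-asm_64.S changes are pure retargeting: the Xen trampolines keep their own logic and simply follow the renamed native entry points. Reading the new-side column, the 64-bit syscall trampoline, for example, now reads:

	ENTRY(xen_syscall_target)
		undo_xen_syscall
		jmp entry_SYSCALL_64_after_swapgs	/* was: system_call_after_swapgs */
	ENDPROC(xen_syscall_target)

and the two compat trampolines likewise jump to entry_SYSCALL_compat (was ia32_cstar_target) and entry_SYSENTER_compat (was ia32_sysenter_target).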
diff --git a/scripts/checksyscalls.sh b/scripts/checksyscalls.sh
index 5b3add31f9f1..2c9082ba6137 100755
--- a/scripts/checksyscalls.sh
+++ b/scripts/checksyscalls.sh
@@ -212,5 +212,5 @@ EOF
212 ) 212 )
213} 213}
214 214
215(ignore_list && syscall_list $(dirname $0)/../arch/x86/syscalls/syscall_32.tbl) | \ 215(ignore_list && syscall_list $(dirname $0)/../arch/x86/entry/syscalls/syscall_32.tbl) | \
216$* -E -x c - > /dev/null 216$* -E -x c - > /dev/null